diff --git a/README.md b/README.md
index 51f7dbb9f..c781a44b9 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,10 @@ Qibocal key features:
 
 Qibocal documentation is available [here](https://qibo.science/qibocal/stable/).
 
+>[!NOTE]
+> Qibocal `main` contains some breaking changes compared to the `0.1` versions.
+> A small guide to make the transition as smooth as possible can be found [`here`](changes.md).
+
 ## Installation
 
 The package can be installed by source:
diff --git a/calibration_scripts/rx_calibration.py b/calibration_scripts/rx_calibration.py
index f71fd20f5..8ae09d17e 100644
--- a/calibration_scripts/rx_calibration.py
+++ b/calibration_scripts/rx_calibration.py
@@ -13,10 +13,10 @@
 e.platform.settings.nshots = 2000
 
 rabi_output = e.rabi_amplitude(
-    min_amp_factor=0.5,
-    max_amp_factor=1.5,
-    step_amp_factor=0.01,
-    pulse_length=e.platform.qubits[target].native_gates.RX.duration,
+    min_amp=0.0,
+    max_amp=2,
+    step_amp=0.01,
+    pulse_length=e.platform.natives.single_qubit[target].RX[0][1].duration,
 )
 # update only if chi2 is satisfied
 if rabi_output.results.chi2[target][0] > 2:
@@ -43,10 +43,10 @@
 ramsey_output.update_platform(e.platform)
 
 rabi_output_2 = e.rabi_amplitude(
-    min_amp_factor=0.5,
-    max_amp_factor=1.5,
-    step_amp_factor=0.01,
-    pulse_length=e.platform.qubits[target].native_gates.RX.duration,
+    min_amp=0,
+    max_amp=0.2,
+    step_amp=0.01,
+    pulse_length=e.platform.natives.single_qubit[target].RX[0][1].duration,
 )
 # update only if chi2 is satisfied
 if rabi_output_2.results.chi2[target][0] > 2:
@@ -61,10 +61,10 @@
 )
 
 rabi_output_3 = e.rabi_amplitude(
-    min_amp_factor=0.5,
-    max_amp_factor=1.5,
-    step_amp_factor=0.01,
-    pulse_length=e.platform.qubits[target].native_gates.RX.duration,
+    min_amp=0,
+    max_amp=0.2,
+    step_amp=0.01,
+    pulse_length=e.platform.natives.single_qubit[target].RX[0][1].duration,
 )
 # update only if chi2 is satisfied
 if rabi_output_3.results.chi2[target][0] > 2:
diff --git a/changes.md b/changes.md
new file mode 100644
index 000000000..cb90d7e36
--- /dev/null
+++ b/changes.md
@@ -0,0 +1,14 @@
+## Changes in `main`
+After merging [#990](https://github.com/qiboteam/qibocal/pull/990), Qibocal is now compatible with Qibolab 0.2.1.
+For this reason, a small internal refactoring and some breaking changes were required.
+The main differences concern the acquisition functions and the protocol parameters:
+- The amplitudes are no longer relative to the values defined in the platform, but to
+the maximum value the instruments can reach (internally stored in `Qibolab`).
+This implies renaming the amplitude parameters and converting the amplitude values accordingly.
+- The platform parameters that were previously in `parameters.json`
+but are required by `Qibolab` to execute circuits and pulse sequences (like $E_J$, $T_1$ and $T_2$)
+have been moved to the `calibration.json` stored inside each platform folder.
+- In the Rabi and flipping experiments, Qibocal now provides the possibility to calibrate the $RX(\pi/2)$ pulse.
+- Small changes in the report template.
+- Some protocols are no longer supported (see https://github.com/qiboteam/qibocal/pull/990#issue-2559341729
+ for a list of them).
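+
+As an illustration of the first point, a Rabi runcard written for the `0.1` series such as
+
+```yaml
+- id: Rabi amplitude
+  operation: rabi_amplitude
+  parameters:
+    min_amp_factor: 0.0
+    max_amp_factor: 1.5
+    step_amp_factor: 0.01
+```
+
+now has to specify absolute amplitudes. A sketch of the translation, assuming (purely as an
+example) that the old drive amplitude stored in the platform was 0.1, is obtained by
+multiplying the old factors by that value:
+
+```yaml
+- id: Rabi amplitude
+  operation: rabi_amplitude
+  parameters:
+    min_amp: 0.0
+    max_amp: 0.15
+    step_amp: 0.001
+```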
diff --git a/doc/source/getting-started/interface.rst b/doc/source/getting-started/interface.rst
index 2350dcc84..f88fc2430 100644
--- a/doc/source/getting-started/interface.rst
+++ b/doc/source/getting-started/interface.rst
@@ -105,7 +105,7 @@ This program will upload your report to the server and generate an unique URL.
 
 ``qq compare``
-^^^^^^^^^^^^^
+^^^^^^^^^^^^^^
 
 Using ``qq compare`` it is possible to compare together two ``Qibocal`` reports.
 
diff --git a/doc/source/protocols/avoided_crossing.png b/doc/source/protocols/avoided_crossing.png
deleted file mode 100644
index 94b1b9196..000000000
Binary files a/doc/source/protocols/avoided_crossing.png and /dev/null differ
diff --git a/doc/source/protocols/avoided_crossing.rst b/doc/source/protocols/avoided_crossing.rst
deleted file mode 100644
index 5ee12050d..000000000
--- a/doc/source/protocols/avoided_crossing.rst
+++ /dev/null
@@ -1,55 +0,0 @@
-Avoided crossing
-================
-
-In the avoided crossing experiment the goal is to study the qubit-flux dependency
-of a couple of qubits to precisely tune the interaction between them at specific
-frequencies in order to calibrate the CZ and the iSWAP gates.
-
-In the avoided crossing experiment for CZ qubit gates, the interaction between
-two qubits is controlled by tuning their energy levels such that the :math:`\ket{11}`
-(both qubits in the excited state) and :math:`\ket{02}` (one qubit in the ground state and
-the other in the second excited state) states come into resonance.
-At this resonance point, the energy levels of these states experience an avoided
-crossing, a key phenomenon that enables the controlled-Z (CZ) gate operation.
-By observing the avoided crossing, one can confirm that the coupling between the
-qubits is strong enough to facilitate the necessary interaction for the CZ gate.
-Hence, precise tuning of these states is essential for achieving the correct gate
-operation.
-
-In the avoided crossing experiment for iSWAP qubit gates, the key focus is on
-the interaction between the :math:`\ket{10}` and :math:`\ket{01}` states.
-When tuning the qubits' energy levels, these two states come into resonance,
-creating an avoided crossing, which is the fundamental operation of
-the iSWAP gate.
-
-In this protocol, for each qubit pair we execute a qubit flux dependency of the
-01 and 02 transitions on the qubit with higher frequency and we fit the data to
-find the flux-frequency relationship that we use to estimate the bias needed to
-reach the CZ and iSWAP interaction points.
-
-Parameters
-^^^^^^^^^^
-
-.. autoclass::
-    qibocal.protocols.flux_dependence.avoided_crossing.AvoidedCrossingParameters
-    :noindex:
-
-Example
-^^^^^^^
-
-It follows a runcard example of this experiment.
-
-.. code-block:: yaml
-
-    - id: avoided crossing
-      operation: avoided_crossing
-      parameters:
-        bias_step: 0.01
-        bias_width: 0.2
-        drive_amplitude: 0.5
-        freq_step: 500000
-        freq_width: 100000000
-
-The expected output is the following:
-
-.. image:: avoided_crossing.png
diff --git a/doc/source/protocols/chevron.rst b/doc/source/protocols/chevron.rst
index 9565cad43..d4a95cff3 100644
--- a/doc/source/protocols/chevron.rst
+++ b/doc/source/protocols/chevron.rst
@@ -1,6 +1,21 @@
 Chevron
 =======
 
+In order to implement a two-qubit gate in superconducting quantum computing, it is necessary to bring the two qubits near resonance using specific pulse sequences.
+The Chevron protocol implemented in Qibocal can be used to calibrate both CZ and iSWAP gates.
+
+The pulse sequence used to calibrate the iSWAP gate consists of a :math:`\pi` pulse followed by a flux pulse of varying amplitude and duration, applied to the qubit with the higher frequency in the pair.
+The initial :math:`\pi` pulse brings the qubit into the state :math:`\ket{1}`, while the flux pulse detunes its frequency near resonance with the second qubit. The implementation of the iSWAP gate leverages the avoided crossing between the states :math:`\ket{10}` and :math:`\ket{01}`.
+
+The expected population oscillation pattern follows:
+
+.. math::
+    p_e(t, \Delta) = \frac{\Delta^2}{\Delta^2 + 4g^2} + \frac{4g^2}{\Delta^2 + 4g^2}\cos^2\left(\frac{\sqrt{\Delta^2 + 4g^2}}{2}t\right)
+
+where :math:`\Delta=\omega_1 - \omega_2` is the detuning between the two qubits and :math:`g` is their coupling constant.
+
+The pulse sequence used to calibrate the CZ gate is the same as the one for the iSWAP gate, with the addition of an initial :math:`\pi` pulse applied to the qubit with the lower frequency, so that both qubits are initially prepared in the :math:`\ket{1}` state. With this sequence the CZ gate is implemented leveraging the avoided crossing between the states :math:`\ket{11}` and :math:`\ket{20}`.
+
 Parameters
 ^^^^^^^^^^
 
@@ -18,9 +33,9 @@ Below is an example runcard for this experiment.
   - id: chevron
     operation: chevron
     parameters:
-      amplitude_max_factor: 1.1
-      amplitude_min_factor: 0.9
-      amplitude_step_factor: 0.01
+      amplitude_max: 1.1
+      amplitude_min: 0.9
+      amplitude_step: 0.01
       duration_max: 51
       duration_min: 4
       duration_step: 2
@@ -28,3 +43,8 @@ Below is an example runcard for this experiment.
 The expected output is the following:
 
 .. image:: chevron.png
+
+The plot represents the probability of measuring qubit 1 in the excited state as a function of the flux pulse parameters.
+The characteristic Chevron-like shape of the plot appears as a consequence of the coupling-mediated interaction between the two qubits, which leads to population exchange.
+
+Before running the Chevron routine it may be useful to run a Cryoscope experiment in order to correct possible distortions in the flux pulse.
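+
+As a rough numerical illustration (this snippet is not part of Qibocal; it only assumes NumPy,
+and the coupling and detuning values are purely illustrative), the pattern can be evaluated
+directly from the formula above:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def excited_population(t, delta, g):
+        """Evaluate p_e(t, Delta) from the expression above (hbar = 1)."""
+        omega = np.sqrt(delta**2 + 4 * g**2)
+        return delta**2 / omega**2 + (4 * g**2 / omega**2) * np.cos(omega * t / 2) ** 2
+
+    g = 2 * np.pi * 5e6                                  # coupling constant [rad/s]
+    t = np.linspace(0, 200e-9, 201)                      # flux pulse durations [s]
+    delta = 2 * np.pi * np.linspace(-50e6, 50e6, 101)    # detunings [rad/s]
+    pattern = excited_population(t[None, :], delta[:, None], g)  # Chevron-shaped map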
diff --git a/doc/source/protocols/coupler/chevron.png b/doc/source/protocols/coupler/chevron.png
deleted file mode 100644
index 8cd3900e3..000000000
Binary files a/doc/source/protocols/coupler/chevron.png and /dev/null differ
diff --git a/doc/source/protocols/coupler/chevron.rst b/doc/source/protocols/coupler/chevron.rst
deleted file mode 100644
index 9c2579200..000000000
--- a/doc/source/protocols/coupler/chevron.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-Coupler Chevron
-===============
-
-Parameters
-^^^^^^^^^^
-
-.. autoclass::
-    qibocal.protocols.two_qubit_interaction.chevron.chevron.ChevronParameters
-    :noindex:
-
-
-Example
-^^^^^^^
-
-Below is an example runcard for this experiment.
-
-.. code-block:: yaml
-
-    - id: coupler chevron
-      operation: coupler_chevron
-      parameters:
-        amplitude_max_factor: 1.5
-        amplitude_min_factor: 0.4
-        amplitude_step_factor: 0.005
-        duration_max: 100
-        duration_min: 10
-        duration_step: 2
-        native: CZ
-        nshots: 256
-        relaxation_time: 100000
-
-
-The expected output is the following:
-
-.. image:: chevron.png
diff --git a/doc/source/protocols/cpmg/cpmg.png b/doc/source/protocols/cpmg/cpmg.png
new file mode 100644
index 000000000..eaf7c175a
Binary files /dev/null and b/doc/source/protocols/cpmg/cpmg.png differ
diff --git a/doc/source/protocols/cpmg/cpmg.rst b/doc/source/protocols/cpmg/cpmg.rst
new file mode 100644
index 000000000..bde28739c
--- /dev/null
+++ b/doc/source/protocols/cpmg/cpmg.rst
@@ -0,0 +1,51 @@
+CPMG sequence
+=============
+
+In this section we show how to run the dynamical decoupling sequence CPMG.
+
+The CPMG sequence consists of applying N equally spaced :math:`\pi` pulses
+between two :math:`\pi / 2` pulses. By increasing the number of :math:`\pi` pulses, the measured :math:`T_2`
+should increase, since the estimate becomes less sensitive to :math:`1/f`-type noise,
+eventually approaching the :math:`2 T_1` limit.
+
+
+The fit is again a damped exponential of the following form:
+
+.. math::
+
+    p_e(t) = A + B e^{ - t / T^{(N)}_2}
+
+
+Parameters
+^^^^^^^^^^
+
+.. autoclass:: qibocal.protocols.coherence.cpmg.CpmgParameters
+   :noindex:
+
+Example
+^^^^^^^
+
+A possible runcard to launch a CPMG experiment could be the following:
+
+.. code-block:: yaml
+
+    - id: CPMG
+      operation: cpmg
+      parameters:
+        delay_between_pulses_end: 100000
+        delay_between_pulses_start: 4
+        delay_between_pulses_step: 1000
+        n: 10
+        nshots: 1000
+
+The expected output is the following:
+
+.. image:: cpmg.png
+
+:math:`T_2` is determined by fitting the output signal using
+the formula presented above.
+
+Requirements
+^^^^^^^^^^^^
+
+- :ref:`single-shot`
diff --git a/doc/source/protocols/drag/drag.rst b/doc/source/protocols/drag/drag.rst
new file mode 100644
index 000000000..f05fdb6ba
--- /dev/null
+++ b/doc/source/protocols/drag/drag.rst
@@ -0,0 +1,94 @@
+DRAG experiments
+================
+
+In this section we show how to run DRAG experiments using Qibocal.
+
+.. _drag:
+
+
+DRAG pulses :cite:p:`Motzoi_2009, Gambetta_2011` can be used to lower both phase and leakage errors.
+The correction consists of adding a quadrature component to the pulse which is proportional
+to the time derivative of the in-phase component. Given a pulse with in-phase component :math:`\Omega_x`,
+the quadrature component :math:`\Omega_y` is evaluated as
+
+.. math::
+
+    \Omega_y (t) = \beta \frac{d\Omega_x}{dt} ,
+
+where :math:`\beta` is a scaling parameter.
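+
+As a minimal sketch (not Qibocal code; it assumes NumPy and purely illustrative pulse
+parameters), for a Gaussian in-phase envelope the quadrature correction is simply:
+
+.. code-block:: python
+
+    import numpy as np
+
+    duration, sigma, beta = 40e-9, 10e-9, 0.02       # illustrative values
+    t = np.linspace(0, duration, 401)
+    omega_x = np.exp(-((t - duration / 2) ** 2) / (2 * sigma**2))  # in-phase component
+    omega_y = beta * np.gradient(omega_x, t)         # Omega_y = beta * dOmega_x/dt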
+
+Qibocal provides two separate protocols to calibrate :math:`\beta`.
+
+
+Method 1
+--------
+
+:math:`\beta` can be extracted by playing the pulse sequence composed of
+:math:`[R_X(\pi) - R_X(-\pi)]^N` for different values of :math:`\beta`, as shown in :cite:p:`Sheldon_2016`.
+The post-processing consists of measuring the probability of :math:`\ket{0}` for every :math:`\beta`
+and fitting the curve with a cosine. The correct :math:`\beta` value is the one which maximizes
+the curve.
+
+Parameters
+^^^^^^^^^^
+
+.. autoclass:: qibocal.protocols.drag.DragTuningParameters
+   :noindex:
+
+Example
+^^^^^^^
+
+.. code-block:: yaml
+
+    - id: drag tuning
+      operation: drag_tuning
+      parameters:
+        beta_start: -1
+        beta_end: 1
+        beta_step: 0.1
+        nflips: 5
+        unrolling: true
+
+
+Running this protocol you should get something like this:
+
+.. image:: drag_tuning.png
+
+
+Method 2
+--------
+
+The second method consists of playing two different sequences,
+:math:`R_Y(\pi) R_X(\pi/2)` and :math:`R_X(\pi) R_Y(\pi/2)`. These are two
+of the AllXY sequences which exhibit opposite signs of phase error, as highlighted
+in :cite:p:`reed2013entanglementquantumerrorcorrection`.
+The post-processing consists of measuring the probability of :math:`\ket{1}` for every :math:`\beta`
+and performing a linear fit for both sequences. The correct :math:`\beta` value is the one where the two lines
+cross.
+
+Parameters
+^^^^^^^^^^
+
+.. autoclass:: qibocal.protocols.drag_simple.DragTuningSimpleParameters
+   :noindex:
+
+Example
+^^^^^^^
+
+.. code-block:: yaml
+
+    - id: drag simple
+      operation: drag_simple
+      parameters:
+        beta_start: -1
+        beta_end: 1
+        beta_step: 0.1
+        unrolling: true
+
+.. image:: drag_simple.png
+
+
+Requirements
+^^^^^^^^^^^^
+
+- :ref:`single-shot`
diff --git a/doc/source/protocols/drag/drag_simple.png b/doc/source/protocols/drag/drag_simple.png
new file mode 100644
index 000000000..8cdb0c3df
Binary files /dev/null and b/doc/source/protocols/drag/drag_simple.png differ
diff --git a/doc/source/protocols/drag/drag_tuning.png b/doc/source/protocols/drag/drag_tuning.png
new file mode 100644
index 000000000..5077f1cf4
Binary files /dev/null and b/doc/source/protocols/drag/drag_tuning.png differ
diff --git a/doc/source/protocols/flipping.rst b/doc/source/protocols/flipping.rst
index a83a7ea5e..0ac31d154 100644
--- a/doc/source/protocols/flipping.rst
+++ b/doc/source/protocols/flipping.rst
@@ -1,8 +1,8 @@
 Flipping
 ========
 
-The flipping experiment corrects the amplitude in the qubit drive pulse. In this experiment,
-we applying an :math:`R_x(\pi/2)` rotation followed by :math:`N` flips (two :math:`R_x(\pi)` rotations)
+The flipping experiment corrects the amplitude in the qubit drive pulse for :math:`R_x(\pi)` rotations. In this experiment,
+we apply an :math:`R_x(\pi/2)` rotation followed by :math:`N` flips (two :math:`R_x(\pi)` rotations)
 and we measure the qubit state. The first :math:`R_x(\pi/2)` is necessary to discriminate
 the over rotations and under rotations of the :math:`R_x(\pi)` pulse: without it
 the difference between the two cases is just a global phase, i.e., the
@@ -10,7 +10,11 @@ probabilities are the same. With the :math:`R_x(\pi/2)` pulse, in case of under
 after the initial flip, in the over rotations one the final state will be closer to :math:`\ket{1}`.
 By fitting the resulting data with a sinusoidal function, we can determine
 the delta amplitude, which allows us to refine the
-:math:`\pi` pulse amplitue.
+:math:`\pi` pulse amplitude.
+
+We also implemented a version of the flipping protocol to calibrate the drive pulse amplitude of the :math:`R_x(\pi/2)` rotation;
+in this case, each :math:`R_x(\pi)` rotation is replaced by two :math:`R_x(\pi/2)` rotations.
+The main reasons for fine tuning the :math:`R_x(\pi/2)` rotation are explained in :ref:`rabi`.
 
 Parameters
 ^^^^^^^^^^
@@ -24,30 +28,28 @@ It follows a runcard example of this experiment.
 
 .. code-block:: yaml
 
-    - id: flipping
-      operation: flipping
-      parameters:
-        detuning: 0.05
-        nflips_max: 30
-        nflips_step: 1
+  - id: flipping
+    operation: flipping
+    parameters:
+      delta_amplitude: 0.05
+      nflips_max: 30
+      nflips_step: 1
 
 The expected output is the following:
 
 .. image:: flipping.png
 
-Qibocal provides also a "signal" version of this routine, it follows a possible runcard
-with its report.
+If the same experiment is run with ``rx90: True``, the flipping experiment calibrates the amplitude of the :math:`R_x(\pi/2)` rotation:
 
 .. code-block:: yaml
 
   - id: flipping
-    operation: flipping_signal
+    operation: flipping
     parameters:
-      detuning: -0.5
-      nflips_max: 20
+      delta_amplitude: 0.05
+      nflips_max: 30
       nflips_step: 1
-
-.. 
image:: flipping_signal.png + rx90: True Requirements ^^^^^^^^^^^^ diff --git a/doc/source/protocols/flipping_signal.png b/doc/source/protocols/flipping_signal.png deleted file mode 100644 index 9427aa7fa..000000000 Binary files a/doc/source/protocols/flipping_signal.png and /dev/null differ diff --git a/doc/source/protocols/flux/crosstalk.rst b/doc/source/protocols/flux/crosstalk.rst index dc5226c58..32a1bbd6c 100644 --- a/doc/source/protocols/flux/crosstalk.rst +++ b/doc/source/protocols/flux/crosstalk.rst @@ -62,50 +62,3 @@ Requirements ^^^^^^^^^^^^ - :ref:`qubit_flux` - -.. _resonator_crosstalk: - -Resonator crosstalk -------------------- - -In a similar fashion it is possible to repeat the previous experiment -by sweeping the readout frequency. Note that in this case it will be -necessary to bias the qubit away from its sweetspot more to observe -significant variations. - -Parameters -^^^^^^^^^^ - -.. autoclass:: qibocal.protocols.flux_dependence.resonator_crosstalk.ResCrosstalkParameters - :noindex: - -Example -^^^^^^^ - -.. code-block:: yaml - - - id: resonator crosstalk - operation: resonator_crosstalk - targets: [2] - parameters: - bias_point: - 2: 0.5 - bias_step: 0.01 - bias_width: 0.4 - flux_qubits: [0, 3] - freq_step: 100000 - freq_width: 6000000 - nshots: 2000 - -.. image:: resonator_crosstalk.png - -As we can see, even by biasing the qubit away from its sweetspot we are not able to see -a dependence ( a deviation from the straight line) but only a shift. - -The protocols aims at extracting the crosstalk coefficients -:math:`C_{20}` and :math:`C_{23}`. - -Requirements -^^^^^^^^^^^^ - -- :ref:`resonator_flux` diff --git a/doc/source/protocols/flux/resonator_crosstalk.png b/doc/source/protocols/flux/resonator_crosstalk.png deleted file mode 100644 index 1024aaa93..000000000 Binary files a/doc/source/protocols/flux/resonator_crosstalk.png and /dev/null differ diff --git a/doc/source/protocols/flux/single.rst b/doc/source/protocols/flux/single.rst index 9e1746722..ce77c4219 100644 --- a/doc/source/protocols/flux/single.rst +++ b/doc/source/protocols/flux/single.rst @@ -127,12 +127,6 @@ A possible runcard to assess how the resonator frequency changes by varying flux relaxation_time: 1000 -From this protocol it is possible to extract both the bare and the dressed resonator frequency -as well as an estimate for the coupling :math:`g_0`. -It is suggested to run this protocol only after executing the qubit flux dependence experiment -since some of the coefficients required can be computed with that experiment. - - .. image:: resonator_flux.png Requirements @@ -140,41 +134,3 @@ Requirements - :ref:`resonator_punchout` - :ref:`qubit_flux` - -Qubit flux dependence (tracking) --------------------------------- - -As we saw above both the resonator and the qubit are affected by an external flux. -In the qubit flux dependence the measurement is performed at fixed readout frequency. -To take into account how the transmon shifts in frequency when probing the qubit we have -another experiment `qubit_flux_tracking`. - -Parameters -^^^^^^^^^^ - -.. autoclass:: qibocal.protocols.flux_dependence.qubit_flux_tracking.QubitFluxTrackParameters - :noindex: - -Example -^^^^^^^ - -Here is a possible runcard: - -.. 
code-block:: yaml
-
-    - id: qubit flux dependence
-      operation: qubit_flux_tracking
-      parameters:
-        bias_step: 0.001
-        bias_width: 0.05
-        drive_amplitude: 0.002
-        drive_duration: 4000
-        freq_step: 200000
-        freq_width: 10000000
-        nshots: 1024
-        relaxation_time: 20000
-
-Requirements
-^^^^^^^^^^^^
-
-- :ref:`resonator_flux`
diff --git a/doc/source/protocols/index.rst b/doc/source/protocols/index.rst
index 565dc8293..6705a7215 100644
--- a/doc/source/protocols/index.rst
+++ b/doc/source/protocols/index.rst
@@ -1,3 +1,5 @@
+.. _calibration_routines:
+
 Protocols
 =========
 
@@ -21,18 +23,18 @@ In this section we introduce the basics of all protocols supported by ``qibocal`
     t1/t1
     t2/t2
     t2_echo/t2_echo
+    cpmg/cpmg
     flux/single
     flux/crosstalk
     singleshot
     dispersive_shift
     allxy
     flipping
+    drag/drag
     readout_mitigation_matrix
-    avoided_crossing
     readout_optimization
     standard_rb
     chevron
     virtual_z
     state_tomographies
-    coupler/chevron
     references
diff --git a/doc/source/protocols/qubit_spectroscopy/qubit_spectroscopy.rst b/doc/source/protocols/qubit_spectroscopy/qubit_spectroscopy.rst
index b891ed095..e81cb81ee 100644
--- a/doc/source/protocols/qubit_spectroscopy/qubit_spectroscopy.rst
+++ b/doc/source/protocols/qubit_spectroscopy/qubit_spectroscopy.rst
@@ -53,10 +53,6 @@ Here is the corresponding plot:
 
 To extract the qubit frequency a Lorentzian fit is performed.
 
-After the post-processing the following parameters will be updated:
-
-* qubit.drive_frequency
-* qubit.native_gates.RX.frequency
 
 Requirements
 ^^^^^^^^^^^^
diff --git a/doc/source/protocols/rabi/rabi.rst b/doc/source/protocols/rabi/rabi.rst
index 5a3de5c1e..a1b096675 100644
--- a/doc/source/protocols/rabi/rabi.rst
+++ b/doc/source/protocols/rabi/rabi.rst
@@ -29,6 +29,17 @@ Rabi rate is larger than the decay and the pure dephasing rate,
 
 where :math:`\Omega_R` is the Rabi frequency and :math:`\tau` the decay time.
 
+
+Since many routines and protocols in quantum computing are based on :math:`R_x(\pi/2)` rotations, Qibocal also implements
+another version of the Rabi experiment, which can be used to tune the amplitude (duration) of the drive pulse in order
+to excite the qubit from the ground state up to the state :math:`\frac{\ket{0}-i\ket{1}}{\sqrt{2}}`.
+
+Calibrating an :math:`R_x(\pi/2)` rotation as a native gate removes the errors that could arise from assuming that the :math:`R_x(\pi/2)` amplitude (duration)
+is exactly half that of the :math:`R_x(\pi)` amplitude (duration). This assumption presumes a perfectly linear response of the qubit to the drive pulse, which is
+often not the case due to nonlinearities in the qubit's response or imperfections in the pulse shaping :cite:p:`Chen2018MetrologyOQ`.
+
+In this case the pulse sequence is the same as before, the only difference being that instead of a single :math:`R_x(\pi)` pulse we use two concatenated :math:`R_x(\pi/2)` pulses.
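+
+As a toy illustration of why this matters (not Qibocal code; the cubic coefficient below is
+purely illustrative), assume the rotation angle responds slightly nonlinearly to the drive
+amplitude:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # theta(a) = c1 * a + c3 * a**3, with c1 chosen so that a = 0.1 gives exactly pi
+    c3 = -20.0
+    c1 = (np.pi - c3 * 0.1**3) / 0.1
+
+    def angle(a):
+        return c1 * a + c3 * a**3
+
+    print(angle(0.1) / np.pi)         # 1.0: a = 0.1 is a calibrated pi pulse
+    print(angle(0.05) / (np.pi / 2))  # != 1.0: half the pi amplitude is not a pi/2 pulse
+
+Calibrating the :math:`R_x(\pi/2)` amplitude directly avoids relying on this linearity assumption.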
+
 Parameters
 ^^^^^^^^^^
 
@@ -49,9 +60,9 @@ It follows an example of the experiment parameters.
   - id: Rabi amplitude
     operation: rabi_amplitude
     parameters:
-      min_amp_factor: 0.1
-      max_amp_factor: 1.
-      step_amp_factor: 0.01
+      min_amp: 0.1
+      max_amp: 1.
+      step_amp: 0.01
       pulse_length: 40
       nshots: 3000
       relaxation_time: 50000
@@ -94,15 +105,38 @@ It follows an example runcard and plot for the signal exepriment
   - id: Rabi signal
     operation: rabi_amplitude_signal
     parameters:
-      min_amp_factor: 0.2
-      max_amp_factor: 1.
-      step_amp_factor: 0.01
+      min_amp: 0.2
+      max_amp: 1.
+      step_amp: 0.01
       pulse_length: 40
       nshots: 3000
       relaxation_time: 50000
 
 .. image:: rabi_signal.png
 
+In all the previous examples we ran Rabi experiments to calibrate the amplitude (duration) of the drive pulse
+that excites the qubit from the ground state up to the state :math:`\ket{1}`.
+All these example runcards can be modified to calibrate the amplitude (duration) of the drive pulse
+that excites the qubit from the ground state up to the state :math:`\frac{\ket{0}-i\ket{1}}{\sqrt{2}}` by simply setting the ``rx90`` parameter to ``True``.
+
+In the following we show an example runcard for the amplitude calibration of the :math:`R_x(\pi/2)` pulse.
+
+.. code-block:: yaml
+
+
+  - id: Rabi signal
+    operation: rabi_amplitude_signal
+    parameters:
+      min_amp: 0.01
+      max_amp: 0.16
+      step_amp: 0.002
+      pulse_length: 40
+      nshots: 1024
+      relaxation_time: 50000
+      rx90: True
+
+.. image:: rabi_amplitude_rx90.png
+
 Requirements
 ^^^^^^^^^^^^
 - :ref:`qubit-spectroscopy`
@@ -125,9 +159,9 @@ It follows an example of runcard and a generated report.
   - id: Rabi ef
     operation: rabi_amplitude_ef
     parameters:
-      min_amp_factor: 0.2
-      max_amp_factor: 1.
-      step_amp_factor: 0.01
+      min_amp: 0.2
+      max_amp: 1.
+      step_amp: 0.01
       pulse_length: 400
       nshots: 3000
       relaxation_time: 50000
diff --git a/doc/source/protocols/rabi/rabi_amplitude_rx90.png b/doc/source/protocols/rabi/rabi_amplitude_rx90.png
new file mode 100644
index 000000000..ac6c53ebd
Binary files /dev/null and b/doc/source/protocols/rabi/rabi_amplitude_rx90.png differ
diff --git a/doc/source/protocols/resonator_punchout.rst b/doc/source/protocols/resonator_punchout.rst
index f910667e3..9c6bc1c76 100644
--- a/doc/source/protocols/resonator_punchout.rst
+++ b/doc/source/protocols/resonator_punchout.rst
@@ -36,10 +36,9 @@ Example
     parameters:
       freq_width: 40_000_000
       freq_step: 500_000
-      amplitude: 0.03
-      min_amp_factor: 0.1
-      max_amp_factor: 2.4
-      step_amp_factor: 0.3
+      min_amp: 0.1
+      max_amp: 2.4
+      step_amp: 0.3
       nshots: 2048
       relaxation_time: 5000
 
@@ -106,35 +105,9 @@ approximate value, that can later be used to check, in other experiments, that w
 e are not exciting the qubit by error (if we see a change in amplitude, then maybe
 the qubit state has changed).
 
-It is also possible to run the punchout experiment with attenuation:
-
-Parameters
-^^^^^^^^^^
-
-.. autoclass:: qibocal.protocols.resonator_punchout_attenuation.ResonatorPunchoutAttenuationParameters
-   :noindex:
-
-Example
-^^^^^^^
-
-.. code-block:: yaml
-
-    - id: resonator_punchout_attenuation
-
-      operation: resonator_punchout_attenuation
-      parameters:
-        freq_width: 10_000_000
-        freq_step: 500_000
-        max_att: 60
-        min_att: 4
-        nshots: 1000
-        step_att: 4
-        nshots: 2048
-
-.. image:: resonator_punchout_attenuation.png
-
 Requirements
 ^^^^^^^^^^^^
 
 - :ref:`Time Of Flight`
 - :ref:`resonator_spectroscopy` (high power)
diff --git a/doc/source/protocols/resonator_punchout_attenuation.png b/doc/source/protocols/resonator_punchout_attenuation.png
deleted file mode 100644
index 6a0579599..000000000
Binary files a/doc/source/protocols/resonator_punchout_attenuation.png and /dev/null differ
diff --git a/doc/source/protocols/signal/time_of_flight.rst b/doc/source/protocols/signal/time_of_flight.rst
index 0d2561b60..8fc93c626 100644
--- a/doc/source/protocols/signal/time_of_flight.rst
+++ b/doc/source/protocols/signal/time_of_flight.rst
@@ -37,7 +37,7 @@ Acquisition
 
 .. 
testcode:: :hide: - from qibolab.execution_parameters import AcquisitionType + from qibolab import AcquisitionType It is important to note that this experiment makes use of the RAW acquisition mode (see `Qibolab documentation `_), which may require some specific care depending on the instrument employed (for some devices demodulation could be used, or this mode could be available for just a single qubit at a time). diff --git a/doc/source/protocols/t1/t1.rst b/doc/source/protocols/t1/t1.rst index b051c26ba..28c6ccb31 100644 --- a/doc/source/protocols/t1/t1.rst +++ b/doc/source/protocols/t1/t1.rst @@ -99,36 +99,3 @@ Requirements ^^^^^^^^^^^^ - :ref:`rabi` - -T1 with for loops ------------------ - -If the instrument driver is not able to perform duration sweepers in real time, we provide -a protocol where the different waiting time are swept through a python for loop. Such -execution will be usually slower compared to the one where sweepers are supported but it -could be useful for debugging purposes. - - -Here is a possible runcard: - -.. code-block:: yaml - - - id: T1 with for loops - operation: t1_sequences - parameters: - delay_before_readout_end: 100008 - delay_before_readout_start: 16 - delay_before_readout_step: 1000 - nshots: 2048 - - -.. note:: - - For ``t1_sequences`` on the y-axis it will be shown the raw output from instruments, - not the population of :math:`\ket{1}`. - - -Requirements -^^^^^^^^^^^^ - -- :ref:`rabi` diff --git a/doc/source/protocols/t2/t2.rst b/doc/source/protocols/t2/t2.rst index 92da06b35..32c7bc694 100644 --- a/doc/source/protocols/t2/t2.rst +++ b/doc/source/protocols/t2/t2.rst @@ -93,30 +93,3 @@ Requirements ^^^^^^^^^^^^ - :ref:`rabi` - -T2 with for loops ------------------ - -If the instrument driver is not able to perform duration sweepers in real time, we provide -a protocol where the different waiting time are swept through a python for loop. Such -execution will be usually slower compared to the one where sweepers are supported but it -could be useful for debugging purposes. - -Here is a possible runcard: - -.. code-block:: yaml - - - id: T2 with for loops - operation: t2_sequences - parameters: - delay_between_pulses_end: 200000 - delay_between_pulses_start: 4 - delay_between_pulses_step: 4000 - nshots: 5000 - relaxation_time: 300000 - - -.. note:: - - For ``t2_sequences`` on the y-axis it will be shown the raw output from instruments, - not the population of :math:`\ket{1}`. diff --git a/doc/source/refs.bib b/doc/source/refs.bib index b971ff08d..a6fb06f20 100644 --- a/doc/source/refs.bib +++ b/doc/source/refs.bib @@ -190,3 +190,53 @@ @article{Nielsen_2002 author={Nielsen, Michael A}, year={2002}, month=oct, pages={249–252} } + +@article{Motzoi_2009, + title={Simple Pulses for Elimination of Leakage in Weakly Nonlinear Qubits}, + volume={103}, + ISSN={1079-7114}, + url={http://dx.doi.org/10.1103/PhysRevLett.103.110501}, + DOI={10.1103/physrevlett.103.110501}, + number={11}, + journal={Physical Review Letters}, + publisher={American Physical Society (APS)}, + author={Motzoi, F. and Gambetta, J. M. and Rebentrost, P. and Wilhelm, F. K.}, + year={2009}, + month=sep } + + +@article{Gambetta_2011, + title={Analytic control methods for high-fidelity unitary operations in a weakly nonlinear oscillator}, + volume={83}, + ISSN={1094-1622}, + url={http://dx.doi.org/10.1103/PhysRevA.83.012308}, + DOI={10.1103/physreva.83.012308}, + number={1}, + journal={Physical Review A}, + publisher={American Physical Society (APS)}, + author={Gambetta, J. M. and Motzoi, F. 
and Merkel, S. T. and Wilhelm, F. K.}, + year={2011}, + month=jan } + +@article{Sheldon_2016, + title={Characterizing errors on qubit operations via iterative randomized benchmarking}, + volume={93}, + ISSN={2469-9934}, + url={http://dx.doi.org/10.1103/PhysRevA.93.012301}, + DOI={10.1103/physreva.93.012301}, + number={1}, + journal={Physical Review A}, + publisher={American Physical Society (APS)}, + author={Sheldon, Sarah and Bishop, Lev S. and Magesan, Easwar and Filipp, Stefan and Chow, Jerry M. and Gambetta, Jay M.}, + year={2016}, + month=jan } + +@misc{reed2013entanglementquantumerrorcorrection, + title={Entanglement and Quantum Error Correction with Superconducting Qubits}, + author={Matthew Reed}, + year={2013}, + eprint={1311.6759}, + archivePrefix={arXiv}, + primaryClass={quant-ph}, + url={https://arxiv.org/abs/1311.6759}, +} diff --git a/doc/source/tutorials/advanced.rst b/doc/source/tutorials/advanced.rst index 45a43bdc1..ac150a88a 100644 --- a/doc/source/tutorials/advanced.rst +++ b/doc/source/tutorials/advanced.rst @@ -167,8 +167,8 @@ In the acquisition function we are going to perform the experiment. .. code-block:: python - from qibolab.platform import Platform - from qibolab.qubits import QubitId, QubitPairId + from qibolab import Platform + from qibocal.auto.operation import QubitId, QubitPairId from typing import Union def acquisition(params: RoutineParameters, platform: Platform, targets: Union[list[QubitId], list[QubitPairId], list[list[QubitId]]]) -> RoutineData @@ -177,8 +177,8 @@ In the acquisition function we are going to perform the experiment. .. code-block:: python - from qibolab.platform import Platform - from qibolab.qubits import QubitId + from qibolab import Platform + from qibocal.auto.operation import QubitId def acquisition( params: RotationParameters, @@ -229,7 +229,7 @@ parameters for each qubit. .. code-block:: python - from qibolab.qubits import QubitId + from qibocal.auto.operation import QubitId @dataclass class RotationResults(Results): @@ -321,7 +321,7 @@ Here is the plotting function for the protocol that we are coding: .. code-block:: python import plotly.graph_objects as go - from qibolab.qubits import QubitId + from qibocal.auto.operation import QubitId def plot(data: RotationData, fit: RotationResults, target: QubitId): """Plotting function for rotation.""" diff --git a/doc/source/tutorials/basic.rst b/doc/source/tutorials/basic.rst index 3f2fb1c4f..c6087b06f 100644 --- a/doc/source/tutorials/basic.rst +++ b/doc/source/tutorials/basic.rst @@ -56,7 +56,7 @@ user should make sure to specify an amplitude value sufficiently large. It is then possible to visualize a report included in the output folder. -.. image:: ../protocols/resonator_spectroscopy_high.png +.. image:: ../protocols/resonator_spectroscopy/resonator_spectroscopy_high.png The expected signal is a lorentzian centered around the bare frequency of the resonator. @@ -74,9 +74,9 @@ power we observe this shift it is possible to run a resonator punchout using the freq_width: 40_000_000 freq_step: 500_000 amplitude: 0.03 - min_amp_factor: 0.1 - max_amp_factor: 2.4 - step_amp_factor: 0.3 + min_amp: 0.1 + max_amp: 2.4 + step_amp: 0.3 nshots: 2048 relaxation_time: 5000 @@ -111,7 +111,7 @@ Note that in this case we changed the ``power_level`` entry from ``high`` to ``low``, this keyword is used by qibocal to upgrade correctly the QPU parameters depending on the power regime. -.. image:: ../protocols/resonator_spectroscopy_low.png +.. 
image:: ../protocols/resonator_spectroscopy/resonator_spectroscopy_low.png .. note:: @@ -180,9 +180,9 @@ the following runcard: operation: rabi_amplitude_signal parameters: - min_amp_factor: 0 - max_amp_factor: 1.1 - step_amp_factor: 0.1 + min_amp: 0 + max_amp: 1.1 + step_amp: 0.1 pulse_length: 40 relaxation_time: 100_000 nshots: 1024 @@ -191,7 +191,7 @@ In this particular case we are fixing the duration of the pulse to be 40 ns and a sweep in the drive amplitude to find the correct value. The :math:`\pi` corresponds to first half period of the oscillation. -.. image:: ../protocols/rabi_amplitude.png +.. image:: ../protocols/rabi/rabi_amplitude.png Classification model ^^^^^^^^^^^^^^^^^^^^ diff --git a/poetry.lock b/poetry.lock index f3c4c42d5..5da4c7f29 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "alabaster" @@ -13,13 +13,13 @@ files = [ [[package]] name = "alembic" -version = "1.14.0" +version = "1.14.1" description = "A database migration tool for SQLAlchemy." optional = false python-versions = ">=3.8" files = [ - {file = "alembic-1.14.0-py3-none-any.whl", hash = "sha256:99bd884ca390466db5e27ffccff1d179ec5c05c965cfefc0607e69f9e411cb25"}, - {file = "alembic-1.14.0.tar.gz", hash = "sha256:b00892b53b3642d0b8dbedba234dbf1924b69be83a9a769d5a624b01094e304b"}, + {file = "alembic-1.14.1-py3-none-any.whl", hash = "sha256:1acdd7a3a478e208b0503cd73614d5e4c6efafa4e73518bb60e4f2846a37b1c5"}, + {file = "alembic-1.14.1.tar.gz", hash = "sha256:496e888245a53adf1498fcab31713a469c65836f8de76e01399aa1c3e90dd213"}, ] [package.dependencies] @@ -28,7 +28,7 @@ SQLAlchemy = ">=1.3.0" typing-extensions = ">=4" [package.extras] -tz = ["backports.zoneinfo"] +tz = ["backports.zoneinfo", "tzdata"] [[package]] name = "annotated-types" @@ -91,50 +91,51 @@ test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] [[package]] name = "attrs" -version = "24.2.0" +version = "25.1.0" description = "Classes Without Boilerplate" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, - {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, + {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, + {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, ] [package.extras] benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", 
"pytest-mypy-plugins", "pytest-xdist[psutil]"] tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "babel" -version = "2.16.0" +version = "2.17.0" description = "Internationalization utilities" optional = false python-versions = ">=3.8" files = [ - {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, - {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, + {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, + {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, ] [package.extras] -dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] +dev = ["backports.zoneinfo", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata"] [[package]] name = "beautifulsoup4" -version = "4.12.3" +version = "4.13.3" description = "Screen-scraping library" optional = false -python-versions = ">=3.6.0" +python-versions = ">=3.7.0" files = [ - {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, - {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, + {file = "beautifulsoup4-4.13.3-py3-none-any.whl", hash = "sha256:99045d7d3f08f91f0d656bc9b7efbae189426cd913d830294a15eefa0ea4df16"}, + {file = "beautifulsoup4-4.13.3.tar.gz", hash = "sha256:1bd32405dacc920b42b83ba01644747ed77456a65760e285fbc47633ceddaf8b"}, ] [package.dependencies] soupsieve = ">1.2" +typing-extensions = ">=4.0.0" [package.extras] cchardet = ["cchardet"] @@ -156,138 +157,125 @@ files = [ [[package]] name = "certifi" -version = "2024.8.30" +version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, + {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, + {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, ] [[package]] name = "charset-normalizer" -version = "3.4.0" +version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.7" files = [ - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, - {file = 
"charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, - {file = 
"charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, - {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, - {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = 
"charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file 
= "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = 
"charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] [[package]] name = "click" -version = "8.1.7" +version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, ] [package.dependencies] @@ -439,73 +427,74 @@ test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist" [[package]] name = "coverage" -version = "7.6.8" +version = "7.6.12" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" files = [ - {file = "coverage-7.6.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b39e6011cd06822eb964d038d5dff5da5d98652b81f5ecd439277b32361a3a50"}, - {file = "coverage-7.6.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:63c19702db10ad79151a059d2d6336fe0c470f2e18d0d4d1a57f7f9713875dcf"}, - {file = "coverage-7.6.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3985b9be361d8fb6b2d1adc9924d01dec575a1d7453a14cccd73225cb79243ee"}, - {file = "coverage-7.6.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:644ec81edec0f4ad17d51c838a7d01e42811054543b76d4ba2c5d6af741ce2a6"}, - {file = "coverage-7.6.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f188a2402f8359cf0c4b1fe89eea40dc13b52e7b4fd4812450da9fcd210181d"}, - {file = "coverage-7.6.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e19122296822deafce89a0c5e8685704c067ae65d45e79718c92df7b3ec3d331"}, - {file = "coverage-7.6.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:13618bed0c38acc418896005732e565b317aa9e98d855a0e9f211a7ffc2d6638"}, - {file = "coverage-7.6.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:193e3bffca48ad74b8c764fb4492dd875038a2f9925530cb094db92bb5e47bed"}, - {file = "coverage-7.6.8-cp310-cp310-win32.whl", hash = "sha256:3988665ee376abce49613701336544041f2117de7b7fbfe91b93d8ff8b151c8e"}, - {file = "coverage-7.6.8-cp310-cp310-win_amd64.whl", hash = 
"sha256:f56f49b2553d7dd85fd86e029515a221e5c1f8cb3d9c38b470bc38bde7b8445a"}, - {file = "coverage-7.6.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:86cffe9c6dfcfe22e28027069725c7f57f4b868a3f86e81d1c62462764dc46d4"}, - {file = "coverage-7.6.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d82ab6816c3277dc962cfcdc85b1efa0e5f50fb2c449432deaf2398a2928ab94"}, - {file = "coverage-7.6.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13690e923a3932e4fad4c0ebfb9cb5988e03d9dcb4c5150b5fcbf58fd8bddfc4"}, - {file = "coverage-7.6.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4be32da0c3827ac9132bb488d331cb32e8d9638dd41a0557c5569d57cf22c9c1"}, - {file = "coverage-7.6.8-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44e6c85bbdc809383b509d732b06419fb4544dca29ebe18480379633623baafb"}, - {file = "coverage-7.6.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:768939f7c4353c0fac2f7c37897e10b1414b571fd85dd9fc49e6a87e37a2e0d8"}, - {file = "coverage-7.6.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e44961e36cb13c495806d4cac67640ac2866cb99044e210895b506c26ee63d3a"}, - {file = "coverage-7.6.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3ea8bb1ab9558374c0ab591783808511d135a833c3ca64a18ec927f20c4030f0"}, - {file = "coverage-7.6.8-cp311-cp311-win32.whl", hash = "sha256:629a1ba2115dce8bf75a5cce9f2486ae483cb89c0145795603d6554bdc83e801"}, - {file = "coverage-7.6.8-cp311-cp311-win_amd64.whl", hash = "sha256:fb9fc32399dca861584d96eccd6c980b69bbcd7c228d06fb74fe53e007aa8ef9"}, - {file = "coverage-7.6.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e683e6ecc587643f8cde8f5da6768e9d165cd31edf39ee90ed7034f9ca0eefee"}, - {file = "coverage-7.6.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1defe91d41ce1bd44b40fabf071e6a01a5aa14de4a31b986aa9dfd1b3e3e414a"}, - {file = "coverage-7.6.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7ad66e8e50225ebf4236368cc43c37f59d5e6728f15f6e258c8639fa0dd8e6d"}, - {file = "coverage-7.6.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fe47da3e4fda5f1abb5709c156eca207eacf8007304ce3019eb001e7a7204cb"}, - {file = "coverage-7.6.8-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:202a2d645c5a46b84992f55b0a3affe4f0ba6b4c611abec32ee88358db4bb649"}, - {file = "coverage-7.6.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4674f0daa1823c295845b6a740d98a840d7a1c11df00d1fd62614545c1583787"}, - {file = "coverage-7.6.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:74610105ebd6f33d7c10f8907afed696e79c59e3043c5f20eaa3a46fddf33b4c"}, - {file = "coverage-7.6.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37cda8712145917105e07aab96388ae76e787270ec04bcb9d5cc786d7cbb8443"}, - {file = "coverage-7.6.8-cp312-cp312-win32.whl", hash = "sha256:9e89d5c8509fbd6c03d0dd1972925b22f50db0792ce06324ba069f10787429ad"}, - {file = "coverage-7.6.8-cp312-cp312-win_amd64.whl", hash = "sha256:379c111d3558272a2cae3d8e57e6b6e6f4fe652905692d54bad5ea0ca37c5ad4"}, - {file = "coverage-7.6.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0b0c69f4f724c64dfbfe79f5dfb503b42fe6127b8d479b2677f2b227478db2eb"}, - {file = "coverage-7.6.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c15b32a7aca8038ed7644f854bf17b663bc38e1671b5d6f43f9a2b2bd0c46f63"}, - {file = 
"coverage-7.6.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63068a11171e4276f6ece913bde059e77c713b48c3a848814a6537f35afb8365"}, - {file = "coverage-7.6.8-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f4548c5ead23ad13fb7a2c8ea541357474ec13c2b736feb02e19a3085fac002"}, - {file = "coverage-7.6.8-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b4b4299dd0d2c67caaaf286d58aef5e75b125b95615dda4542561a5a566a1e3"}, - {file = "coverage-7.6.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c9ebfb2507751f7196995142f057d1324afdab56db1d9743aab7f50289abd022"}, - {file = "coverage-7.6.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c1b4474beee02ede1eef86c25ad4600a424fe36cff01a6103cb4533c6bf0169e"}, - {file = "coverage-7.6.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9fd2547e6decdbf985d579cf3fc78e4c1d662b9b0ff7cc7862baaab71c9cc5b"}, - {file = "coverage-7.6.8-cp313-cp313-win32.whl", hash = "sha256:8aae5aea53cbfe024919715eca696b1a3201886ce83790537d1c3668459c7146"}, - {file = "coverage-7.6.8-cp313-cp313-win_amd64.whl", hash = "sha256:ae270e79f7e169ccfe23284ff5ea2d52a6f401dc01b337efb54b3783e2ce3f28"}, - {file = "coverage-7.6.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:de38add67a0af869b0d79c525d3e4588ac1ffa92f39116dbe0ed9753f26eba7d"}, - {file = "coverage-7.6.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b07c25d52b1c16ce5de088046cd2432b30f9ad5e224ff17c8f496d9cb7d1d451"}, - {file = "coverage-7.6.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62a66ff235e4c2e37ed3b6104d8b478d767ff73838d1222132a7a026aa548764"}, - {file = "coverage-7.6.8-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09b9f848b28081e7b975a3626e9081574a7b9196cde26604540582da60235fdf"}, - {file = "coverage-7.6.8-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:093896e530c38c8e9c996901858ac63f3d4171268db2c9c8b373a228f459bbc5"}, - {file = "coverage-7.6.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a7b8ac36fd688c8361cbc7bf1cb5866977ece6e0b17c34aa0df58bda4fa18a4"}, - {file = "coverage-7.6.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:38c51297b35b3ed91670e1e4efb702b790002e3245a28c76e627478aa3c10d83"}, - {file = "coverage-7.6.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2e4e0f60cb4bd7396108823548e82fdab72d4d8a65e58e2c19bbbc2f1e2bfa4b"}, - {file = "coverage-7.6.8-cp313-cp313t-win32.whl", hash = "sha256:6535d996f6537ecb298b4e287a855f37deaf64ff007162ec0afb9ab8ba3b8b71"}, - {file = "coverage-7.6.8-cp313-cp313t-win_amd64.whl", hash = "sha256:c79c0685f142ca53256722a384540832420dff4ab15fec1863d7e5bc8691bdcc"}, - {file = "coverage-7.6.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ac47fa29d8d41059ea3df65bd3ade92f97ee4910ed638e87075b8e8ce69599e"}, - {file = "coverage-7.6.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:24eda3a24a38157eee639ca9afe45eefa8d2420d49468819ac5f88b10de84f4c"}, - {file = "coverage-7.6.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4c81ed2820b9023a9a90717020315e63b17b18c274a332e3b6437d7ff70abe0"}, - {file = "coverage-7.6.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd55f8fc8fa494958772a2a7302b0354ab16e0b9272b3c3d83cdb5bec5bd1779"}, - {file = 
"coverage-7.6.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f39e2f3530ed1626c66e7493be7a8423b023ca852aacdc91fb30162c350d2a92"}, - {file = "coverage-7.6.8-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:716a78a342679cd1177bc8c2fe957e0ab91405bd43a17094324845200b2fddf4"}, - {file = "coverage-7.6.8-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:177f01eeaa3aee4a5ffb0d1439c5952b53d5010f86e9d2667963e632e30082cc"}, - {file = "coverage-7.6.8-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:912e95017ff51dc3d7b6e2be158dedc889d9a5cc3382445589ce554f1a34c0ea"}, - {file = "coverage-7.6.8-cp39-cp39-win32.whl", hash = "sha256:4db3ed6a907b555e57cc2e6f14dc3a4c2458cdad8919e40b5357ab9b6db6c43e"}, - {file = "coverage-7.6.8-cp39-cp39-win_amd64.whl", hash = "sha256:428ac484592f780e8cd7b6b14eb568f7c85460c92e2a37cb0c0e5186e1a0d076"}, - {file = "coverage-7.6.8-pp39.pp310-none-any.whl", hash = "sha256:5c52a036535d12590c32c49209e79cabaad9f9ad8aa4cbd875b68c4d67a9cbce"}, - {file = "coverage-7.6.8.tar.gz", hash = "sha256:8b2b8503edb06822c86d82fa64a4a5cb0760bb8f31f26e138ec743f422f37cfc"}, + {file = "coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8"}, + {file = "coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e"}, + {file = "coverage-7.6.12-cp310-cp310-win32.whl", hash = "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425"}, + {file = "coverage-7.6.12-cp310-cp310-win_amd64.whl", hash = "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba"}, + {file = "coverage-7.6.12-cp311-cp311-win32.whl", hash = "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f"}, + {file = "coverage-7.6.12-cp311-cp311-win_amd64.whl", hash = "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a"}, + {file = "coverage-7.6.12-cp312-cp312-win32.whl", hash = "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95"}, + {file = "coverage-7.6.12-cp312-cp312-win_amd64.whl", hash = "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc"}, + {file = "coverage-7.6.12-cp313-cp313-win32.whl", hash = "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3"}, + {file = "coverage-7.6.12-cp313-cp313-win_amd64.whl", hash = "sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9"}, + {file = "coverage-7.6.12-cp313-cp313t-win32.whl", hash = "sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3"}, + {file = "coverage-7.6.12-cp313-cp313t-win_amd64.whl", hash = "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e7575ab65ca8399c8c4f9a7d61bbd2d204c8b8e447aab9d355682205c9dd948d"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8161d9fbc7e9fe2326de89cd0abb9f3599bccc1287db0aba285cb68d204ce929"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a1e465f398c713f1b212400b4e79a09829cd42aebd360362cd89c5bdc44eb87"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f25d8b92a4e31ff1bd873654ec367ae811b3a943583e05432ea29264782dc32c"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a936309a65cc5ca80fa9f20a442ff9e2d06927ec9a4f54bcba9c14c066323f2"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aa6f302a3a0b5f240ee201297fff0bbfe2fa0d415a94aeb257d8b461032389bd"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f973643ef532d4f9be71dd88cf7588936685fdb576d93a79fe9f65bc337d9d73"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:78f5243bb6b1060aed6213d5107744c19f9571ec76d54c99cc15938eb69e0e86"}, + {file = "coverage-7.6.12-cp39-cp39-win32.whl", hash = "sha256:69e62c5034291c845fc4df7f8155e8544178b6c774f97a99e2734b05eb5bed31"}, + {file = 
"coverage-7.6.12-cp39-cp39-win_amd64.whl", hash = "sha256:b01a840ecc25dce235ae4c1b6a0daefb2a203dba0e6e980637ee9c2f6ee0df57"}, + {file = "coverage-7.6.12-pp39.pp310-none-any.whl", hash = "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf"}, + {file = "coverage-7.6.12-py3-none-any.whl", hash = "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953"}, + {file = "coverage-7.6.12.tar.gz", hash = "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2"}, ] [package.dependencies] @@ -666,13 +655,13 @@ test = ["pytest (>=6)"] [[package]] name = "executing" -version = "2.1.0" +version = "2.2.0" description = "Get the currently executing AST node of a frame, and other information" optional = false python-versions = ">=3.8" files = [ - {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, - {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"}, + {file = "executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa"}, + {file = "executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755"}, ] [package.extras] @@ -695,18 +684,18 @@ pyrepl = ">=0.8.2" [[package]] name = "filelock" -version = "3.16.1" +version = "3.17.0" description = "A platform independent file lock." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, - {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, + {file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"}, + {file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] typing = ["typing-extensions (>=4.12.2)"] [[package]] @@ -734,61 +723,61 @@ dotenv = ["python-dotenv"] [[package]] name = "fonttools" -version = "4.55.0" +version = "4.56.0" description = "Tools to manipulate font files" optional = true python-versions = ">=3.8" files = [ - {file = "fonttools-4.55.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:51c029d4c0608a21a3d3d169dfc3fb776fde38f00b35ca11fdab63ba10a16f61"}, - {file = "fonttools-4.55.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bca35b4e411362feab28e576ea10f11268b1aeed883b9f22ed05675b1e06ac69"}, - {file = "fonttools-4.55.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ce4ba6981e10f7e0ccff6348e9775ce25ffadbee70c9fd1a3737e3e9f5fa74f"}, - {file = "fonttools-4.55.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:31d00f9852a6051dac23294a4cf2df80ced85d1d173a61ba90a3d8f5abc63c60"}, - {file = "fonttools-4.55.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e198e494ca6e11f254bac37a680473a311a88cd40e58f9cc4dc4911dfb686ec6"}, - {file = "fonttools-4.55.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7208856f61770895e79732e1dcbe49d77bd5783adf73ae35f87fcc267df9db81"}, - {file = "fonttools-4.55.0-cp310-cp310-win32.whl", hash = "sha256:e7e6a352ff9e46e8ef8a3b1fe2c4478f8a553e1b5a479f2e899f9dc5f2055880"}, - {file = "fonttools-4.55.0-cp310-cp310-win_amd64.whl", hash = "sha256:636caaeefe586d7c84b5ee0734c1a5ab2dae619dc21c5cf336f304ddb8f6001b"}, - {file = "fonttools-4.55.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fa34aa175c91477485c44ddfbb51827d470011e558dfd5c7309eb31bef19ec51"}, - {file = "fonttools-4.55.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:37dbb3fdc2ef7302d3199fb12468481cbebaee849e4b04bc55b77c24e3c49189"}, - {file = "fonttools-4.55.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5263d8e7ef3c0ae87fbce7f3ec2f546dc898d44a337e95695af2cd5ea21a967"}, - {file = "fonttools-4.55.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f307f6b5bf9e86891213b293e538d292cd1677e06d9faaa4bf9c086ad5f132f6"}, - {file = "fonttools-4.55.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f0a4b52238e7b54f998d6a56b46a2c56b59c74d4f8a6747fb9d4042190f37cd3"}, - {file = "fonttools-4.55.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3e569711464f777a5d4ef522e781dc33f8095ab5efd7548958b36079a9f2f88c"}, - {file = "fonttools-4.55.0-cp311-cp311-win32.whl", hash = "sha256:2b3ab90ec0f7b76c983950ac601b58949f47aca14c3f21eed858b38d7ec42b05"}, - {file = "fonttools-4.55.0-cp311-cp311-win_amd64.whl", hash = "sha256:aa046f6a63bb2ad521004b2769095d4c9480c02c1efa7d7796b37826508980b6"}, - {file = "fonttools-4.55.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:838d2d8870f84fc785528a692e724f2379d5abd3fc9dad4d32f91cf99b41e4a7"}, - {file = "fonttools-4.55.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f46b863d74bab7bb0d395f3b68d3f52a03444964e67ce5c43ce43a75efce9246"}, - {file = "fonttools-4.55.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33b52a9cfe4e658e21b1f669f7309b4067910321757fec53802ca8f6eae96a5a"}, - {file = "fonttools-4.55.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:732a9a63d6ea4a81b1b25a1f2e5e143761b40c2e1b79bb2b68e4893f45139a40"}, - {file = "fonttools-4.55.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7dd91ac3fcb4c491bb4763b820bcab6c41c784111c24172616f02f4bc227c17d"}, - {file = "fonttools-4.55.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1f0e115281a32ff532118aa851ef497a1b7cda617f4621c1cdf81ace3e36fb0c"}, - {file = "fonttools-4.55.0-cp312-cp312-win32.whl", hash = "sha256:6c99b5205844f48a05cb58d4a8110a44d3038c67ed1d79eb733c4953c628b0f6"}, - {file = "fonttools-4.55.0-cp312-cp312-win_amd64.whl", hash = "sha256:f8c8c76037d05652510ae45be1cd8fb5dd2fd9afec92a25374ac82255993d57c"}, - {file = "fonttools-4.55.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8118dc571921dc9e4b288d9cb423ceaf886d195a2e5329cc427df82bba872cd9"}, - {file = "fonttools-4.55.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01124f2ca6c29fad4132d930da69158d3f49b2350e4a779e1efbe0e82bd63f6c"}, - {file = "fonttools-4.55.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:81ffd58d2691f11f7c8438796e9f21c374828805d33e83ff4b76e4635633674c"}, - {file = "fonttools-4.55.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5435e5f1eb893c35c2bc2b9cd3c9596b0fcb0a59e7a14121562986dd4c47b8dd"}, - {file = "fonttools-4.55.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d12081729280c39d001edd0f4f06d696014c26e6e9a0a55488fabc37c28945e4"}, - {file = "fonttools-4.55.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7ad1f1b98ab6cb927ab924a38a8649f1ffd7525c75fe5b594f5dab17af70e18"}, - {file = "fonttools-4.55.0-cp313-cp313-win32.whl", hash = "sha256:abe62987c37630dca69a104266277216de1023cf570c1643bb3a19a9509e7a1b"}, - {file = "fonttools-4.55.0-cp313-cp313-win_amd64.whl", hash = "sha256:2863555ba90b573e4201feaf87a7e71ca3b97c05aa4d63548a4b69ea16c9e998"}, - {file = "fonttools-4.55.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:00f7cf55ad58a57ba421b6a40945b85ac7cc73094fb4949c41171d3619a3a47e"}, - {file = "fonttools-4.55.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f27526042efd6f67bfb0cc2f1610fa20364396f8b1fc5edb9f45bb815fb090b2"}, - {file = "fonttools-4.55.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e67974326af6a8879dc2a4ec63ab2910a1c1a9680ccd63e4a690950fceddbe"}, - {file = "fonttools-4.55.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61dc0a13451143c5e987dec5254d9d428f3c2789a549a7cf4f815b63b310c1cc"}, - {file = "fonttools-4.55.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b2e526b325a903868c62155a6a7e24df53f6ce4c5c3160214d8fe1be2c41b478"}, - {file = "fonttools-4.55.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b7ef9068a1297714e6fefe5932c33b058aa1d45a2b8be32a4c6dee602ae22b5c"}, - {file = "fonttools-4.55.0-cp38-cp38-win32.whl", hash = "sha256:55718e8071be35dff098976bc249fc243b58efa263768c611be17fe55975d40a"}, - {file = "fonttools-4.55.0-cp38-cp38-win_amd64.whl", hash = "sha256:553bd4f8cc327f310c20158e345e8174c8eed49937fb047a8bda51daf2c353c8"}, - {file = "fonttools-4.55.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f901cef813f7c318b77d1c5c14cf7403bae5cb977cede023e22ba4316f0a8f6"}, - {file = "fonttools-4.55.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8c9679fc0dd7e8a5351d321d8d29a498255e69387590a86b596a45659a39eb0d"}, - {file = "fonttools-4.55.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2820a8b632f3307ebb0bf57948511c2208e34a4939cf978333bc0a3f11f838"}, - {file = "fonttools-4.55.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23bbbb49bec613a32ed1b43df0f2b172313cee690c2509f1af8fdedcf0a17438"}, - {file = "fonttools-4.55.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a656652e1f5d55b9728937a7e7d509b73d23109cddd4e89ee4f49bde03b736c6"}, - {file = "fonttools-4.55.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f50a1f455902208486fbca47ce33054208a4e437b38da49d6721ce2fef732fcf"}, - {file = "fonttools-4.55.0-cp39-cp39-win32.whl", hash = "sha256:161d1ac54c73d82a3cded44202d0218ab007fde8cf194a23d3dd83f7177a2f03"}, - {file = "fonttools-4.55.0-cp39-cp39-win_amd64.whl", hash = "sha256:ca7fd6987c68414fece41c96836e945e1f320cda56fc96ffdc16e54a44ec57a2"}, - {file = "fonttools-4.55.0-py3-none-any.whl", hash = "sha256:12db5888cd4dd3fcc9f0ee60c6edd3c7e1fd44b7dd0f31381ea03df68f8a153f"}, - {file = "fonttools-4.55.0.tar.gz", hash = "sha256:7636acc6ab733572d5e7eec922b254ead611f1cdad17be3f0be7418e8bfaca71"}, + {file = 
"fonttools-4.56.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:331954d002dbf5e704c7f3756028e21db07097c19722569983ba4d74df014000"}, + {file = "fonttools-4.56.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d1613abd5af2f93c05867b3a3759a56e8bf97eb79b1da76b2bc10892f96ff16"}, + {file = "fonttools-4.56.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:705837eae384fe21cee5e5746fd4f4b2f06f87544fa60f60740007e0aa600311"}, + {file = "fonttools-4.56.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc871904a53a9d4d908673c6faa15689874af1c7c5ac403a8e12d967ebd0c0dc"}, + {file = "fonttools-4.56.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:38b947de71748bab150259ee05a775e8a0635891568e9fdb3cdd7d0e0004e62f"}, + {file = "fonttools-4.56.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:86b2a1013ef7a64d2e94606632683f07712045ed86d937c11ef4dde97319c086"}, + {file = "fonttools-4.56.0-cp310-cp310-win32.whl", hash = "sha256:133bedb9a5c6376ad43e6518b7e2cd2f866a05b1998f14842631d5feb36b5786"}, + {file = "fonttools-4.56.0-cp310-cp310-win_amd64.whl", hash = "sha256:17f39313b649037f6c800209984a11fc256a6137cbe5487091c6c7187cae4685"}, + {file = "fonttools-4.56.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7ef04bc7827adb7532be3d14462390dd71287644516af3f1e67f1e6ff9c6d6df"}, + {file = "fonttools-4.56.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ffda9b8cd9cb8b301cae2602ec62375b59e2e2108a117746f12215145e3f786c"}, + {file = "fonttools-4.56.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e993e8db36306cc3f1734edc8ea67906c55f98683d6fd34c3fc5593fdbba4c"}, + {file = "fonttools-4.56.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:003548eadd674175510773f73fb2060bb46adb77c94854af3e0cc5bc70260049"}, + {file = "fonttools-4.56.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd9825822e7bb243f285013e653f6741954d8147427aaa0324a862cdbf4cbf62"}, + {file = "fonttools-4.56.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b23d30a2c0b992fb1c4f8ac9bfde44b5586d23457759b6cf9a787f1a35179ee0"}, + {file = "fonttools-4.56.0-cp311-cp311-win32.whl", hash = "sha256:47b5e4680002ae1756d3ae3b6114e20aaee6cc5c69d1e5911f5ffffd3ee46c6b"}, + {file = "fonttools-4.56.0-cp311-cp311-win_amd64.whl", hash = "sha256:14a3e3e6b211660db54ca1ef7006401e4a694e53ffd4553ab9bc87ead01d0f05"}, + {file = "fonttools-4.56.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6f195c14c01bd057bc9b4f70756b510e009c83c5ea67b25ced3e2c38e6ee6e9"}, + {file = "fonttools-4.56.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fa760e5fe8b50cbc2d71884a1eff2ed2b95a005f02dda2fa431560db0ddd927f"}, + {file = "fonttools-4.56.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d54a45d30251f1d729e69e5b675f9a08b7da413391a1227781e2a297fa37f6d2"}, + {file = "fonttools-4.56.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:661a8995d11e6e4914a44ca7d52d1286e2d9b154f685a4d1f69add8418961563"}, + {file = "fonttools-4.56.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9d94449ad0a5f2a8bf5d2f8d71d65088aee48adbe45f3c5f8e00e3ad861ed81a"}, + {file = "fonttools-4.56.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f59746f7953f69cc3290ce2f971ab01056e55ddd0fb8b792c31a8acd7fee2d28"}, + {file = "fonttools-4.56.0-cp312-cp312-win32.whl", hash = "sha256:bce60f9a977c9d3d51de475af3f3581d9b36952e1f8fc19a1f2254f1dda7ce9c"}, 
+ {file = "fonttools-4.56.0-cp312-cp312-win_amd64.whl", hash = "sha256:300c310bb725b2bdb4f5fc7e148e190bd69f01925c7ab437b9c0ca3e1c7cd9ba"}, + {file = "fonttools-4.56.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f20e2c0dfab82983a90f3d00703ac0960412036153e5023eed2b4641d7d5e692"}, + {file = "fonttools-4.56.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f36a0868f47b7566237640c026c65a86d09a3d9ca5df1cd039e30a1da73098a0"}, + {file = "fonttools-4.56.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62b4c6802fa28e14dba010e75190e0e6228513573f1eeae57b11aa1a39b7e5b1"}, + {file = "fonttools-4.56.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a05d1f07eb0a7d755fbe01fee1fd255c3a4d3730130cf1bfefb682d18fd2fcea"}, + {file = "fonttools-4.56.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0073b62c3438cf0058488c002ea90489e8801d3a7af5ce5f7c05c105bee815c3"}, + {file = "fonttools-4.56.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cad98c94833465bcf28f51c248aaf07ca022efc6a3eba750ad9c1e0256d278"}, + {file = "fonttools-4.56.0-cp313-cp313-win32.whl", hash = "sha256:d0cb73ccf7f6d7ca8d0bc7ea8ac0a5b84969a41c56ac3ac3422a24df2680546f"}, + {file = "fonttools-4.56.0-cp313-cp313-win_amd64.whl", hash = "sha256:62cc1253827d1e500fde9dbe981219fea4eb000fd63402283472d38e7d8aa1c6"}, + {file = "fonttools-4.56.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3fd3fccb7b9adaaecfa79ad51b759f2123e1aba97f857936ce044d4f029abd71"}, + {file = "fonttools-4.56.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:193b86e9f769320bc98ffdb42accafb5d0c8c49bd62884f1c0702bc598b3f0a2"}, + {file = "fonttools-4.56.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e81c1cc80c1d8bf071356cc3e0e25071fbba1c75afc48d41b26048980b3c771"}, + {file = "fonttools-4.56.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9270505a19361e81eecdbc2c251ad1e1a9a9c2ad75fa022ccdee533f55535dc"}, + {file = "fonttools-4.56.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:53f5e9767978a4daf46f28e09dbeb7d010319924ae622f7b56174b777258e5ba"}, + {file = "fonttools-4.56.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9da650cb29bc098b8cfd15ef09009c914b35c7986c8fa9f08b51108b7bc393b4"}, + {file = "fonttools-4.56.0-cp38-cp38-win32.whl", hash = "sha256:965d0209e6dbdb9416100123b6709cb13f5232e2d52d17ed37f9df0cc31e2b35"}, + {file = "fonttools-4.56.0-cp38-cp38-win_amd64.whl", hash = "sha256:654ac4583e2d7c62aebc6fc6a4c6736f078f50300e18aa105d87ce8925cfac31"}, + {file = "fonttools-4.56.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ca7962e8e5fc047cc4e59389959843aafbf7445b6c08c20d883e60ced46370a5"}, + {file = "fonttools-4.56.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1af375734018951c31c0737d04a9d5fd0a353a0253db5fbed2ccd44eac62d8c"}, + {file = "fonttools-4.56.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:442ad4122468d0e47d83bc59d0e91b474593a8c813839e1872e47c7a0cb53b10"}, + {file = "fonttools-4.56.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cf4f8d2a30b454ac682e12c61831dcb174950c406011418e739de592bbf8f76"}, + {file = "fonttools-4.56.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:96a4271f63a615bcb902b9f56de00ea225d6896052c49f20d0c91e9f43529a29"}, + {file = "fonttools-4.56.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6c1d38642ca2dddc7ae992ef5d026e5061a84f10ff2b906be5680ab089f55bb8"}, + {file = 
"fonttools-4.56.0-cp39-cp39-win32.whl", hash = "sha256:2d351275f73ebdd81dd5b09a8b8dac7a30f29a279d41e1c1192aedf1b6dced40"}, + {file = "fonttools-4.56.0-cp39-cp39-win_amd64.whl", hash = "sha256:d6ca96d1b61a707ba01a43318c9c40aaf11a5a568d1e61146fafa6ab20890793"}, + {file = "fonttools-4.56.0-py3-none-any.whl", hash = "sha256:1088182f68c303b50ca4dc0c82d42083d176cba37af1937e1a976a31149d4d14"}, + {file = "fonttools-4.56.0.tar.gz", hash = "sha256:a114d1567e1a1586b7e9e7fc2ff686ca542a82769a296cef131e4c4af51e58f4"}, ] [package.extras] @@ -807,13 +796,13 @@ woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] [[package]] name = "fsspec" -version = "2024.10.0" +version = "2025.2.0" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871"}, - {file = "fsspec-2024.10.0.tar.gz", hash = "sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493"}, + {file = "fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b"}, + {file = "fsspec-2025.2.0.tar.gz", hash = "sha256:1c24b16eaa0a1798afa0337aa0db9b256718ab2a89c425371f5628d22c3b6afd"}, ] [package.extras] @@ -840,7 +829,7 @@ sftp = ["paramiko"] smb = ["smbprotocol"] ssh = ["paramiko"] test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] -test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] tqdm = ["tqdm"] @@ -970,13 +959,13 @@ lxml = ["lxml"] [[package]] name = "huggingface-hub" -version = "0.26.3" +version = "0.28.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.26.3-py3-none-any.whl", hash = "sha256:e66aa99e569c2d5419240a9e553ad07245a5b1300350bfbc5a4945cf7432991b"}, - {file = "huggingface_hub-0.26.3.tar.gz", hash = "sha256:90e1fe62ffc26757a073aaad618422b899ccf9447c2bba8c902a90bef5b42e1d"}, + {file = "huggingface_hub-0.28.1-py3-none-any.whl", hash = "sha256:aa6b9a3ffdae939b72c464dbb0d7f99f56e649b55c3d52406f49e0a5a620c0a7"}, + {file = "huggingface_hub-0.28.1.tar.gz", hash = "sha256:893471090c98e3b6efbdfdacafe4052b20b84d59866fb6f54c33d9af18c303ae"}, ] [package.dependencies] @@ -989,13 +978,13 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", 
"pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] inference = ["aiohttp"] -quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"] +quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.9.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] @@ -1029,13 +1018,13 @@ files = [ [[package]] name = "importlib-metadata" -version = "8.5.0" +version = "8.6.1" description = "Read metadata from Python packages" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, - {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, + {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"}, + {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"}, ] [package.dependencies] @@ -1047,18 +1036,18 @@ cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +test = ["flufl.flake8", 
"importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] [[package]] name = "importlib-resources" -version = "6.4.5" +version = "6.5.2" description = "Read resources from Python packages" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, - {file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, + {file = "importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec"}, + {file = "importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c"}, ] [package.dependencies] @@ -1166,13 +1155,13 @@ testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] [[package]] name = "jinja2" -version = "3.1.4" +version = "3.1.5" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, + {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"}, + {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"}, ] [package.dependencies] @@ -1374,167 +1363,167 @@ files = [ [[package]] name = "lxml" -version = "5.3.0" +version = "5.3.1" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
optional = false python-versions = ">=3.6" files = [ - {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656"}, - {file = "lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8"}, - {file = "lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03"}, - {file = "lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7"}, - {file = "lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80"}, - {file = "lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3"}, - {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b"}, - {file = "lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080"}, - {file = "lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec"}, - {file = "lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be"}, - {file = "lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9"}, - {file = "lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1"}, - {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859"}, - {file = "lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c"}, - {file = "lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff"}, - {file = 
"lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8"}, - {file = "lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d"}, - {file = "lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30"}, - {file = "lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f"}, - {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a"}, - {file = "lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367"}, - {file = "lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb"}, - {file = "lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b"}, - {file = "lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957"}, - {file = "lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d"}, - {file = "lxml-5.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:8f0de2d390af441fe8b2c12626d103540b5d850d585b18fcada58d972b74a74e"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1afe0a8c353746e610bd9031a630a95bcfb1a720684c3f2b36c4710a0a96528f"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56b9861a71575f5795bde89256e7467ece3d339c9b43141dbdd54544566b3b94"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:9fb81d2824dff4f2e297a276297e9031f46d2682cafc484f49de182aa5e5df99"}, - {file = "lxml-5.3.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2c226a06ecb8cdef28845ae976da407917542c5e6e75dcac7cc33eb04aaeb237"}, - {file = "lxml-5.3.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7d3d1ca42870cdb6d0d29939630dbe48fa511c203724820fc0fd507b2fb46577"}, - {file = "lxml-5.3.0-cp36-cp36m-win32.whl", hash = "sha256:094cb601ba9f55296774c2d57ad68730daa0b13dc260e1f941b4d13678239e70"}, - {file = "lxml-5.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:eafa2c8658f4e560b098fe9fc54539f86528651f61849b22111a9b107d18910c"}, - {file = "lxml-5.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cb83f8a875b3d9b458cada4f880fa498646874ba4011dc974e071a0a84a1b033"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25f1b69d41656b05885aa185f5fdf822cb01a586d1b32739633679699f220391"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23e0553b8055600b3bf4a00b255ec5c92e1e4aebf8c2c09334f8368e8bd174d6"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ada35dd21dc6c039259596b358caab6b13f4db4d4a7f8665764d616daf9cc1d"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:81b4e48da4c69313192d8c8d4311e5d818b8be1afe68ee20f6385d0e96fc9512"}, - {file = "lxml-5.3.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:2bc9fd5ca4729af796f9f59cd8ff160fe06a474da40aca03fcc79655ddee1a8b"}, - {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07da23d7ee08577760f0a71d67a861019103e4812c87e2fab26b039054594cc5"}, - {file = "lxml-5.3.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ea2e2f6f801696ad7de8aec061044d6c8c0dd4037608c7cab38a9a4d316bfb11"}, - {file = "lxml-5.3.0-cp37-cp37m-win32.whl", hash = "sha256:5c54afdcbb0182d06836cc3d1be921e540be3ebdf8b8a51ee3ef987537455f84"}, - {file = "lxml-5.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f2901429da1e645ce548bf9171784c0f74f0718c3f6150ce166be39e4dd66c3e"}, - {file = "lxml-5.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c56a1d43b2f9ee4786e4658c7903f05da35b923fb53c11025712562d5cc02753"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ee8c39582d2652dcd516d1b879451500f8db3fe3607ce45d7c5957ab2596040"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdf3a3059611f7585a78ee10399a15566356116a4288380921a4b598d807a22"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:146173654d79eb1fc97498b4280c1d3e1e5d58c398fa530905c9ea50ea849b22"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0a7056921edbdd7560746f4221dca89bb7a3fe457d3d74267995253f46343f15"}, - {file = "lxml-5.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:9e4b47ac0f5e749cfc618efdf4726269441014ae1d5583e047b452a32e221920"}, - {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:f914c03e6a31deb632e2daa881fe198461f4d06e57ac3d0e05bbcab8eae01945"}, - {file = "lxml-5.3.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:213261f168c5e1d9b7535a67e68b1f59f92398dd17a56d934550837143f79c42"}, - {file = "lxml-5.3.0-cp38-cp38-win32.whl", hash = "sha256:218c1b2e17a710e363855594230f44060e2025b05c80d1f0661258142b2add2e"}, - {file = "lxml-5.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:315f9542011b2c4e1d280e4a20ddcca1761993dda3afc7a73b01235f8641e903"}, - {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1ffc23010330c2ab67fac02781df60998ca8fe759e8efde6f8b756a20599c5de"}, - {file = "lxml-5.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2b3778cb38212f52fac9fe913017deea2fdf4eb1a4f8e4cfc6b009a13a6d3fcc"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b0c7a688944891086ba192e21c5229dea54382f4836a209ff8d0a660fac06be"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:747a3d3e98e24597981ca0be0fd922aebd471fa99d0043a3842d00cdcad7ad6a"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86a6b24b19eaebc448dc56b87c4865527855145d851f9fc3891673ff97950540"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b11a5d918a6216e521c715b02749240fb07ae5a1fefd4b7bf12f833bc8b4fe70"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b87753c784d6acb8a25b05cb526c3406913c9d988d51f80adecc2b0775d6aa"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:109fa6fede314cc50eed29e6e56c540075e63d922455346f11e4d7a036d2b8cf"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:02ced472497b8362c8e902ade23e3300479f4f43e45f4105c85ef43b8db85229"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:6b038cc86b285e4f9fea2ba5ee76e89f21ed1ea898e287dc277a25884f3a7dfe"}, - {file = "lxml-5.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7437237c6a66b7ca341e868cda48be24b8701862757426852c9b3186de1da8a2"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7f41026c1d64043a36fda21d64c5026762d53a77043e73e94b71f0521939cc71"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:482c2f67761868f0108b1743098640fbb2a28a8e15bf3f47ada9fa59d9fe08c3"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1483fd3358963cc5c1c9b122c80606a3a79ee0875bcac0204149fa09d6ff2727"}, - {file = "lxml-5.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dec2d1130a9cda5b904696cec33b2cfb451304ba9081eeda7f90f724097300a"}, - {file = "lxml-5.3.0-cp39-cp39-win32.whl", hash = "sha256:a0eabd0a81625049c5df745209dc7fcef6e2aea7793e5f003ba363610aa0a3ff"}, - {file = "lxml-5.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:89e043f1d9d341c52bf2af6d02e6adde62e0a46e6755d5eb60dc6e4f0b8aeca2"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005"}, - {file = 
"lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83"}, - {file = "lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:94d6c3782907b5e40e21cadf94b13b0842ac421192f26b84c45f13f3c9d5dc27"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c300306673aa0f3ed5ed9372b21867690a17dba38c68c44b287437c362ce486b"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d9b952e07aed35fe2e1a7ad26e929595412db48535921c5013edc8aa4a35ce"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:01220dca0d066d1349bd6a1726856a78f7929f3878f7e2ee83c296c69495309e"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2d9b8d9177afaef80c53c0a9e30fa252ff3036fb1c6494d427c066a4ce6a282f"}, - {file = "lxml-5.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:20094fc3f21ea0a8669dc4c61ed7fa8263bd37d97d93b90f28fc613371e7a875"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ace2c2326a319a0bb8a8b0e5b570c764962e95818de9f259ce814ee666603f19"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92e67a0be1639c251d21e35fe74df6bcc40cba445c2cda7c4a967656733249e2"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5350b55f9fecddc51385463a4f67a5da829bc741e38cf689f38ec9023f54ab"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c1fefd7e3d00921c44dc9ca80a775af49698bbfd92ea84498e56acffd4c5469"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:71a8dd38fbd2f2319136d4ae855a7078c69c9a38ae06e0c17c73fd70fc6caad8"}, - {file = "lxml-5.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:97acf1e1fd66ab53dacd2c35b319d7e548380c2e9e8c54525c6e76d21b1ae3b1"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:68934b242c51eb02907c5b81d138cb977b2129a0a75a8f8b60b01cb8586c7b21"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b710bc2b8292966b23a6a0121f7a6c51d45d2347edcc75f016ac123b8054d3f2"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18feb4b93302091b1541221196a2155aa296c363fd233814fa11e181adebc52f"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3eb44520c4724c2e1a57c0af33a379eee41792595023f367ba3952a2d96c2aab"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:609251a0ca4770e5a8768ff902aa02bf636339c5a93f9349b48eb1f606f7f3e9"}, - {file = "lxml-5.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:516f491c834eb320d6c843156440fe7fc0d50b33e44387fcec5b02f0bc118a4c"}, - {file = "lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f"}, + {file = "lxml-5.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a4058f16cee694577f7e4dd410263cd0ef75644b43802a689c2b3c2a7e69453b"}, + {file = "lxml-5.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:364de8f57d6eda0c16dcfb999af902da31396949efa0e583e12675d09709881b"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:528f3a0498a8edc69af0559bdcf8a9f5a8bf7c00051a6ef3141fdcf27017bbf5"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db4743e30d6f5f92b6d2b7c86b3ad250e0bad8dee4b7ad8a0c44bfb276af89a3"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17b5d7f8acf809465086d498d62a981fa6a56d2718135bb0e4aa48c502055f5c"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:928e75a7200a4c09e6efc7482a1337919cc61fe1ba289f297827a5b76d8969c2"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a997b784a639e05b9d4053ef3b20c7e447ea80814a762f25b8ed5a89d261eac"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:7b82e67c5feb682dbb559c3e6b78355f234943053af61606af126df2183b9ef9"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:f1de541a9893cf8a1b1db9bf0bf670a2decab42e3e82233d36a74eda7822b4c9"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:de1fc314c3ad6bc2f6bd5b5a5b9357b8c6896333d27fdbb7049aea8bd5af2d79"}, + {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7c0536bd9178f754b277a3e53f90f9c9454a3bd108b1531ffff720e082d824f2"}, + {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:68018c4c67d7e89951a91fbd371e2e34cd8cfc71f0bb43b5332db38497025d51"}, + {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa826340a609d0c954ba52fd831f0fba2a4165659ab0ee1a15e4aac21f302406"}, + {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:796520afa499732191e39fc95b56a3b07f95256f2d22b1c26e217fb69a9db5b5"}, + {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3effe081b3135237da6e4c4530ff2a868d3f80be0bda027e118a5971285d42d0"}, + {file = "lxml-5.3.1-cp310-cp310-win32.whl", hash = "sha256:a22f66270bd6d0804b02cd49dae2b33d4341015545d17f8426f2c4e22f557a23"}, + {file = "lxml-5.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:0bcfadea3cdc68e678d2b20cb16a16716887dd00a881e16f7d806c2138b8ff0c"}, + {file = "lxml-5.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e220f7b3e8656ab063d2eb0cd536fafef396829cafe04cb314e734f87649058f"}, + {file = "lxml-5.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f2cfae0688fd01f7056a17367e3b84f37c545fb447d7282cf2c242b16262607"}, + {file = "lxml-5.3.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67d2f8ad9dcc3a9e826bdc7802ed541a44e124c29b7d95a679eeb58c1c14ade8"}, + {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db0c742aad702fd5d0c6611a73f9602f20aec2007c102630c06d7633d9c8f09a"}, + {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:198bb4b4dd888e8390afa4f170d4fa28467a7eaf857f1952589f16cfbb67af27"}, + {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2a3e412ce1849be34b45922bfef03df32d1410a06d1cdeb793a343c2f1fd666"}, + {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8969dbc8d09d9cd2ae06362c3bad27d03f433252601ef658a49bd9f2b22d79"}, + {file = 
"lxml-5.3.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5be8f5e4044146a69c96077c7e08f0709c13a314aa5315981185c1f00235fe65"}, + {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:133f3493253a00db2c870d3740bc458ebb7d937bd0a6a4f9328373e0db305709"}, + {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:52d82b0d436edd6a1d22d94a344b9a58abd6c68c357ed44f22d4ba8179b37629"}, + {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b6f92e35e2658a5ed51c6634ceb5ddae32053182851d8cad2a5bc102a359b33"}, + {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:203b1d3eaebd34277be06a3eb880050f18a4e4d60861efba4fb946e31071a295"}, + {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:155e1a5693cf4b55af652f5c0f78ef36596c7f680ff3ec6eb4d7d85367259b2c"}, + {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:22ec2b3c191f43ed21f9545e9df94c37c6b49a5af0a874008ddc9132d49a2d9c"}, + {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7eda194dd46e40ec745bf76795a7cccb02a6a41f445ad49d3cf66518b0bd9cff"}, + {file = "lxml-5.3.1-cp311-cp311-win32.whl", hash = "sha256:fb7c61d4be18e930f75948705e9718618862e6fc2ed0d7159b2262be73f167a2"}, + {file = "lxml-5.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:c809eef167bf4a57af4b03007004896f5c60bd38dc3852fcd97a26eae3d4c9e6"}, + {file = "lxml-5.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e69add9b6b7b08c60d7ff0152c7c9a6c45b4a71a919be5abde6f98f1ea16421c"}, + {file = "lxml-5.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4e52e1b148867b01c05e21837586ee307a01e793b94072d7c7b91d2c2da02ffe"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4b382e0e636ed54cd278791d93fe2c4f370772743f02bcbe431a160089025c9"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2e49dc23a10a1296b04ca9db200c44d3eb32c8d8ec532e8c1fd24792276522a"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4399b4226c4785575fb20998dc571bc48125dc92c367ce2602d0d70e0c455eb0"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5412500e0dc5481b1ee9cf6b38bb3b473f6e411eb62b83dc9b62699c3b7b79f7"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c93ed3c998ea8472be98fb55aed65b5198740bfceaec07b2eba551e55b7b9ae"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:63d57fc94eb0bbb4735e45517afc21ef262991d8758a8f2f05dd6e4174944519"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:b450d7cabcd49aa7ab46a3c6aa3ac7e1593600a1a0605ba536ec0f1b99a04322"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:4df0ec814b50275ad6a99bc82a38b59f90e10e47714ac9871e1b223895825468"}, + {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d184f85ad2bb1f261eac55cddfcf62a70dee89982c978e92b9a74a1bfef2e367"}, + {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b725e70d15906d24615201e650d5b0388b08a5187a55f119f25874d0103f90dd"}, + {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a31fa7536ec1fb7155a0cd3a4e3d956c835ad0a43e3610ca32384d01f079ea1c"}, + {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:3c3c8b55c7fc7b7e8877b9366568cc73d68b82da7fe33d8b98527b73857a225f"}, + {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d61ec60945d694df806a9aec88e8f29a27293c6e424f8ff91c80416e3c617645"}, + {file = "lxml-5.3.1-cp312-cp312-win32.whl", hash = "sha256:f4eac0584cdc3285ef2e74eee1513a6001681fd9753b259e8159421ed28a72e5"}, + {file = "lxml-5.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:29bfc8d3d88e56ea0a27e7c4897b642706840247f59f4377d81be8f32aa0cfbf"}, + {file = "lxml-5.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c093c7088b40d8266f57ed71d93112bd64c6724d31f0794c1e52cc4857c28e0e"}, + {file = "lxml-5.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b0884e3f22d87c30694e625b1e62e6f30d39782c806287450d9dc2fdf07692fd"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1637fa31ec682cd5760092adfabe86d9b718a75d43e65e211d5931809bc111e7"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a364e8e944d92dcbf33b6b494d4e0fb3499dcc3bd9485beb701aa4b4201fa414"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:779e851fd0e19795ccc8a9bb4d705d6baa0ef475329fe44a13cf1e962f18ff1e"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c4393600915c308e546dc7003d74371744234e8444a28622d76fe19b98fa59d1"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:673b9d8e780f455091200bba8534d5f4f465944cbdd61f31dc832d70e29064a5"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2e4a570f6a99e96c457f7bec5ad459c9c420ee80b99eb04cbfcfe3fc18ec6423"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:71f31eda4e370f46af42fc9f264fafa1b09f46ba07bdbee98f25689a04b81c20"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:42978a68d3825eaac55399eb37a4d52012a205c0c6262199b8b44fcc6fd686e8"}, + {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:8b1942b3e4ed9ed551ed3083a2e6e0772de1e5e3aca872d955e2e86385fb7ff9"}, + {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:85c4f11be9cf08917ac2a5a8b6e1ef63b2f8e3799cec194417e76826e5f1de9c"}, + {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:231cf4d140b22a923b1d0a0a4e0b4f972e5893efcdec188934cc65888fd0227b"}, + {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5865b270b420eda7b68928d70bb517ccbe045e53b1a428129bb44372bf3d7dd5"}, + {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dbf7bebc2275016cddf3c997bf8a0f7044160714c64a9b83975670a04e6d2252"}, + {file = "lxml-5.3.1-cp313-cp313-win32.whl", hash = "sha256:d0751528b97d2b19a388b302be2a0ee05817097bab46ff0ed76feeec24951f78"}, + {file = "lxml-5.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:91fb6a43d72b4f8863d21f347a9163eecbf36e76e2f51068d59cd004c506f332"}, + {file = "lxml-5.3.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:016b96c58e9a4528219bb563acf1aaaa8bc5452e7651004894a973f03b84ba81"}, + {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82a4bb10b0beef1434fb23a09f001ab5ca87895596b4581fd53f1e5145a8934a"}, + {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3d68eeef7b4d08a25e51897dac29bcb62aba830e9ac6c4e3297ee7c6a0cf6439"}, + {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:f12582b8d3b4c6be1d298c49cb7ae64a3a73efaf4c2ab4e37db182e3545815ac"}, + {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2df7ed5edeb6bd5590914cd61df76eb6cce9d590ed04ec7c183cf5509f73530d"}, + {file = "lxml-5.3.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:585c4dc429deebc4307187d2b71ebe914843185ae16a4d582ee030e6cfbb4d8a"}, + {file = "lxml-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:06a20d607a86fccab2fc15a77aa445f2bdef7b49ec0520a842c5c5afd8381576"}, + {file = "lxml-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:057e30d0012439bc54ca427a83d458752ccda725c1c161cc283db07bcad43cf9"}, + {file = "lxml-5.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4867361c049761a56bd21de507cab2c2a608c55102311d142ade7dab67b34f32"}, + {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3dddf0fb832486cc1ea71d189cb92eb887826e8deebe128884e15020bb6e3f61"}, + {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bcc211542f7af6f2dfb705f5f8b74e865592778e6cafdfd19c792c244ccce19"}, + {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaca5a812f050ab55426c32177091130b1e49329b3f002a32934cd0245571307"}, + {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:236610b77589faf462337b3305a1be91756c8abc5a45ff7ca8f245a71c5dab70"}, + {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:aed57b541b589fa05ac248f4cb1c46cbb432ab82cbd467d1c4f6a2bdc18aecf9"}, + {file = "lxml-5.3.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:75fa3d6946d317ffc7016a6fcc44f42db6d514b7fdb8b4b28cbe058303cb6e53"}, + {file = "lxml-5.3.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:96eef5b9f336f623ffc555ab47a775495e7e8846dde88de5f941e2906453a1ce"}, + {file = "lxml-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:ef45f31aec9be01379fc6c10f1d9c677f032f2bac9383c827d44f620e8a88407"}, + {file = "lxml-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0611da6b07dd3720f492db1b463a4d1175b096b49438761cc9f35f0d9eaaef5"}, + {file = "lxml-5.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2aca14c235c7a08558fe0a4786a1a05873a01e86b474dfa8f6df49101853a4e"}, + {file = "lxml-5.3.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82fce1d964f065c32c9517309f0c7be588772352d2f40b1574a214bd6e6098"}, + {file = "lxml-5.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7aae7a3d63b935babfdc6864b31196afd5145878ddd22f5200729006366bc4d5"}, + {file = "lxml-5.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8e0d177b1fe251c3b1b914ab64135475c5273c8cfd2857964b2e3bb0fe196a7"}, + {file = "lxml-5.3.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:6c4dd3bfd0c82400060896717dd261137398edb7e524527438c54a8c34f736bf"}, + {file = "lxml-5.3.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f1208c1c67ec9e151d78aa3435aa9b08a488b53d9cfac9b699f15255a3461ef2"}, + {file = "lxml-5.3.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c6aacf00d05b38a5069826e50ae72751cb5bc27bdc4d5746203988e429b385bb"}, + {file = "lxml-5.3.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5881aaa4bf3a2d086c5f20371d3a5856199a0d8ac72dd8d0dbd7a2ecfc26ab73"}, + {file = 
"lxml-5.3.1-cp38-cp38-win32.whl", hash = "sha256:45fbb70ccbc8683f2fb58bea89498a7274af1d9ec7995e9f4af5604e028233fc"}, + {file = "lxml-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:7512b4d0fc5339d5abbb14d1843f70499cab90d0b864f790e73f780f041615d7"}, + {file = "lxml-5.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5885bc586f1edb48e5d68e7a4b4757b5feb2a496b64f462b4d65950f5af3364f"}, + {file = "lxml-5.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1b92fe86e04f680b848fff594a908edfa72b31bfc3499ef7433790c11d4c8cd8"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a091026c3bf7519ab1e64655a3f52a59ad4a4e019a6f830c24d6430695b1cf6a"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ffb141361108e864ab5f1813f66e4e1164181227f9b1f105b042729b6c15125"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3715cdf0dd31b836433af9ee9197af10e3df41d273c19bb249230043667a5dfd"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88b72eb7222d918c967202024812c2bfb4048deeb69ca328363fb8e15254c549"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa59974880ab5ad8ef3afaa26f9bda148c5f39e06b11a8ada4660ecc9fb2feb3"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3bb8149840daf2c3f97cebf00e4ed4a65a0baff888bf2605a8d0135ff5cf764e"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:0d6b2fa86becfa81f0a0271ccb9eb127ad45fb597733a77b92e8a35e53414914"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:136bf638d92848a939fd8f0e06fcf92d9f2e4b57969d94faae27c55f3d85c05b"}, + {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:89934f9f791566e54c1d92cdc8f8fd0009447a5ecdb1ec6b810d5f8c4955f6be"}, + {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8ade0363f776f87f982572c2860cc43c65ace208db49c76df0a21dde4ddd16e"}, + {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:bfbbab9316330cf81656fed435311386610f78b6c93cc5db4bebbce8dd146675"}, + {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:172d65f7c72a35a6879217bcdb4bb11bc88d55fb4879e7569f55616062d387c2"}, + {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e3c623923967f3e5961d272718655946e5322b8d058e094764180cdee7bab1af"}, + {file = "lxml-5.3.1-cp39-cp39-win32.whl", hash = "sha256:ce0930a963ff593e8bb6fda49a503911accc67dee7e5445eec972668e672a0f0"}, + {file = "lxml-5.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:f7b64fcd670bca8800bc10ced36620c6bbb321e7bc1214b9c0c0df269c1dddc2"}, + {file = "lxml-5.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:afa578b6524ff85fb365f454cf61683771d0170470c48ad9d170c48075f86725"}, + {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f5e80adf0aafc7b5454f2c1cb0cde920c9b1f2cbd0485f07cc1d0497c35c5d"}, + {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dd0b80ac2d8f13ffc906123a6f20b459cb50a99222d0da492360512f3e50f84"}, + {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:422c179022ecdedbe58b0e242607198580804253da220e9454ffe848daa1cfd2"}, + {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:524ccfded8989a6595dbdda80d779fb977dbc9a7bc458864fc9a0c2fc15dc877"}, + {file = "lxml-5.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48fd46bf7155def2e15287c6f2b133a2f78e2d22cdf55647269977b873c65499"}, + {file = "lxml-5.3.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:05123fad495a429f123307ac6d8fd6f977b71e9a0b6d9aeeb8f80c017cb17131"}, + {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a243132767150a44e6a93cd1dde41010036e1cbc63cc3e9fe1712b277d926ce3"}, + {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c92ea6d9dd84a750b2bae72ff5e8cf5fdd13e58dda79c33e057862c29a8d5b50"}, + {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2f1be45d4c15f237209bbf123a0e05b5d630c8717c42f59f31ea9eae2ad89394"}, + {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a83d3adea1e0ee36dac34627f78ddd7f093bb9cfc0a8e97f1572a949b695cb98"}, + {file = "lxml-5.3.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:3edbb9c9130bac05d8c3fe150c51c337a471cc7fdb6d2a0a7d3a88e88a829314"}, + {file = "lxml-5.3.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2f23cf50eccb3255b6e913188291af0150d89dab44137a69e14e4dcb7be981f1"}, + {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df7e5edac4778127f2bf452e0721a58a1cfa4d1d9eac63bdd650535eb8543615"}, + {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:094b28ed8a8a072b9e9e2113a81fda668d2053f2ca9f2d202c2c8c7c2d6516b1"}, + {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:514fe78fc4b87e7a7601c92492210b20a1b0c6ab20e71e81307d9c2e377c64de"}, + {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8fffc08de02071c37865a155e5ea5fce0282e1546fd5bde7f6149fcaa32558ac"}, + {file = "lxml-5.3.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4b0d5cdba1b655d5b18042ac9c9ff50bda33568eb80feaaca4fc237b9c4fbfde"}, + {file = "lxml-5.3.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3031e4c16b59424e8d78522c69b062d301d951dc55ad8685736c3335a97fc270"}, + {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb659702a45136c743bc130760c6f137870d4df3a9e14386478b8a0511abcfca"}, + {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a11b16a33656ffc43c92a5343a28dc71eefe460bcc2a4923a96f292692709f6"}, + {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5ae125276f254b01daa73e2c103363d3e99e3e10505686ac7d9d2442dd4627a"}, + {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c76722b5ed4a31ba103e0dc77ab869222ec36efe1a614e42e9bcea88a36186fe"}, + {file = "lxml-5.3.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:33e06717c00c788ab4e79bc4726ecc50c54b9bfb55355eae21473c145d83c2d2"}, + {file = "lxml-5.3.1.tar.gz", hash = "sha256:106b7b5d2977b339f1e97efe2778e2ab20e99994cbb0ec5e55771ed0795920c8"}, ] [package.extras] cssselect = ["cssselect (>=0.7)"] -html-clean = ["lxml-html-clean"] +html-clean = ["lxml_html_clean"] html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=3.0.11)"] +source = ["Cython (>=3.0.11,<3.1.0)"] [[package]] name = "mako" -version = "1.3.6" +version = "1.3.9" description = "A super-fast templating language that borrows the best ideas from the existing templating languages." 
optional = false python-versions = ">=3.8" files = [ - {file = "Mako-1.3.6-py3-none-any.whl", hash = "sha256:a91198468092a2f1a0de86ca92690fb0cfc43ca90ee17e15d93662b4c04b241a"}, - {file = "mako-1.3.6.tar.gz", hash = "sha256:9ec3a1583713479fae654f83ed9fa8c9a4c16b7bb0daba0e6bbebff50c0d983d"}, + {file = "Mako-1.3.9-py3-none-any.whl", hash = "sha256:95920acccb578427a9aa38e37a186b1e43156c87260d7ba18ca63aa4c7cbd3a1"}, + {file = "mako-1.3.9.tar.gz", hash = "sha256:b5d65ff3462870feec922dbccf38f6efb44e5714d7b593a656be86663d8600ac"}, ] [package.dependencies] @@ -1635,51 +1624,52 @@ files = [ [[package]] name = "matplotlib" -version = "3.9.2" +version = "3.9.4" description = "Python plotting package" optional = true python-versions = ">=3.9" files = [ - {file = "matplotlib-3.9.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9d78bbc0cbc891ad55b4f39a48c22182e9bdaea7fc0e5dbd364f49f729ca1bbb"}, - {file = "matplotlib-3.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c375cc72229614632c87355366bdf2570c2dac01ac66b8ad048d2dabadf2d0d4"}, - {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d94ff717eb2bd0b58fe66380bd8b14ac35f48a98e7c6765117fe67fb7684e64"}, - {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab68d50c06938ef28681073327795c5db99bb4666214d2d5f880ed11aeaded66"}, - {file = "matplotlib-3.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:65aacf95b62272d568044531e41de26285d54aec8cb859031f511f84bd8b495a"}, - {file = "matplotlib-3.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:3fd595f34aa8a55b7fc8bf9ebea8aa665a84c82d275190a61118d33fbc82ccae"}, - {file = "matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772"}, - {file = "matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41"}, - {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f"}, - {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447"}, - {file = "matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e"}, - {file = "matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7"}, - {file = "matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9"}, - {file = "matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d"}, - {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7"}, - {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c"}, - {file = "matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e"}, - {file = "matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3"}, - {file = 
"matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9"}, - {file = "matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa"}, - {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b"}, - {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413"}, - {file = "matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b"}, - {file = "matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49"}, - {file = "matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03"}, - {file = "matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30"}, - {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51"}, - {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c"}, - {file = "matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e"}, - {file = "matplotlib-3.9.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cef2a73d06601437be399908cf13aee74e86932a5ccc6ccdf173408ebc5f6bb2"}, - {file = "matplotlib-3.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0830e188029c14e891fadd99702fd90d317df294c3298aad682739c5533721a"}, - {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ba9c1299c920964e8d3857ba27173b4dbb51ca4bab47ffc2c2ba0eb5e2cbc5"}, - {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd93b91ab47a3616b4d3c42b52f8363b88ca021e340804c6ab2536344fad9ca"}, - {file = "matplotlib-3.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6d1ce5ed2aefcdce11904fc5bbea7d9c21fff3d5f543841edf3dea84451a09ea"}, - {file = "matplotlib-3.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:b2696efdc08648536efd4e1601b5fd491fd47f4db97a5fbfd175549a7365c1b2"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d52a3b618cb1cbb769ce2ee1dcdb333c3ab6e823944e9a2d36e37253815f9556"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:039082812cacd6c6bec8e17a9c1e6baca230d4116d522e81e1f63a74d01d2e21"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6758baae2ed64f2331d4fd19be38b7b4eae3ecec210049a26b6a4f3ae1c85dcc"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:050598c2b29e0b9832cde72bcf97627bf00262adbc4a54e2b856426bb2ef0697"}, - {file = "matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92"}, + {file = "matplotlib-3.9.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c5fdd7abfb706dfa8d307af64a87f1a862879ec3cd8d0ec8637458f0885b9c50"}, + {file = 
"matplotlib-3.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d89bc4e85e40a71d1477780366c27fb7c6494d293e1617788986f74e2a03d7ff"}, + {file = "matplotlib-3.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddf9f3c26aae695c5daafbf6b94e4c1a30d6cd617ba594bbbded3b33a1fcfa26"}, + {file = "matplotlib-3.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18ebcf248030173b59a868fda1fe42397253f6698995b55e81e1f57431d85e50"}, + {file = "matplotlib-3.9.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974896ec43c672ec23f3f8c648981e8bc880ee163146e0312a9b8def2fac66f5"}, + {file = "matplotlib-3.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:4598c394ae9711cec135639374e70871fa36b56afae17bdf032a345be552a88d"}, + {file = "matplotlib-3.9.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4dd29641d9fb8bc4492420c5480398dd40a09afd73aebe4eb9d0071a05fbe0c"}, + {file = "matplotlib-3.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30e5b22e8bcfb95442bf7d48b0d7f3bdf4a450cbf68986ea45fca3d11ae9d099"}, + {file = "matplotlib-3.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bb0030d1d447fd56dcc23b4c64a26e44e898f0416276cac1ebc25522e0ac249"}, + {file = "matplotlib-3.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca90ed222ac3565d2752b83dbb27627480d27662671e4d39da72e97f657a423"}, + {file = "matplotlib-3.9.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a181b2aa2906c608fcae72f977a4a2d76e385578939891b91c2550c39ecf361e"}, + {file = "matplotlib-3.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:1f6882828231eca17f501c4dcd98a05abb3f03d157fbc0769c6911fe08b6cfd3"}, + {file = "matplotlib-3.9.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:dfc48d67e6661378a21c2983200a654b72b5c5cdbd5d2cf6e5e1ece860f0cc70"}, + {file = "matplotlib-3.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:47aef0fab8332d02d68e786eba8113ffd6f862182ea2999379dec9e237b7e483"}, + {file = "matplotlib-3.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fba1f52c6b7dc764097f52fd9ab627b90db452c9feb653a59945de16752e965f"}, + {file = "matplotlib-3.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173ac3748acaac21afcc3fa1633924609ba1b87749006bc25051c52c422a5d00"}, + {file = "matplotlib-3.9.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320edea0cadc07007765e33f878b13b3738ffa9745c5f707705692df70ffe0e0"}, + {file = "matplotlib-3.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a4a4cfc82330b27042a7169533da7991e8789d180dd5b3daeaee57d75cd5a03b"}, + {file = "matplotlib-3.9.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37eeffeeca3c940985b80f5b9a7b95ea35671e0e7405001f249848d2b62351b6"}, + {file = "matplotlib-3.9.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3e7465ac859ee4abcb0d836137cd8414e7bb7ad330d905abced457217d4f0f45"}, + {file = "matplotlib-3.9.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4c12302c34afa0cf061bea23b331e747e5e554b0fa595c96e01c7b75bc3b858"}, + {file = "matplotlib-3.9.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8c97917f21b75e72108b97707ba3d48f171541a74aa2a56df7a40626bafc64"}, + {file = "matplotlib-3.9.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0229803bd7e19271b03cb09f27db76c918c467aa4ce2ae168171bc67c3f508df"}, + {file = "matplotlib-3.9.4-cp313-cp313-win_amd64.whl", hash = 
"sha256:7c0d8ef442ebf56ff5e206f8083d08252ee738e04f3dc88ea882853a05488799"}, + {file = "matplotlib-3.9.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a04c3b00066a688834356d196136349cb32f5e1003c55ac419e91585168b88fb"}, + {file = "matplotlib-3.9.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:04c519587f6c210626741a1e9a68eefc05966ede24205db8982841826af5871a"}, + {file = "matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308afbf1a228b8b525fcd5cec17f246bbbb63b175a3ef6eb7b4d33287ca0cf0c"}, + {file = "matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddb3b02246ddcffd3ce98e88fed5b238bc5faff10dbbaa42090ea13241d15764"}, + {file = "matplotlib-3.9.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8a75287e9cb9eee48cb79ec1d806f75b29c0fde978cb7223a1f4c5848d696041"}, + {file = "matplotlib-3.9.4-cp313-cp313t-win_amd64.whl", hash = "sha256:488deb7af140f0ba86da003e66e10d55ff915e152c78b4b66d231638400b1965"}, + {file = "matplotlib-3.9.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3c3724d89a387ddf78ff88d2a30ca78ac2b4c89cf37f2db4bd453c34799e933c"}, + {file = "matplotlib-3.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d5f0a8430ffe23d7e32cfd86445864ccad141797f7d25b7c41759a5b5d17cfd7"}, + {file = "matplotlib-3.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bb0141a21aef3b64b633dc4d16cbd5fc538b727e4958be82a0e1c92a234160e"}, + {file = "matplotlib-3.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57aa235109e9eed52e2c2949db17da185383fa71083c00c6c143a60e07e0888c"}, + {file = "matplotlib-3.9.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b18c600061477ccfdd1e6fd050c33d8be82431700f3452b297a56d9ed7037abb"}, + {file = "matplotlib-3.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:ef5f2d1b67d2d2145ff75e10f8c008bfbf71d45137c4b648c87193e7dd053eac"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:44e0ed786d769d85bc787b0606a53f2d8d2d1d3c8a2608237365e9121c1a338c"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:09debb9ce941eb23ecdbe7eab972b1c3e0276dcf01688073faff7b0f61d6c6ca"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc53cf157a657bfd03afab14774d54ba73aa84d42cfe2480c91bd94873952db"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ad45da51be7ad02387801fd154ef74d942f49fe3fcd26a64c94842ba7ec0d865"}, + {file = "matplotlib-3.9.4.tar.gz", hash = "sha256:1e00e8be7393cbdc6fedfa8a6fba02cf3e83814b285db1c60b906a023ba41bc3"}, ] [package.dependencies] @@ -1695,7 +1685,7 @@ pyparsing = ">=2.3.1" python-dateutil = ">=2.7" [package.extras] -dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6)", "setuptools (>=64)", "setuptools_scm (>=7)"] +dev = ["meson-python (>=0.13.1,<0.17.0)", "numpy (>=1.25)", "pybind11 (>=2.6,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] [[package]] name = "matplotlib-inline" @@ -1722,17 +1712,6 @@ files = [ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, ] -[[package]] -name = "more-itertools" -version = "9.1.0" -description = "More routines for operating on iterables, beyond itertools" -optional = false -python-versions = ">=3.7" -files = [ - {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"}, - {file = 
"more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"}, -] - [[package]] name = "mpmath" version = "1.3.0" @@ -1846,13 +1825,13 @@ tests = ["pytest (>=6.0)", "pyyaml"] [[package]] name = "optuna" -version = "4.1.0" +version = "4.2.1" description = "A hyperparameter optimization framework" optional = false python-versions = ">=3.8" files = [ - {file = "optuna-4.1.0-py3-none-any.whl", hash = "sha256:1763856b01c9238594d9d21db92611aac9980e9a6300bd658a7c6464712c704e"}, - {file = "optuna-4.1.0.tar.gz", hash = "sha256:b364e87a2038f9946c5e2770c130597538aac528b4a82c1cab5267f337ea7679"}, + {file = "optuna-4.2.1-py3-none-any.whl", hash = "sha256:6d38199013441d3f70fac27136e05c0188c5f4ec3848db708ac311cbdeb30dbf"}, + {file = "optuna-4.2.1.tar.gz", hash = "sha256:2ecd74cdc8aaf5dda1f2b9e267999bab21def9a33e0a4f415ecae0c468c401e0"}, ] [package.dependencies] @@ -1866,10 +1845,10 @@ tqdm = "*" [package.extras] benchmark = ["asv (>=0.5.0)", "cma", "virtualenv"] -checking = ["black", "blackdoc", "flake8", "isort", "mypy", "mypy-boto3-s3", "types-PyYAML", "types-redis", "types-setuptools", "types-tqdm", "typing-extensions (>=3.10.0.0)"] -document = ["ase", "cmaes (>=0.10.0)", "fvcore", "kaleido", "lightgbm", "matplotlib (!=3.6.0)", "pandas", "pillow", "plotly (>=4.9.0)", "scikit-learn", "sphinx", "sphinx-copybutton", "sphinx-gallery", "sphinx-rtd-theme (>=1.2.0)", "torch", "torchvision"] -optional = ["boto3", "cmaes (>=0.10.0)", "google-cloud-storage", "matplotlib (!=3.6.0)", "pandas", "plotly (>=4.9.0)", "redis", "scikit-learn (>=0.24.2)", "scipy", "torch"] -test = ["coverage", "fakeredis[lua]", "kaleido", "moto", "pytest", "scipy (>=1.9.2)", "torch"] +checking = ["black", "blackdoc", "flake8", "isort", "mypy", "mypy_boto3_s3", "types-PyYAML", "types-redis", "types-setuptools", "types-tqdm", "typing_extensions (>=3.10.0.0)"] +document = ["ase", "cmaes (>=0.10.0)", "fvcore", "kaleido (<0.4)", "lightgbm", "matplotlib (!=3.6.0)", "pandas", "pillow", "plotly (>=4.9.0)", "scikit-learn", "sphinx", "sphinx-copybutton", "sphinx-gallery", "sphinx-notfound-page", "sphinx_rtd_theme (>=1.2.0)", "torch", "torchvision"] +optional = ["boto3", "cmaes (>=0.10.0)", "google-cloud-storage", "grpcio", "matplotlib (!=3.6.0)", "pandas", "plotly (>=4.9.0)", "protobuf (>=5.28.1)", "redis", "scikit-learn (>=0.24.2)", "scipy", "torch"] +test = ["coverage", "fakeredis[lua]", "grpcio", "kaleido (<0.4)", "moto", "protobuf (>=5.28.1)", "pytest", "scipy (>=1.9.2)", "torch"] [[package]] name = "packaging" @@ -1940,6 +1919,7 @@ lxml = {version = ">=4.9.2", optional = true, markers = "extra == \"html\""} numpy = [ {version = ">=1.22.4", markers = "python_version < \"3.11\""}, {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -2021,93 +2001,89 @@ ptyprocess = ">=0.5" [[package]] name = "pillow" -version = "11.0.0" +version = "11.1.0" description = "Python Imaging Library (Fork)" optional = true python-versions = ">=3.9" files = [ - {file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"}, - {file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"}, - {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"}, - {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"}, - {file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"}, - {file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"}, - {file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"}, - {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, - {file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, - {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, - {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, - {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, - {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, - {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, - {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, - {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, - {file = 
"pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, - {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, - {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, - {file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, - {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, - {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, - {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, - {file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, - {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, - {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, - {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, - {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, - {file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, - {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, - {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, - {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, - {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, - {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, - {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, - {file = 
"pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, - {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, - {file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"}, - {file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"}, - {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"}, - {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"}, - {file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"}, - {file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"}, - {file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"}, - {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, + {file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"}, + {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482"}, + {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e"}, + {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269"}, + {file = "pillow-11.1.0-cp310-cp310-win32.whl", hash = "sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49"}, + {file = "pillow-11.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a"}, + {file = "pillow-11.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65"}, + {file = "pillow-11.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457"}, + {file = "pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1"}, + {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2"}, + {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96"}, + {file = "pillow-11.1.0-cp311-cp311-win32.whl", hash = "sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f"}, + {file = "pillow-11.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761"}, + {file = "pillow-11.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71"}, + {file = "pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a"}, + {file = "pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f"}, + {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91"}, + {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c"}, + {file = "pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6"}, + {file = "pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf"}, + {file = "pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5"}, + {file = "pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc"}, + {file = "pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114"}, + {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352"}, + {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3"}, + {file = "pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9"}, + {file = "pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c"}, + {file = "pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65"}, + {file = "pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861"}, + {file = "pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081"}, + {file = 
"pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c"}, + {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547"}, + {file = "pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab"}, + {file = "pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9"}, + {file = "pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe"}, + {file = "pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756"}, + {file = "pillow-11.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6"}, + {file = "pillow-11.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884"}, + {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196"}, + {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8"}, + {file = "pillow-11.1.0-cp39-cp39-win32.whl", hash = "sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5"}, + {file = "pillow-11.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f"}, + {file = "pillow-11.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0"}, + {file = "pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20"}, ] [package.extras] docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] typing = ["typing-extensions"] xmp = ["defusedxml"] @@ -2159,13 +2135,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "prompt-toolkit" -version = "3.0.48" +version = "3.0.50" description = "Library for building powerful interactive command lines in Python" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, - {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, + {file = "prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198"}, + {file = "prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab"}, ] [package.dependencies] @@ -2232,18 +2208,18 @@ pybtex = ">=0.16" [[package]] name = "pydantic" -version = "2.10.2" +version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, - {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.1" +pydantic-core = "2.27.2" typing-extensions = ">=4.12.2" [package.extras] @@ -2252,111 +2228,111 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.27.1" +version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, 
- {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, - {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, - {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, - {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, - {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, - {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, - {file = 
"pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, - {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, - {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, - {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, - {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, - {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, - {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, - {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, - {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, - {file = 
"pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, - {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, - {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, - {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = 
"pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = 
"sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] @@ -2378,13 +2354,13 @@ pyparsing = ">=2.1.4" [[package]] name = "pygments" -version = "2.18.0" +version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" files = [ - {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, - {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, + {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, + {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, ] [package.extras] @@ -2421,13 +2397,13 @@ testutils = ["gitpython (>3)"] [[package]] name = "pyparsing" -version = "3.2.0" +version = "3.2.1" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = true python-versions = ">=3.9" files = [ - {file = "pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84"}, - {file = "pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c"}, + {file = "pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1"}, + {file = "pyparsing-3.2.1.tar.gz", hash = "sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a"}, ] [package.extras] @@ -2543,13 +2519,13 @@ six = ">=1.5" [[package]] name = "pytz" -version = "2024.2" +version = "2025.1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, - {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, + {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, + {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, ] [[package]] @@ -2616,13 +2592,13 @@ files = [ [[package]] name = "qibo" -version = "0.2.13" +version = "0.2.16" description = "A framework for quantum computing with hardware acceleration." 
optional = false python-versions = "<3.13,>=3.9" files = [ - {file = "qibo-0.2.13-py3-none-any.whl", hash = "sha256:2c67234fdbdd7bfceed4df0fe8d3d9bede9354c4e18b2c061098b002c665e0f3"}, - {file = "qibo-0.2.13.tar.gz", hash = "sha256:3a815f2262b4d38d57127653df83dbbcbe7e941e8fb9a53c8a107f303b270dc4"}, + {file = "qibo-0.2.16-py3-none-any.whl", hash = "sha256:6a854e144dc0384fc6ee472ec0467c90f23661701a96821ad55fe1e33e386603"}, + {file = "qibo-0.2.16.tar.gz", hash = "sha256:3063fae1c92314c93b179f220e09406e8efc59876f6b6d5a99a6b583c6d428fd"}, ] [package.dependencies] @@ -2642,28 +2618,30 @@ torch = ["torch (>=2.1.1,<2.4)"] [[package]] name = "qibolab" -version = "0.1.10" +version = "0.2.3" description = "Quantum hardware module and drivers for Qibo" optional = false -python-versions = "<3.12,>=3.9" +python-versions = "<3.13,>=3.9" files = [ - {file = "qibolab-0.1.10-py3-none-any.whl", hash = "sha256:f0ca402148acd3f97aab22440d2f9c0a93774b28216a3d2e3ad14d5a912a4476"}, - {file = "qibolab-0.1.10.tar.gz", hash = "sha256:2541ded6ce3d7071c4c5be6714266e23b5c86c5e3dde1657c82382246f05b81a"}, + {file = "qibolab-0.2.3-py3-none-any.whl", hash = "sha256:75a8c69b29359d057be0feabea8b0f564201914853ea08339d0e33cd6fe32e0b"}, + {file = "qibolab-0.2.3.tar.gz", hash = "sha256:8a4f4ac04b2f45b499372b0e0300a2eb84959f51b30b794ca38832d413bf22c4"}, ] [package.dependencies] -more-itertools = ">=9.1.0,<10.0.0" -networkx = ">=3.0,<4.0" numpy = ">=1.26.4,<2.0.0" -qibo = ">=0.2.6" -setuptools = ">67.0.0" +pydantic = ">=2.6.4,<3.0.0" +qibo = ">=0.2.8,<0.3.0" +scipy = ">=1.13.0,<2.0.0" [package.extras] -emulator = ["qutip (==4.7.5)", "scipy (<1.13.0)"] +bluefors = ["pyyaml (>=6.0.2,<7.0.0)"] +emulator = ["qutip (>=5.0.2,<6.0.0)"] los = ["pyvisa-py (==0.5.3)", "qcodes (>=0.37.0,<0.38.0)", "qcodes_contrib_drivers (==0.18.0)"] qblox = ["pyvisa-py (==0.5.3)", "qblox-instruments (==0.12.0)", "qcodes (>=0.37.0,<0.38.0)", "qcodes_contrib_drivers (==0.18.0)"] -qm = ["qm-qua (==1.1.6)", "qualang-tools (>=0.15.0,<0.16.0)"] +qm = ["qm-qua (==1.2.1)"] +qrng = ["pyserial (>=3.5,<4.0)"] rfsoc = ["qibosoq (>=0.1.2,<0.2)"] +twpa = ["pyvisa-py (==0.5.3)", "qcodes (>=0.37.0,<0.38.0)", "qcodes_contrib_drivers (==0.18.0)"] zh = ["laboneq (==2.25.0)"] [[package]] @@ -2719,32 +2697,41 @@ six = ">=1.7.0" [[package]] name = "scikit-learn" -version = "1.5.2" +version = "1.6.1" description = "A set of python modules for machine learning and data mining" optional = false python-versions = ">=3.9" files = [ - {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"}, - {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"}, - {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"}, - {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"}, - {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"}, - {file = "scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"}, - {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = 
"sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"}, - {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"}, - {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"}, - {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"}, - {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"}, - {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"}, - {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, - {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, - {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, - {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, - {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, - {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, - {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca64b3089a6d9b9363cd3546f8978229dcbb737aceb2c12144ee3f70f95684b7"}, - {file = "scikit_learn-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:3bed4909ba187aca80580fe2ef370d9180dcf18e621a27c4cf2ef10d279a7efe"}, - {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, + {file = "scikit_learn-1.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d056391530ccd1e501056160e3c9673b4da4805eb67eb2bdf4e983e1f9c9204e"}, + {file = "scikit_learn-1.6.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:0c8d036eb937dbb568c6242fa598d551d88fb4399c0344d95c001980ec1c7d36"}, + {file = "scikit_learn-1.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8634c4bd21a2a813e0a7e3900464e6d593162a29dd35d25bdf0103b3fce60ed5"}, + {file = "scikit_learn-1.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:775da975a471c4f6f467725dff0ced5c7ac7bda5e9316b260225b48475279a1b"}, + {file = "scikit_learn-1.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:8a600c31592bd7dab31e1c61b9bbd6dea1b3433e67d264d17ce1017dbdce8002"}, + {file = "scikit_learn-1.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:72abc587c75234935e97d09aa4913a82f7b03ee0b74111dcc2881cba3c5a7b33"}, + {file = "scikit_learn-1.6.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:b3b00cdc8f1317b5f33191df1386c0befd16625f49d979fe77a8d44cae82410d"}, + {file = "scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc4765af3386811c3ca21638f63b9cf5ecf66261cc4815c1db3f1e7dc7b79db2"}, + {file = 
"scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25fc636bdaf1cc2f4a124a116312d837148b5e10872147bdaf4887926b8c03d8"}, + {file = "scikit_learn-1.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:fa909b1a36e000a03c382aade0bd2063fd5680ff8b8e501660c0f59f021a6415"}, + {file = "scikit_learn-1.6.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:926f207c804104677af4857b2c609940b743d04c4c35ce0ddc8ff4f053cddc1b"}, + {file = "scikit_learn-1.6.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c2cae262064e6a9b77eee1c8e768fc46aa0b8338c6a8297b9b6759720ec0ff2"}, + {file = "scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1061b7c028a8663fb9a1a1baf9317b64a257fcb036dae5c8752b2abef31d136f"}, + {file = "scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e69fab4ebfc9c9b580a7a80111b43d214ab06250f8a7ef590a4edf72464dd86"}, + {file = "scikit_learn-1.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:70b1d7e85b1c96383f872a519b3375f92f14731e279a7b4c6cfd650cf5dffc52"}, + {file = "scikit_learn-1.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ffa1e9e25b3d93990e74a4be2c2fc61ee5af85811562f1288d5d055880c4322"}, + {file = "scikit_learn-1.6.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:dc5cf3d68c5a20ad6d571584c0750ec641cc46aeef1c1507be51300e6003a7e1"}, + {file = "scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c06beb2e839ecc641366000ca84f3cf6fa9faa1777e29cf0c04be6e4d096a348"}, + {file = "scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8ca8cb270fee8f1f76fa9bfd5c3507d60c6438bbee5687f81042e2bb98e5a97"}, + {file = "scikit_learn-1.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:7a1c43c8ec9fde528d664d947dc4c0789be4077a3647f232869f41d9bf50e0fb"}, + {file = "scikit_learn-1.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a17c1dea1d56dcda2fac315712f3651a1fea86565b64b48fa1bc090249cbf236"}, + {file = "scikit_learn-1.6.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a7aa5f9908f0f28f4edaa6963c0a6183f1911e63a69aa03782f0d924c830a35"}, + {file = "scikit_learn-1.6.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0650e730afb87402baa88afbf31c07b84c98272622aaba002559b614600ca691"}, + {file = "scikit_learn-1.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:3f59fe08dc03ea158605170eb52b22a105f238a5d512c4470ddeca71feae8e5f"}, + {file = "scikit_learn-1.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6849dd3234e87f55dce1db34c89a810b489ead832aaf4d4550b7ea85628be6c1"}, + {file = "scikit_learn-1.6.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e7be3fa5d2eb9be7d77c3734ff1d599151bb523674be9b834e8da6abe132f44e"}, + {file = "scikit_learn-1.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44a17798172df1d3c1065e8fcf9019183f06c87609b49a124ebdf57ae6cb0107"}, + {file = "scikit_learn-1.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b7a3b86e411e4bce21186e1c180d792f3d99223dcfa3b4f597ecc92fa1a422"}, + {file = "scikit_learn-1.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7a73d457070e3318e32bdb3aa79a8d990474f19035464dfd8bede2883ab5dc3b"}, + {file = "scikit_learn-1.6.1.tar.gz", hash = "sha256:b4fc2525eca2c69a59260f583c56a7557c6ccdf8deafdba6e060f94c1c59738e"}, ] [package.dependencies] @@ -2756,11 +2743,11 @@ threadpoolctl = ">=3.1.0" [package.extras] benchmark = ["matplotlib (>=3.3.4)", "memory_profiler 
(>=0.57.0)", "pandas (>=1.1.5)"] build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.17.1)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)", "towncrier (>=24.8.0)"] examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] maintenance = ["conda-lock (==2.5.6)"] -tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] +tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.5.1)", "scikit-image (>=0.17.2)"] [[package]] name = "scipy" @@ -2827,56 +2814,54 @@ stats = ["scipy (>=1.3)", "statsmodels (>=0.10)"] [[package]] name = "setuptools" -version = "75.6.0" +version = "75.8.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" files = [ - {file = "setuptools-75.6.0-py3-none-any.whl", hash = "sha256:ce74b49e8f7110f9bf04883b730f4765b774ef3ef28f722cce7c273d253aaf7d"}, - {file = "setuptools-75.6.0.tar.gz", hash = "sha256:8199222558df7c86216af4f84c30e9b34a61d8ba19366cc914424cdbd28252f6"}, + {file = "setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3"}, + {file = "setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.7.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", 
"sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (>=1.12,<1.14)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] [[package]] name = "six" -version = "1.16.0" +version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] name = "skops" -version = "0.10.0" -description = "A set of tools to push scikit-learn based models to and pull from Hugging Face Hub" +version = "0.11.0" +description = "A set of tools, related to machine learning in production." 
optional = false python-versions = ">=3.9" files = [ - {file = "skops-0.10.0-py3-none-any.whl", hash = "sha256:2a8a8efe7dca350f920cb0d18a2b79168520ebf28ac7dda78c51c31878583622"}, - {file = "skops-0.10.0.tar.gz", hash = "sha256:95645999976e296a55af5d1c96a4ae5e683dee0c6af711634cd5286a224053b7"}, + {file = "skops-0.11.0-py3-none-any.whl", hash = "sha256:8c6109e27e4d762948cad7d21de008034bd14e15f111e9405c7930e74a7fe8c1"}, + {file = "skops-0.11.0.tar.gz", hash = "sha256:229c867fbc5e669a1c6a88661c3883a14f3591abd9bfa6073df308d63ae1fa3a"}, ] [package.dependencies] huggingface-hub = ">=0.17.0" packaging = ">=17.0" -scikit-learn = ">=0.24" +scikit-learn = ">=1.1" tabulate = ">=0.8.8" [package.extras] -docs = ["fairlearn (>=0.7.0)", "matplotlib (>=3.3)", "numpydoc (>=1.0.0)", "pandas (>=1)", "sphinx (>=3.2.0)", "sphinx-gallery (>=0.7.0)", "sphinx-issues (>=1.2.0)", "sphinx-prompt (>=1.3.0)", "sphinx-rtd-theme (>=1)"] rich = ["rich (>=12)"] -tests = ["catboost (>=1.0)", "fairlearn (>=0.7.0)", "flake8 (>=3.8.2)", "flaky (>=3.7.0)", "lightgbm (>=3)", "matplotlib (>=3.3)", "pandas (>=1)", "pytest (>=7)", "pytest-cov (>=2.9.0)", "quantile-forest (>=1.0.0)", "rich (>=12)", "types-requests (>=2.28.5)", "xgboost (>=1.6)"] [[package]] name = "snowballstemmer" @@ -3098,72 +3083,72 @@ test = ["pytest"] [[package]] name = "sqlalchemy" -version = "2.0.36" +version = "2.0.38" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, - {file = 
"SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, - {file = 
"SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, - {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, - {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, + {file = "SQLAlchemy-2.0.38-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5e1d9e429028ce04f187a9f522818386c8b076723cdbe9345708384f49ebcec6"}, + {file = "SQLAlchemy-2.0.38-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b87a90f14c68c925817423b0424381f0e16d80fc9a1a1046ef202ab25b19a444"}, + {file = "SQLAlchemy-2.0.38-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:402c2316d95ed90d3d3c25ad0390afa52f4d2c56b348f212aa9c8d072a40eee5"}, 
+ {file = "SQLAlchemy-2.0.38-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6493bc0eacdbb2c0f0d260d8988e943fee06089cd239bd7f3d0c45d1657a70e2"}, + {file = "SQLAlchemy-2.0.38-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0561832b04c6071bac3aad45b0d3bb6d2c4f46a8409f0a7a9c9fa6673b41bc03"}, + {file = "SQLAlchemy-2.0.38-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:49aa2cdd1e88adb1617c672a09bf4ebf2f05c9448c6dbeba096a3aeeb9d4d443"}, + {file = "SQLAlchemy-2.0.38-cp310-cp310-win32.whl", hash = "sha256:64aa8934200e222f72fcfd82ee71c0130a9c07d5725af6fe6e919017d095b297"}, + {file = "SQLAlchemy-2.0.38-cp310-cp310-win_amd64.whl", hash = "sha256:c57b8e0841f3fce7b703530ed70c7c36269c6d180ea2e02e36b34cb7288c50c7"}, + {file = "SQLAlchemy-2.0.38-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bf89e0e4a30714b357f5d46b6f20e0099d38b30d45fa68ea48589faf5f12f62d"}, + {file = "SQLAlchemy-2.0.38-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8455aa60da49cb112df62b4721bd8ad3654a3a02b9452c783e651637a1f21fa2"}, + {file = "SQLAlchemy-2.0.38-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f53c0d6a859b2db58332e0e6a921582a02c1677cc93d4cbb36fdf49709b327b2"}, + {file = "SQLAlchemy-2.0.38-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3c4817dff8cef5697f5afe5fec6bc1783994d55a68391be24cb7d80d2dbc3a6"}, + {file = "SQLAlchemy-2.0.38-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9cea5b756173bb86e2235f2f871b406a9b9d722417ae31e5391ccaef5348f2c"}, + {file = "SQLAlchemy-2.0.38-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:40e9cdbd18c1f84631312b64993f7d755d85a3930252f6276a77432a2b25a2f3"}, + {file = "SQLAlchemy-2.0.38-cp311-cp311-win32.whl", hash = "sha256:cb39ed598aaf102251483f3e4675c5dd6b289c8142210ef76ba24aae0a8f8aba"}, + {file = "SQLAlchemy-2.0.38-cp311-cp311-win_amd64.whl", hash = "sha256:f9d57f1b3061b3e21476b0ad5f0397b112b94ace21d1f439f2db472e568178ae"}, + {file = "SQLAlchemy-2.0.38-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12d5b06a1f3aeccf295a5843c86835033797fea292c60e72b07bcb5d820e6dd3"}, + {file = "SQLAlchemy-2.0.38-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e036549ad14f2b414c725349cce0772ea34a7ab008e9cd67f9084e4f371d1f32"}, + {file = "SQLAlchemy-2.0.38-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee3bee874cb1fadee2ff2b79fc9fc808aa638670f28b2145074538d4a6a5028e"}, + {file = "SQLAlchemy-2.0.38-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e185ea07a99ce8b8edfc788c586c538c4b1351007e614ceb708fd01b095ef33e"}, + {file = "SQLAlchemy-2.0.38-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b79ee64d01d05a5476d5cceb3c27b5535e6bb84ee0f872ba60d9a8cd4d0e6579"}, + {file = "SQLAlchemy-2.0.38-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:afd776cf1ebfc7f9aa42a09cf19feadb40a26366802d86c1fba080d8e5e74bdd"}, + {file = "SQLAlchemy-2.0.38-cp312-cp312-win32.whl", hash = "sha256:a5645cd45f56895cfe3ca3459aed9ff2d3f9aaa29ff7edf557fa7a23515a3725"}, + {file = "SQLAlchemy-2.0.38-cp312-cp312-win_amd64.whl", hash = "sha256:1052723e6cd95312f6a6eff9a279fd41bbae67633415373fdac3c430eca3425d"}, + {file = "SQLAlchemy-2.0.38-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ecef029b69843b82048c5b347d8e6049356aa24ed644006c9a9d7098c3bd3bfd"}, + {file = "SQLAlchemy-2.0.38-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c8bcad7fc12f0cc5896d8e10fdf703c45bd487294a986903fe032c72201596b"}, + {file = 
"SQLAlchemy-2.0.38-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a0ef3f98175d77180ffdc623d38e9f1736e8d86b6ba70bff182a7e68bed7727"}, + {file = "SQLAlchemy-2.0.38-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b0ac78898c50e2574e9f938d2e5caa8fe187d7a5b69b65faa1ea4648925b096"}, + {file = "SQLAlchemy-2.0.38-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9eb4fa13c8c7a2404b6a8e3772c17a55b1ba18bc711e25e4d6c0c9f5f541b02a"}, + {file = "SQLAlchemy-2.0.38-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5dba1cdb8f319084f5b00d41207b2079822aa8d6a4667c0f369fce85e34b0c86"}, + {file = "SQLAlchemy-2.0.38-cp313-cp313-win32.whl", hash = "sha256:eae27ad7580529a427cfdd52c87abb2dfb15ce2b7a3e0fc29fbb63e2ed6f8120"}, + {file = "SQLAlchemy-2.0.38-cp313-cp313-win_amd64.whl", hash = "sha256:b335a7c958bc945e10c522c069cd6e5804f4ff20f9a744dd38e748eb602cbbda"}, + {file = "SQLAlchemy-2.0.38-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:40310db77a55512a18827488e592965d3dec6a3f1e3d8af3f8243134029daca3"}, + {file = "SQLAlchemy-2.0.38-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d3043375dd5bbcb2282894cbb12e6c559654c67b5fffb462fda815a55bf93f7"}, + {file = "SQLAlchemy-2.0.38-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70065dfabf023b155a9c2a18f573e47e6ca709b9e8619b2e04c54d5bcf193178"}, + {file = "SQLAlchemy-2.0.38-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:c058b84c3b24812c859300f3b5abf300daa34df20d4d4f42e9652a4d1c48c8a4"}, + {file = "SQLAlchemy-2.0.38-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0398361acebb42975deb747a824b5188817d32b5c8f8aba767d51ad0cc7bb08d"}, + {file = "SQLAlchemy-2.0.38-cp37-cp37m-win32.whl", hash = "sha256:a2bc4e49e8329f3283d99840c136ff2cd1a29e49b5624a46a290f04dff48e079"}, + {file = "SQLAlchemy-2.0.38-cp37-cp37m-win_amd64.whl", hash = "sha256:9cd136184dd5f58892f24001cdce986f5d7e96059d004118d5410671579834a4"}, + {file = "SQLAlchemy-2.0.38-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:665255e7aae5f38237b3a6eae49d2358d83a59f39ac21036413fab5d1e810578"}, + {file = "SQLAlchemy-2.0.38-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:92f99f2623ff16bd4aaf786ccde759c1f676d39c7bf2855eb0b540e1ac4530c8"}, + {file = "SQLAlchemy-2.0.38-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa498d1392216fae47eaf10c593e06c34476ced9549657fca713d0d1ba5f7248"}, + {file = "SQLAlchemy-2.0.38-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9afbc3909d0274d6ac8ec891e30210563b2c8bdd52ebbda14146354e7a69373"}, + {file = "SQLAlchemy-2.0.38-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:57dd41ba32430cbcc812041d4de8d2ca4651aeefad2626921ae2a23deb8cd6ff"}, + {file = "SQLAlchemy-2.0.38-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3e35d5565b35b66905b79ca4ae85840a8d40d31e0b3e2990f2e7692071b179ca"}, + {file = "SQLAlchemy-2.0.38-cp38-cp38-win32.whl", hash = "sha256:f0d3de936b192980209d7b5149e3c98977c3810d401482d05fb6d668d53c1c63"}, + {file = "SQLAlchemy-2.0.38-cp38-cp38-win_amd64.whl", hash = "sha256:3868acb639c136d98107c9096303d2d8e5da2880f7706f9f8c06a7f961961149"}, + {file = "SQLAlchemy-2.0.38-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:07258341402a718f166618470cde0c34e4cec85a39767dce4e24f61ba5e667ea"}, + {file = "SQLAlchemy-2.0.38-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a826f21848632add58bef4f755a33d45105d25656a0c849f2dc2df1c71f6f50"}, + {file = 
"SQLAlchemy-2.0.38-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:386b7d136919bb66ced64d2228b92d66140de5fefb3c7df6bd79069a269a7b06"}, + {file = "SQLAlchemy-2.0.38-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f2951dc4b4f990a4b394d6b382accb33141d4d3bd3ef4e2b27287135d6bdd68"}, + {file = "SQLAlchemy-2.0.38-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8bf312ed8ac096d674c6aa9131b249093c1b37c35db6a967daa4c84746bc1bc9"}, + {file = "SQLAlchemy-2.0.38-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6db316d6e340f862ec059dc12e395d71f39746a20503b124edc255973977b728"}, + {file = "SQLAlchemy-2.0.38-cp39-cp39-win32.whl", hash = "sha256:c09a6ea87658695e527104cf857c70f79f14e9484605e205217aae0ec27b45fc"}, + {file = "SQLAlchemy-2.0.38-cp39-cp39-win_amd64.whl", hash = "sha256:12f5c9ed53334c3ce719155424dc5407aaa4f6cadeb09c5b627e06abb93933a1"}, + {file = "SQLAlchemy-2.0.38-py3-none-any.whl", hash = "sha256:63178c675d4c80def39f1febd625a6333f44c0ba269edd8a468b156394b27753"}, + {file = "sqlalchemy-2.0.38.tar.gz", hash = "sha256:e5a4d82bdb4bf1ac1285a68eab02d253ab73355d9f0fe725a97e1e0fa689decb"}, ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +greenlet = {version = "!=0.4.17", markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} typing-extensions = ">=4.6.0" [package.extras] @@ -3368,24 +3353,24 @@ files = [ [[package]] name = "tzdata" -version = "2024.2" +version = "2025.1" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, - {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, + {file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"}, + {file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"}, ] [[package]] name = "urllib3" -version = "2.2.3" +version = "2.3.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, + {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, + {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, ] [package.extras] @@ -3452,76 +3437,90 @@ test = ["pytest"] [[package]] name = "wrapt" -version = "1.17.0" +version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." 
optional = false python-versions = ">=3.8" files = [ - {file = "wrapt-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a0c23b8319848426f305f9cb0c98a6e32ee68a36264f45948ccf8e7d2b941f8"}, - {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1ca5f060e205f72bec57faae5bd817a1560fcfc4af03f414b08fa29106b7e2d"}, - {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e185ec6060e301a7e5f8461c86fb3640a7beb1a0f0208ffde7a65ec4074931df"}, - {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb90765dd91aed05b53cd7a87bd7f5c188fcd95960914bae0d32c5e7f899719d"}, - {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:879591c2b5ab0a7184258274c42a126b74a2c3d5a329df16d69f9cee07bba6ea"}, - {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fce6fee67c318fdfb7f285c29a82d84782ae2579c0e1b385b7f36c6e8074fffb"}, - {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0698d3a86f68abc894d537887b9bbf84d29bcfbc759e23f4644be27acf6da301"}, - {file = "wrapt-1.17.0-cp310-cp310-win32.whl", hash = "sha256:69d093792dc34a9c4c8a70e4973a3361c7a7578e9cd86961b2bbf38ca71e4e22"}, - {file = "wrapt-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:f28b29dc158ca5d6ac396c8e0a2ef45c4e97bb7e65522bfc04c989e6fe814575"}, - {file = "wrapt-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:74bf625b1b4caaa7bad51d9003f8b07a468a704e0644a700e936c357c17dd45a"}, - {file = "wrapt-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f2a28eb35cf99d5f5bd12f5dd44a0f41d206db226535b37b0c60e9da162c3ed"}, - {file = "wrapt-1.17.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81b1289e99cf4bad07c23393ab447e5e96db0ab50974a280f7954b071d41b489"}, - {file = "wrapt-1.17.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2939cd4a2a52ca32bc0b359015718472d7f6de870760342e7ba295be9ebaf9"}, - {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a9653131bda68a1f029c52157fd81e11f07d485df55410401f745007bd6d339"}, - {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4e4b4385363de9052dac1a67bfb535c376f3d19c238b5f36bddc95efae15e12d"}, - {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bdf62d25234290db1837875d4dceb2151e4ea7f9fff2ed41c0fde23ed542eb5b"}, - {file = "wrapt-1.17.0-cp311-cp311-win32.whl", hash = "sha256:5d8fd17635b262448ab8f99230fe4dac991af1dabdbb92f7a70a6afac8a7e346"}, - {file = "wrapt-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:92a3d214d5e53cb1db8b015f30d544bc9d3f7179a05feb8f16df713cecc2620a"}, - {file = "wrapt-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:89fc28495896097622c3fc238915c79365dd0ede02f9a82ce436b13bd0ab7569"}, - {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:875d240fdbdbe9e11f9831901fb8719da0bd4e6131f83aa9f69b96d18fae7504"}, - {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5ed16d95fd142e9c72b6c10b06514ad30e846a0d0917ab406186541fe68b451"}, - {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:18b956061b8db634120b58f668592a772e87e2e78bc1f6a906cfcaa0cc7991c1"}, - {file = "wrapt-1.17.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:daba396199399ccabafbfc509037ac635a6bc18510ad1add8fd16d4739cdd106"}, - {file = "wrapt-1.17.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4d63f4d446e10ad19ed01188d6c1e1bb134cde8c18b0aa2acfd973d41fcc5ada"}, - {file = "wrapt-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8a5e7cc39a45fc430af1aefc4d77ee6bad72c5bcdb1322cfde852c15192b8bd4"}, - {file = "wrapt-1.17.0-cp312-cp312-win32.whl", hash = "sha256:0a0a1a1ec28b641f2a3a2c35cbe86c00051c04fffcfcc577ffcdd707df3f8635"}, - {file = "wrapt-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:3c34f6896a01b84bab196f7119770fd8466c8ae3dfa73c59c0bb281e7b588ce7"}, - {file = "wrapt-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:714c12485aa52efbc0fc0ade1e9ab3a70343db82627f90f2ecbc898fdf0bb181"}, - {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da427d311782324a376cacb47c1a4adc43f99fd9d996ffc1b3e8529c4074d393"}, - {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba1739fb38441a27a676f4de4123d3e858e494fac05868b7a281c0a383c098f4"}, - {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e711fc1acc7468463bc084d1b68561e40d1eaa135d8c509a65dd534403d83d7b"}, - {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:140ea00c87fafc42739bd74a94a5a9003f8e72c27c47cd4f61d8e05e6dec8721"}, - {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73a96fd11d2b2e77d623a7f26e004cc31f131a365add1ce1ce9a19e55a1eef90"}, - {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0b48554952f0f387984da81ccfa73b62e52817a4386d070c75e4db7d43a28c4a"}, - {file = "wrapt-1.17.0-cp313-cp313-win32.whl", hash = "sha256:498fec8da10e3e62edd1e7368f4b24aa362ac0ad931e678332d1b209aec93045"}, - {file = "wrapt-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd136bb85f4568fffca995bd3c8d52080b1e5b225dbf1c2b17b66b4c5fa02838"}, - {file = "wrapt-1.17.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:17fcf043d0b4724858f25b8826c36e08f9fb2e475410bece0ec44a22d533da9b"}, - {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4a557d97f12813dc5e18dad9fa765ae44ddd56a672bb5de4825527c847d6379"}, - {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0229b247b0fc7dee0d36176cbb79dbaf2a9eb7ecc50ec3121f40ef443155fb1d"}, - {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8425cfce27b8b20c9b89d77fb50e368d8306a90bf2b6eef2cdf5cd5083adf83f"}, - {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9c900108df470060174108012de06d45f514aa4ec21a191e7ab42988ff42a86c"}, - {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:4e547b447073fc0dbfcbff15154c1be8823d10dab4ad401bdb1575e3fdedff1b"}, - {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:914f66f3b6fc7b915d46c1cc424bc2441841083de01b90f9e81109c9759e43ab"}, - {file = "wrapt-1.17.0-cp313-cp313t-win32.whl", hash = "sha256:a4192b45dff127c7d69b3bdfb4d3e47b64179a0b9900b6351859f3001397dabf"}, - {file = "wrapt-1.17.0-cp313-cp313t-win_amd64.whl", hash 
= "sha256:4f643df3d4419ea3f856c5c3f40fec1d65ea2e89ec812c83f7767c8730f9827a"}, - {file = "wrapt-1.17.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:69c40d4655e078ede067a7095544bcec5a963566e17503e75a3a3e0fe2803b13"}, - {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f495b6754358979379f84534f8dd7a43ff8cff2558dcdea4a148a6e713a758f"}, - {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:baa7ef4e0886a6f482e00d1d5bcd37c201b383f1d314643dfb0367169f94f04c"}, - {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8fc931382e56627ec4acb01e09ce66e5c03c384ca52606111cee50d931a342d"}, - {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8f8909cdb9f1b237786c09a810e24ee5e15ef17019f7cecb207ce205b9b5fcce"}, - {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ad47b095f0bdc5585bced35bd088cbfe4177236c7df9984b3cc46b391cc60627"}, - {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:948a9bd0fb2c5120457b07e59c8d7210cbc8703243225dbd78f4dfc13c8d2d1f"}, - {file = "wrapt-1.17.0-cp38-cp38-win32.whl", hash = "sha256:5ae271862b2142f4bc687bdbfcc942e2473a89999a54231aa1c2c676e28f29ea"}, - {file = "wrapt-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:f335579a1b485c834849e9075191c9898e0731af45705c2ebf70e0cd5d58beed"}, - {file = "wrapt-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d751300b94e35b6016d4b1e7d0e7bbc3b5e1751e2405ef908316c2a9024008a1"}, - {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7264cbb4a18dc4acfd73b63e4bcfec9c9802614572025bdd44d0721983fc1d9c"}, - {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33539c6f5b96cf0b1105a0ff4cf5db9332e773bb521cc804a90e58dc49b10578"}, - {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c30970bdee1cad6a8da2044febd824ef6dc4cc0b19e39af3085c763fdec7de33"}, - {file = "wrapt-1.17.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bc7f729a72b16ee21795a943f85c6244971724819819a41ddbaeb691b2dd85ad"}, - {file = "wrapt-1.17.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6ff02a91c4fc9b6a94e1c9c20f62ea06a7e375f42fe57587f004d1078ac86ca9"}, - {file = "wrapt-1.17.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dfb7cff84e72e7bf975b06b4989477873dcf160b2fd89959c629535df53d4e0"}, - {file = "wrapt-1.17.0-cp39-cp39-win32.whl", hash = "sha256:2399408ac33ffd5b200480ee858baa58d77dd30e0dd0cab6a8a9547135f30a88"}, - {file = "wrapt-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f763a29ee6a20c529496a20a7bcb16a73de27f5da6a843249c7047daf135977"}, - {file = "wrapt-1.17.0-py3-none-any.whl", hash = "sha256:d2c63b93548eda58abf5188e505ffed0229bf675f7c3090f8e36ad55b8cbc371"}, - {file = "wrapt-1.17.0.tar.gz", hash = "sha256:16187aa2317c731170a88ef35e8937ae0f533c402872c1ee5e6d079fcf320801"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"}, + {file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"}, + {file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"}, + {file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"}, + {file = "wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"}, + {file = 
"wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"}, + {file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"}, + {file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"}, + {file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"}, + {file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"}, + {file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"}, + {file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"}, + {file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"}, + {file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"}, + {file = 
"wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"}, + {file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"}, + {file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"}, + {file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"}, + {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"}, ] [[package]] @@ -3549,5 +3548,5 @@ viz = ["pydot"] [metadata] lock-version = "2.0" -python-versions = ">=3.9,<3.12" -content-hash = "3f6e1ecfcf93e08b5989930a4a5c2e68dfa8a22160a7783812b2524f9f8ed1ac" +python-versions = ">=3.9,<3.13" +content-hash = "0284c499500dc2ac5da604e1683e289333c7db2a50f0312da2120187707ce800" diff --git a/pyproject.toml b/pyproject.toml index 518bb66b0..092388048 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,9 +16,9 @@ classifiers = [ [tool.poetry.dependencies] -python = ">=3.9,<3.12" -qibolab = "^0.1.8" -qibo = "^0.2.12" +python = ">=3.9,<3.13" +qibolab = "^0.2.3" +qibo = "^0.2.13" numpy = "^1.26.4" scipy = "^1.10.1" pandas = { version = "^2.2.2", extras = ["html"] } @@ -27,7 +27,7 @@ click = "^8.1.3" jinja2 = "^3.1.2" plotly = "^5.22.0" dash = "^2.6.0" -skops = "^0.10.0" +skops = "^0.11.0" matplotlib = { version = "^3.7.0", optional = true } seaborn = { version = "^0.12.2", optional = true } pydot = { version = "^1.4.2", optional = true } @@ -84,6 +84,7 @@ docs-clean = "make -C doc clean" test-docs = "make -C doc doctest" [tool.pytest.ini_options] +env = ["QIBO_PLATFORM = dummy"] testpaths = ['tests/'] addopts = ['--cov=qibocal', '--cov-report=xml', '--cov-report=html'] diff --git a/src/qibocal/auto/execute.py b/src/qibocal/auto/execute.py index a7fa2a9b9..1b61e0e27 100644 --- a/src/qibocal/auto/execute.py +++ b/src/qibocal/auto/execute.py @@ -11,12 +11,11 @@ from typing import Optional, Union from qibo.backends import construct_backend -from qibolab import create_platform -from qibolab.platform import Platform from qibocal import protocols from qibocal.config import log +from ..calibration import CalibrationPlatform, create_calibration_platform from .history import History from .mode import AUTOCALIBRATION, ExecutionMode from .operation import Routine @@ -72,7 +71,7 @@ class Executor: """The execution history, with results and exit states.""" targets: Targets """Qubits/Qubit Pairs to be calibrated.""" - platform: Platform + platform: CalibrationPlatform """Qubits' platform.""" update: bool = True """Runcard update mechanism.""" @@ -99,12 +98,14 @@ def __post_init__(self): _register(self.name, self) @classmethod - def create(cls, name: str, platform: 
Union[Platform, str, None] = None): + def create(cls, name: str, platform: Union[CalibrationPlatform, str, None] = None): """Load list of protocols.""" platform = ( platform - if isinstance(platform, Platform) - else create_platform(platform if platform is not None else "dummy") + if isinstance(platform, CalibrationPlatform) + else create_calibration_platform( + platform if platform is not None else "dummy" + ) ) return cls( name=name, @@ -248,17 +249,21 @@ def init( self, path: os.PathLike, force: bool = False, - platform: Union[Platform, str, None] = None, + platform: Union[CalibrationPlatform, str, None] = None, update: Optional[bool] = None, targets: Optional[Targets] = None, ): """Initialize execution.""" - if platform is None: + if platform is None or isinstance(platform, CalibrationPlatform): platform = self.platform + elif isinstance(platform, str): + platform = self.platform = create_calibration_platform(platform) + else: + platform = self.platform = CalibrationPlatform.from_platform(platform) + + assert isinstance(platform, CalibrationPlatform) - backend = construct_backend(backend="qibolab", platform=platform) - platform = self.platform = backend.platform - assert isinstance(platform, Platform) + backend = construct_backend(backend="qibolab", platform=platform.name) if update is not None: self.update = update @@ -307,7 +312,7 @@ def open( name: str, path: os.PathLike, force: bool = False, - platform: Union[Platform, str, None] = None, + platform: Union[CalibrationPlatform, str, None] = None, update: Optional[bool] = None, targets: Optional[Targets] = None, ): diff --git a/src/qibocal/auto/history.py b/src/qibocal/auto/history.py index c5db3745d..e0496e836 100644 --- a/src/qibocal/auto/history.py +++ b/src/qibocal/auto/history.py @@ -87,7 +87,7 @@ def route(task_id: TaskId, folder: Path) -> Path: """Determine the path related to a completed task given TaskId. `folder` should be usually the general output folder, used by Qibocal to store - all the execution results. Cf. :cls:`qibocal.auto.output.Output`. + all the execution results. Cf. :class:`qibocal.auto.output.Output`. """ return folder / "data" / f"{task_id}" @@ -95,7 +95,7 @@ def flush(self, output: Optional[Path] = None): """Flush all content to disk. Specifying `output` is possible to select which folder should be considered as - the general Qibocal output folder. Cf. :cls:`qibocal.auto.output.Output`. + the general Qibocal output folder. Cf. :class:`qibocal.auto.output.Output`. 
""" for task_id, completed in self.items(): if output is not None: diff --git a/src/qibocal/auto/operation.py b/src/qibocal/auto/operation.py index 8b1f602eb..51aa98cec 100644 --- a/src/qibocal/auto/operation.py +++ b/src/qibocal/auto/operation.py @@ -9,12 +9,11 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import Qubit, QubitId, QubitPair, QubitPairId +from qibolab import AcquisitionType, AveragingMode, Platform, Qubit from qibocal.config import log +from ..calibration.calibration import QubitId, QubitPairId from .serialize import deserialize, load, serialize OperationId = NewType("OperationId", str) @@ -23,7 +22,6 @@ """Valid value for a routine and runcard parameter.""" Qubits = dict[QubitId, Qubit] """Convenient way of passing qubit pairs in the routines.""" -QubitsPairs = dict[tuple[QubitId, QubitId], QubitPair] DATAFILE = "data" @@ -112,7 +110,7 @@ def execution_parameters(self): if self.classify else AcquisitionType.INTEGRATION ) - return ExecutionParameters( + return dict( nshots=self.nshots, relaxation_time=self.relaxation_time, acquisition_type=acquisition_type, diff --git a/src/qibocal/auto/output.py b/src/qibocal/auto/output.py index 275c4cfd8..0f716b93d 100644 --- a/src/qibocal/auto/output.py +++ b/src/qibocal/auto/output.py @@ -7,9 +7,8 @@ from typing import Optional from qibo.backends import construct_backend -from qibolab import Platform -from qibolab.serialize import dump_platform +from ..calibration import CalibrationPlatform from ..config import log from ..version import __version__ from .history import History @@ -155,7 +154,7 @@ class Output: history: History meta: Metadata - platform: Optional[Platform] = None + platform: Optional[CalibrationPlatform] = None @classmethod def load(cls, path: Path): @@ -202,7 +201,7 @@ def dump(self, path: Path): self.update_platform(self.platform, path) @staticmethod - def update_platform(platform: Platform, path: Path): + def update_platform(platform: CalibrationPlatform, path: Path): """Dump platform used. If the original one is not defined, use the current one as the @@ -213,7 +212,7 @@ def update_platform(platform: Platform, path: Path): platpath = path / UPDATED_PLATFORM platpath.mkdir(parents=True, exist_ok=True) - dump_platform(platform, platpath) + platform.dump(platpath) def _export_stats(self): """Export task statistics. @@ -234,9 +233,10 @@ def process( force: bool = False, ): """Process existing output.""" - self.platform = construct_backend( + backend = construct_backend( backend=self.meta.backend, platform=self.meta.platform - ).platform + ) + self.platform = CalibrationPlatform.from_platform(backend.platform) assert self.platform is not None for task_id, completed in self.history.items(): diff --git a/src/qibocal/auto/runcard.py b/src/qibocal/auto/runcard.py index ec56c2aa2..3bb4def52 100644 --- a/src/qibocal/auto/runcard.py +++ b/src/qibocal/auto/runcard.py @@ -6,7 +6,7 @@ import yaml from pydantic.dataclasses import dataclass -from qibolab.platform import Platform +from qibolab import Platform from .. 
import protocols from .execute import Executor diff --git a/src/qibocal/auto/task.py b/src/qibocal/auto/task.py index c8a07d9fc..7f5f4d003 100644 --- a/src/qibocal/auto/task.py +++ b/src/qibocal/auto/task.py @@ -8,8 +8,9 @@ import yaml from qibo import Circuit -from qibolab.platform import Platform -from qibolab.qubits import QubitId, QubitPairId +from qibolab import Platform + +from qibocal.auto.operation import QubitId, QubitPairId from .. import protocols from ..config import log diff --git a/src/qibocal/auto/transpile.py b/src/qibocal/auto/transpile.py index 6a34f942b..5cba73abf 100644 --- a/src/qibocal/auto/transpile.py +++ b/src/qibocal/auto/transpile.py @@ -4,9 +4,11 @@ from qibo.backends import Backend from qibo.transpiler.pipeline import Passes from qibo.transpiler.unroller import NativeGates, Unroller -from qibolab.compilers.compiler import Compiler -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import PulseSequence +from qibolab._core.compilers import Compiler +from qibolab._core.native import NativeContainer + +from qibocal.auto.operation import QubitId REPLACEMENTS = { "RX": "GPI2", @@ -109,32 +111,35 @@ def execute_transpiled_circuit( ) -def natives(platform): +def natives(platform) -> dict[str, NativeContainer]: """ - Return the list of native gates defined in the `platform`. - This function assumes the native gates to be the same for each + Return the dict of native gate names with the associated native container + defined in the `platform`. This function assumes the native gates to be the same for each qubit and pair. """ - pair = next(iter(platform.pairs.values())) - qubit = next(iter(platform.qubits.values())) - two_qubit_natives = list(pair.native_gates.raw) - single_qubit_natives = list(qubit.native_gates.raw) + pair = next(iter(platform.pairs)) + qubit = next(iter(platform.qubits)) + two_qubit_natives_container = platform.natives.two_qubit[pair] + single_qubit_natives_container = platform.natives.single_qubit[qubit] + single_qubit_natives = list(single_qubit_natives_container.model_fields) + two_qubit_natives = list(two_qubit_natives_container.model_fields) # Solve Qibo-Qibolab mismatch single_qubit_natives.append("RZ") single_qubit_natives.append("Z") single_qubit_natives.remove("RX12") - new_single_natives = [REPLACEMENTS.get(i, i) for i in single_qubit_natives] - return new_single_natives + two_qubit_natives + single_qubit_natives.remove("RX90") + single_qubit_natives.remove("CP") + single_qubit_natives = [REPLACEMENTS.get(x, x) for x in single_qubit_natives] + return {i: platform.natives.single_qubit[qubit] for i in single_qubit_natives} | { + i: platform.natives.two_qubit[pair] for i in two_qubit_natives + } -def create_rule(native): - def rule(qubits_ids, platform, parameters=None): - if len(qubits_ids[1]) == 1: - native_gate = platform.qubits[tuple(qubits_ids[1])].native_gates - else: - native_gate = platform.pairs[tuple(qubits_ids[1])].native_gates - pulses = getattr(native_gate, native).pulses - return PulseSequence(pulses), {} +def create_rule(name, natives): + """Create the compiler rule for a gate name, given its native container.""" + + def rule(gate: gates.Gate, natives: NativeContainer) -> PulseSequence: + return natives.ensure(name).create_sequence() return rule @@ -145,10 +150,10 @@ def set_compiler(backend, natives_): """ compiler = backend.compiler rules = {} - for native in natives_: - gate = getattr(gates, native) + for name, natives_container in natives_.items(): + gate = getattr(gates, name) if gate not in 
compiler.rules: - rules[gate] = create_rule(native) + rules[gate] = create_rule(name, natives_container) else: rules[gate] = compiler.rules[gate] rules[gates.I] = compiler.rules[gates.I] @@ -167,7 +172,7 @@ def dummy_transpiler(backend: Backend) -> Passes: set_compiler(backend, native_gates) native_gates = [getattr(gates, x) for x in native_gates] unroller = Unroller(NativeGates.from_gatelist(native_gates)) - return Passes(connectivity=platform.topology, passes=[unroller]) + return Passes(connectivity=platform.pairs, passes=[unroller]) def pad_circuit(nqubits, circuit: Circuit, qubit_map: list[int]) -> Circuit: diff --git a/src/qibocal/calibration/__init__.py b/src/qibocal/calibration/__init__.py new file mode 100644 index 000000000..fc3d1d777 --- /dev/null +++ b/src/qibocal/calibration/__init__.py @@ -0,0 +1 @@ +from .platform import CalibrationPlatform, create_calibration_platform diff --git a/src/qibocal/calibration/calibration.py b/src/qibocal/calibration/calibration.py new file mode 100644 index 000000000..b2e463077 --- /dev/null +++ b/src/qibocal/calibration/calibration.py @@ -0,0 +1,185 @@ +from pathlib import Path +from typing import Annotated, Optional, Union + +import numpy as np +from pydantic import BaseModel, BeforeValidator, ConfigDict, Field, PlainSerializer + +from .serialize import NdArray, SparseArray + +QubitId = Annotated[Union[int, str], Field(union_mode="left_to_right")] +"""Qubit name.""" + +QubitPairId = Annotated[ + tuple[QubitId, QubitId], + BeforeValidator(lambda p: tuple(p.split("-")) if isinstance(p, str) else p), + PlainSerializer(lambda p: f"{p[0]}-{p[1]}"), +] +"""Qubit pair name.""" + +CALIBRATION = "calibration.json" +"""Calibration file.""" + +Measure = tuple[float, Optional[float]] +"""A measure is represented as two values: mean and error.""" + + +class Model(BaseModel): + """Global model, holding common configurations.""" + + model_config = ConfigDict(extra="forbid") + + +class Resonator(Model): + """Representation of resonator parameters.""" + + bare_frequency: Optional[float] = None + """Bare resonator frequency [Hz].""" + dressed_frequency: Optional[float] = None + """Dressed resonator frequency [Hz].""" + depletion_time: Optional[int] = None + """Depletion time [ns].""" + + @property + def dispersive_shift(self): + """Dispersive shift.""" + return self.bare_frequency - self.dressed_frequency + + # TODO: Add setter for dispersive shift as well + # TODO: Add something related to resonator calibration + + +class Qubit(Model): + """Representation of qubit parameters.""" + + frequency_01: Optional[float] = None + """0->1 transition frequency [Hz].""" + frequency_12: Optional[float] = None + """1->2 transition frequency [Hz].""" + maximum_frequency: Optional[float] = None + """Maximum transition frequency [Hz].""" + asymmetry: Optional[float] = None + """Junction asymmetry.""" + sweetspot: Optional[float] = None + """Qubit sweetspot [V].""" + + @property + def anharmonicity(self): + """Anharmonicity of the qubit [Hz].""" + if self.frequency_12 is None: + return 0 + return self.frequency_12 - self.frequency_01 + + @property + def charging_energy(self): + """Charging energy Ec [Hz].""" + return -self.anharmonicity + + @property + def josephson_energy(self): + """Josephson energy [Hz]. + + The following formula is the inversion of the maximum frequency + obtained from the flux dependence protocol, f_max = sqrt(8 E_J E_C) - E_C. 
+ + """ + return ( + (self.maximum_frequency + self.charging_energy) ** 2 + / 8 + / self.charging_energy + ) + + +class Readout(Model): + """Readout parameters.""" + + fidelity: Optional[float] = None + """Readout fidelity.""" + coupling: Optional[float] = None + """Readout coupling [Hz].""" + effective_temperature: Optional[float] = None + """Qubit effective temperature.""" + ground_state: list[float] = Field(default_factory=list) + """Ground state position in IQ plane.""" + excited_state: list[float] = Field(default_factory=list) + """Excited state position in IQ plane.""" + qudits_frequency: dict[int, float] = Field(default_factory=dict) + """Dictionary mapping each state to its readout frequency.""" + + @property + def assignment_fidelity(self): + """Assignment fidelity.""" + return (1 + self.fidelity) / 2 + + +class QubitCalibration(Model): + """Container for calibration of single qubit.""" + + resonator: Resonator = Field(default_factory=Resonator) + """Resonator calibration.""" + qubit: Qubit = Field(default_factory=Qubit) + """Qubit calibration.""" + readout: Readout = Field(default_factory=Readout) + """Readout information.""" + t1: Optional[Measure] = None + """Relaxation time [ns].""" + t2: Optional[Measure] = None + """T2 of the qubit [ns].""" + t2_spin_echo: Optional[Measure] = None + """T2 Hahn echo [ns].""" + rb_fidelity: Optional[Measure] = None + """Standard RB pulse fidelity.""" + + +class TwoQubitCalibration(Model): + """Container for calibration of qubit pair.""" + + rb_fidelity: Optional[Measure] = None + """Two-qubit standard RB fidelity.""" + cz_fidelity: Optional[Measure] = None + """CZ interleaved RB fidelity.""" + coupling: Optional[float] = None + """Qubit-qubit coupling.""" + + +class Calibration(Model): + """Calibration container.""" + + single_qubits: dict[QubitId, QubitCalibration] = Field(default_factory=dict) + """Dict with single qubit calibration.""" + two_qubits: dict[QubitPairId, TwoQubitCalibration] = Field(default_factory=dict) + """Dict with qubit pair calibration.""" + readout_mitigation_matrix: Optional[SparseArray] = None + """Readout mitigation matrix.""" + flux_crosstalk_matrix: Optional[NdArray] = None + """Flux crosstalk matrix.""" + + def dump(self, path: Path): + """Dump calibration model.""" + (path / CALIBRATION).write_text(self.model_dump_json(indent=4)) + + @property + def qubits(self) -> list: + """List of qubits available in the model.""" + return list(self.single_qubits) + + @property + def nqubits(self) -> int: + """Number of qubits available.""" + return len(self.qubits) + + def qubit_index(self, qubit: QubitId): + """Return qubit index from platform qubits.""" + return self.qubits.index(qubit) + + # TODO: add crosstalk object where I can do this + def get_crosstalk_element(self, qubit1: QubitId, qubit2: QubitId): + if self.flux_crosstalk_matrix is None: + self.flux_crosstalk_matrix = np.zeros((self.nqubits, self.nqubits)) + a, b = self.qubit_index(qubit1), self.qubit_index(qubit2) + return self.flux_crosstalk_matrix[a, b] # pylint: disable=E1136 + + def set_crosstalk_element(self, qubit1: QubitId, qubit2: QubitId, value: float): + if self.flux_crosstalk_matrix is None: + self.flux_crosstalk_matrix = np.zeros((self.nqubits, self.nqubits)) + a, b = self.qubit_index(qubit1), self.qubit_index(qubit2) + self.flux_crosstalk_matrix[a, b] = value # pylint: disable=E1137 diff --git a/src/qibocal/calibration/dummy.json b/src/qibocal/calibration/dummy.json new file mode 100644 index 000000000..6aabbd896 --- /dev/null +++ 
b/src/qibocal/calibration/dummy.json @@ -0,0 +1,139 @@ +{ + "single_qubits": { + "0": { + "resonator": { + "bare_frequency": 0.0, + "dressed_frequency": 5200000000.0, + "depletion_time": 0 + }, + "qubit": { + "frequency_01": 4000000000.0, + "frequency_12": 4700000000.0, + "maximum_frequency": 4000000000.0, + "asymmetry": 0.0, + "sweetspot": 0.0 + }, + "readout": { + "fidelity": 0.0, + "ground_state": [ + 0.0, + 1.0 + ], + "excited_state": [ + 1.0, + 0.0 + ] + }, + "rb_fidelity": null + }, + "1": { + "resonator": { + "bare_frequency": 0.0, + "dressed_frequency": 4900000000.0, + "depletion_time": 0 + }, + "qubit": { + "frequency_01": 4200000000.0, + "frequency_12": 4855663000.0, + "maximum_frequency": 4000000000.0, + "asymmetry": 0.0, + "sweetspot": 0.0 + }, + "readout": { + "fidelity": 0.0, + "ground_state": [ + 0.25, + 0.0 + ], + "excited_state": [ + 0.0, + 0.25 + ] + }, + "rb_fidelity": null + }, + "2": { + "resonator": { + "bare_frequency": 0.0, + "dressed_frequency": 6100000000.0, + "depletion_time": 0 + }, + "qubit": { + "frequency_01": 4500000000.0, + "frequency_12": 2700000000.0, + "maximum_frequency": 4000000000.0, + "asymmetry": 0.0, + "sweetspot": 0.0 + }, + "readout": { + "fidelity": 0.0, + "ground_state": [ + 0.5, + 0.0 + ], + "excited_state": [ + 0.0, + 0.5 + ] + }, + "rb_fidelity": null + }, + "3": { + "resonator": { + "bare_frequency": 0.0, + "dressed_frequency": 5800000000.0, + "depletion_time": 0 + }, + "qubit": { + "frequency_01": 4150000000.0, + "frequency_12": 5855663000.0, + "maximum_frequency": 4000000000.0, + "asymmetry": 0.0, + "sweetspot": 0.0 + }, + "readout": { + "fidelity": 0.0, + "ground_state": [ + 0.75, + 0.0 + ], + "excited_state": [ + 0.0, + 0.75 + ] + }, + "rb_fidelity": null + }, + "4": { + "resonator": { + "bare_frequency": 0.0, + "dressed_frequency": 5500000000.0, + "depletion_time": 0 + }, + "qubit": { + "frequency_01": 4100000000.0, + "frequency_12": 5855663000.0, + "maximum_frequency": 4000000000.0, + "asymmetry": 0.0, + "sweetspot": 0.0 + }, + "readout": { + "fidelity": 0.0, + "ground_state": [ + 1.0, + 0.0 + ], + "excited_state": [ + 0.0, + 1.0 + ] + }, + "rb_fidelity": null + } + }, + "two_qubits": { + "1-2": { + "rb_fidelity": [0.99, 0.01] + } + } +} diff --git a/src/qibocal/calibration/platform.py b/src/qibocal/calibration/platform.py new file mode 100644 index 000000000..5a5e94e4f --- /dev/null +++ b/src/qibocal/calibration/platform.py @@ -0,0 +1,38 @@ +from dataclasses import dataclass +from pathlib import Path + +from qibolab import Platform, create_platform, locate_platform + +from .calibration import CALIBRATION, Calibration + + +@dataclass +class CalibrationPlatform(Platform): + """Qibolab platform with calibration information.""" + + calibration: Calibration = None + """Calibration information.""" + + @classmethod + def from_platform(cls, platform: Platform): + name = platform.name + if name == "dummy": + calibration = Calibration.model_validate_json( + (Path(__file__).parent / "dummy.json").read_text() + ) + else: # pragma: no cover + path = locate_platform(name) + calibration = Calibration.model_validate_json( + (path / CALIBRATION).read_text() + ) + # TODO: this is loading twice a platform + return cls(**vars(platform), calibration=calibration) + + def dump(self, path: Path): + super().dump(path) + self.calibration.dump(path) + + +def create_calibration_platform(name: str) -> CalibrationPlatform: + platform = create_platform(name) + return CalibrationPlatform.from_platform(platform) diff --git a/src/qibocal/calibration/serialize.py 
b/src/qibocal/calibration/serialize.py new file mode 100644 index 000000000..083436c00 --- /dev/null +++ b/src/qibocal/calibration/serialize.py @@ -0,0 +1,63 @@ +import base64 +import io +from typing import Annotated, Optional, Union + +import numpy as np +import numpy.typing as npt +from pydantic import PlainSerializer, PlainValidator +from scipy.sparse import csr_matrix, lil_matrix + + +# TODO: add tests about this +def sparse_serialize(matrix: lil_matrix) -> str: + """Serialize a lil_matrix to a base64 string.""" + csr_matrix = matrix.tocsr() + buffer = io.BytesIO() + np.save(buffer, csr_matrix.shape) + np.save(buffer, csr_matrix.data) + np.save(buffer, csr_matrix.indices) + np.save(buffer, csr_matrix.indptr) + buffer.seek(0) + return base64.standard_b64encode(buffer.read()).decode() + + +def sparse_deserialize(data: str) -> Optional[lil_matrix]: + """Deserialize a base64 string back into a lil_matrix.""" + buffer = io.BytesIO(base64.standard_b64decode(data)) + shape = np.load(buffer, allow_pickle=True) + data_array = np.load(buffer, allow_pickle=True) + indices_array = np.load(buffer, allow_pickle=True) + indptr_array = np.load(buffer, allow_pickle=True) + csr = csr_matrix((data_array, indices_array, indptr_array), shape=shape) + return lil_matrix(csr) + + +SparseArray = Annotated[ + lil_matrix, + PlainValidator(sparse_deserialize), + PlainSerializer(sparse_serialize, return_type=str), +] + + +def ndarray_serialize(ar: npt.NDArray) -> str: + """Serialize array to string.""" + buffer = io.BytesIO() + np.save(buffer, ar) + buffer.seek(0) + return base64.standard_b64encode(buffer.read()).decode() + + +def ndarray_deserialize(x: Union[str, npt.NDArray]) -> npt.NDArray: + """Deserialize array.""" + buffer = io.BytesIO() + buffer.write(base64.standard_b64decode(x)) + buffer.seek(0) + return np.load(buffer) + + +NdArray = Annotated[ + npt.NDArray, + PlainValidator(ndarray_deserialize), + PlainSerializer(ndarray_serialize, return_type=str), +] +"""Pydantic-compatible array representation.""" diff --git a/src/qibocal/cli/acquisition.py b/src/qibocal/cli/acquisition.py index dd8aa9285..956f553c9 100644 --- a/src/qibocal/cli/acquisition.py +++ b/src/qibocal/cli/acquisition.py @@ -6,6 +6,7 @@ from ..auto.mode import ExecutionMode from ..auto.output import Metadata, Output from ..auto.runcard import Runcard +from ..calibration import CalibrationPlatform def acquire(runcard: Runcard, folder: Path, force: bool): @@ -17,7 +18,7 @@ def acquire(runcard: Runcard, folder: Path, force: bool): """ # rename for brevity backend = construct_backend(backend=runcard.backend, platform=runcard.platform) - platform = backend.platform + platform = CalibrationPlatform.from_platform(backend.platform) if platform is None: raise ValueError("Qibocal requires a Qibolab platform to run.") diff --git a/src/qibocal/cli/report.py b/src/qibocal/cli/report.py index b6c4e6e20..edbfce5ba 100644 --- a/src/qibocal/cli/report.py +++ b/src/qibocal/cli/report.py @@ -4,13 +4,20 @@ import plotly.graph_objects as go from jinja2 import Environment, FileSystemLoader -from qibolab.qubits import QubitId, QubitPairId from qibocal.auto.history import History +from qibocal.auto.operation import QubitId, QubitPairId from qibocal.auto.output import Output from qibocal.auto.task import Completed from qibocal.config import log -from qibocal.web.report import STYLES, TEMPLATES, Report, report_css_styles +from qibocal.web.report import ( + SCRIPT, + STYLES, + TEMPLATES, + Report, + report_css_styles, + report_script, +) ReportOutcome = tuple[str, 
list[go.Figure]] """Report produced by protocol.""" @@ -24,6 +31,10 @@ def generate_figures_and_report( It operates on a completed `node` and a specific protocol `target`, generating a report outcome (cf. `ReportOutcome`). """ + # TODO: remove temporary fix + if isinstance(target, list): + target = tuple(target) + if node.results is None: # plot acquisition data return node.task.operation.report(data=node.data, fit=None, target=target) @@ -75,6 +86,7 @@ def report(path: pathlib.Path, history: Optional[History] = None): html = template.render( is_static=True, css_styles=report_css_styles(STYLES), + js_script=report_script(SCRIPT), path=path, title=path.name, report=Report( diff --git a/src/qibocal/cli/run.py b/src/qibocal/cli/run.py index 5edc9e46f..ae7fbf658 100644 --- a/src/qibocal/cli/run.py +++ b/src/qibocal/cli/run.py @@ -6,6 +6,7 @@ from ..auto.mode import AUTOCALIBRATION from ..auto.output import Metadata, Output from ..auto.runcard import Runcard +from ..calibration import CalibrationPlatform from .report import report @@ -18,7 +19,8 @@ def protocols_execution(runcard: Runcard, folder: Path, force, update): """ # rename for brevity backend = construct_backend(backend=runcard.backend, platform=runcard.platform) - platform = backend.platform + platform = CalibrationPlatform.from_platform(backend.platform) + if platform is None: raise ValueError("Qibocal requires a Qibolab platform to run.") diff --git a/src/qibocal/cli/update.py b/src/qibocal/cli/update.py index 9d57929fe..f4ccd3124 100644 --- a/src/qibocal/cli/update.py +++ b/src/qibocal/cli/update.py @@ -3,6 +3,8 @@ import pathlib import shutil +from qibolab import locate_platform + from ..auto.output import META, UPDATED_PLATFORM from ..config import log, raise_error @@ -18,7 +20,7 @@ def update(path: pathlib.Path): raise_error(FileNotFoundError, f"No updated runcard platform found in {path}.") platform_name = json.loads((path / META).read_text())["platform"] - platform_path = pathlib.Path(os.getenv("QIBOLAB_PLATFORMS")) / platform_name + platform_path = locate_platform(platform_name) for filename in os.listdir(new_platform_path): shutil.copy( diff --git a/src/qibocal/fitting/classifier/run.py b/src/qibocal/fitting/classifier/run.py index cf962085f..daefd2ccc 100644 --- a/src/qibocal/fitting/classifier/run.py +++ b/src/qibocal/fitting/classifier/run.py @@ -8,9 +8,10 @@ import numpy as np import pandas as pd -from qibolab.qubits import QubitId from sklearn.metrics import accuracy_score +from qibocal.auto.operation import QubitId + from . 
import data CLS_MODULES = [ diff --git a/src/qibocal/protocols/__init__.py b/src/qibocal/protocols/__init__.py index 7d420e9f3..77284fec9 100644 --- a/src/qibocal/protocols/__init__.py +++ b/src/qibocal/protocols/__init__.py @@ -1,31 +1,23 @@ from enum import Enum from .allxy.allxy import allxy +from .allxy.allxy_resonator_depletion_tuning import allxy_resonator_depletion_tuning from .classification import single_shot_classification +from .coherence.cpmg import cpmg from .coherence.spin_echo import spin_echo from .coherence.spin_echo_signal import spin_echo_signal from .coherence.t1 import t1 -from .coherence.t1_sequences import t1_sequences from .coherence.t1_signal import t1_signal from .coherence.t2 import t2 -from .coherence.t2_sequences import t2_sequences from .coherence.t2_signal import t2_signal from .coherence.zeno import zeno -from .coherence.zeno_signal import zeno_signal -from .couplers.coupler_chevron import coupler_chevron -from .couplers.coupler_qubit_spectroscopy import coupler_qubit_spectroscopy -from .couplers.coupler_resonator_spectroscopy import coupler_resonator_spectroscopy from .dispersive_shift import dispersive_shift from .dispersive_shift_qutrit import dispersive_shift_qutrit from .drag import drag_tuning -from .fast_reset.fast_reset import fast_reset +from .drag_simple import drag_simple from .flipping import flipping -from .flipping_signal import flipping_signal -from .flux_dependence.avoided_crossing import avoided_crossing from .flux_dependence.qubit_crosstalk import qubit_crosstalk from .flux_dependence.qubit_flux_dependence import qubit_flux -from .flux_dependence.qubit_flux_tracking import qubit_flux_tracking -from .flux_dependence.resonator_crosstalk import resonator_crosstalk from .flux_dependence.resonator_flux_dependence import resonator_flux from .qubit_spectroscopy import qubit_spectroscopy from .qubit_spectroscopy_ef import qubit_spectroscopy_ef @@ -35,7 +27,8 @@ from .rabi.amplitude_frequency_signal import rabi_amplitude_frequency_signal from .rabi.ef import rabi_amplitude_ef from .rabi.length import rabi_length -from .rabi.length_sequences import rabi_length_sequences +from .rabi.length_frequency import rabi_length_frequency +from .rabi.length_frequency_signal import rabi_length_frequency_signal from .rabi.length_signal import rabi_length_signal from .ramsey.ramsey import ramsey from .ramsey.ramsey_signal import ramsey_signal @@ -46,14 +39,7 @@ from .readout_characterization import readout_characterization from .readout_mitigation_matrix import readout_mitigation_matrix from .readout_optimization.resonator_amplitude import resonator_amplitude -from .readout_optimization.resonator_frequency import resonator_frequency -from .readout_optimization.twpa_calibration.frequency import twpa_frequency -from .readout_optimization.twpa_calibration.frequency_power import twpa_frequency_power -from .readout_optimization.twpa_calibration.frequency_SNR import twpa_frequency_snr -from .readout_optimization.twpa_calibration.power import twpa_power -from .readout_optimization.twpa_calibration.power_SNR import twpa_power_snr from .resonator_punchout import resonator_punchout -from .resonator_punchout_attenuation import resonator_punchout_attenuation from .resonator_spectroscopy import resonator_spectroscopy from .signal_experiments.calibrate_state_discrimination import ( calibrate_state_discrimination, @@ -62,11 +48,7 @@ from .two_qubit_interaction import ( chevron, chevron_signal, - chsh_circuits, - chsh_pulses, correct_virtual_z_phases, - 
correct_virtual_z_phases_signal, - mermin, optimize_two_qubit_gate, ) from .two_qubit_interaction import ( @@ -84,32 +66,20 @@ __all__ = [ "allxy", - "allxy_drag_pulse_tuning", "single_shot_classification", "spin_echo", "spin_echo_signal", "t1", - "t1_sequences", "t1_signal", "t2", - "t2_sequences", "t2_signal", "zeno", - "zeno_signal", - "coupler_chevron", - "coupler_qubit_spectroscopy", - "coupler_resonator_spectroscopy", "dispersive_shift", "dispersive_shift_qutrit", "drag_tuning", - "fast_reset", "flipping", - "flipping_signal", - "avoided_crossing", "qubit_crosstalk", "qubit_flux", - "qubit_flux_tracking", - "resonator_crosstalk", "resonator_flux", "qubit_spectroscopy", "qubit_spectroscopy_ef", @@ -118,7 +88,6 @@ "rabi_amplitude_signal", "rabi_length", "rabi_amplitude_ef", - "rabi_length_sequences", "rabi_length_signal", "ramsey", "ramsey_signal", @@ -127,23 +96,13 @@ "readout_characterization", "readout_mitigation_matrix", "resonator_amplitude", - "resonator_frequency", - "twpa_frequency", - "twpa_frequency_power", - "twpa_frequency_snr", - "twpa_power", - "twpa_power_snr", "resonator_punchout", - "resonator_punchout_attenuation", "resonator_spectroscopy", "calibrate_state_discrimination", "time_of_flight_readout", "chevron", "chevron_signal", - "chsh_circuits", - "chsh_pulses", "correct_virtual_z_phases", - "correct_virtual_z_phases_signal", "state_tomography", "allxy_resonator_depletion_tuning", "two_qubit_state_tomography", @@ -155,7 +114,6 @@ "standard_rb_2q", "standard_rb_2q_inter", "optimize_two_qubit_gate", - "mermin", "ramsey_zz", "cross_resonance_length", "cross_resonance_length_sequences", @@ -165,4 +123,6 @@ "cross_resonance_chevron_amplitude_frequency", "cross_resonance_chevron_coupler", "cross_resonance_cnot", + "cpmg", + "drag_simple", ] diff --git a/src/qibocal/protocols/allxy/allxy.py b/src/qibocal/protocols/allxy/allxy.py index 3c52b4f72..379b494fd 100644 --- a/src/qibocal/protocols/allxy/allxy.py +++ b/src/qibocal/protocols/allxy/allxy.py @@ -1,14 +1,22 @@ from dataclasses import dataclass, field +from typing import Optional import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import ( + AcquisitionType, + AveragingMode, + Delay, + Drag, + Gaussian, + Pulse, + PulseSequence, +) -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.update import replace @dataclass @@ -67,53 +75,50 @@ class AllXYData(Data): def _acquisition( params: AllXYParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> AllXYData: r""" Data acquisition for allXY experiment. - The AllXY experiment is a simple test of the calibration of single qubit gatesThe qubit (initialized in the |0> state) + The AllXY experiment is a simple test of the calibration of single qubit gates. The qubit (initialized in the 0 state) is subjected to two back-to-back single-qubit gates and measured. In each round, we run 21 different gate pairs: - ideally, the first 5 return the qubit to |0>, the next 12 drive it to superposition state, and the last 4 put the - qubit in |1> state. 
+ ideally, the first 5 return the qubit to 0, the next 12 drive it to superposition state, and the last 4 put the + qubit in 1 state. """ # create a Data object to store the results data = AllXYData(beta_param=params.beta_param) - # repeat the experiment as many times as defined by software_averages - # for iteration in range(params.software_averages): sequences, all_ro_pulses = [], [] for gates in gatelist: - sequences.append(PulseSequence()) - all_ro_pulses.append({}) + sequence = PulseSequence() + ro_pulses = {} for qubit in targets: - sequences[-1], all_ro_pulses[-1][qubit] = add_gate_pair_pulses_to_sequence( - platform, gates, qubit, sequences[-1], beta_param=params.beta_param + qubit_sequence, ro_pulses[qubit] = allxy_sequence( + platform, gates, qubit, beta_param=params.beta_param ) + sequence += qubit_sequence + sequences.append(sequence) + all_ro_pulses.append(ro_pulses) # execute the pulse sequence - options = ExecutionParameters( - nshots=params.nshots, averaging_mode=AveragingMode.CYCLIC + options = dict( + nshots=params.nshots, + averaging_mode=AveragingMode.CYCLIC, + acquisition_type=AcquisitionType.DISCRIMINATION, ) if params.unrolling: - results = platform.execute_pulse_sequences(sequences, options) + results = platform.execute(sequences, **options) else: - results = [ - platform.execute_pulse_sequence(sequence, options) for sequence in sequences - ] + results = {} + for sequence in sequences: + results.update(platform.execute([sequence], **options)) - for ig, (gates, ro_pulses) in enumerate(zip(gatelist, all_ro_pulses)): + for gates, ro_pulses in zip(gatelist, all_ro_pulses): gate = "-".join(gates) for qubit in targets: - serial = ro_pulses[qubit].serial - if params.unrolling: - prob = results[serial][ig].probability(0) - z_proj = 2 * prob - 1 - else: - prob = results[ig][serial].probability(0) - z_proj = 2 * prob - 1 - + prob = results[ro_pulses[qubit].id] + z_proj = 1 - 2 * prob errors = 2 * np.sqrt(prob * (1 - prob) / params.nshots) data.register_qubit( AllXYType, @@ -129,94 +134,74 @@ def _acquisition( return data -def add_gate_pair_pulses_to_sequence( - platform: Platform, +def apply_drag(pulse: Pulse, beta_param: Optional[float] = None) -> Pulse: + """Apply Drag with parameter beta.""" + if beta_param is None: + return replace( + pulse, + envelope=Gaussian( + rel_sigma=pulse.envelope.rel_sigma, + ), + ) + return replace( # pragma: no cover + pulse, + envelope=Drag( + rel_sigma=pulse.envelope.rel_sigma, + beta=beta_param, + ), + ) + + +def allxy_sequence( + platform: CalibrationPlatform, gates, qubit, - sequence, - sequence_delay=0, - readout_delay=0, + sequence_delay=None, + readout_delay=None, beta_param=None, ): - pulse_duration = platform.create_RX_pulse(qubit, start=0).duration - # All gates have equal pulse duration - - sequence_duration = sequence.get_qubit_pulses(qubit).duration + sequence_delay - pulse_start = sequence.get_qubit_pulses(qubit).duration + sequence_delay - + natives = platform.natives.single_qubit[qubit] + qd_channel, _ = natives.RX()[0] + sequence = PulseSequence() + if sequence_delay is not None: + sequence.append((qd_channel, Delay(duration=sequence_delay))) for gate in gates: if gate == "I": pass if gate == "Xp": - if beta_param == None: - RX_pulse = platform.create_RX_pulse( - qubit, - start=pulse_start, - ) - else: - RX_pulse = platform.create_RX_drag_pulse( - qubit, - start=pulse_start, - beta=beta_param, - ) - sequence.add(RX_pulse) + qd_channel, rx_pulse = natives.RX()[0] + sequence.append((qd_channel, apply_drag(rx_pulse, beta_param))) 
if gate == "X9": - if beta_param == None: - RX90_pulse = platform.create_RX90_pulse( - qubit, - start=pulse_start, - ) - else: - RX90_pulse = platform.create_RX90_drag_pulse( - qubit, - start=pulse_start, - beta=beta_param, - ) - sequence.add(RX90_pulse) + qd_channel, rx90_pulse = natives.R(theta=np.pi / 2)[0] + sequence.append((qd_channel, apply_drag(rx90_pulse, beta_param))) if gate == "Yp": - if beta_param == None: - RY_pulse = platform.create_RX_pulse( - qubit, - start=pulse_start, - relative_phase=np.pi / 2, - ) - else: - RY_pulse = platform.create_RX_drag_pulse( - qubit, - start=pulse_start, - relative_phase=np.pi / 2, - beta=beta_param, - ) - sequence.add(RY_pulse) - + qd_channel, ry_pulse = natives.R(phi=np.pi / 2)[0] + sequence.append((qd_channel, apply_drag(ry_pulse, beta_param))) if gate == "Y9": - if beta_param == None: - RY90_pulse = platform.create_RX90_pulse( - qubit, - start=pulse_start, - relative_phase=np.pi / 2, - ) - else: - RY90_pulse = platform.create_RX90_drag_pulse( - qubit, - start=pulse_start, - relative_phase=np.pi / 2, - beta=beta_param, - ) - sequence.add(RY90_pulse) - - sequence_duration += pulse_duration - pulse_start = sequence_duration + qd_channel, ry90_pulse = natives.R(theta=np.pi / 2, phi=np.pi / 2)[0] + sequence.append((qd_channel, apply_drag(ry90_pulse, beta_param))) # RO pulse starting just after pair of gates - ro_pulse = platform.create_qubit_readout_pulse( - qubit, start=sequence_duration + readout_delay - ) - - sequence.add(ro_pulse) + qd_channel = platform.qubits[qubit].drive + ro_channel, ro_pulse = natives.MZ()[0] + if readout_delay is not None: + sequence.append( + ( + ro_channel, + Delay(duration=sequence.channel_duration(qd_channel) + readout_delay), + ) + ) + else: + sequence.append( + ( + ro_channel, + Delay(duration=sequence.channel_duration(qd_channel)), + ) + ) + sequence.append((ro_channel, ro_pulse)) return sequence, ro_pulse diff --git a/src/qibocal/protocols/allxy/allxy_drag_pulse_tuning.py b/src/qibocal/protocols/allxy/allxy_drag_pulse_tuning.py deleted file mode 100644 index df76e1ca1..000000000 --- a/src/qibocal/protocols/allxy/allxy_drag_pulse_tuning.py +++ /dev/null @@ -1,166 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from qibolab import AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Data, Parameters, Results, Routine - -from . 
import allxy - - -@dataclass -class AllXYDragParameters(Parameters): - """AllXYDrag runcard inputs.""" - - beta_start: float - """Initial beta parameter for Drag pulse.""" - beta_end: float - """Final beta parameter for Drag pulse.""" - beta_step: float - """Step beta parameter for Drag pulse.""" - - -@dataclass -class AllXYDragResults(Results): - """AllXYDrag outputs.""" - - -@dataclass -class AllXYDragData(Data): - """AllXY acquisition outputs.""" - - beta_param: Optional[float] = None - """Beta parameter for drag pulse.""" - data: dict[tuple[QubitId, float], npt.NDArray[allxy.AllXYType]] = field( - default_factory=dict - ) - """Raw data acquired.""" - - @property - def beta_params(self): - """Access qubits from data structure.""" - return np.unique([b[1] for b in self.data]) - - -def _acquisition( - params: AllXYDragParameters, - platform: Platform, - targets: list[QubitId], -) -> AllXYDragData: - r""" - Data acquisition for allXY experiment varying beta. - The AllXY experiment is a simple test of the calibration of single qubit gatesThe qubit (initialized in the |0> state) - is subjected to two back-to-back single-qubit gates and measured. In each round, we run 21 different gate pairs: - ideally, the first 5 return the qubit to |0>, the next 12 drive it to superposition state, and the last 4 put the - qubit in |1> state. - - The AllXY iteration method allows the user to execute iteratively the list of gates playing with the drag pulse shape - in order to find the optimal drag pulse coefficient for pi pulses. - """ - - data = AllXYDragData() - - betas = np.arange(params.beta_start, params.beta_end, params.beta_step).round(4) - # sweep the parameters - for beta_param in betas: - for gates in allxy.gatelist: - # create a sequence of pulses - ro_pulses = {} - sequence = PulseSequence() - for qubit in targets: - sequence, ro_pulses[qubit] = allxy.add_gate_pair_pulses_to_sequence( - platform, gates, qubit, sequence, beta_param - ) - - # execute the pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - averaging_mode=AveragingMode.CYCLIC, - ), - ) - - # retrieve the results for every qubit - for qubit in targets: - z_proj = 2 * results[ro_pulses[qubit].serial].probability(0) - 1 - # store the results - gate = "-".join(gates) - data.register_qubit( - allxy.AllXYType, - (qubit, beta_param), - dict(prob=np.array([z_proj]), gate=np.array([gate])), - ) - return data - - -def _fit(_data: AllXYDragData) -> AllXYDragResults: - """Post-processing for allXYDrag.""" - return AllXYDragResults() - - -def _plot(data: AllXYDragData, target: QubitId, fit: AllXYDragResults = None): - """Plotting function for allXYDrag.""" - - figures = [] - fitting_report = "" - - fig = go.Figure() - beta_params = data.beta_params - - for j, beta_param in enumerate(beta_params): - beta_param_data = data[target, beta_param] - fig.add_trace( - go.Scatter( - x=beta_param_data.gate, - y=beta_param_data.prob, - mode="markers+lines", - opacity=0.5, - name=f"Beta {beta_param}", - showlegend=True, - legendgroup=f"group{j}", - text=allxy.gatelist, - textposition="bottom center", - ), - ) - - fig.add_hline( - y=0, - line_width=2, - line_dash="dash", - line_color="grey", - ) - fig.add_hline( - y=1, - line_width=2, - line_dash="dash", - line_color="grey", - ) - - fig.add_hline( - y=-1, - line_width=2, - line_dash="dash", - line_color="grey", - ) - - fig.update_layout( - showlegend=True, - xaxis_title="Gate sequence number", - 
yaxis_title="Expectation value of Z", - ) - - figures.append(fig) - - return figures, fitting_report - - -allxy_drag_pulse_tuning = Routine(_acquisition, _fit, _plot) -"""AllXYDrag Routine object.""" diff --git a/src/qibocal/protocols/allxy/allxy_resonator_depletion_tuning.py b/src/qibocal/protocols/allxy/allxy_resonator_depletion_tuning.py index 6807ab993..d50190fbe 100644 --- a/src/qibocal/protocols/allxy/allxy_resonator_depletion_tuning.py +++ b/src/qibocal/protocols/allxy/allxy_resonator_depletion_tuning.py @@ -4,12 +4,10 @@ import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AveragingMode, PulseSequence -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from . import allxy @@ -24,6 +22,13 @@ class AllXYResonatorParameters(Parameters): """Final delay parameter for resonator depletion.""" delay_step: float """Step delay parameter for resonator depletion.""" + readout_delay: int = 1000 + """Delay on readout.""" + unrolling: bool = False + """If ``True`` it uses sequence unrolling to deploy multiple sequences in a single instrument call. + Defaults to ``False``.""" + beta_param: float = None + """Beta parameter for drag pulse.""" @dataclass @@ -50,7 +55,7 @@ def delay_params(self): def _acquisition( params: AllXYResonatorParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> AllXYResonatorData: r""" @@ -65,45 +70,47 @@ def _acquisition( delays = np.arange(params.delay_start, params.delay_end, params.delay_step) # sweep the parameters - for delay_param in delays: + for delay in delays: + sequences, all_ro_pulses = [], [] for gates in allxy.gatelist: - # create a sequence of pulses - ro_pulses = {} sequence = PulseSequence() + ro_pulses = {} for qubit in targets: - ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) - sequence.add(ro_pulse) - sequence, ro_pulses[qubit] = allxy.add_gate_pair_pulses_to_sequence( + qubit_sequence, ro_pulses[qubit] = allxy.allxy_sequence( platform, gates, qubit, - sequence, - sequence_delay=int( - delay_param - ), # We need conversion to int due to devices for now + beta_param=params.beta_param, + sequence_delay=delay, readout_delay=1000, ) - - # execute the pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - averaging_mode=AveragingMode.CYCLIC, - ), - ) - - # retrieve the results for every qubit + sequence += qubit_sequence + sequences.append(sequence) + all_ro_pulses.append(ro_pulses) + options = dict(nshots=params.nshots, averaging_mode=AveragingMode.CYCLIC) + if params.unrolling: + results = platform.execute(sequences, **options) + else: + results = {} + for sequence in sequences: + results.update(platform.execute([sequence], **options)) + + for gates, ro_pulses in zip(allxy.gatelist, all_ro_pulses): + gate = "-".join(gates) for qubit in targets: - z_proj = 2 * results[ro_pulses[qubit].serial].probability(0) - 1 - # store the results - gate = "-".join(gates) + prob = 1 - results[ro_pulses[qubit].id] + z_proj = 2 * prob - 1 + errors = 2 * np.sqrt(prob * (1 - prob) / params.nshots) data.register_qubit( allxy.AllXYType, - (qubit, 
float(delay_param)), - dict(prob=np.array([z_proj]), gate=np.array([gate])), + (qubit, float(delay)), + dict( + prob=np.array([z_proj]), + gate=np.array([gate]), + errors=np.array([errors]), + ), ) + return data diff --git a/src/qibocal/protocols/classification.py b/src/qibocal/protocols/classification.py index 6731e08a0..98acb4f6c 100644 --- a/src/qibocal/protocols/classification.py +++ b/src/qibocal/protocols/classification.py @@ -7,15 +7,20 @@ import numpy.typing as npt import pandas as pd import plotly.graph_objects as go -from qibolab import AcquisitionType, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, PulseSequence from sklearn.metrics import roc_auc_score, roc_curve from qibocal import update -from qibocal.auto.operation import RESULTSFILE, Data, Parameters, Results, Routine +from qibocal.auto.operation import ( + RESULTSFILE, + Data, + Parameters, + QubitId, + Results, + Routine, +) from qibocal.auto.serialize import serialize +from qibocal.calibration import CalibrationPlatform from qibocal.fitting.classifier import run from qibocal.protocols.utils import ( LEGEND_FONT_SIZE, @@ -35,19 +40,33 @@ DEFAULT_CLASSIFIER = "qubit_fit" +def evaluate_snr(zeros: npt.NDArray, ones: npt.NDArray) -> float: + """Compute snr for zeros and ones""" + line = np.mean(ones, axis=0) - np.mean(zeros, axis=0) + projection_zeros, projection_ones = np.dot(zeros, line), np.dot(ones, line) + mu0, std0 = np.mean(projection_zeros), np.std(projection_zeros) + mu1, std1 = np.mean(projection_ones), np.std(projection_ones) + return np.abs(mu1 - mu0) ** 2 / 2 / std0 / std1 + + @dataclass class SingleShotClassificationParameters(Parameters): """SingleShotClassification runcard inputs.""" unrolling: bool = False - """If ``True`` it uses sequence unrolling to deploy multiple sequences in a single instrument call. - Defaults to ``False``.""" + """Whether to unroll the sequences. + + If ``True`` it uses sequence unrolling to deploy multiple sequences in a + single instrument call. + + Defaults to ``False``. 
+ """ classifiers_list: Optional[list[str]] = field( default_factory=lambda: [DEFAULT_CLASSIFIER] ) - """List of models to classify the qubit states""" + """List of models to classify the qubit states.""" savedir: Optional[str] = " " - """Dumping folder of the classification results""" + """Dumping folder of the classification results.""" ClassificationType = np.dtype([("i", np.float64), ("q", np.float64), ("state", int)]) @@ -59,7 +78,7 @@ class SingleShotClassificationData(Data): nshots: int """Number of shots.""" savedir: str - """Dumping folder of the classification results""" + """Dumping folder of the classification results.""" qubit_frequencies: dict[QubitId, float] = field(default_factory=dict) """Qubit frequencies.""" data: dict[QubitId, npt.NDArray] = field(default_factory=dict) @@ -67,7 +86,17 @@ class SingleShotClassificationData(Data): classifiers_list: Optional[list[str]] = field( default_factory=lambda: [DEFAULT_CLASSIFIER] ) - """List of models to classify the qubit states""" + """List of models to classify the qubit states.""" + + def state_zero(self, qubit: QubitId) -> npt.NDArray: + """Get state zero data.""" + state_zero = self.data[qubit][self.data[qubit].state == 0] + return np.column_stack([state_zero.i, state_zero.q]) + + def state_one(self, qubit: QubitId) -> npt.NDArray: + """Get state one data.""" + state_one = self.data[qubit][self.data[qubit].state == 1] + return np.column_stack([state_one.i, state_one.q]) @dataclass @@ -106,12 +135,14 @@ class SingleShotClassificationResults(Results): """Test set.""" y_tests: dict[QubitId, list] = field(default_factory=dict) """Test set.""" + snr: dict[QubitId, float] = field(default_factory=dict) + """SNR for two clouds""" def __contains__(self, key: QubitId): """Checking if key is in Results. - Overwritten because classifiers_hpars is empty when running - the default_classifier. + Overwritten because classifiers_hpars is empty when running the + default_classifier. 
""" return all( key in getattr(self, field.name) @@ -142,7 +173,7 @@ def save(self, path): def _acquisition( params: SingleShotClassificationParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> SingleShotClassificationData: """ @@ -173,19 +204,21 @@ def _acquisition( # state1_sequence: RX - MZ # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel + native = platform.natives.single_qubit sequences, all_ro_pulses = [], [] for state in [0, 1]: - sequence = PulseSequence() - RX_pulses = {} ro_pulses = {} - for qubit in targets: - RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=RX_pulses[qubit].finish - ) - if state == 1: - sequence.add(RX_pulses[qubit]) - sequence.add(ro_pulses[qubit]) + sequence = PulseSequence() + for q in targets: + ro_sequence = native[q].MZ() + ro_pulses[q] = ro_sequence[0][1].id + sequence += ro_sequence + + if state == 1: + rx_sequence = PulseSequence() + for q in targets: + rx_sequence += native[q].RX() + sequence = rx_sequence | sequence sequences.append(sequence) all_ro_pulses.append(ro_pulses) @@ -193,38 +226,36 @@ def _acquisition( data = SingleShotClassificationData( nshots=params.nshots, qubit_frequencies={ - qubit: platform.qubits[qubit].drive_frequency for qubit in targets + qubit: platform.config(platform.qubits[qubit].drive).frequency + for qubit in targets }, classifiers_list=params.classifiers_list, savedir=params.savedir, ) - options = ExecutionParameters( + options = dict( nshots=params.nshots, relaxation_time=params.relaxation_time, acquisition_type=AcquisitionType.INTEGRATION, ) if params.unrolling: - results = platform.execute_pulse_sequences(sequences, options) + results = platform.execute(sequences, **options) else: - results = [ - platform.execute_pulse_sequence(sequence, options) for sequence in sequences - ] + results = {} + for sequence in sequences: + results.update(platform.execute([sequence], **options)) - for ig, (state, ro_pulses) in enumerate(zip([0, 1], all_ro_pulses)): + for state, ro_pulses in zip([0, 1], all_ro_pulses): for qubit in targets: - serial = ro_pulses[qubit].serial - if params.unrolling: - result = results[serial][ig] - else: - result = results[ig][serial] + serial = ro_pulses[qubit] + result = results[serial] data.register_qubit( ClassificationType, (qubit), dict( - i=result.voltage_i, - q=result.voltage_q, + i=result[..., 0], + q=result[..., 1], state=[state] * params.nshots, ), ) @@ -240,6 +271,7 @@ def _fit(data: SingleShotClassificationData) -> SingleShotClassificationResults: y_tests = {} x_tests = {} hpars = {} + snr = {} threshold = {} rotation_angle = {} mean_gnd_states = {} @@ -281,6 +313,7 @@ def _fit(data: SingleShotClassificationData) -> SingleShotClassificationResults: mean_gnd_states[qubit] = models[i].iq_mean0.tolist() mean_exc_states[qubit] = models[i].iq_mean1.tolist() fidelity[qubit] = models[i].fidelity + snr[qubit] = evaluate_snr(data.state_zero(qubit), data.state_one(qubit)) assignment_fidelity[qubit] = models[i].assignment_fidelity predictions_state0 = models[i].predict(iq_state0.tolist()) effective_temperature[qubit] = models[i].effective_temperature( @@ -305,6 +338,7 @@ def _fit(data: SingleShotClassificationData) -> SingleShotClassificationResults: savedir=data.savedir, y_preds=y_test_predict, grid_preds=grid_preds_dict, + snr=snr, ) @@ -367,6 +401,7 @@ def _plot( "Threshold", "Readout Fidelity", "Assignment Fidelity", + "SNR", "Effective 
Qubit Temperature [K]", ], [ @@ -376,6 +411,7 @@ np.round(fit.threshold[target], 6), np.round(fit.fidelity[target], 3), np.round(fit.assignment_fidelity[target], 3), + np.round(fit.snr[target], 1), format_error_single_cell( round_report([fit.effective_temperature[target]]) ), @@ -387,14 +423,18 @@ def _plot( def _update( - results: SingleShotClassificationResults, platform: Platform, target: QubitId + results: SingleShotClassificationResults, + platform: CalibrationPlatform, + target: QubitId, ): update.iq_angle(results.rotation_angle[target], platform, target) update.threshold(results.threshold[target], platform, target) update.mean_gnd_states(results.mean_gnd_states[target], platform, target) update.mean_exc_states(results.mean_exc_states[target], platform, target) update.readout_fidelity(results.fidelity[target], platform, target) - update.assignment_fidelity(results.assignment_fidelity[target], platform, target) + platform.calibration.single_qubits[target].readout.effective_temperature = ( + results.effective_temperature[target][0] + ) single_shot_classification = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/coherence/cpmg.py b/src/qibocal/protocols/coherence/cpmg.py new file mode 100644 index 000000000..d3f6b5d49 --- /dev/null +++ b/src/qibocal/protocols/coherence/cpmg.py @@ -0,0 +1,107 @@ +from dataclasses import dataclass + +import numpy as np +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper + +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import probability + +from . import t1 +from .spin_echo import SpinEchoParameters, SpinEchoResults +from .utils import dynamical_decoupling_sequence, exponential_fit_probability, plot + + +@dataclass +class CpmgParameters(SpinEchoParameters): + """Cpmg runcard inputs.""" + + n: int = 1 + """Number of pi rotations.""" + + +@dataclass +class CpmgResults(SpinEchoResults): + """Cpmg outputs.""" + + +class CpmgData(t1.T1Data): + """Cpmg acquisition outputs.""" + + +def _acquisition( + params: CpmgParameters, + platform: CalibrationPlatform, + targets: list[QubitId], +) -> CpmgData: + """Data acquisition for Cpmg.""" + # create a sequence of pulses for the experiment: + sequence, delays = dynamical_decoupling_sequence( + platform, targets, n=params.n, kind="CPMG" + ) + + # define the parameter to sweep and its range: + # delay between pulses + wait_range = np.arange( + params.delay_between_pulses_start, + params.delay_between_pulses_end, + params.delay_between_pulses_step, + ) + durations = [] + for q in targets: + # this is assuming that RX and RX90 have the same duration + duration = platform.natives.single_qubit[q].RX()[0][1].duration + durations.append(duration) + assert ( + params.delay_between_pulses_start - params.n * duration + ) / 2 / params.n >= 0, ( + f"Initial delay too short for qubit {q}, " + f"minimum delay should be {params.n * duration}" + ) + + assert ( + len(set(durations)) == 1 + ), "Cannot run on multiple qubits with different RX durations."
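+    # each of the 2n delays is set to (wait - n * duration) / (2n), so that, together with the n pi pulses, the total time between the two RX90 pulses equals the swept wait value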
+ + sweeper = Sweeper( + parameter=Parameter.duration, + values=(wait_range - params.n * durations[0]) / 2 / params.n, + pulses=delays, + ) + + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, + ) + + data = CpmgData() + for qubit in targets: + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[-1] + result = results[ro_pulse.id] + prob = probability(result, state=1) + error = np.sqrt(prob * (1 - prob) / params.nshots) + data.register_qubit( + t1.CoherenceProbType, + (qubit), + dict( + wait=wait_range, + prob=prob, + error=error, + ), + ) + + return data + + +def _fit(data: CpmgData) -> CpmgResults: + """Post-processing for Cpmg.""" + t2Echos, fitted_parameters, pcovs, chi2 = exponential_fit_probability(data) + return CpmgResults(t2Echos, fitted_parameters, pcovs, chi2) + + +cpmg = Routine(_acquisition, _fit, plot) +"""Cpmg Routine object.""" diff --git a/src/qibocal/protocols/coherence/spin_echo.py b/src/qibocal/protocols/coherence/spin_echo.py index 20e8fb042..bd77fb115 100644 --- a/src/qibocal/protocols/coherence/spin_echo.py +++ b/src/qibocal/protocols/coherence/spin_echo.py @@ -1,20 +1,16 @@ -from copy import deepcopy from dataclasses import dataclass, field from typing import Optional import numpy as np -import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import probability -from ..utils import table_dict, table_html from . 
import t1 from .spin_echo_signal import SpinEchoSignalParameters, SpinEchoSignalResults, _update -from .utils import exp_decay, exponential_fit_probability +from .utils import dynamical_decoupling_sequence, exponential_fit_probability, plot @dataclass @@ -38,42 +34,43 @@ class SpinEchoData(t1.T1Data): def _acquisition( params: SpinEchoParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> SpinEchoData: """Data acquisition for SpinEcho""" # create a sequence of pulses for the experiment: - # Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout - ro_pulses = {} - RX90_pulses1 = {} - RX_pulses = {} - RX90_pulses2 = {} - sequence = PulseSequence() - for qubit in targets: - RX90_pulses1[qubit] = platform.create_RX90_pulse(qubit, start=0) - RX_pulses[qubit] = platform.create_RX_pulse( - qubit, start=RX90_pulses1[qubit].finish - ) - RX90_pulses2[qubit] = platform.create_RX90_pulse( - qubit, start=RX_pulses[qubit].finish - ) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=RX90_pulses2[qubit].finish - ) - sequence.add(RX90_pulses1[qubit]) - sequence.add(RX_pulses[qubit]) - sequence.add(RX90_pulses2[qubit]) - sequence.add(ro_pulses[qubit]) + sequence, delays = dynamical_decoupling_sequence(platform, targets, kind="CP") # define the parameter to sweep and its range: # delay between pulses - ro_wait_range = np.arange( + wait_range = np.arange( params.delay_between_pulses_start, params.delay_between_pulses_end, params.delay_between_pulses_step, ) - options = ExecutionParameters( + durations = [] + for q in targets: + # this is assuming that RX and RX90 have the same duration + duration = platform.natives.single_qubit[q].RX()[0][1].duration + durations.append(duration) + assert (params.delay_between_pulses_start - duration) / 2 >= 0, ( + f"Initial delay too short for qubit {q}, " + f"minimum delay should be {duration}" + ) + assert ( + len(set(durations)) == 1 + ), "Cannot run on multiple qubits with different RX durations."
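+    # the two delays are each set to (wait - duration) / 2, so that, together with the central RX pulse, the total time between the two RX90 pulses equals the swept wait value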
+ + sweeper = Sweeper( + parameter=Parameter.duration, + values=(wait_range - durations[0]) / 2, + pulses=delays, + ) + + results = platform.execute( + [sequence], + [[sweeper]], nshots=params.nshots, relaxation_time=params.relaxation_time, acquisition_type=AcquisitionType.DISCRIMINATION, @@ -81,45 +78,20 @@ def _acquisition( ) data = SpinEchoData() - sequences, all_ro_pulses = [], [] - # sweep the parameter - for wait in ro_wait_range: - # save data as often as defined by points - - for qubit in targets: - RX_pulses[qubit].start = RX90_pulses1[qubit].finish + wait // 2 - RX90_pulses2[qubit].start = RX_pulses[qubit].finish + wait // 2 - ro_pulses[qubit].start = RX90_pulses2[qubit].finish - - sequences.append(deepcopy(sequence)) - all_ro_pulses.append(deepcopy(sequence).ro_pulses) - - if params.unrolling: - results = platform.execute_pulse_sequences(sequences, options) - - elif not params.unrolling: - results = [ - platform.execute_pulse_sequence(sequence, options) for sequence in sequences - ] - - for ig, (wait, ro_pulses) in enumerate(zip(ro_wait_range, all_ro_pulses)): - for qubit in targets: - serial = ro_pulses.get_qubit_pulses(qubit)[0].serial - if params.unrolling: - result = results[serial][0] - else: - result = results[ig][serial] - prob = result.probability(state=0) - error = np.sqrt(prob * (1 - prob) / params.nshots) - data.register_qubit( - t1.CoherenceProbType, - (qubit), - dict( - wait=np.array([wait]), - prob=np.array([prob]), - error=np.array([error]), - ), - ) + for qubit in targets: + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[-1] + result = results[ro_pulse.id] + prob = probability(result, state=1) + error = np.sqrt(prob * (1 - prob) / params.nshots) + data.register_qubit( + t1.CoherenceProbType, + (qubit), + dict( + wait=wait_range, + prob=prob, + error=error, + ), + ) return data @@ -130,77 +102,5 @@ def _fit(data: SpinEchoData) -> SpinEchoResults: return SpinEchoResults(t2Echos, fitted_parameters, pcovs, chi2) -def _plot(data: SpinEchoData, target: QubitId, fit: SpinEchoResults = None): - """Plotting for SpinEcho""" - - figures = [] - # iterate over multiple data folders - fitting_report = "" - - qubit_data = data[target] - waits = qubit_data.wait - probs = qubit_data.prob - error_bars = qubit_data.error - - fig = go.Figure( - [ - go.Scatter( - x=waits, - y=probs, - opacity=1, - name="Probability of 0", - showlegend=True, - legendgroup="Probability of 0", - mode="lines", - ), - go.Scatter( - x=np.concatenate((waits, waits[::-1])), - y=np.concatenate((probs + error_bars, (probs - error_bars)[::-1])), - fill="toself", - fillcolor=t1.COLORBAND, - line=dict(color=t1.COLORBAND_LINE), - showlegend=True, - name="Errors", - ), - ] - ) - - if fit is not None: - # add fitting trace - waitrange = np.linspace( - min(waits), - max(waits), - 2 * len(qubit_data), - ) - params = fit.fitted_parameters[target] - - fig.add_trace( - go.Scatter( - x=waitrange, - y=exp_decay(waitrange, *params), - name="Fit", - line=go.scatter.Line(dash="dot"), - ), - ) - fitting_report = table_html( - table_dict( - target, - ["T2 Spin Echo [ns]", "chi2 reduced"], - [fit.t2_spin_echo[target], fit.chi2[target]], - display_error=True, - ) - ) - - fig.update_layout( - showlegend=True, - xaxis_title="Time [ns]", - yaxis_title="Probability of State 0", - ) - - figures.append(fig) - - return figures, fitting_report - - -spin_echo = Routine(_acquisition, _fit, _plot, _update) +spin_echo = Routine(_acquisition, _fit, plot, _update) """SpinEcho Routine object.""" diff --git 
a/src/qibocal/protocols/coherence/spin_echo_signal.py b/src/qibocal/protocols/coherence/spin_echo_signal.py index 94a852f01..8b8885f85 100644 --- a/src/qibocal/protocols/coherence/spin_echo_signal.py +++ b/src/qibocal/protocols/coherence/spin_echo_signal.py @@ -1,20 +1,23 @@ -from copy import deepcopy from dataclasses import dataclass from typing import Union import numpy as np import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper -from qibocal import update -from qibocal.auto.operation import Parameters, Results, Routine +from qibocal.auto.operation import Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import magnitude, phase +from ... import update from ..utils import table_dict, table_html from .t1_signal import T1SignalData -from .utils import CoherenceType, exp_decay, exponential_fit +from .utils import ( + CoherenceType, + dynamical_decoupling_sequence, + exp_decay, + exponential_fit, +) @dataclass @@ -27,18 +30,14 @@ class SpinEchoSignalParameters(Parameters): """Final delay between pulses [ns].""" delay_between_pulses_step: int """Step delay between pulses [ns].""" - unrolling: bool = False - """If ``True`` it uses sequence unrolling to deploy multiple sequences in a single instrument call. - Defaults to ``False``.""" single_shot: bool = False - """If ``True`` save single shot signal data.""" @dataclass class SpinEchoSignalResults(Results): """SpinEchoSignal outputs.""" - t2_spin_echo: dict[QubitId, Union[float, list[float]]] + t2: dict[QubitId, Union[float, list[float]]] """T2 echo for each qubit.""" fitted_parameters: dict[QubitId, dict[str, float]] """Raw fitting output.""" @@ -52,42 +51,44 @@ class SpinEchoSignalData(T1SignalData): def _acquisition( params: SpinEchoSignalParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> SpinEchoSignalData: """Data acquisition for SpinEcho""" # create a sequence of pulses for the experiment: - # Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout - ro_pulses = {} - RX90_pulses1 = {} - RX_pulses = {} - RX90_pulses2 = {} - sequence = PulseSequence() - for qubit in targets: - RX90_pulses1[qubit] = platform.create_RX90_pulse(qubit, start=0) - RX_pulses[qubit] = platform.create_RX_pulse( - qubit, start=RX90_pulses1[qubit].finish - ) - RX90_pulses2[qubit] = platform.create_RX90_pulse( - qubit, start=RX_pulses[qubit].finish - ) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=RX90_pulses2[qubit].finish - ) - sequence.add(RX90_pulses1[qubit]) - sequence.add(RX_pulses[qubit]) - sequence.add(RX90_pulses2[qubit]) - sequence.add(ro_pulses[qubit]) + sequence, delays = dynamical_decoupling_sequence(platform, targets, kind="CP") # define the parameter to sweep and its range: # delay between pulses - ro_wait_range = np.arange( + wait_range = np.arange( params.delay_between_pulses_start, params.delay_between_pulses_end, params.delay_between_pulses_step, ) - options = ExecutionParameters( + durations = [] + for q in targets: + # this is assuming that RX and RX90 have the same duration + duration = platform.natives.single_qubit[q].RX()[0][1].duration + durations.append(duration) + assert (params.delay_between_pulses_start - duration) / 2 >= 0, ( + 
f"Initial delay too short for qubit {q}, " + f"minimum delay should be {duration}" + ) + + assert ( + len(set(durations)) == 1 + ), "Cannot run on mulitple qubit with different RX duration." + + sweeper = Sweeper( + parameter=Parameter.duration, + values=(wait_range - durations[0]) / 2, + pulses=delays, + ) + + results = platform.execute( + [sequence], + [[sweeper]], nshots=params.nshots, relaxation_time=params.relaxation_time, acquisition_type=AcquisitionType.INTEGRATION, @@ -96,55 +97,24 @@ def _acquisition( ), ) - sequences, all_ro_pulses = [], [] - - # sweep the parameter - for wait in ro_wait_range: - # save data as often as defined by points - - for qubit in targets: - RX_pulses[qubit].start = RX90_pulses1[qubit].finish + wait / 2 - RX90_pulses2[qubit].start = RX_pulses[qubit].finish + wait / 2 - ro_pulses[qubit].start = RX90_pulses2[qubit].finish - - sequences.append(deepcopy(sequence)) - all_ro_pulses.append(deepcopy(sequence).ro_pulses) - - if params.unrolling: - results = platform.execute_pulse_sequences(sequences, options) - - elif not params.unrolling: - results = [ - platform.execute_pulse_sequence(sequence, options) for sequence in sequences - ] - data = SpinEchoSignalData() - for ig, (wait, ro_pulses) in enumerate(zip(ro_wait_range, all_ro_pulses)): - for qubit in targets: - serial = ro_pulses.get_qubit_pulses(qubit)[0].serial - if params.unrolling: - result = results[serial][0] - else: - result = results[ig][serial] - if params.single_shot: - _wait = np.array(len(result.magnitude) * [wait]) - else: - _wait = np.array([wait]) - data.register_qubit( - CoherenceType, - (qubit), - dict( - wait=_wait, - signal=np.array([result.magnitude]), - phase=np.array([result.phase]), - ), - ) - - if params.single_shot: - data.data = { - qubit: values.reshape((len(ro_wait_range), params.nshots)).T - for qubit, values in data.data.items() - } + for qubit in targets: + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[-1] + result = results[ro_pulse.id] + signal = magnitude(result) + if params.single_shot: + _wait = np.array(len(signal) * [wait_range]) + else: + _wait = wait_range + data.register_qubit( + CoherenceType, + (qubit), + dict( + wait=_wait, + signal=signal, + phase=phase(result), + ), + ) return data @@ -204,7 +174,7 @@ def _plot(data: SpinEchoSignalData, target: QubitId, fit: SpinEchoSignalResults table_dict( target, ["T2 Spin Echo [ns]"], - [np.round(fit.t2_spin_echo[target])], + [np.round(fit.t2[target])], display_error=True, ) ) @@ -220,8 +190,10 @@ def _plot(data: SpinEchoSignalData, target: QubitId, fit: SpinEchoSignalResults return figures, fitting_report -def _update(results: SpinEchoSignalResults, platform: Platform, target: QubitId): - update.t2_spin_echo(results.t2_spin_echo[target], platform, target) +def _update( + results: SpinEchoSignalResults, platform: CalibrationPlatform, target: QubitId +): + update.t2_spin_echo(results.t2[target], platform, target) spin_echo_signal = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/coherence/t1.py b/src/qibocal/protocols/coherence/t1.py index 54b99ee93..d6630a3aa 100644 --- a/src/qibocal/protocols/coherence/t1.py +++ b/src/qibocal/protocols/coherence/t1.py @@ -4,20 +4,15 @@ import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, 
Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper -from qibocal.auto.operation import Data, Routine +from qibocal.auto.operation import Data, QubitId, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import probability from ..utils import table_dict, table_html from . import t1_signal, utils -COLORBAND = "rgba(0,100,80,0.2)" -COLORBAND_LINE = "rgba(255,255,255,0)" - @dataclass class T1Parameters(t1_signal.T1SignalParameters): @@ -47,41 +42,14 @@ class T1Data(Data): def _acquisition( - params: T1Parameters, platform: Platform, targets: list[QubitId] + params: T1Parameters, platform: CalibrationPlatform, targets: list[QubitId] ) -> T1Data: - r"""Data acquisition for T1 experiment. - In a T1 experiment, we measure an excited qubit after a delay. Due to decoherence processes - (e.g. amplitude damping channel), it is possible that, at the time of measurement, after the delay, - the qubit will not be excited anymore. The larger the delay time is, the more likely is the qubit to - fall to the ground state. The goal of the experiment is to characterize the decay rate of the qubit - towards the ground state. - - Args: - params: - platform (Platform): Qibolab platform object - targets (list): list of target qubits to perform the action - delay_before_readout_start (int): Initial time delay before ReadOut - delay_before_readout_end (list): Maximum time delay before ReadOut - delay_before_readout_step (int): Scan range step for the delay before ReadOut - software_averages (int): Number of executions of the routine for averaging results - points (int): Save data results in a file every number of points - """ + """Data acquisition for T1 experiment.""" - # create a sequence of pulses for the experiment - # RX - wait t - MZ - qd_pulses = {} - ro_pulses = {} - sequence = PulseSequence() - for qubit in targets: - qd_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].duration - ) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) + sequence, ro_pulses, delays = t1_signal.t1_sequence( + platform=platform, targets=targets + ) - # define the parameter to sweep and its range: - # wait time before readout ro_wait_range = np.arange( params.delay_before_readout_start, params.delay_before_readout_end, @@ -89,29 +57,24 @@ def _acquisition( ) sweeper = Sweeper( - Parameter.start, - ro_wait_range, - [ro_pulses[qubit] for qubit in targets], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + values=ro_wait_range, + pulses=[delays[q] for q in targets], ) data = T1Data() - # sweep the parameter - # execute the pulse sequence - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ), - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, ) for qubit in targets: - probs = results[ro_pulses[qubit].serial].probability(state=1) + probs = probability(results[ro_pulses[qubit].id], state=1) errors = np.sqrt(probs * (1 - probs) / params.nshots) data.register_qubit( CoherenceProbType, @@ -159,8 +122,8 @@ def _plot(data: T1Data, target: QubitId, fit: T1Results = None): x=np.concatenate((waits, 
waits[::-1])), y=np.concatenate((probs + error_bars, (probs - error_bars)[::-1])), fill="toself", - fillcolor=COLORBAND, - line=dict(color=COLORBAND_LINE), + fillcolor=utils.COLORBAND, + line=dict(color=utils.COLORBAND_LINE), showlegend=True, name="Errors", ), diff --git a/src/qibocal/protocols/coherence/t1_sequences.py b/src/qibocal/protocols/coherence/t1_sequences.py deleted file mode 100644 index d932e3acb..000000000 --- a/src/qibocal/protocols/coherence/t1_sequences.py +++ /dev/null @@ -1,88 +0,0 @@ -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Routine - -from . import t1_signal -from .utils import CoherenceType - - -def _acquisition( - params: t1_signal.T1SignalParameters, platform: Platform, targets: list[QubitId] -) -> t1_signal.T1SignalData: - r"""Data acquisition for T1 experiment. - In a T1 experiment, we measure an excited qubit after a delay. Due to decoherence processes - (e.g. amplitude damping channel), it is possible that, at the time of measurement, after the delay, - the qubit will not be excited anymore. The larger the delay time is, the more likely is the qubit to - fall to the ground state. The goal of the experiment is to characterize the decay rate of the qubit - towards the ground state. - - Args: - params: - platform (Platform): Qibolab platform object - targets (list): list of target qubits to perform the action - delay_before_readout_start (int): Initial time delay before ReadOut - delay_before_readout_end (list): Maximum time delay before ReadOut - delay_before_readout_step (int): Scan range step for the delay before ReadOut - software_averages (int): Number of executions of the routine for averaging results - points (int): Save data results in a file every number of points - """ - - # create a sequence of pulses for the experiment - # RX - wait t - MZ - qd_pulses = {} - ro_pulses = {} - sequence = PulseSequence() - for qubit in targets: - qd_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].duration - ) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - - # define the parameter to sweep and its range: - # wait time before readout - ro_wait_range = np.arange( - params.delay_before_readout_start, - params.delay_before_readout_end, - params.delay_before_readout_step, - ) - - data = t1_signal.T1SignalData() - - # repeat the experiment as many times as defined by software_averages - # sweep the parameter - for wait in ro_wait_range: - for qubit in targets: - ro_pulses[qubit].start = qd_pulses[qubit].duration + wait - - # execute the pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - ) - for qubit in targets: - result = results[ro_pulses[qubit].serial] - data.register_qubit( - CoherenceType, - (qubit), - dict( - wait=np.array([wait]), - signal=np.array([result.magnitude]), - phase=np.array([result.phase]), - ), - ) - return data - - -t1_sequences = Routine(_acquisition, t1_signal._fit, t1_signal._plot, t1_signal._update) -"""T1 Routine object.""" diff --git a/src/qibocal/protocols/coherence/t1_signal.py b/src/qibocal/protocols/coherence/t1_signal.py index 
74c13fc90..c3a8d28a6 100644 --- a/src/qibocal/protocols/coherence/t1_signal.py +++ b/src/qibocal/protocols/coherence/t1_signal.py @@ -4,15 +4,20 @@ import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine - +from qibolab import ( + AcquisitionType, + AveragingMode, + Delay, + Parameter, + PulseSequence, + Sweeper, +) + +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import magnitude, phase + +from ... import update from ..utils import table_dict, table_html from . import utils @@ -57,42 +62,35 @@ def average(self): return self +def t1_sequence(platform: CalibrationPlatform, targets: list[QubitId]): + """Create sequence for T1 experiment with a given optional delay.""" + sequence = PulseSequence() + ro_pulses, delays = {}, {} + for q in targets: + natives = platform.natives.single_qubit[q] + qd_channel, qd_pulse = natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] + + ro_pulses[q] = ro_pulse + delays[q] = Delay(duration=0) + + sequence.append((qd_channel, qd_pulse)) + sequence.append((ro_channel, Delay(duration=qd_pulse.duration))) + sequence.append((ro_channel, delays[q])) + sequence.append((ro_channel, ro_pulse)) + + return sequence, ro_pulses, delays + + def _acquisition( - params: T1SignalParameters, platform: Platform, targets: list[QubitId] + params: T1SignalParameters, platform: CalibrationPlatform, targets: list[QubitId] ) -> T1SignalData: - r"""Data acquisition for T1 experiment. - In a T1 experiment, we measure an excited qubit after a delay. Due to decoherence processes - (e.g. amplitude damping channel), it is possible that, at the time of measurement, after the delay, - the qubit will not be excited anymore. The larger the delay time is, the more likely is the qubit to - fall to the ground state. The goal of the experiment is to characterize the decay rate of the qubit - towards the ground state. - - Args: - params: - platform (Platform): Qibolab platform object - targets (list): list of target qubits to perform the action - delay_before_readout_start (int): Initial time delay before ReadOut - delay_before_readout_end (list): Maximum time delay before ReadOut - delay_before_readout_step (int): Scan range step for the delay before ReadOut - software_averages (int): Number of executions of the routine for averaging results - points (int): Save data results in a file every number of points - """ + """Data acquisition for T1 experiment. 
- # create a sequence of pulses for the experiment - # RX - wait t - MZ - qd_pulses = {} - ro_pulses = {} - sequence = PulseSequence() - for qubit in targets: - qd_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].duration - ) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) + In this protocol the y axis is the magnitude of signal in the IQ plane.""" + + sequence, ro_pulses, delays = t1_sequence(platform, targets) - # define the parameter to sweep and its range: - # wait time before readout ro_wait_range = np.arange( params.delay_before_readout_start, params.delay_before_readout_end, @@ -100,38 +98,35 @@ def _acquisition( ) sweeper = Sweeper( - Parameter.start, - ro_wait_range, - [ro_pulses[qubit] for qubit in targets], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + values=ro_wait_range, + pulses=[delays[q] for q in targets], ) - # sweep the parameter - # execute the pulse sequence - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=( - AveragingMode.SINGLESHOT if params.single_shot else AveragingMode.CYCLIC - ), + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=( + AveragingMode.SINGLESHOT if params.single_shot else AveragingMode.CYCLIC ), - sweeper, ) data = T1SignalData() - for qubit in targets: - result = results[ro_pulses[qubit].serial] + + for q in targets: + result = results[ro_pulses[q].id] + signal = magnitude(result) if params.single_shot: - _waits = np.array(len(result.magnitude) * [ro_wait_range]) + _waits = np.array(len(signal) * [ro_wait_range]) else: _waits = ro_wait_range data.register_qubit( utils.CoherenceType, - (qubit), - dict(wait=_waits, signal=result.magnitude, phase=result.phase), + (q), + dict(wait=_waits, signal=signal, phase=phase(result)), ) return data @@ -207,7 +202,7 @@ def _plot(data: T1SignalData, target: QubitId, fit: T1SignalResults = None): return figures, fitting_report -def _update(results: T1SignalResults, platform: Platform, target: QubitId): +def _update(results: T1SignalResults, platform: CalibrationPlatform, target: QubitId): update.t1(results.t1[target], platform, target) diff --git a/src/qibocal/protocols/coherence/t2.py b/src/qibocal/protocols/coherence/t2.py index 0b2be1f16..7d8111249 100644 --- a/src/qibocal/protocols/coherence/t2.py +++ b/src/qibocal/protocols/coherence/t2.py @@ -2,16 +2,13 @@ from typing import Optional import numpy as np -import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform -from ..utils import table_dict, table_html +from ...result import probability +from ..ramsey.utils import ramsey_sequence from . 
import t1, t2_signal, utils @@ -43,60 +40,39 @@ class T2Data(t1.T1Data): def _acquisition( params: T2Parameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> T2Data: - """Data acquisition for Ramsey Experiment (detuned).""" - # create a sequence of pulses for the experiment - # RX90 - t - RX90 - MZ - ro_pulses = {} - RX90_pulses1 = {} - RX90_pulses2 = {} - sequence = PulseSequence() - for qubit in targets: - RX90_pulses1[qubit] = platform.create_RX90_pulse(qubit, start=0) - RX90_pulses2[qubit] = platform.create_RX90_pulse( - qubit, - start=RX90_pulses1[qubit].finish, - ) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=RX90_pulses2[qubit].finish - ) - sequence.add(RX90_pulses1[qubit]) - sequence.add(RX90_pulses2[qubit]) - sequence.add(ro_pulses[qubit]) + """Data acquisition for T2 experiment.""" - # define the parameter to sweep and its range: waits = np.arange( - # wait time between RX90 pulses params.delay_between_pulses_start, params.delay_between_pulses_end, params.delay_between_pulses_step, ) + sequence, delays = ramsey_sequence(platform, targets) + data = T2Data() sweeper = Sweeper( - Parameter.start, - waits, - [RX90_pulses2[qubit] for qubit in targets], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + values=waits, + pulses=delays, ) - # execute the sweep - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ), - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, ) for qubit in targets: - probs = results[ro_pulses[qubit].serial].probability(state=1) + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[-1] + probs = probability(results[ro_pulse.id], state=1) errors = np.sqrt(probs * (1 - probs) / params.nshots) data.register_qubit( t1.CoherenceProbType, (qubit), dict(wait=waits, prob=probs, error=errors) @@ -115,80 +91,5 @@ def _fit(data: T2Data) -> T2Results: return T2Results(t2s, fitted_parameters, pcovs, chi2) -def _plot(data: T2Data, target: QubitId, fit: T2Results = None): - """Plotting function for Ramsey Experiment.""" - - figures = [] - fitting_report = "" - qubit_data = data[target] - waits = qubit_data.wait - probs = qubit_data.prob - error_bars = qubit_data.error - - fig = go.Figure( - [ - go.Scatter( - x=waits, - y=probs, - opacity=1, - name="Probability of 1", - showlegend=True, - legendgroup="Probability of 1", - mode="lines", - ), - go.Scatter( - x=np.concatenate((waits, waits[::-1])), - y=np.concatenate((probs + error_bars, (probs - error_bars)[::-1])), - fill="toself", - fillcolor=t1.COLORBAND, - line=dict(color=t1.COLORBAND_LINE), - showlegend=True, - name="Errors", - ), - ] - ) - - if fit is not None: - # add fitting trace - waitrange = np.linspace( - min(qubit_data.wait), - max(qubit_data.wait), - 2 * len(qubit_data), - ) - - params = fit.fitted_parameters[target] - fig.add_trace( - go.Scatter( - x=waitrange, - y=utils.exp_decay( - waitrange, - *params, - ), - name="Fit", - line=go.scatter.Line(dash="dot"), - ) - ) - fitting_report = table_html( - table_dict( - target, - [ - "T2 [ns]", - "chi2 reduced", - ], - [fit.t2[target], fit.chi2[target]], - display_error=True, - ) - ) - fig.update_layout( - showlegend=True, - 
xaxis_title="Time [ns]", - yaxis_title="Probability of State 1", - ) - - figures.append(fig) - - return figures, fitting_report - - -t2 = Routine(_acquisition, _fit, _plot, t2_signal._update) +t2 = Routine(_acquisition, _fit, utils.plot, t2_signal._update) """T2 Routine object.""" diff --git a/src/qibocal/protocols/coherence/t2_sequences.py b/src/qibocal/protocols/coherence/t2_sequences.py deleted file mode 100644 index 85a38159a..000000000 --- a/src/qibocal/protocols/coherence/t2_sequences.py +++ /dev/null @@ -1,79 +0,0 @@ -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Routine - -from .t2_signal import T2SignalData, T2SignalParameters, _fit, _plot, _update -from .utils import CoherenceType - - -def _acquisition( - params: T2SignalParameters, - platform: Platform, - targets: list[QubitId], -) -> T2SignalData: - """Data acquisition for Ramsey Experiment (detuned).""" - # create a sequence of pulses for the experiment - # RX90 - t - RX90 - MZ - ro_pulses = {} - RX90_pulses1 = {} - RX90_pulses2 = {} - sequence = PulseSequence() - for qubit in targets: - RX90_pulses1[qubit] = platform.create_RX90_pulse(qubit, start=0) - RX90_pulses2[qubit] = platform.create_RX90_pulse( - qubit, - start=RX90_pulses1[qubit].finish, - ) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=RX90_pulses2[qubit].finish - ) - sequence.add(RX90_pulses1[qubit]) - sequence.add(RX90_pulses2[qubit]) - sequence.add(ro_pulses[qubit]) - - # define the parameter to sweep and its range: - waits = np.arange( - # wait time between RX90 pulses - params.delay_between_pulses_start, - params.delay_between_pulses_end, - params.delay_between_pulses_step, - ) - - data = T2SignalData() - - # sweep the parameter - for wait in waits: - for qubit in targets: - RX90_pulses2[qubit].start = RX90_pulses1[qubit].finish + wait - ro_pulses[qubit].start = RX90_pulses2[qubit].finish - - # execute the pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - ) - for qubit in targets: - result = results[ro_pulses[qubit].serial] - data.register_qubit( - CoherenceType, - (qubit), - dict( - wait=np.array([wait]), - signal=np.array([result.magnitude]), - phase=np.array([result.phase]), - ), - ) - return data - - -t2_sequences = Routine(_acquisition, _fit, _plot, _update) -"""T2 Routine object.""" diff --git a/src/qibocal/protocols/coherence/t2_signal.py b/src/qibocal/protocols/coherence/t2_signal.py index 4c8ffc714..4f1a2dea4 100644 --- a/src/qibocal/protocols/coherence/t2_signal.py +++ b/src/qibocal/protocols/coherence/t2_signal.py @@ -3,15 +3,14 @@ import numpy as np import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper from qibocal import update -from qibocal.auto.operation import Parameters, Results, Routine +from qibocal.auto.operation import Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform +from 
...result import magnitude, phase +from ..ramsey.utils import ramsey_sequence from ..utils import table_dict, table_html from . import t1_signal, t2, utils @@ -53,69 +52,54 @@ class T2SignalData(t1_signal.T1SignalData): def _acquisition( params: T2SignalParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> T2SignalData: - """Data acquisition for Ramsey Experiment (detuned).""" - # create a sequence of pulses for the experiment - # RX90 - t - RX90 - MZ - ro_pulses = {} - RX90_pulses1 = {} - RX90_pulses2 = {} - sequence = PulseSequence() - for qubit in targets: - RX90_pulses1[qubit] = platform.create_RX90_pulse(qubit, start=0) - RX90_pulses2[qubit] = platform.create_RX90_pulse( - qubit, - start=RX90_pulses1[qubit].finish, - ) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=RX90_pulses2[qubit].finish - ) - sequence.add(RX90_pulses1[qubit]) - sequence.add(RX90_pulses2[qubit]) - sequence.add(ro_pulses[qubit]) + """Data acquisition for T2 experiment. + + In this protocol the y axis is the magnitude of signal in the IQ plane. + + """ - # define the parameter to sweep and its range: waits = np.arange( - # wait time between RX90 pulses params.delay_between_pulses_start, params.delay_between_pulses_end, params.delay_between_pulses_step, ) + sequence, delays = ramsey_sequence(platform, targets) + + data = T2SignalData() + sweeper = Sweeper( - Parameter.start, - waits, - [RX90_pulses2[qubit] for qubit in targets], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + values=waits, + pulses=delays, ) - # execute the sweep - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=( - AveragingMode.SINGLESHOT if params.single_shot else AveragingMode.CYCLIC - ), + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=( + AveragingMode.SINGLESHOT if params.single_shot else AveragingMode.CYCLIC ), - sweeper, ) - data = T2SignalData() - for qubit in targets: - result = results[ro_pulses[qubit].serial] + for q in targets: + ro_pulse = list(sequence.channel(platform.qubits[q].acquisition))[-1] + result = results[ro_pulse.id] + signal = magnitude(result) if params.single_shot: - _waits = np.array(len(result.magnitude) * [waits]) + _waits = np.array(len(signal) * [waits]) else: _waits = waits data.register_qubit( utils.CoherenceType, - (qubit), - dict(wait=_waits, signal=result.magnitude, phase=result.phase), + (q), + dict(wait=_waits, signal=signal, phase=phase(result)), ) return data @@ -191,7 +175,7 @@ def _plot(data: T2SignalData, target: QubitId, fit: T2SignalResults = None): return figures, fitting_report -def _update(results: T2SignalResults, platform: Platform, target: QubitId): +def _update(results: T2SignalResults, platform: CalibrationPlatform, target: QubitId): update.t2(results.t2[target], platform, target) diff --git a/src/qibocal/protocols/coherence/utils.py b/src/qibocal/protocols/coherence/utils.py index 099293daf..2054d2a86 100644 --- a/src/qibocal/protocols/coherence/utils.py +++ b/src/qibocal/protocols/coherence/utils.py @@ -1,15 +1,21 @@ import numpy as np +import plotly.graph_objects as go +from qibolab import Delay, Platform, PulseSequence from scipy.optimize import curve_fit +from qibocal.auto.operation import QubitId from qibocal.config import log 
-from ..utils import chi2_reduced +from ..utils import chi2_reduced, table_dict, table_html CoherenceType = np.dtype( [("wait", np.float64), ("signal", np.float64), ("phase", np.float64)] ) """Custom dtype for coherence routines.""" +COLORBAND = "rgba(0,100,80,0.2)" +COLORBAND_LINE = "rgba(255,255,255,0)" + def average_single_shots(data_type, single_shots): """Convert single shot acquisition results of signal routines to averaged. @@ -28,6 +34,57 @@ def average_single_shots(data_type, single_shots): return data +def dynamical_decoupling_sequence( + platform: Platform, + targets: list[QubitId], + wait: int = 0, + n: int = 1, + kind: str = "CPMG", +) -> tuple[PulseSequence, list[Delay]]: + """Create dynamical decoupling sequence. + + Two sequences are available: + - CP: RX90 (wait RX wait )^N RX90 + - CPMG: RX90 (wait RY wait )^N RX90 + """ + + assert kind in ["CPMG", "CP"], f"Unknown sequence {kind}, please use CP or CPMG" + sequence = PulseSequence() + all_delays = [] + for qubit in targets: + natives = platform.natives.single_qubit[qubit] + qd_channel, rx90_pulse = natives.R(theta=np.pi / 2)[0] + _, pulse = natives.R(phi=np.pi / 2)[0] if kind == "CPMG" else natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] + + drive_delays = 2 * n * [Delay(duration=wait)] + ro_delays = 2 * n * [Delay(duration=wait)] + + sequence.append((qd_channel, rx90_pulse)) + + for i in range(n): + sequence.append((qd_channel, drive_delays[2 * i])) + sequence.append((ro_channel, ro_delays[2 * i])) + sequence.append((qd_channel, pulse)) + sequence.append((qd_channel, drive_delays[2 * i + 1])) + sequence.append((ro_channel, ro_delays[2 * i + 1])) + + sequence.append((qd_channel, rx90_pulse)) + + sequence.append( + ( + ro_channel, + Delay(duration=2 * rx90_pulse.duration + n * pulse.duration), + ) + ) + + sequence.append((ro_channel, ro_pulse)) + all_delays.extend(drive_delays) + all_delays.extend(ro_delays) + + return sequence, all_delays + + def exp_decay(x, *p): return p[0] - p[1] * np.exp(-1 * x / p[2]) @@ -41,10 +98,7 @@ def exponential_fit(data, zeno=False): for qubit in qubits: voltages = data[qubit].signal - if zeno: - times = np.arange(1, len(data[qubit].signal) + 1) - else: - times = data[qubit].wait + times = data[qubit].wait try: y_max = np.max(voltages) @@ -72,7 +126,7 @@ def exponential_fit(data, zeno=False): ) popt = [ (y_max - y_min) * popt[0] + y_min, - (y_max - y_min) * popt[1] * np.exp(x_min * popt[2] / (x_max - x_min)), + (y_max - y_min) * popt[1] * np.exp(x_min / popt[2] / (x_max - x_min)), popt[2] * (x_max - x_min), ] fitted_parameters[qubit] = popt @@ -94,10 +148,8 @@ def exponential_fit_probability(data, zeno=False): pcovs = {} for qubit in qubits: - if zeno: - times = np.arange(1, len(data[qubit].signal) + 1) - else: - times = data[qubit].wait + + times = data[qubit].wait x_max = np.max(times) x_min = np.min(times) x = (times - x_min) / (x_max - x_min) @@ -123,9 +175,10 @@ def exponential_fit_probability(data, zeno=False): ) popt = [ popt[0], - popt[1] * np.exp(x_min * popt[2] / (x_max - x_min)), + popt[1] * np.exp(x_min / (x_max - x_min) / popt[2]), popt[2] * (x_max - x_min), ] + pcovs[qubit] = pcov.tolist() fitted_parameters[qubit] = popt dec = popt[2] @@ -143,3 +196,72 @@ def exponential_fit_probability(data, zeno=False): log.warning(f"Exponential decay fit failed for qubit {qubit} due to {e}") return decay, fitted_parameters, pcovs, chi2 + + +def plot(data, target: QubitId, fit=None) -> tuple[list[go.Figure], str]: + """Plotting function for spin-echo or CPMG protocol.""" + + figures = 
[] + fitting_report = "" + qubit_data = data[target] + waits = qubit_data.wait + probs = qubit_data.prob + error_bars = qubit_data.error + + fig = go.Figure( + [ + go.Scatter( + x=waits, + y=probs, + opacity=1, + name="Probability of 1", + showlegend=True, + legendgroup="Probability of 1", + mode="lines", + ), + go.Scatter( + x=np.concatenate((waits, waits[::-1])), + y=np.concatenate((probs + error_bars, (probs - error_bars)[::-1])), + fill="toself", + fillcolor=COLORBAND, + line=dict(color=COLORBAND_LINE), + showlegend=True, + name="Errors", + ), + ] + ) + + if fit is not None: + waitrange = np.linspace( + min(waits), + max(waits), + 2 * len(qubit_data), + ) + params = fit.fitted_parameters[target] + + fig.add_trace( + go.Scatter( + x=waitrange, + y=exp_decay(waitrange, *params), + name="Fit", + line=go.scatter.Line(dash="dot"), + ), + ) + fitting_report = table_html( + table_dict( + target, + ["T2", "chi2 reduced"], + [fit.t2[target], fit.chi2[target]], + display_error=True, + ) + ) + + fig.update_layout( + showlegend=True, + xaxis_title="Time [ns]", + yaxis_title="Probability of State 1", + ) + + figures.append(fig) + + return figures, fitting_report diff --git a/src/qibocal/protocols/coherence/zeno.py b/src/qibocal/protocols/coherence/zeno.py index 42561b60c..50b4e7eb4 100644 --- a/src/qibocal/protocols/coherence/zeno.py +++ b/src/qibocal/protocols/coherence/zeno.py @@ -3,40 +3,67 @@ import numpy as np import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, AveragingMode, PulseSequence, Readout -from qibocal.auto.operation import Routine +from qibocal import update +from qibocal.auto.operation import Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform -from ..utils import table_dict, table_html +from ...result import probability +from ..utils import COLORBAND, COLORBAND_LINE, table_dict, table_html from . 
import t1, utils -from .zeno_signal import ZenoSignalParameters, ZenoSignalResults, _update @dataclass -class ZenoParameters(ZenoSignalParameters): +class ZenoParameters(Parameters): """Zeno runcard inputs.""" - -@dataclass -class ZenoData(t1.T1Data): - readout_duration: dict[QubitId, float] = field(default_factory=dict) - """Readout durations for each qubit""" + readouts: int + "Number of readout pulses" @dataclass -class ZenoResults(ZenoSignalResults): +class ZenoResults(Results): """Zeno outputs.""" + zeno_t1: dict[QubitId, int] + """T1 for each qubit.""" + fitted_parameters: dict[QubitId, dict[str, float]] + """Raw fitting output.""" + pcov: dict[QubitId, list[float]] + """Approximate covariance of fitted parameters.""" chi2: dict[QubitId, tuple[float, Optional[float]]] """Chi squared estimate mean value and error.""" +def zeno_sequence( + platform: CalibrationPlatform, targets: list[QubitId], readouts: int +) -> tuple[PulseSequence, dict[QubitId, int]]: + """Generating sequence for Zeno experiment.""" + + sequence = PulseSequence() + readout_duration = {} + for q in targets: + natives = platform.natives.single_qubit[q] + _, ro_pulse = natives.MZ()[0] + readout_duration[q] = ro_pulse.duration + qubit_sequence = natives.RX() | natives.MZ() + for _ in range(readouts - 1): + qubit_sequence += natives.MZ() + sequence += qubit_sequence + + return sequence, readout_duration + + +@dataclass +class ZenoData(t1.T1Data): + readout_duration: dict[QubitId, float] = field(default_factory=dict) + """Readout durations for each qubit""" + + def _acquisition( params: ZenoParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> ZenoData: """ @@ -48,50 +75,39 @@ def _acquisition( Reference: https://link.aps.org/accepted/10.1103/PhysRevLett.118.240401. 
""" - # create sequence of pulses: - sequence = PulseSequence() - RX_pulses = {} - ro_pulses = {} - ro_pulse_duration = {} - for qubit in targets: - RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - sequence.add(RX_pulses[qubit]) - start = RX_pulses[qubit].finish - ro_pulses[qubit] = [] - for _ in range(params.readouts): - ro_pulse = platform.create_qubit_readout_pulse(qubit, start=start) - start += ro_pulse.duration - sequence.add(ro_pulse) - ro_pulses[qubit].append(ro_pulse) - ro_pulse_duration[qubit] = ro_pulse.duration - - # create a DataUnits object to store the results - data = ZenoData(readout_duration=ro_pulse_duration) - - # execute the first pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ), + sequence, readout_duration = zeno_sequence( + platform, targets, readouts=params.readouts + ) + data = ZenoData(readout_duration=readout_duration) + + results = platform.execute( + [sequence], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, ) - # retrieve and store the results for every qubit - probs = {qubit: [] for qubit in targets} for qubit in targets: - for ro_pulse in ro_pulses[qubit]: - probs[qubit].append(results[ro_pulse.serial].probability(state=1)) - errors = [np.sqrt(prob * (1 - prob) / params.nshots) for prob in probs[qubit]] + probs = [] + readouts = [ + pulse + for pulse in sequence.channel(platform.qubits[qubit].acquisition) + if isinstance(pulse, Readout) + ] + for i in range(params.readouts): + ro_pulse = readouts[i] + probs.append(probability(results[ro_pulse.id], state=1)) + data.register_qubit( t1.CoherenceProbType, (qubit), dict( - wait=np.arange(1, len(probs[qubit]) + 1), - prob=probs[qubit], - error=errors, + wait=np.arange(params.readouts) + 1, + prob=np.array(probs), + error=np.array( + [np.sqrt(prob * (1 - prob) / params.nshots) for prob in probs] + ), ), ) return data @@ -117,7 +133,7 @@ def _plot(data: ZenoData, fit: ZenoResults, target: QubitId): qubit_data = data[target] probs = qubit_data.prob error_bars = qubit_data.error - readouts = np.arange(1, len(qubit_data.prob) + 1) + readouts = qubit_data.wait fig = go.Figure( [ @@ -134,8 +150,8 @@ def _plot(data: ZenoData, fit: ZenoResults, target: QubitId): x=np.concatenate((readouts, readouts[::-1])), y=np.concatenate((probs + error_bars, (probs - error_bars)[::-1])), fill="toself", - fillcolor=t1.COLORBAND, - line=dict(color=t1.COLORBAND_LINE), + fillcolor=COLORBAND, + line=dict(color=COLORBAND_LINE), showlegend=True, name="Errors", ), @@ -163,8 +179,8 @@ def _plot(data: ZenoData, fit: ZenoResults, target: QubitId): target, ["T1 [ns]", "Readout Pulse [ns]", "chi2 reduced"], [ - fit.zeno_t1[target], np.array(fit.zeno_t1[target]) * data.readout_duration[target], + (data.readout_duration[target], 0), fit.chi2[target], ], display_error=True, @@ -184,4 +200,8 @@ def _plot(data: ZenoData, fit: ZenoResults, target: QubitId): return figures, fitting_report +def _update(results: ZenoResults, platform: CalibrationPlatform, qubit: QubitId): + update.t1(results.zeno_t1[qubit], platform, qubit) + + zeno = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/coherence/zeno_signal.py b/src/qibocal/protocols/coherence/zeno_signal.py deleted file mode 100644 index 
1d0240eeb..000000000 --- a/src/qibocal/protocols/coherence/zeno_signal.py +++ /dev/null @@ -1,194 +0,0 @@ -from dataclasses import dataclass, field - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId - -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine - -from ..utils import table_dict, table_html -from . import utils - - -@dataclass -class ZenoSignalParameters(Parameters): - """Zeno runcard inputs.""" - - readouts: int - "Number of readout pulses" - - -ZenoSignalType = np.dtype([("signal", np.float64), ("phase", np.float64)]) -"""Custom dtype for Zeno.""" - - -@dataclass -class ZenoSignalData(Data): - - readout_duration: dict[QubitId, float] = field(default_factory=dict) - """Readout durations for each qubit""" - data: dict[QubitId, npt.NDArray] = field(default_factory=dict) - """Raw data acquired.""" - - def register_qubit(self, qubit, signal, phase): - """Store output for single qubit.""" - ar = np.empty((1,), dtype=ZenoSignalType) - ar["signal"] = signal - ar["phase"] = phase - if qubit in self.data: - self.data[qubit] = np.rec.array(np.concatenate((self.data[qubit], ar))) - else: - self.data[qubit] = np.rec.array(ar) - - -@dataclass -class ZenoSignalResults(Results): - """Zeno outputs.""" - - zeno_t1: dict[QubitId, int] - """T1 for each qubit.""" - fitted_parameters: dict[QubitId, dict[str, float]] - """Raw fitting output.""" - pcov: dict[QubitId, list[float]] - """Approximate covariance of fitted parameters.""" - - -def _acquisition( - params: ZenoSignalParameters, - platform: Platform, - targets: list[QubitId], -) -> ZenoSignalData: - """ - In a T1_Zeno experiment, we measure an excited qubit repeatedly. Due to decoherence processes, - it is possible that, at the time of measurement, the qubit will not be excited anymore. - The quantum zeno effect consists of measuring allowing a particle's time evolution to be slowed - down by measuring it frequently enough. However, in the experiments we see that due the QND-ness of the readout - pulse that the qubit decoheres faster. - Reference: https://link.aps.org/accepted/10.1103/PhysRevLett.118.240401. 
- """ - - # create sequence of pulses: - sequence = PulseSequence() - RX_pulses = {} - ro_pulses = {} - ro_pulse_duration = {} - for qubit in targets: - RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - sequence.add(RX_pulses[qubit]) - start = RX_pulses[qubit].finish - ro_pulses[qubit] = [] - for _ in range(params.readouts): - ro_pulse = platform.create_qubit_readout_pulse(qubit, start=start) - start += ro_pulse.duration - sequence.add(ro_pulse) - ro_pulses[qubit].append(ro_pulse) - ro_pulse_duration[qubit] = ro_pulse.duration - - # create a DataUnits object to store the results - data = ZenoSignalData(readout_duration=ro_pulse_duration) - - # execute the first pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - ) - - # retrieve and store the results for every qubit - for qubit in targets: - for ro_pulse in ro_pulses[qubit]: - result = results[ro_pulse.serial] - data.register_qubit( - qubit=qubit, signal=result.magnitude, phase=result.phase - ) - return data - - -def _fit(data: ZenoSignalData) -> ZenoSignalResults: - """ - Fitting routine for T1 experiment. The used model is - - .. math:: - - y = p_0-p_1 e^{-x p_2}. - """ - - t1s, fitted_parameters, pcovs = utils.exponential_fit(data, zeno=True) - - return ZenoSignalResults(t1s, fitted_parameters, pcovs) - - -def _plot(data: ZenoSignalData, fit: ZenoSignalResults, target: QubitId): - """Plotting function for T1 experiment.""" - figures = [] - fig = go.Figure() - - fitting_report = "" - qubit_data = data[target] - readouts = np.arange(1, len(qubit_data.signal) + 1) - - fig.add_trace( - go.Scatter( - x=readouts, - y=qubit_data.signal, - opacity=1, - name="Signal", - showlegend=True, - legendgroup="Signal", - ) - ) - - if fit is not None: - fitting_report = "" - waitrange = np.linspace( - min(readouts), - max(readouts), - 2 * len(qubit_data), - ) - params = fit.fitted_parameters[target] - fig.add_trace( - go.Scatter( - x=waitrange, - y=utils.exp_decay(waitrange, *params), - name="Fit", - line=go.scatter.Line(dash="dot"), - ) - ) - fitting_report = table_html( - table_dict( - target, - ["T1", "Readout Pulse"], - [ - np.round(fit.zeno_t1[target]), - np.round(fit.zeno_t1[target] * data.readout_duration[target]), - ], - ) - ) - # FIXME: Pulse duration (+ time of flight ?) 
- - # last part - fig.update_layout( - showlegend=True, - xaxis_title="Number of readouts", - yaxis_title="Signal [a.u.]", - ) - - figures.append(fig) - - return figures, fitting_report - - -def _update(results: ZenoSignalResults, platform: Platform, qubit: QubitId): - update.t1(results.zeno_t1[qubit], platform, qubit) - - -zeno_signal = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/couplers/__init__.py b/src/qibocal/protocols/couplers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/qibocal/protocols/couplers/coupler_chevron.py b/src/qibocal/protocols/couplers/coupler_chevron.py deleted file mode 100644 index dc69a71a9..000000000 --- a/src/qibocal/protocols/couplers/coupler_chevron.py +++ /dev/null @@ -1,141 +0,0 @@ -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence, PulseType -from qibolab.qubits import QubitPairId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal.auto.operation import Results, Routine - -from ..two_qubit_interaction.chevron.chevron import ( - ChevronData, - ChevronParameters, - _plot, -) -from ..two_qubit_interaction.utils import order_pair - - -def _acquisition( - params: ChevronParameters, - platform: Platform, - targets: list[QubitPairId], -) -> ChevronData: - r""" - Perform an CZ experiment between pairs of qubits by changing the coupler state, - qubits need to be pulses into their interaction point. - - Args: - platform: Platform to use. - params: Experiment parameters. - targets (list): List of pairs to use sequentially. - - Returns: - ChevronData: Acquisition data. - """ - # define the parameter to sweep and its range: - delta_amplitude_range = np.arange( - params.amplitude_min_factor, - params.amplitude_max_factor, - params.amplitude_step_factor, - ) - delta_duration_range = np.arange( - params.duration_min, params.duration_max, params.duration_step - ) - - data = ChevronData() - for pair in targets: - sequence = PulseSequence() - - ordered_pair = order_pair(pair, platform) - - # initialize system to state 11(CZ) or 10(iSWAP) - if params.native == "CZ": - initialize_lowfreq = platform.create_RX_pulse(ordered_pair[0], start=0) - sequence.add(initialize_lowfreq) - - initialize_highfreq = platform.create_RX_pulse(ordered_pair[1], start=0) - - sequence.add(initialize_highfreq) - - if params.native == "CZ": - native_gate, _ = platform.create_CZ_pulse_sequence( - (ordered_pair[1], ordered_pair[0]), - start=sequence.finish + params.dt, - ) - elif params.native == "iSWAP": - native_gate, _ = platform.create_iSWAP_pulse_sequence( - (ordered_pair[1], ordered_pair[0]), - start=sequence.finish + params.dt, - ) - data.native_amplitude[ordered_pair] = getattr( - native_gate.coupler_pulses(*pair)[:1][0], "amplitude" - ) - sequence.add(native_gate) - - ro_pulse1 = platform.create_MZ_pulse( - ordered_pair[1], start=sequence.finish + params.dt - ) - ro_pulse2 = platform.create_MZ_pulse( - ordered_pair[0], start=sequence.finish + params.dt - ) - - sequence += ro_pulse1 + ro_pulse2 - - coupler_flux_pulses = [p for p in native_gate.coupler_pulses(*pair)] - assert ( - len(coupler_flux_pulses) == 1 - ), f"coupler_chevron expects exactly one coupler flux pulse, but {len(coupler_flux_pulses)} are present." 
- qubit_flux_pulses = [ - p for p in native_gate.get_qubit_pulses(*pair) if p.type is PulseType.FLUX - ] - assert all( - len(list(filter(lambda x: x.qubit == q, qubit_flux_pulses))) < 2 - for q in pair - ), f"coupler_chevron expects no more than 1 flux pulse for each qubit, but more are present for the pair {pair}" - sweeper_amplitude = Sweeper( - Parameter.amplitude, - delta_amplitude_range, - pulses=coupler_flux_pulses, - type=SweeperType.FACTOR, - ) - sweeper_duration = Sweeper( - Parameter.duration, - delta_duration_range, - pulses=coupler_flux_pulses + qubit_flux_pulses, - ) - - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper_duration, - sweeper_amplitude, - ) - - data.register_qubit( - ordered_pair[0], - ordered_pair[1], - delta_duration_range, - delta_amplitude_range * data.native_amplitude[ordered_pair], - results[ordered_pair[0]].probability(state=1), - results[ordered_pair[1]].probability(state=1), - ) - data.label = "Probability of state |1>" - - return data - - -def _fit(data: ChevronData) -> Results: - """Results for ChevronCouplers.""" - return Results() - - -def plot(data: ChevronData, fit: Results, target): - return _plot(data, None, target) - - -coupler_chevron = Routine(_acquisition, _fit, plot, two_qubit_gates=True) -"""Coupler cz/swap flux routine.""" diff --git a/src/qibocal/protocols/couplers/coupler_qubit_spectroscopy.py b/src/qibocal/protocols/couplers/coupler_qubit_spectroscopy.py deleted file mode 100644 index 8b5cb1c15..000000000 --- a/src/qibocal/protocols/couplers/coupler_qubit_spectroscopy.py +++ /dev/null @@ -1,132 +0,0 @@ -from typing import Optional - -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitPairId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal.auto.operation import Routine - -from ..two_qubit_interaction.utils import order_pair -from .coupler_resonator_spectroscopy import _fit, _plot, _update -from .utils import CouplerSpectroscopyData, CouplerSpectroscopyParameters - - -class CouplerSpectroscopyParametersQubit(CouplerSpectroscopyParameters): - drive_duration: Optional[int] = 2000 - """Drive pulse duration to excite the qubit before the measurement""" - - -def _acquisition( - params: CouplerSpectroscopyParametersQubit, - platform: Platform, - targets: list[QubitPairId], -) -> CouplerSpectroscopyData: - """ - Data acquisition for CouplerQubit spectroscopy. - - This consist on a frequency sweep on the qubit frequency while we change the flux coupler pulse amplitude of - the coupler pulse. We expect to enable the coupler during the amplitude sweep and detect an avoided crossing - that will be followed by the frequency sweep. This needs the qubits at resonance, the routine assumes a sweetspot - value for the higher frequency qubit that moves it to the lower frequency qubit instead of trying to calibrate both pulses at once. This should be run after - qubit_spectroscopy to further adjust the coupler sweetspot if needed and get some information - on the flux coupler pulse amplitude requiered to enable 2q interactions. - - """ - - # TODO: Do we want to measure both qubits on the pair ? 
- - # create a sequence of pulses for the experiment: - # Coupler pulse while Drive pulse - MZ - - if params.measured_qubits is None: - params.measured_qubits = [order_pair(pair, platform)[0] for pair in targets] - - sequence = PulseSequence() - ro_pulses = {} - qd_pulses = {} - offset = {} - couplers = [] - for i, pair in enumerate(targets): - ordered_pair = order_pair(pair, platform) - measured_qubit = params.measured_qubits[i] - - qubit = platform.qubits[measured_qubit].name - offset[qubit] = platform.pairs[tuple(sorted(ordered_pair))].coupler.sweetspot - coupler = platform.pairs[tuple(sorted(ordered_pair))].coupler.name - couplers.append(coupler) - - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=params.drive_duration - ) - qd_pulses[qubit] = platform.create_qubit_drive_pulse( - qubit, start=0, duration=params.drive_duration - ) - if params.amplitude is not None: - qd_pulses[qubit].amplitude = params.amplitude - - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - - # define the parameter to sweep and its range: - delta_frequency_range = np.arange( - -params.freq_width / 2, params.freq_width / 2, params.freq_step - ) - - sweeper_freq = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[qd_pulses[qubit] for qubit in params.measured_qubits], - type=SweeperType.OFFSET, - ) - - delta_bias_range = np.arange( - -params.bias_width / 2, params.bias_width / 2, params.bias_step - ) - sweepers = [ - Sweeper( - Parameter.bias, - delta_bias_range, - qubits=couplers, - type=SweeperType.OFFSET, - ) - ] - - data = CouplerSpectroscopyData( - resonator_type=platform.resonator_type, - offset=offset, - ) - - for bias_sweeper in sweepers: - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - bias_sweeper, - sweeper_freq, - ) - - # retrieve the results for every qubit - for i, pair in enumerate(targets): - # TODO: May measure both qubits on the pair - qubit = platform.qubits[params.measured_qubits[i]].name - result = results[ro_pulses[qubit].serial] - # store the results - data.register_qubit( - qubit, - signal=result.magnitude, - phase=result.phase, - freq=delta_frequency_range + qd_pulses[qubit].frequency, - bias=delta_bias_range, - ) - return data - - -coupler_qubit_spectroscopy = Routine(_acquisition, _fit, _plot, _update) -"""CouplerQubitSpectroscopy Routine object.""" diff --git a/src/qibocal/protocols/couplers/coupler_resonator_spectroscopy.py b/src/qibocal/protocols/couplers/coupler_resonator_spectroscopy.py deleted file mode 100644 index 9848a2a8c..000000000 --- a/src/qibocal/protocols/couplers/coupler_resonator_spectroscopy.py +++ /dev/null @@ -1,172 +0,0 @@ -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitPairId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal.auto.operation import Routine - -from ..flux_dependence.utils import flux_dependence_plot -from ..two_qubit_interaction.utils import order_pair -from .utils import ( - CouplerSpectroscopyData, - CouplerSpectroscopyParameters, - CouplerSpectroscopyResults, -) - - -def _acquisition( - params: CouplerSpectroscopyParameters, - platform: Platform, - targets: list[QubitPairId], -) -> CouplerSpectroscopyData: - """ - Data acquisition for 
CouplerResonator spectroscopy. - - This consist on a frequency sweep on the readout frequency while we change the flux coupler pulse amplitude of - the coupler pulse. We expect to enable the coupler during the amplitude sweep and detect an avoided crossing - that will be followed by the frequency sweep. No need to have the qubits at resonance. This should be run after - resonator_spectroscopy to detect couplers and adjust the coupler sweetspot if needed and get some information - on the flux coupler pulse amplitude requiered to enable 2q interactions. - - """ - - # TODO: Do we want to measure both qubits on the pair ? - - # create a sequence of pulses for the experiment: - # Coupler pulse while MZ - - if params.measured_qubits is None: - params.measured_qubits = [order_pair(pair, platform)[0] for pair in targets] - - sequence = PulseSequence() - ro_pulses = {} - offset = {} - couplers = [] - for i, pair in enumerate(targets): - ordered_pair = order_pair(pair, platform) - measured_qubit = params.measured_qubits[i] - - qubit = platform.qubits[measured_qubit].name - offset[qubit] = platform.pairs[tuple(sorted(ordered_pair))].coupler.sweetspot - coupler = platform.pairs[tuple(sorted(ordered_pair))].coupler.name - couplers.append(coupler) - # TODO: May measure both qubits on the pair - ro_pulses[qubit] = platform.create_qubit_readout_pulse(qubit, start=0) - if params.amplitude is not None: - ro_pulses[qubit].amplitude = params.amplitude - - sequence.add(ro_pulses[qubit]) - - # define the parameter to sweep and its range: - delta_frequency_range = np.arange( - -params.freq_width / 2, params.freq_width / 2, params.freq_step - ) - - sweeper_freq = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[ro_pulses[qubit] for qubit in params.measured_qubits], - type=SweeperType.OFFSET, - ) - - delta_bias_range = np.arange( - -params.bias_width / 2, params.bias_width / 2, params.bias_step - ) - sweepers = [ - Sweeper( - Parameter.bias, - delta_bias_range, - qubits=couplers, - type=SweeperType.OFFSET, - ) - ] - - data = CouplerSpectroscopyData( - resonator_type=platform.resonator_type, - offset=offset, - ) - - for bias_sweeper in sweepers: - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - bias_sweeper, - sweeper_freq, - ) - - # retrieve the results for every qubit - for i, pair in enumerate(targets): - # TODO: May measure both qubits on the pair - qubit = platform.qubits[params.measured_qubits[i]].name - result = results[ro_pulses[qubit].serial] - # store the results - data.register_qubit( - qubit, - signal=result.magnitude, - phase=result.phase, - freq=delta_frequency_range + ro_pulses[qubit].frequency, - bias=delta_bias_range, - ) - return data - - -def _fit(data: CouplerSpectroscopyData) -> CouplerSpectroscopyResults: - """Post-processing function for CouplerResonatorSpectroscopy.""" - qubits = data.qubits - pulse_amp = {} - sweetspot = {} - fitted_parameters = {} - - # TODO: Implement fit - """It should get two things: - Coupler sweetspot: the value that makes both features centered and symmetric - Pulse_amp: That turn on the feature taking into account the shift introduced by the coupler sweetspot - """ - - return CouplerSpectroscopyResults( - pulse_amp=pulse_amp, - sweetspot=sweetspot, - fitted_parameters=fitted_parameters, - ) - - -def _plot( - data: CouplerSpectroscopyData, - target: QubitPairId, - fit: 
CouplerSpectroscopyResults, -): - """ - We may want to measure both qubits on the pair, - that will require a different plotting that takes both. - """ - qubit_pair = target # TODO: Patch for 2q gate routines - - for qubit in qubit_pair: - if qubit in data.data.keys(): - fig = flux_dependence_plot(data, fit, qubit)[0] - - fig.layout.annotations[0].update( - text="Signal [a.u.] Qubit" + str(qubit), - ) - fig.layout.annotations[1].update( - text="Phase [rad] Qubit" + str(qubit), - ) - - return [fig], "" - - -def _update( - results: CouplerSpectroscopyResults, platform: Platform, target: QubitPairId -): - pass - - -coupler_resonator_spectroscopy = Routine(_acquisition, _fit, _plot, _update) -"""CouplerResonatorSpectroscopy Routine object.""" diff --git a/src/qibocal/protocols/couplers/utils.py b/src/qibocal/protocols/couplers/utils.py deleted file mode 100644 index 9e2648dce..000000000 --- a/src/qibocal/protocols/couplers/utils.py +++ /dev/null @@ -1,63 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional, Union - -import numpy as np -import numpy.typing as npt -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Data, Results - -from ..flux_dependence.resonator_flux_dependence import ResonatorFluxParameters -from ..flux_dependence.utils import create_data_array - - -@dataclass -class CouplerSpectroscopyParameters(ResonatorFluxParameters): - """CouplerResonatorSpectroscopy and CouplerQubitSpectroscopy runcard inputs.""" - - measured_qubits: Optional[list[QubitId]] = None - """Qubit to measure from the pair""" - amplitude: Optional[Union[int, float]] = None - """Readout or qubit drive amplitude (optional). If defined, same amplitude will be used in all qubits. - Otherwise the default amplitude defined on the platform runcard will be used""" - - -CouplerSpecType = np.dtype( - [ - ("freq", np.float64), - ("bias", np.float64), - ("signal", np.float64), - ("phase", np.float64), - ] -) -"""Custom dtype for coupler resonator spectroscopy.""" - - -@dataclass -class CouplerSpectroscopyResults(Results): - """CouplerResonatorSpectroscopy or CouplerQubitSpectroscopy outputs.""" - - sweetspot: dict[QubitId, float] - """Sweetspot for each coupler.""" - pulse_amp: dict[QubitId, float] - """Pulse amplitude for the coupler.""" - fitted_parameters: dict[QubitId, dict[str, float]] - """Raw fitted parameters.""" - - -@dataclass -class CouplerSpectroscopyData(Data): - """Data structure for CouplerResonatorSpectroscopy or CouplerQubitSpectroscopy.""" - - resonator_type: str - """Resonator type.""" - offset: dict[QubitId, float] = field(default_factory=dict) - """Qubit bias offset.""" - data: dict[QubitId, npt.NDArray[CouplerSpecType]] = field(default_factory=dict) - """Raw data acquired.""" - - def register_qubit(self, qubit, freq, bias, signal, phase): - """Store output for single qubit.""" - self.data[qubit] = create_data_array( - freq, bias, signal, phase, dtype=CouplerSpecType - ) diff --git a/src/qibocal/protocols/dispersive_shift.py b/src/qibocal/protocols/dispersive_shift.py index a022709ec..3a63792f8 100644 --- a/src/qibocal/protocols/dispersive_shift.py +++ b/src/qibocal/protocols/dispersive_shift.py @@ -4,14 +4,11 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, 
SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, PulseSequence, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.protocols.utils import ( HZ_TO_GHZ, lorentzian, @@ -20,6 +17,8 @@ table_html, ) +from ..result import magnitude, phase, unpack + @dataclass class DispersiveShiftParameters(Parameters): @@ -74,7 +73,9 @@ class DispersiveShiftData(Data): def _acquisition( - params: DispersiveShiftParameters, platform: Platform, targets: list[QubitId] + params: DispersiveShiftParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> DispersiveShiftData: r""" Data acquisition for dispersive shift experiment. @@ -83,75 +84,57 @@ def _acquisition( Args: params (DispersiveShiftParameters): experiment's parameters - platform (Platform): Qibolab platform object + platform (CalibrationPlatform): Qibolab platform object targets (list): list of target qubits to perform the action - """ - # create 2 sequences of pulses for the experiment: - # sequence_0: I - MZ - # sequence_1: RX - MZ - - # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel sequence_0 = PulseSequence() sequence_1 = PulseSequence() - ro_pulses = {} - qd_pulses = {} for qubit in targets: - qd_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].duration - ) - sequence_0.add(ro_pulses[qubit]) - sequence_1.add(qd_pulses[qubit]) - sequence_1.add(ro_pulses[qubit]) + natives = platform.natives.single_qubit[qubit] + sequence_0 += natives.MZ() + sequence_1 += natives.RX() | natives.MZ() - # define the parameter to sweep and its range: delta_frequency_range = np.arange( -params.freq_width / 2, params.freq_width / 2, params.freq_step ) - # create a DataUnits objects to store the results data = DispersiveShiftData(resonator_type=platform.resonator_type) - sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[ro_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - execution_pars = ExecutionParameters( + sweepers = [ + Sweeper( + parameter=Parameter.frequency, + values=platform.config(platform.qubits[q].probe).frequency + + delta_frequency_range, + channels=[platform.qubits[q].probe], + ) + for q in targets + ] + + results = platform.execute( + [sequence_0, sequence_1], + [sweepers], nshots=params.nshots, relaxation_time=params.relaxation_time, acquisition_type=AcquisitionType.INTEGRATION, averaging_mode=AveragingMode.CYCLIC, ) - results_0 = platform.sweep( - sequence_0, - execution_pars, - sweeper, - ) - - results_1 = platform.sweep( - sequence_1, - execution_pars, - sweeper, - ) - # retrieve the results for every qubit for qubit in targets: - for i, results in enumerate([results_0, results_1]): - result = results[ro_pulses[qubit].serial].average - # store the results + ro_frequency = platform.config(platform.qubits[qubit].probe).frequency + for state, sequence in enumerate([sequence_0, sequence_1]): + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[-1] + result = results[ro_pulse.id] + i, q = unpack(result) data.register_qubit( DispersiveShiftType, - (qubit, i), + (qubit, state), dict( - freq=ro_pulses[qubit].frequency + delta_frequency_range, - signal=result.magnitude, - phase=result.phase, - i=result.voltage_i, - 
q=result.voltage_q, + freq=ro_frequency + delta_frequency_range, + signal=magnitude(result), + phase=phase(result), + i=i, + q=q, ), ) return data @@ -335,12 +318,23 @@ def _plot(data: DispersiveShiftData, target: QubitId, fit: DispersiveShiftResult return figures, fitting_report -def _update(results: DispersiveShiftResults, platform: Platform, target: QubitId): +def _update( + results: DispersiveShiftResults, platform: CalibrationPlatform, target: QubitId +): update.readout_frequency(results.best_freq[target], platform, target) if results.frequencies[target] is not None: - delta = platform.qubits[target].drive_frequency - results.frequencies[target][0] + delta = ( + platform.calibration.single_qubits[target].qubit.frequency_01 + - results.frequencies[target][0] + ) g = np.sqrt(np.abs(results.chi(target) * delta)) update.coupling(g, platform, target) + update.dressed_resonator_frequency( + results.frequencies[target][0], platform, target + ) + platform.calibration.single_qubits[target].readout.qudits_frequency[1] = ( + results.frequencies[target][1] + ) dispersive_shift = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/dispersive_shift_qutrit.py b/src/qibocal/protocols/dispersive_shift_qutrit.py index 79545826c..0ce0f09c8 100644 --- a/src/qibocal/protocols/dispersive_shift_qutrit.py +++ b/src/qibocal/protocols/dispersive_shift_qutrit.py @@ -1,16 +1,12 @@ -from copy import deepcopy from dataclasses import asdict, dataclass import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, PulseSequence, Sweeper -from qibocal.auto.operation import Results, Routine +from qibocal.auto.operation import QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.protocols.utils import ( GHZ_TO_HZ, HZ_TO_GHZ, @@ -20,6 +16,7 @@ table_html, ) +from ..result import magnitude, phase from .dispersive_shift import DispersiveShiftData, DispersiveShiftParameters from .resonator_spectroscopy import ResSpecType @@ -68,40 +65,37 @@ class DispersiveShiftQutritData(DispersiveShiftData): def _acquisition( - params: DispersiveShiftParameters, platform: Platform, targets: list[QubitId] + params: DispersiveShiftParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> DispersiveShiftQutritData: r""" - Data acquisition for dispersive shift experiment. - Perform spectroscopy on the readout resonator, with the qubit in ground and excited state, showing - the resonator shift produced by the coupling between the resonator and the qubit. + Data acquisition for dispersive shift qutrit experiment. + Perform spectroscopy on the readout resonator, with the qubit in ground, excited state and + second excited state showing the resonator shift produced by the coupling between the resonator and the qubit. 
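The dispersive-shift `_update` earlier in this diff estimates the qubit-resonator coupling from the fitted shift via `g = sqrt(|chi * delta|)`. A standalone numeric sketch of that relation, using the dispersive approximation `chi = g^2 / Delta`; the half-difference convention for `chi` and all numbers below are assumptions for illustration only:

# Standalone numeric sketch of the coupling estimate: in the dispersive regime
# chi = g^2 / Delta, hence g = sqrt(|chi * Delta|). Frequencies are hypothetical.
import numpy as np

f_qubit = 5.0e9           # qubit 0->1 frequency [Hz]
f_res_state0 = 7.0e9      # dressed resonator frequency with the qubit in |0> [Hz]
f_res_state1 = 6.9994e9   # dressed resonator frequency with the qubit in |1> [Hz]

chi = (f_res_state1 - f_res_state0) / 2   # dispersive shift (one common convention) [Hz]
delta = f_qubit - f_res_state0            # qubit-resonator detuning [Hz]
g = np.sqrt(np.abs(chi * delta))          # coupling strength [Hz]
print(f"chi = {chi / 1e6:.2f} MHz, g = {g / 1e6:.1f} MHz")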
Args: params (DispersiveShiftParameters): experiment's parameters - platform (Platform): Qibolab platform object + platform (CalibrationPlatform): Qibolab platform object targets (list): list of target qubits to perform the action """ - # create 3 sequences of pulses for the experiment: - # sequence_0: I - MZ - # sequence_1: RX - MZ - # sequence_2: RX - RX12 - MZ - - # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel sequence_0 = PulseSequence() sequence_1 = PulseSequence() sequence_2 = PulseSequence() for qubit in targets: - rx_pulse = platform.create_RX_pulse(qubit, start=0) - rx_12_pulse = platform.create_RX12_pulse(qubit, start=rx_pulse.finish) - ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) - sequence_1.add(rx_pulse) - sequence_2.add(rx_pulse, rx_12_pulse) - for sequence in [sequence_0, sequence_1, sequence_2]: - readout_pulse = deepcopy(ro_pulse) - readout_pulse.start = sequence.qd_pulses.finish - sequence.add(readout_pulse) + natives = platform.natives.single_qubit[qubit] + # prepare 0 and measure + sequence_0 += natives.MZ() + + # prepare 1 and measure + sequence_1 += natives.RX() | natives.MZ() + + # prepare 2 and measure + assert natives.RX12 is not None, f"Missing RX12 calibration for qubit {qubit}" + sequence_2 += (natives.RX() + natives.RX12()) | natives.MZ() # define the parameter to sweep and its range: delta_frequency_range = np.arange( @@ -110,36 +104,37 @@ def _acquisition( data = DispersiveShiftQutritData(resonator_type=platform.resonator_type) - for state, sequence in enumerate([sequence_0, sequence_1, sequence_2]): - sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=list(sequence.ro_pulses), - type=SweeperType.OFFSET, - ) - - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper, + sweepers = [ + Sweeper( + parameter=Parameter.frequency, + values=platform.config(platform.qubits[q].probe).frequency + + delta_frequency_range, + channels=[platform.qubits[q].probe], ) + for q in targets + ] + + results = platform.execute( + [sequence_0, sequence_1, sequence_2], + [sweepers], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, + ) - for qubit in targets: - result = results[qubit].average - # store the results + for qubit in targets: + ro_frequency = platform.config(platform.qubits[qubit].probe).frequency + for state, sequence in enumerate([sequence_0, sequence_1, sequence_2]): + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[-1] + result = results[ro_pulse.id] data.register_qubit( ResSpecType, (qubit, state), dict( - freq=sequence.get_qubit_pulses(qubit).ro_pulses[0].frequency - + delta_frequency_range, - signal=result.magnitude, - phase=result.phase, + freq=ro_frequency + delta_frequency_range, + signal=magnitude(result), + phase=phase(result), ), ) diff --git a/src/qibocal/protocols/drag.py b/src/qibocal/protocols/drag.py index d915d78bd..4afce36b4 100644 --- a/src/qibocal/protocols/drag.py +++ b/src/qibocal/protocols/drag.py @@ -4,20 +4,19 @@ import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits 
import QubitId +from qibolab import AcquisitionType, AveragingMode, Delay, Drag, PulseSequence from scipy.optimize import curve_fit from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log +from qibocal.result import probability +from qibocal.update import replace from .utils import ( COLORBAND, COLORBAND_LINE, - HZ_TO_GHZ, chi2_reduced, fallback_period, guess_period, @@ -25,9 +24,8 @@ table_html, ) -# TODO: add errors in fitting - +# TODO: add errors in fitting @dataclass class DragTuningParameters(Parameters): """DragTuning runcard inputs.""" @@ -41,6 +39,8 @@ class DragTuningParameters(Parameters): unrolling: bool = False """If ``True`` it uses sequence unrolling to deploy multiple sequences in a single instrument call. Defaults to ``False``.""" + nflips: int = 1 + """Repetitions of (Xpi - Xmpi).""" @dataclass @@ -64,15 +64,13 @@ class DragTuningResults(Results): class DragTuningData(Data): """DragTuning acquisition outputs.""" - anharmonicity: dict[QubitId, float] = field(default_factory=dict) - """Anharmonicity of each qubit.""" data: dict[QubitId, npt.NDArray[DragTuningType]] = field(default_factory=dict) """Raw data acquired.""" def _acquisition( params: DragTuningParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> DragTuningData: r""" @@ -80,72 +78,91 @@ def _acquisition( See https://arxiv.org/pdf/1504.06597.pdf Fig. 2 (c). """ - data = DragTuningData( - anharmonicity={ - qubit: platform.qubits[qubit].anharmonicity * HZ_TO_GHZ for qubit in targets - } - ) - # define the parameter to sweep and its range: - # qubit drive DRAG pulse beta parameter + data = DragTuningData() beta_param_range = np.arange(params.beta_start, params.beta_end, params.beta_step) sequences, all_ro_pulses = [], [] for beta_param in beta_param_range: sequence = PulseSequence() ro_pulses = {} - for qubit in targets: - RX_drag_pulse = platform.create_RX_drag_pulse( - qubit, start=0, beta=beta_param / data.anharmonicity[qubit] - ) - RX_drag_pulse_minus = platform.create_RX_drag_pulse( - qubit, - start=RX_drag_pulse.finish, - beta=beta_param / data.anharmonicity[qubit], - relative_phase=np.pi, + for q in targets: + natives = platform.natives.single_qubit[q] + qd_channel, qd_pulse = natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] + + drag = replace( + qd_pulse, + envelope=Drag( + rel_sigma=qd_pulse.envelope.rel_sigma, + beta=beta_param, + ), ) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=RX_drag_pulse_minus.finish + drag_negative = replace(drag, relative_phase=np.pi) + + for _ in range(params.nflips): + sequence.append((qd_channel, drag)) + sequence.append((qd_channel, drag_negative)) + sequence.append( + ( + ro_channel, + Delay( + duration=params.nflips + * (drag.duration + drag_negative.duration) + ), + ) ) + sequence.append((ro_channel, ro_pulse)) - sequence.add(RX_drag_pulse) - sequence.add(RX_drag_pulse_minus) - sequence.add(ro_pulses[qubit]) sequences.append(sequence) - all_ro_pulses.append(ro_pulses) + all_ro_pulses.append( + { + qubit: list(sequence.channel(platform.qubits[q].acquisition))[-1] + for qubit in targets + } + ) + + options = { + "nshots": params.nshots, + "relaxation_time": params.relaxation_time, + "acquisition_type": AcquisitionType.DISCRIMINATION, + "averaging_mode": AveragingMode.SINGLESHOT, + } - options = 
ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ) # execute the pulse sequence if params.unrolling: - results = platform.execute_pulse_sequences(sequences, options) - - elif not params.unrolling: - results = [ - platform.execute_pulse_sequence(sequence, options) for sequence in sequences - ] - - for ig, (beta, ro_pulses) in enumerate(zip(beta_param_range, all_ro_pulses)): - for qubit in targets: - serial = ro_pulses[qubit].serial - if params.unrolling: - result = results[serial][ig] - else: - result = results[ig][serial] - prob = result.probability(state=0) - # store the results - data.register_qubit( - DragTuningType, - (qubit), - dict( - prob=np.array([prob]), - error=np.array([np.sqrt(prob * (1 - prob) / params.nshots)]), - beta=np.array([beta]), - ), - ) + results = platform.execute(sequences, **options) + for beta, ro_pulses in zip(beta_param_range, all_ro_pulses): + for qubit in targets: + result = results[ro_pulses[qubit].id] + prob = probability(result, state=0) + # store the results + data.register_qubit( + DragTuningType, + (qubit), + dict( + prob=np.array([prob]), + error=np.array([np.sqrt(prob * (1 - prob) / params.nshots)]), + beta=np.array([beta]), + ), + ) + else: + for i, sequence in enumerate(sequences): + result = platform.execute([sequence], **options) + for qubit in targets: + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[ + -1 + ] + prob = probability(result[ro_pulse.id], state=0) + # store the results + data.register_qubit( + DragTuningType, + (qubit), + dict( + prob=np.array([prob]), + error=np.array([np.sqrt(prob * (1 - prob) / params.nshots)]), + beta=np.array([beta_param_range[i]]), + ), + ) return data @@ -162,22 +179,27 @@ def _fit(data: DragTuningData) -> DragTuningResults: for qubit in qubits: qubit_data = data[qubit] - beta_params = qubit_data.beta + + # normalize prob prob = qubit_data.prob + prob_min = np.min(prob) + prob_max = np.max(prob) + normalized_prob = (prob - prob_min) / (prob_max - prob_min) + # normalize beta + beta_params = qubit_data.beta beta_min = np.min(beta_params) beta_max = np.max(beta_params) normalized_beta = (beta_params - beta_min) / (beta_max - beta_min) # Guessing period using fourier transform - period = fallback_period(guess_period(normalized_beta, prob)) + period = fallback_period(guess_period(normalized_beta, normalized_prob)) pguess = [0.5, 0.5, period, 0] - try: popt, _ = curve_fit( drag_fit, normalized_beta, - prob, + normalized_prob, p0=pguess, maxfev=100000, bounds=( @@ -187,8 +209,8 @@ def _fit(data: DragTuningData) -> DragTuningResults: sigma=qubit_data.error, ) translated_popt = [ - popt[0], - popt[1], + popt[0] * (prob_max - prob_min) + prob_min, + popt[1] * (prob_max - prob_min), popt[2] * (beta_max - beta_min), popt[3] - 2 * np.pi * beta_min / popt[2] / (beta_max - beta_min), ] @@ -280,17 +302,12 @@ def _plot(data: DragTuningData, target: QubitId, fit: DragTuningResults): return figures, fitting_report -def _update(results: DragTuningResults, platform: Platform, target: QubitId): - try: - update.drag_pulse_beta( - results.betas[target] / platform.qubits[target].anharmonicity / HZ_TO_GHZ, - platform, - target, - ) - except ZeroDivisionError: - log.warning( - f"Beta parameter cannot be updated since the anharmoncity for qubit {target} is 0." 
- ) +def _update(results: DragTuningResults, platform: CalibrationPlatform, target: QubitId): + update.drag_pulse_beta( + results.betas[target], + platform, + target, + ) drag_tuning = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/drag_simple.py b/src/qibocal/protocols/drag_simple.py new file mode 100644 index 000000000..a93acac0a --- /dev/null +++ b/src/qibocal/protocols/drag_simple.py @@ -0,0 +1,258 @@ +from dataclasses import dataclass + +import numpy as np +import plotly.graph_objects as go +from qibolab import AcquisitionType, AveragingMode, Delay, Drag, Pulse, PulseSequence +from scipy.optimize import curve_fit + +from qibocal import update +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.protocols import drag +from qibocal.result import probability +from qibocal.update import replace + +from .utils import COLORBAND, COLORBAND_LINE, table_dict, table_html + +SEQUENCES = ["YpX9", "XpY9"] +"""Sequences used to fit drag parameter.""" + + +@dataclass +class DragTuningSimpleParameters(drag.DragTuningParameters): + """DragTuningSimple runcard inputs.""" + + +@dataclass +class DragTuningSimpleResults(drag.DragTuningResults): + """DragTuningSimple outputs.""" + + def __contains__(self, key): + return key in self.betas + + +@dataclass +class DragTuningSimpleData(drag.DragTuningData): + """DragTuningSimple acquisition outputs.""" + + def __getitem__(self, key: tuple[QubitId, str]): + qubit, setup = key + if setup == "YpX9": + return self.data[qubit][::2] + return self.data[qubit][1::2] + + +def add_drag(pulse: Pulse, beta: float) -> Pulse: + """Add DRAG component to Gaussian Pulse.""" + return replace(pulse, envelope=Drag(rel_sigma=pulse.envelope.rel_sigma, beta=beta)) + + +def _acquisition( + params: DragTuningSimpleParameters, + platform: CalibrationPlatform, + targets: list[QubitId], +) -> DragTuningSimpleData: + """Acquisition function for DRAG experiments. + + We execute two sequences YpX9 and XpY9 following + https://rsl.yale.edu/sites/default/files/2024-08/2011-RSL-Thesis-Matthew-Reed.pdf + for different value of the DRAG parameter. 
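Both DRAG protocols in this diff sweep the `beta` parameter of a `Drag` envelope. As a reminder of what `beta` controls, a standalone sketch of the standard DRAG envelope (general definition, not Qibolab code): the in-phase component is a Gaussian and the quadrature component is `beta` times its time derivative.

# Standalone illustration of a DRAG envelope (standard definition): I(t) is a Gaussian
# and Q(t) = beta * dI/dt, so beta scales the derivative (quadrature) component that
# suppresses leakage and phase errors.
import numpy as np

def drag_envelope(t, sigma, beta):
    i_env = np.exp(-(t**2) / (2 * sigma**2))   # Gaussian in-phase component
    q_env = beta * (-t / sigma**2) * i_env     # quadrature: beta * dI/dt
    return i_env, q_env

t = np.linspace(-2.5, 2.5, 101)                # time in units of sigma
i_env, q_env = drag_envelope(t, sigma=1.0, beta=0.02)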
+ """ + + data = DragTuningSimpleData() + beta_range = np.arange(params.beta_start, params.beta_end, params.beta_step) + + sequences, all_ro_pulses = [], [] + for beta in beta_range: + for setup in SEQUENCES: + sequence = PulseSequence() + ro_pulses = {} + for q in targets: + natives = platform.natives.single_qubit[q] + ro_channel, ro_pulse = natives.MZ()[0] + if setup == "YpX9": + qd_channel, ry = natives.R(phi=np.pi / 2)[0] + _, rx90 = natives.R(theta=np.pi / 2)[0] + sequence.append((qd_channel, add_drag(ry, beta=beta))) + sequence.append((qd_channel, add_drag(rx90, beta=beta))) + sequence.append( + (ro_channel, Delay(duration=rx90.duration + ry.duration)) + ) + else: + qd_channel, rx = natives.RX()[0] + _, ry90 = natives.R(theta=np.pi / 2, phi=np.pi / 2)[0] + sequence.append((qd_channel, add_drag(rx, beta=beta))) + sequence.append((qd_channel, add_drag(ry90, beta=beta))) + sequence.append( + (ro_channel, Delay(duration=rx.duration + ry90.duration)) + ) + sequence.append((ro_channel, ro_pulse)) + + sequences.append(sequence) + all_ro_pulses.append( + { + qubit: list(sequence.channel(platform.qubits[q].acquisition))[-1] + for qubit in targets + } + ) + + options = { + "nshots": params.nshots, + "relaxation_time": params.relaxation_time, + "acquisition_type": AcquisitionType.DISCRIMINATION, + "averaging_mode": AveragingMode.SINGLESHOT, + } + + # execute the pulse sequence + if params.unrolling: + results = platform.execute(sequences, **options) + for beta, ro_pulses in zip(np.repeat(beta_range, 2), all_ro_pulses): + for qubit in targets: + result = results[ro_pulses[qubit].id] + prob = probability(result, state=1) + # store the results + data.register_qubit( + drag.DragTuningType, + (qubit), + dict( + prob=np.array([prob]), + error=np.array([np.sqrt(prob * (1 - prob) / params.nshots)]), + beta=np.array([beta]), + ), + ) + else: + for i, sequence in enumerate(sequences): + result = platform.execute([sequence], **options) + setup = "YpX9" if i % 2 == 2 else "XpY9" + for qubit in targets: + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[ + -1 + ] + prob = probability(result[ro_pulse.id], state=1) + # store the results + data.register_qubit( + drag.DragTuningType, + (qubit), + dict( + prob=np.array([prob]), + error=np.array([np.sqrt(prob * (1 - prob) / params.nshots)]), + beta=np.array([beta_range[i // 2]]), + ), + ) + + return data + + +def _fit(data: DragTuningSimpleData) -> DragTuningSimpleResults: + """Post-processing for DRAG protocol. + + A linear fit is applied for the probability of both sequences. + The optimal is determined as the point in which the two lines met. 
+ """ + qubits = data.qubits + fitted_parameters = {} + betas_optimal = {} + for qubit in qubits: + for setup in SEQUENCES: + qubit_data = data[qubit, setup] + popt, _ = curve_fit( + lambda x, a, b: a * x + b, qubit_data.beta, qubit_data.prob + ) + fitted_parameters[qubit, setup] = popt.tolist() + betas_optimal[qubit] = -( + fitted_parameters[qubit, "YpX9"][1] - fitted_parameters[qubit, "XpY9"][1] + ) / (fitted_parameters[qubit, "YpX9"][0] - fitted_parameters[qubit, "XpY9"][0]) + + return DragTuningSimpleResults(betas_optimal, fitted_parameters) + + +def _plot(data: DragTuningSimpleData, target: QubitId, fit: DragTuningSimpleResults): + """Plotting function for DragTuning.""" + + figures = [] + fitting_report = "" + + fig = go.Figure() + for setup in SEQUENCES: + qubit_data = data[target, setup] + fig.add_trace( + go.Scatter( + x=qubit_data.beta, + y=qubit_data.prob, + opacity=1, + mode="lines", + name=setup, + showlegend=True, + legendgroup=setup, + ) + ) + + fig.add_trace( + go.Scatter( + x=np.concatenate((qubit_data.beta, qubit_data.beta[::-1])), + y=np.concatenate( + ( + qubit_data.prob + qubit_data.error, + (qubit_data.prob - qubit_data.error)[::-1], + ) + ), + fill="toself", + fillcolor=COLORBAND, + line=dict(color=COLORBAND_LINE), + name=setup, + showlegend=False, + legendgroup=setup, + ) + ) + + # # add fitting traces + if fit is not None: + for setup in SEQUENCES: + qubit_data = data[target, setup] + betas = qubit_data.beta + beta_range = np.linspace( + min(betas), + max(betas), + 20, + ) + + fig.add_trace( + go.Scatter( + x=beta_range, + y=fit.fitted_parameters[target, setup][0] * betas + + fit.fitted_parameters[target, setup][1], + name=f"Fit {setup}", + line=go.scatter.Line(dash="dot"), + ), + ) + fitting_report = table_html( + table_dict( + target, + ["Best DRAG parameter"], + [np.round(fit.betas[target], 4)], + ) + ) + + fig.update_layout( + showlegend=True, + xaxis_title="Beta parameter", + yaxis_title="Excited state probability", + ) + + figures.append(fig) + + return figures, fitting_report + + +def _update( + results: DragTuningSimpleResults, platform: CalibrationPlatform, target: QubitId +): + update.drag_pulse_beta( + results.betas[target], + platform, + target, + ) + + +drag_simple = Routine(_acquisition, _fit, _plot, _update) +"""DragTuning Routine object.""" diff --git a/src/qibocal/protocols/fast_reset/_init__.py b/src/qibocal/protocols/fast_reset/_init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/qibocal/protocols/fast_reset/fast_reset.py b/src/qibocal/protocols/fast_reset/fast_reset.py deleted file mode 100644 index a6081b424..000000000 --- a/src/qibocal/protocols/fast_reset/fast_reset.py +++ /dev/null @@ -1,234 +0,0 @@ -from dataclasses import dataclass, field - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from plotly.subplots import make_subplots -from qibolab import ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.protocols.utils import table_dict, table_html - -# TODO: IBM Fast Reset until saturation loop -# https://quantum-computing.ibm.com/lab/docs/iql/manage/systems/reset/backend_reset - - -@dataclass -class FastResetParameters(Parameters): - """FastReset runcard inputs.""" - - -@dataclass -class FastResetResults(Results): - """FastReset outputs.""" - - fidelity_nfr: dict[QubitId, float] - "Fidelity of the measurement 
with relaxation time" - Lambda_M_nfr: dict[QubitId, float] - "Mapping between a given initial state to an outcome adter the measurement with relaxation time" - fidelity_fr: dict[QubitId, float] - "Fidelity of the measurement with fast reset" - Lambda_M_fr: dict[QubitId, float] - "Mapping between a given initial state to an outcome adter the measurement with fast reset" - - -FastResetType = np.dtype( - [ - ("probability", np.float64), - ] -) -"""Custom dtype for FastReset.""" - - -@dataclass -class FastResetData(Data): - """FastReset acquisition outputs.""" - - data: dict[tuple, npt.NDArray[FastResetType]] = field(default_factory=dict) - """Raw data acquired.""" - - -def _acquisition( - params: FastResetParameters, platform: Platform, targets: list[QubitId] -) -> FastResetData: - """Data acquisition for resonator spectroscopy.""" - - data = FastResetData() - for state in [0, 1]: - for fast_reset in [True, False]: - # Define the pulse sequences - if state == 1: - RX_pulses = {} - ro_pulses = {} - sequence = PulseSequence() - for qubit in targets: - if state == 1: - RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=RX_pulses[qubit].finish - ) - sequence.add(RX_pulses[qubit]) - else: - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=0 - ) - sequence.add(ro_pulses[qubit]) - - # execute the pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - fast_reset=fast_reset, - ), - ) - - # Save the data - for ro_pulse in ro_pulses.values(): - result = results[ro_pulse.serial] - qubit = ro_pulse.qubit - data.register_qubit( - FastResetType, - (qubit, state, fast_reset), - dict(probability=result.samples), - ) - - return data - - -def _fit(data: FastResetData) -> FastResetResults: - """Post-processing function for FastReset.""" - - qubits = data.qubits - fidelity_nfr = {} - Lambda_M_nfr = {} - fidelity_fr = {} - Lambda_M_fr = {} - for qubit in qubits: - # state 1 - fr_states = data[qubit, 1, True].probability - nfr_states = data[qubit, 1, False].probability - - nshots = len(fr_states) - - state1_count_1fr = np.count_nonzero(fr_states) - state0_count_1fr = nshots - state1_count_1fr - - state1_count_1nfr = np.count_nonzero(nfr_states) - state0_count_1nfr = nshots - state1_count_1nfr - - # state 0 - fr_states = data[qubit, 0, True].probability - nfr_states = data[qubit, 0, False].probability - - state1_count_0fr = np.count_nonzero(fr_states) - state0_count_0fr = nshots - state1_count_0fr - - state1_count_0nfr = np.count_nonzero(nfr_states) - state0_count_0nfr = nshots - state1_count_0nfr - - # Repeat Lambda and fidelity for each measurement ? - Lambda_M_nfr[qubit] = [ - [state0_count_0nfr / nshots, state0_count_1nfr / nshots], - [state1_count_0nfr / nshots, state1_count_1nfr / nshots], - ] - - # Repeat Lambda and fidelity for each measurement ? 
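The deleted `fast_reset` fit above builds an assignment matrix and a readout fidelity from shot counts. A standalone sketch of that bookkeeping, with synthetic counts (columns are the prepared state, rows the measured outcome):

# Standalone sketch of the assignment matrix and fidelity used above:
# fidelity = 1 - (P(measure 1 | prepared 0) + P(measure 0 | prepared 1)) / 2.
# Shot counts are synthetic.
import numpy as np

nshots = 1000
ones_when_prep0 = 30      # measured |1> although |0> was prepared
zeros_when_prep1 = 80     # measured |0> although |1> was prepared

lambda_m = np.array(
    [
        [(nshots - ones_when_prep0) / nshots, zeros_when_prep1 / nshots],
        [ones_when_prep0 / nshots, (nshots - zeros_when_prep1) / nshots],
    ]
)
fidelity = 1 - (ones_when_prep0 / nshots + zeros_when_prep1 / nshots) / 2
print(lambda_m, fidelity)   # fidelity = 0.945 for these counts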
- Lambda_M_fr[qubit] = [ - [state0_count_0fr / nshots, state0_count_1fr / nshots], - [state1_count_0fr / nshots, state1_count_1fr / nshots], - ] - - fidelity_nfr[qubit] = ( - 1 - (state1_count_0nfr / nshots + state0_count_1nfr / nshots) / 2 - ) - - fidelity_fr[qubit] = ( - 1 - (state1_count_0fr / nshots + state0_count_1fr / nshots) / 2 - ) - - return FastResetResults(fidelity_nfr, Lambda_M_nfr, fidelity_fr, Lambda_M_fr) - - -def _plot(data: FastResetData, fit: FastResetResults, target: QubitId): - """Plotting function for FastReset.""" - - # Maybe the plot can just be something like a confusion matrix between 0s and 1s ??? - - figures = [] - fitting_report = "" - fig = make_subplots( - rows=1, - cols=2, - horizontal_spacing=0.1, - vertical_spacing=0.1, - subplot_titles=( - "Fast Reset", - "Relaxation Time [ns]", - ), - ) - - if fit is not None: - fig.add_trace( - go.Heatmap( - z=fit.Lambda_M_fr[target], - coloraxis="coloraxis", - ), - row=1, - col=1, - ) - fitting_report = table_html( - table_dict( - target, - ["Fidelity [Fast Reset]", "Fidelity [Relaxation Time]"], - [ - np.round(fit.fidelity_fr[target], 6), - np.round(fit.fidelity_nfr[target], 6), - ], - ) - ) - - fig.add_trace( - go.Heatmap( - z=fit.Lambda_M_nfr[target], - coloraxis="coloraxis", - ), - row=1, - col=2, - ) - - fig.update_xaxes( - title_text=f"{target}: Fast Reset", - row=1, - col=1, - ) - fig.update_yaxes(title_text="State", row=1, col=1) - fig.update_yaxes(tickvals=[0, 1]) - fig.update_xaxes(tickvals=[0, 1]) - - fig.update_layout(coloraxis={"colorscale": "viridis"}) - - fig.update_xaxes( - title_text="State prepared", - row=1, - col=2, - ) - - # last part - fig.update_layout( - showlegend=False, - xaxis_title="State prepared", - yaxis_title="State measured", - ) - - figures.append(fig) - - return figures, fitting_report - - -fast_reset = Routine(_acquisition, _fit, _plot) -"""FastReset Routine object.""" diff --git a/src/qibocal/protocols/flipping.py b/src/qibocal/protocols/flipping.py index 981be1b08..ba3247f75 100644 --- a/src/qibocal/protocols/flipping.py +++ b/src/qibocal/protocols/flipping.py @@ -1,15 +1,15 @@ from dataclasses import dataclass, field +from typing import Union import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, AveragingMode, PulseSequence from scipy.optimize import curve_fit -from qibocal.auto.operation import Routine +from qibocal import update +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import ( fallback_period, @@ -18,26 +18,77 @@ table_html, ) -from .flipping_signal import ( - FlippingSignalData, - FlippingSignalParameters, - FlippingSignalResults, - _update, - flipping_fit, - flipping_sequence, -) +from ..result import probability from .utils import COLORBAND, COLORBAND_LINE, chi2_reduced +def flipping_sequence( + platform: CalibrationPlatform, + qubit: QubitId, + delta_amplitude: float, + flips: int, + rx90: bool, +): + """Pulse sequence for flipping experiment.""" + + sequence = PulseSequence() + natives = platform.natives.single_qubit[qubit] + + sequence |= natives.R(theta=np.pi / 2) + + for _ in range(flips): + + if rx90: + qd_channel, qd_pulse = natives.RX90()[0] + else: + qd_channel, 
qd_pulse = natives.RX()[0] + + qd_detuned = update.replace( + qd_pulse, amplitude=qd_pulse.amplitude + delta_amplitude + ) + sequence.append((qd_channel, qd_detuned)) + sequence.append((qd_channel, qd_detuned)) + + if rx90: + sequence.append((qd_channel, qd_detuned)) + sequence.append((qd_channel, qd_detuned)) + + sequence |= natives.MZ() + + return sequence + + @dataclass -class FlippingParameters(FlippingSignalParameters): +class FlippingParameters(Parameters): """Flipping runcard inputs.""" + nflips_max: int + """Maximum number of flips ([RX(pi) - RX(pi)] sequences). """ + nflips_step: int + """Flip step.""" + unrolling: bool = False + """If ``True`` it uses sequence unrolling to deploy multiple sequences in a single instrument call. + Defaults to ``False``.""" + delta_amplitude: float = 0 + """Amplitude detuning.""" + rx90: bool = False + """Calibration of native pi pulse, if true calibrates pi/2 pulse""" + @dataclass -class FlippingResults(FlippingSignalResults): +class FlippingResults(Results): """Flipping outputs.""" + amplitude: dict[QubitId, Union[float, list[float]]] + """Drive amplitude for each qubit.""" + delta_amplitude: dict[QubitId, Union[float, list[float]]] + """Difference in amplitude between initial value and fit.""" + delta_amplitude_detuned: dict[QubitId, Union[float, list[float]]] + """Difference in amplitude between detuned value and fit.""" + fitted_parameters: dict[QubitId, dict[str, float]] + """Raw fitting output.""" + rx90: bool + """Pi or Pi_half calibration""" chi2: dict[QubitId, list[float]] = field(default_factory=dict) """Chi squared estimate mean value and error. """ @@ -48,16 +99,24 @@ class FlippingResults(FlippingSignalResults): @dataclass -class FlippingData(FlippingSignalData): +class FlippingData(Data): """Flipping acquisition outputs.""" + resonator_type: str + """Resonator type.""" + delta_amplitude: float + """Amplitude detuning.""" + pulse_amplitudes: dict[QubitId, float] + """Pulse amplitudes for each qubit.""" + rx90: bool + """Pi or Pi_half calibration""" data: dict[QubitId, npt.NDArray[FlippingType]] = field(default_factory=dict) """Raw data acquired.""" def _acquisition( params: FlippingParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> FlippingData: r""" @@ -65,10 +124,11 @@ def _acquisition( The flipping experiment correct the delta amplitude in the qubit drive pulse. We measure a qubit after applying a Rx(pi/2) and N flips (Rx(pi) rotations). After fitting we can obtain the delta amplitude to refine pi pulses. + On the y axis we measure the excited state probability. 
Args: params (:class:`SingleShotClassificationParameters`): input parameters - platform (:class:`Platform`): Qibolab's platform + platform (:class:`CalibrationPlatform`): Qibolab's platform qubits (dict): dict of target :class:`Qubit` objects to be characterized Returns: @@ -78,23 +138,26 @@ def _acquisition( data = FlippingData( resonator_type=platform.resonator_type, delta_amplitude=params.delta_amplitude, - pi_pulse_amplitudes={ - qubit: platform.qubits[qubit].native_gates.RX.amplitude for qubit in targets + pulse_amplitudes={ + qubit: getattr( + platform.natives.single_qubit[qubit], "RX90" if params.rx90 else "RX" + )[0][1].amplitude + for qubit in targets }, + rx90=params.rx90, ) - options = ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ) + options = { + "nshots": params.nshots, + "relaxation_time": params.relaxation_time, + "acquisition_type": AcquisitionType.DISCRIMINATION, + "averaging_mode": AveragingMode.SINGLESHOT, + } + + sequences = [] - # sweep the parameter - sequences, all_ro_pulses = [], [] flips_sweep = range(0, params.nflips_max, params.nflips_step) for flips in flips_sweep: - # create a sequence of pulses for the experiment sequence = PulseSequence() for qubit in targets: sequence += flipping_sequence( @@ -102,42 +165,43 @@ def _acquisition( qubit=qubit, delta_amplitude=params.delta_amplitude, flips=flips, + rx90=params.rx90, ) sequences.append(sequence) - all_ro_pulses.append(sequence.ro_pulses) - # execute the pulse sequence if params.unrolling: - results = platform.execute_pulse_sequences(sequences, options) - - elif not params.unrolling: - results = [ - platform.execute_pulse_sequence(sequence, options) for sequence in sequences - ] + results = platform.execute(sequences, **options) + else: + results = [platform.execute([sequence], **options) for sequence in sequences] - for ig, (flips, ro_pulses) in enumerate(zip(flips_sweep, all_ro_pulses)): + for i in range(len(sequences)): for qubit in targets: - serial = ro_pulses.get_qubit_pulses(qubit)[0].serial + ro_pulse = list(sequences[i].channel(platform.qubits[qubit].acquisition))[ + -1 + ] if params.unrolling: - result = results[serial][0] + result = results[ro_pulse.id] else: - result = results[ig][serial] - prob = result.probability(state=1) + result = results[i][ro_pulse.id] + prob = probability(result, state=1) error = np.sqrt(prob * (1 - prob) / params.nshots) data.register_qubit( FlippingType, (qubit), dict( - flips=np.array([flips]), + flips=np.array([flips_sweep[i]]), prob=np.array([prob]), error=np.array([error]), ), ) - return data +def flipping_fit(x, offset, amplitude, omega, phase, gamma): + return np.sin(x * omega + phase) * amplitude * np.exp(-x * gamma) + offset + + def _fit(data: FlippingData) -> FlippingResults: r"""Post-processing function for Flipping. 
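The `flipping_fit` model just above can be motivated in one line: an over-rotation `epsilon` per detuned pi pulse adds `2*epsilon` per flip on top of the initial pi/2, so the excited-state probability oscillates as `(1 + sin(2*N*epsilon))/2` and the fitted angular frequency is roughly `2*epsilon`. A standalone sketch, including the amplitude rescaling applied in `_fit` below; all numbers are hypothetical:

# Standalone sketch of the flipping signal and the amplitude correction:
# omega = 2*epsilon, correction = omega/2 = epsilon, and the refined amplitude is
# detuned_amplitude * pi / (pi + correction) (the correction is halved once more
# for the rx90 case).
import numpy as np

epsilon = 0.05                         # hypothetical over-rotation per pi pulse [rad]
flips = np.arange(0, 40, 2)
prob_excited = (1 + np.sin(2 * flips * epsilon)) / 2   # ideal, undamped signal

omega = 2 * epsilon                    # what the sinusoidal fit would return
correction = omega / 2
detuned_amplitude = 0.1                # hypothetical detuned pulse amplitude
corrected_amplitude = detuned_amplitude * np.pi / (np.pi + correction)
print(f"corrected amplitude = {corrected_amplitude:.4f}")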
@@ -155,9 +219,7 @@ def _fit(data: FlippingData) -> FlippingResults: chi2 = {} for qubit in qubits: qubit_data = data[qubit] - detuned_pi_pulse_amplitude = ( - data.pi_pulse_amplitudes[qubit] + data.delta_amplitude - ) + detuned_pulse_amplitude = data.pulse_amplitudes[qubit] + data.delta_amplitude y = qubit_data.prob x = qubit_data.flips @@ -180,10 +242,14 @@ def _fit(data: FlippingData) -> FlippingResults: perr = np.sqrt(np.diag(perr)).tolist() popt = popt.tolist() correction = popt[2] / 2 + + if data.rx90: + correction /= 2 + corrected_amplitudes[qubit] = [ - float(detuned_pi_pulse_amplitude * np.pi / (np.pi + correction)), + float(detuned_pulse_amplitude * np.pi / (np.pi + correction)), float( - detuned_pi_pulse_amplitude + detuned_pulse_amplitude * np.pi * 1 / (np.pi + correction) ** 2 @@ -195,11 +261,9 @@ def _fit(data: FlippingData) -> FlippingResults: fitted_parameters[qubit] = popt delta_amplitude_detuned[qubit] = [ - -correction * detuned_pi_pulse_amplitude / (np.pi + correction), + -correction * detuned_pulse_amplitude / (np.pi + correction), np.abs( - np.pi - * detuned_pi_pulse_amplitude - * np.power(np.pi + correction, -2) + np.pi * detuned_pulse_amplitude * np.power(np.pi + correction, -2) ) * perr[2] / 2, @@ -225,6 +289,7 @@ def _fit(data: FlippingData) -> FlippingResults: delta_amplitude, delta_amplitude_detuned, fitted_parameters, + data.rx90, chi2, ) @@ -315,5 +380,9 @@ def _plot(data: FlippingData, target: QubitId, fit: FlippingResults = None): return figures, fitting_report +def _update(results: FlippingResults, platform: CalibrationPlatform, qubit: QubitId): + update.drive_amplitude(results.amplitude[qubit], results.rx90, platform, qubit) + + flipping = Routine(_acquisition, _fit, _plot, _update) """Flipping Routine object.""" diff --git a/src/qibocal/protocols/flipping_signal.py b/src/qibocal/protocols/flipping_signal.py deleted file mode 100644 index 33e7900c4..000000000 --- a/src/qibocal/protocols/flipping_signal.py +++ /dev/null @@ -1,334 +0,0 @@ -from dataclasses import dataclass, field -from typing import Union - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from scipy.optimize import curve_fit - -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.config import log -from qibocal.protocols.utils import ( - fallback_period, - guess_period, - table_dict, - table_html, -) - - -@dataclass -class FlippingSignalParameters(Parameters): - """Flipping runcard inputs.""" - - nflips_max: int - """Maximum number of flips ([RX(pi) - RX(pi)] sequences). """ - nflips_step: int - """Flip step.""" - unrolling: bool = False - """If ``True`` it uses sequence unrolling to deploy multiple sequences in a single instrument call. 
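# A worked sketch of the amplitude correction computed in _fit above, for the
# rx90=False case.  Starting from RX(pi/2), a relative amplitude error eps makes each
# [RX(pi) - RX(pi)] flip over-rotate the Bloch vector by 2*pi*eps, so the excited-state
# probability oscillates as P = (1 + sin(omega * flips)) / 2 (ignoring decay) with
# omega = 2*pi*eps being the angular frequency returned by the sinusoidal fit.
# Taking correction = omega / 2 = pi * eps then gives
# amplitude * pi / (pi + correction) = amplitude / (1 + eps), i.e. the calibrated value.
# (When rx90=True the protocol halves the correction again, as in the code above.)
# The numbers here are illustrative only.
import numpy as np

amplitude = 0.105   # current, slightly miscalibrated drive amplitude (a.u.)
eps = 0.05          # assumed +5% relative amplitude error

omega = 2 * np.pi * eps              # what the sinusoidal fit would return
correction = omega / 2
corrected = amplitude * np.pi / (np.pi + correction)

assert np.isclose(corrected, amplitude / (1 + eps))
print(f"corrected amplitude: {corrected:.6f}")  # -> 0.1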
- Defaults to ``False``.""" - delta_amplitude: float = 0 - """Amplitude detuning.""" - - -@dataclass -class FlippingSignalResults(Results): - """Flipping outputs.""" - - amplitude: dict[QubitId, Union[float, list[float]]] - """Drive amplitude for each qubit.""" - delta_amplitude: dict[QubitId, Union[float, list[float]]] - """Difference in amplitude between initial value and fit.""" - delta_amplitude_detuned: dict[QubitId, Union[float, list[float]]] - """Difference in amplitude between detuned value and fit.""" - fitted_parameters: dict[QubitId, dict[str, float]] - """Raw fitting output.""" - - -FlippingType = np.dtype([("flips", np.float64), ("signal", np.float64)]) - - -@dataclass -class FlippingSignalData(Data): - """Flipping acquisition outputs.""" - - resonator_type: str - """Resonator type.""" - delta_amplitude: float - """Amplitude detuning.""" - pi_pulse_amplitudes: dict[QubitId, float] - """Pi pulse amplitudes for each qubit.""" - data: dict[QubitId, npt.NDArray[FlippingType]] = field(default_factory=dict) - """Raw data acquired.""" - - -def flipping_sequence( - platform: Platform, qubit: QubitId, delta_amplitude: float, flips: int -): - - sequence = PulseSequence() - RX90_pulse = platform.create_RX90_pulse(qubit, start=0) - sequence.add(RX90_pulse) - # execute sequence RX(pi/2) - [RX(pi) - RX(pi)] from 0...flips times - RO - start1 = RX90_pulse.duration - drive_amplitude = platform.qubits[qubit].native_gates.RX.amplitude - for _ in range(flips): - RX_pulse1 = platform.create_RX_pulse(qubit, start=start1) - RX_pulse1.amplitude = drive_amplitude + delta_amplitude - start2 = start1 + RX_pulse1.duration - RX_pulse2 = platform.create_RX_pulse(qubit, start=start2) - RX_pulse2.amplitude = drive_amplitude + delta_amplitude - - sequence.add(RX_pulse1) - sequence.add(RX_pulse2) - start1 = start2 + RX_pulse2.duration - - # add ro pulse at the end of the sequence - sequence.add(platform.create_qubit_readout_pulse(qubit, start=start1)) - - return sequence - - -def _acquisition( - params: FlippingSignalParameters, - platform: Platform, - targets: list[QubitId], -) -> FlippingSignalData: - r""" - Data acquisition for flipping. - - The flipping experiment correct the delta amplitude in the qubit drive pulse. We measure a qubit after applying - a Rx(pi/2) and N flips (Rx(pi) rotations). After fitting we can obtain the delta amplitude to refine pi pulses. 
- - Args: - params (:class:`FlippingSignalParameters`): input parameters - platform (:class:`Platform`): Qibolab's platform - qubits (dict): dict of target :class:`Qubit` objects to be characterized - - Returns: - data (:class:`FlippingSignalData`) - """ - - data = FlippingSignalData( - resonator_type=platform.resonator_type, - delta_amplitude=params.delta_amplitude, - pi_pulse_amplitudes={ - qubit: platform.qubits[qubit].native_gates.RX.amplitude for qubit in targets - }, - ) - - options = ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ) - - # sweep the parameter - sequences, all_ro_pulses = [], [] - flips_sweep = range(0, params.nflips_max, params.nflips_step) - for flips in flips_sweep: - # create a sequence of pulses for the experiment - sequence = PulseSequence() - for qubit in targets: - sequence += flipping_sequence( - platform=platform, - qubit=qubit, - delta_amplitude=params.delta_amplitude, - flips=flips, - ) - - sequences.append(sequence) - all_ro_pulses.append(sequence.ro_pulses) - - # execute the pulse sequence - if params.unrolling: - results = platform.execute_pulse_sequences(sequences, options) - - elif not params.unrolling: - results = [ - platform.execute_pulse_sequence(sequence, options) for sequence in sequences - ] - - for ig, (flips, ro_pulses) in enumerate(zip(flips_sweep, all_ro_pulses)): - for qubit in targets: - serial = ro_pulses.get_qubit_pulses(qubit)[0].serial - if params.unrolling: - result = results[serial][0] - else: - result = results[ig][serial] - data.register_qubit( - FlippingType, - (qubit), - dict( - flips=np.array([flips]), - signal=np.array([result.magnitude]), - ), - ) - - return data - - -def flipping_fit(x, offset, amplitude, omega, phase, gamma): - return np.sin(x * omega + phase) * amplitude * np.exp(-x * gamma) + offset - - -def _fit(data: FlippingSignalData) -> FlippingSignalResults: - r"""Post-processing function for Flipping. - - The used model is - - .. math:: - - y = p_0 sin\Big(\frac{2 \pi x}{p_2} + p_3\Big)*\exp{-x*p4} + p_1. 
- """ - qubits = data.qubits - corrected_amplitudes = {} - fitted_parameters = {} - delta_amplitude = {} - delta_amplitude_detuned = {} - for qubit in qubits: - qubit_data = data[qubit] - detuned_pi_pulse_amplitude = ( - data.pi_pulse_amplitudes[qubit] + data.delta_amplitude - ) - voltages = qubit_data.signal - flips = qubit_data.flips - - x_min = np.min(flips) - x_max = np.max(flips) - x = (flips - x_min) / (x_max - x_min) - y_max = np.max(voltages) - y_min = np.min(voltages) - # normalize between 0 and 1 - y = (voltages - y_min) / (y_max - y_min) - - period = fallback_period(guess_period(x, y)) - pguess = [0.5, 0.5, 2 * np.pi / period, 0, 0] - - try: - popt, _ = curve_fit( - flipping_fit, - x, - y, - p0=pguess, - maxfev=2000000, - bounds=( - [0.4, 0.4, -np.inf, -np.pi / 4, 0], - [0.6, 0.6, np.inf, np.pi / 4, np.inf], - ), - ) - - translated_popt = [ - y_min + (y_max - y_min) * popt[0], - (y_max - y_min) * popt[1] * np.exp(x_min * popt[4] / (x_max - x_min)), - popt[2] / (x_max - x_min), - popt[3] - x_min / (x_max - x_min) * popt[2], - popt[4] / (x_max - x_min), - ] - # TODO: this might be related to the resonator type - signed_correction = translated_popt[2] / 2 - # The amplitude is directly proportional to the rotation angle - corrected_amplitudes[qubit] = (detuned_pi_pulse_amplitude * np.pi) / ( - np.pi + signed_correction - ) - fitted_parameters[qubit] = translated_popt - delta_amplitude_detuned[qubit] = ( - -signed_correction - * detuned_pi_pulse_amplitude - / (np.pi + signed_correction) - ) - delta_amplitude[qubit] = ( - delta_amplitude_detuned[qubit] - data.delta_amplitude - ) - except Exception as e: - log.warning(f"Error in flipping fit for qubit {qubit} due to {e}.") - - return FlippingSignalResults( - corrected_amplitudes, - delta_amplitude, - delta_amplitude_detuned, - fitted_parameters, - ) - - -def _plot(data: FlippingSignalData, target, fit: FlippingSignalResults = None): - """Plotting function for Flipping.""" - - figures = [] - fig = go.Figure() - fitting_report = "" - qubit_data = data[target] - - fig.add_trace( - go.Scatter( - x=qubit_data.flips, - y=qubit_data.signal, - opacity=1, - name="Signal", - showlegend=True, - legendgroup="Signal", - ), - ) - - if fit is not None: - flips_range = np.linspace( - min(qubit_data.flips), - max(qubit_data.flips), - 2 * len(qubit_data), - ) - - fig.add_trace( - go.Scatter( - x=flips_range, - y=flipping_fit( - flips_range, - float(fit.fitted_parameters[target][0]), - float(fit.fitted_parameters[target][1]), - float(fit.fitted_parameters[target][2]), - float(fit.fitted_parameters[target][3]), - float(fit.fitted_parameters[target][4]), - ), - name="Fit", - line=go.scatter.Line(dash="dot"), - ), - ) - fitting_report = table_html( - table_dict( - target, - [ - "Delta amplitude [a.u.]", - "Delta amplitude (with detuning) [a.u.]", - "Corrected amplitude [a.u.]", - ], - [ - np.round(fit.delta_amplitude[target], 4), - np.round(fit.delta_amplitude_detuned[target], 4), - np.round(fit.amplitude[target], 4), - ], - ) - ) - - # last part - fig.update_layout( - showlegend=True, - xaxis_title="Flips", - yaxis_title="Signal [a.u.]", - ) - - figures.append(fig) - - return figures, fitting_report - - -def _update(results: FlippingSignalResults, platform: Platform, qubit: QubitId): - update.drive_amplitude(results.amplitude[qubit], platform, qubit) - - -flipping_signal = Routine(_acquisition, _fit, _plot, _update) -"""Flipping Routine object.""" diff --git a/src/qibocal/protocols/flux_dependence/avoided_crossing.py 
b/src/qibocal/protocols/flux_dependence/avoided_crossing.py deleted file mode 100644 index f9f9ce796..000000000 --- a/src/qibocal/protocols/flux_dependence/avoided_crossing.py +++ /dev/null @@ -1,364 +0,0 @@ -from copy import deepcopy -from dataclasses import dataclass, field -from enum import Enum -from typing import Optional - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from plotly.subplots import make_subplots -from qibolab.platform import Platform -from qibolab.qubits import QubitId, QubitPairId - -from qibocal.auto.operation import Data, Results, Routine -from qibocal.protocols.two_qubit_interaction.utils import order_pair -from qibocal.protocols.utils import HZ_TO_GHZ, table_dict, table_html - -from .qubit_flux_dependence import QubitFluxParameters, QubitFluxType -from .qubit_flux_dependence import _acquisition as flux_acquisition - -STEP = 60 -POINT_SIZE = 10 - - -@dataclass -class AvoidedCrossingParameters(QubitFluxParameters): - """Avoided Crossing Parameters""" - - -@dataclass -class AvoidedCrossingResults(Results): - """Avoided crossing outputs""" - - parabolas: dict[tuple, list] - """Extracted parabolas""" - fits: dict[tuple, list] - """Fits parameters""" - cz: dict[tuple, list] - """CZ intersection points """ - iswap: dict[tuple, list] - """iSwap intersection points""" - - -@dataclass -class AvoidedCrossingData(Data): - """Avoided crossing acquisition outputs""" - - qubit_pairs: list - """list of qubit pairs ordered following the drive frequency""" - drive_frequency_low: dict = field(default_factory=dict) - """Lowest drive frequency in each qubit pair""" - data: dict[tuple[QubitId, str], npt.NDArray[QubitFluxType]] = field( - default_factory=dict - ) - """Raw data acquired.""" - - -def _acquisition( - params: AvoidedCrossingParameters, - platform: Platform, - targets: list[QubitPairId], # qubit pairs -) -> AvoidedCrossingData: - """ - Data acquisition for avoided crossing. - This routine performs the qubit flux dependency for the "01" and "02" transition - on the qubit pair. It returns the bias and frequency values to perform a CZ - and a iSwap gate. - - Args: - params (AvoidedCrossingParameters): experiment's parameters. - platform (Platform): Qibolab platform object. - qubits (dict): list of targets qubit pairs to perform the action. 
- """ - order_pairs = np.array([order_pair(pair, platform) for pair in targets]) - data = AvoidedCrossingData(qubit_pairs=order_pairs.tolist()) - # Extract the qubits in the qubits pairs and evaluate their flux dep - unique_qubits = np.unique( - order_pairs[:, 1] - ) # select qubits with high freq in each couple - new_qubits = {key: platform.qubits[key] for key in unique_qubits} - excitations = [Excitations.ge, Excitations.gf] - for transition in excitations: - params.transition = transition - data_transition = flux_acquisition( - params=params, - platform=platform, - targets=new_qubits, - ) - for qubit in unique_qubits: - qubit_data = data_transition.data[qubit] - freq = qubit_data["freq"] - bias = qubit_data["bias"] - signal = qubit_data["signal"] - phase = qubit_data["phase"] - data.register_qubit( - QubitFluxType, - (float(qubit), transition), - dict( - freq=freq.tolist(), - bias=bias.tolist(), - signal=signal.tolist(), - phase=phase.tolist(), - ), - ) - - unique_low_qubits = np.unique(order_pairs[:, 0]) - data.drive_frequency_low = { - str(qubit): float(platform.qubits[qubit].drive_frequency) - for qubit in unique_low_qubits - } - return data - - -def _fit(data: AvoidedCrossingData) -> AvoidedCrossingResults: - """ - Post-Processing for avoided crossing. - """ - qubit_data = data.data - fits = {} - cz = {} - iswap = {} - curves = {tuple(key): find_parabola(val) for key, val in qubit_data.items()} - for qubit_pair in data.qubit_pairs: - qubit_pair = tuple(qubit_pair) - fits[qubit_pair] = {} - low = qubit_pair[0] - high = qubit_pair[1] - # Fit the 02*2 curve - curve_02 = np.array(curves[high, Excitations.gf]) * 2 - x_02 = np.unique(qubit_data[high, Excitations.gf]["bias"]) - fit_02 = np.polyfit(x_02, curve_02, 2) - fits[qubit_pair][Excitations.gf] = fit_02.tolist() - - # Fit the 01+10 curve - curve_01 = np.array(curves[high, Excitations.ge]) - x_01 = np.unique(qubit_data[high, Excitations.ge]["bias"]) - fit_01_10 = np.polyfit(x_01, curve_01 + data.drive_frequency_low[str(low)], 2) - fits[qubit_pair][Excitations.all_ge] = fit_01_10.tolist() - # find the intersection of the two parabolas - delta_fit = fit_02 - fit_01_10 - x1, x2 = solve_eq(delta_fit) - cz[qubit_pair] = [ - [x1, np.polyval(fit_02, x1)], - [x2, np.polyval(fit_02, x2)], - ] - # find the intersection of the 01 parabola and the 10 line - fit_01 = np.polyfit(x_01, curve_01, 2) - fits[qubit_pair][Excitations.ge] = fit_01.tolist() - fit_pars = deepcopy(fit_01) - line_val = data.drive_frequency_low[str(low)] - fit_pars[2] -= line_val - x1, x2 = solve_eq(fit_pars) - iswap[qubit_pair] = [[x1, line_val], [x2, line_val]] - - return AvoidedCrossingResults(curves, fits, cz, iswap) - - -def _plot( - data: AvoidedCrossingData, - fit: Optional[AvoidedCrossingResults], - target: QubitPairId, -): - """Plotting function for avoided crossing""" - fitting_report = "" - figures = [] - order_pair = tuple(index(data.qubit_pairs, target)) - heatmaps = make_subplots( - rows=1, - cols=2, - subplot_titles=[ - f"{i} transition qubit {target[0]}" - for i in [Excitations.ge, Excitations.gf] - ], - ) - parabolas = make_subplots(rows=1, cols=1, subplot_titles=["Parabolas"]) - for i, transition in enumerate([Excitations.ge, Excitations.gf]): - data_high = data.data[order_pair[1], transition] - bias_unique = np.unique(data_high.bias) - min_bias = min(bias_unique) - max_bias = max(bias_unique) - plot_heatmap( - heatmaps, fit, transition, bias_unique, order_pair, data_high, i + 1 - ) - - figures.append(heatmaps) - - if fit is not None: - cz = 
np.array(fit.cz[order_pair]) - iswap = np.array(fit.iswap[order_pair]) - min_bias = min(min_bias, *cz[:, 0], *iswap[:, 0]) - max_bias = max(max_bias, *cz[:, 0], *iswap[:, 0]) - bias_range = np.linspace(min_bias, max_bias, STEP) - plot_curves(parabolas, fit, data, order_pair, bias_range) - plot_intersections(parabolas, cz, iswap) - - parabolas.update_layout( - xaxis_title="Bias[V]", - yaxis_title="Frequency[GHz]", - ) - heatmaps.update_layout( - coloraxis_colorbar=dict( - yanchor="top", - y=1, - x=-0.08, - ticks="outside", - ), - xaxis_title="Frequency[GHz]", - yaxis_title="Bias[V]", - xaxis2_title="Frequency[GHz]", - yaxis2_title="Bias[V]", - ) - figures.append(parabolas) - fitting_report = table_html( - table_dict( - target, - ["CZ bias", "iSwap bias"], - [np.round(cz[:, 0], 3), np.round(iswap[:, 0], 3)], - ) - ) - return figures, fitting_report - - -avoided_crossing = Routine(_acquisition, _fit, _plot) - - -def find_parabola(data: dict) -> list: - """ - Finds the parabola in `data` - """ - freqs = data["freq"] - currs = data["bias"] - biass = sorted(np.unique(currs)) - frequencies = [] - for bias in biass: - data_bias = data[currs == bias] - index = data_bias["signal"].argmax() - frequencies.append(freqs[index]) - return frequencies - - -def solve_eq(pars: list) -> tuple: - """ - Solver of the quadratic equation - - .. math:: - a x^2 + b x + c = 0 - - `pars` is the list [a, b, c]. - """ - first_term = -1 * pars[1] - second_term = np.sqrt(pars[1] ** 2 - 4 * pars[0] * pars[2]) - x1 = (first_term + second_term) / pars[0] / 2 - x2 = (first_term - second_term) / pars[0] / 2 - return x1, x2 - - -def index(pairs: list, item: list) -> list: - """Find the ordered pair""" - for pair in pairs: - if set(pair) == set(item): - return pair - raise ValueError(f"{item} not in pairs") - - -class Excitations(str, Enum): - """ - Excited two qubits states. 
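# A small numerical sketch of the intersection logic used by the (now removed)
# avoided-crossing fit: the CZ point is a root of the difference between the "02"
# and "01+10" parabolas, obtained from the quadratic formula exactly as in solve_eq.
# The polynomial coefficients below are made up for illustration.
import numpy as np

def solve_eq(pars):
    """Roots of pars[0] * x**2 + pars[1] * x + pars[2] = 0."""
    disc = np.sqrt(pars[1] ** 2 - 4 * pars[0] * pars[2])
    return (-pars[1] + disc) / (2 * pars[0]), (-pars[1] - disc) / (2 * pars[0])

fit_02 = np.array([-2.0, 0.3, 9.0])     # hypothetical np.polyfit output for 2 * f_02
fit_01_10 = np.array([-1.0, 0.1, 8.9])  # hypothetical fit of f_01 + f_10
x1, x2 = solve_eq(fit_02 - fit_01_10)
cz_points = [[x, np.polyval(fit_02, x)] for x in (x1, x2)]
print(cz_points)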
- """ - - ge = "01" - """First qubit in ground state, second qubit in excited state""" - gf = "02" - """First qubit in ground state, second qubit in the first excited state out - of the computational basis.""" - all_ge = "01+10" - """One of the qubit in the ground state and the other one in the excited state.""" - - -def plot_heatmap(heatmaps, fit, transition, bias_unique, order_pair, data_high, col): - heatmaps.add_trace( - go.Heatmap( - x=data_high.freq * HZ_TO_GHZ, - y=data_high.bias, - z=data_high.signal, - coloraxis="coloraxis", - ), - row=1, - col=col, - ) - if fit is not None: - # the fit of the parabola in 02 transition was done doubling the frequencies - heatmaps.add_trace( - go.Scatter( - x=np.polyval(fit.fits[order_pair][transition], bias_unique) - / col - * HZ_TO_GHZ, - y=bias_unique, - mode="markers", - marker_color="lime", - showlegend=True, - marker=dict(size=POINT_SIZE), - name=f"Curve estimation {transition}", - ), - row=1, - col=col, - ) - heatmaps.add_trace( - go.Scatter( - x=np.array(fit.parabolas[order_pair[1], transition]) * HZ_TO_GHZ, - y=bias_unique, - mode="markers", - marker_color="black", - showlegend=True, - marker=dict(symbol="cross", size=POINT_SIZE), - name=f"Parabola {transition}", - ), - row=1, - col=col, - ) - - -def plot_curves(parabolas, fit, data, order_pair, bias_range): - for transition in [Excitations.ge, Excitations.gf, Excitations.all_ge]: - parabolas.add_trace( - go.Scatter( - x=bias_range, - y=np.polyval(fit.fits[order_pair][transition], bias_range) * HZ_TO_GHZ, - showlegend=True, - name=transition, - ) - ) - parabolas.add_trace( - go.Scatter( - x=bias_range, - y=np.array([data.drive_frequency_low[str(order_pair[0])]] * STEP) - * HZ_TO_GHZ, - showlegend=True, - name="10", - ) - ) - - -def plot_intersections(parabolas, cz, iswap): - parabolas.add_trace( - go.Scatter( - x=cz[:, 0], - y=cz[:, 1] * HZ_TO_GHZ, - showlegend=True, - name="CZ", - marker_color="black", - mode="markers", - marker=dict(symbol="cross", size=POINT_SIZE), - ) - ) - parabolas.add_trace( - go.Scatter( - x=iswap[:, 0], - y=iswap[:, 1] * HZ_TO_GHZ, - showlegend=True, - name="iswap", - marker_color="blue", - mode="markers", - marker=dict(symbol="cross", size=10), - ) - ) diff --git a/src/qibocal/protocols/flux_dependence/qubit_crosstalk.py b/src/qibocal/protocols/flux_dependence/qubit_crosstalk.py index dac8d8c8c..e7e956318 100644 --- a/src/qibocal/protocols/flux_dependence/qubit_crosstalk.py +++ b/src/qibocal/protocols/flux_dependence/qubit_crosstalk.py @@ -3,19 +3,24 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import ( + AcquisitionType, + AveragingMode, + Delay, + Parameter, + PulseSequence, + Sweeper, +) from scipy.optimize import curve_fit from qibocal import update -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log -from ..qubit_spectroscopy_ef import DEFAULT_ANHARMONICITY -from ..utils import HZ_TO_GHZ, extract_feature, table_dict, table_html +from ...result import magnitude, phase +from ...update import replace +from ..utils import GHZ_TO_HZ, HZ_TO_GHZ, extract_feature, table_dict, table_html from . 
import utils from .qubit_flux_dependence import ( QubitFluxData, @@ -23,7 +28,6 @@ QubitFluxResults, QubitFluxType, ) -from .qubit_flux_dependence import _fit as diagonal_fit @dataclass @@ -38,7 +42,6 @@ class QubitCrosstalkParameters(QubitFluxParameters): If given flux will be swept on the given qubits in a sequential fashion (n qubits will result to n different executions). Multiple qubits may be measured in each execution as specified by the ``qubits`` option in the runcard. """ - # TODO: add voltage parameters to bias qubits off sweetspot (absolute) @dataclass @@ -68,28 +71,6 @@ def register_qubit(self, qubit, flux_qubit, freq, bias, signal, phase): else: self.data[qubit, flux_qubit] = ar - @property - def diagonal(self) -> Optional[QubitFluxData]: - instance = QubitFluxData( - resonator_type=self.resonator_type, - charging_energy=self.charging_energy, - qubit_frequency=self.qubit_frequency, - ) - for qubit in self.qubits: - try: - instance.data[qubit] = self.data[qubit, qubit] - except KeyError: - log.info( - f"Diagonal acquisition not found for qubit {qubit}. Runcard values will be used to perform the off-diagonal fit." - ) - - if len(instance.data) > 0: - return instance - return QubitFluxData( - resonator_type=self.resonator_type, - charging_energy=self.charging_energy, - ) - @dataclass class QubitCrosstalkResults(QubitFluxResults): @@ -97,11 +78,11 @@ class QubitCrosstalkResults(QubitFluxResults): Qubit Crosstalk outputs. """ - qubit_frequency_bias_point: dict[QubitId, float] + qubit_frequency_bias_point: dict[QubitId, float] = field(default_factory=dict) """Expected qubit frequency at bias point.""" - crosstalk_matrix: dict[QubitId, dict[QubitId, float]] + crosstalk_matrix: dict[QubitId, dict[QubitId, float]] = field(default_factory=dict) """Crosstalk matrix element.""" - fitted_parameters: dict[tuple[QubitId, QubitId], dict] + fitted_parameters: dict[tuple[QubitId, QubitId], dict] = field(default_factory=dict) """Fitted parameters for each couple target-flux qubit.""" def __contains__(self, key: QubitId): @@ -111,119 +92,120 @@ def __contains__(self, key: QubitId): def _acquisition( params: QubitCrosstalkParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> QubitCrosstalkData: """Data acquisition for Crosstalk Experiment.""" - # TODO: pass voltage as parameter + assert set(targets).isdisjoint( + set(params.flux_qubits) + ), "Flux qubits must be different from targets." 
+ sequence = PulseSequence() ro_pulses = {} qd_pulses = {} offset = {} charging_energy = {} matrix_element = {} - qubit_frequency = {} - bias_point = {} - for qubit in targets: - - charging_energy[qubit] = -platform.qubits[qubit].anharmonicity - qd_pulses[qubit] = platform.create_qubit_drive_pulse( - qubit, start=0, duration=params.drive_duration - ) - try: - qubit_frequency[qubit] = platform.qubits[qubit].drive_frequency - matrix_element[qubit] = platform.qubits[qubit].crosstalk_matrix[qubit] - offset[qubit] = -platform.qubits[qubit].sweetspot * matrix_element[qubit] - bias_point[qubit] = params.bias_point.get( - qubit, platform.qubits[qubit].sweetspot - ) - - except KeyError: - log.warning(f"Missing flux parameters for qubit {qubit}.") + maximum_frequency = {} + freq_sweepers = [] + offset_sweepers = [] - if params.transition == "02": - if platform.qubits[qubit].anharmonicity: - qd_pulses[qubit].frequency -= platform.qubits[qubit].anharmonicity / 2 - else: - qd_pulses[qubit].frequency -= DEFAULT_ANHARMONICITY / 2 + delta_frequency_range = np.arange( + -params.freq_width / 2, params.freq_width / 2, params.freq_step + ) + for qubit in targets: + natives = platform.natives.single_qubit[qubit] + charging_energy[qubit] = platform.calibration.single_qubits[ + qubit + ].qubit.charging_energy + qd_channel, qd_pulse = natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] + + qd_pulse = replace(qd_pulse, duration=params.drive_duration) if params.drive_amplitude is not None: - qd_pulses[qubit].amplitude = params.drive_amplitude - - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish + qd_pulse = replace(qd_pulse, amplitude=params.drive_amplitude) + + qd_pulses[qubit] = qd_pulse + ro_pulses[qubit] = ro_pulse + + # store calibration parameters + maximum_frequency[qubit] = platform.calibration.single_qubits[ + qubit + ].qubit.maximum_frequency + matrix_element[qubit] = platform.calibration.get_crosstalk_element(qubit, qubit) + charging_energy[qubit] = platform.calibration.single_qubits[ + qubit + ].qubit.charging_energy + offset[qubit] = ( + -platform.calibration.single_qubits[qubit].qubit.sweetspot + * matrix_element[qubit] ) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - # define the parameters to sweep and their range: - delta_frequency_range = np.arange( - -params.freq_width / 2, params.freq_width / 2, params.freq_step - ) - freq_sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - # TODO : abstract common lines with qubit flux dep routine - if params.flux_qubits is None: - flux_qubits = list(platform.qubits) - else: - flux_qubits = params.flux_qubits + sequence.append((qd_channel, qd_pulse)) + sequence.append((ro_channel, Delay(duration=qd_pulse.duration))) + sequence.append((ro_channel, ro_pulse)) + + freq_sweepers.append( + Sweeper( + parameter=Parameter.frequency, + values=platform.config(qd_channel).frequency + delta_frequency_range, + channels=[qd_channel], + ) + ) - delta_bias_range = np.arange( + delta_offset_range = np.arange( -params.bias_width / 2, params.bias_width / 2, params.bias_step ) - sequences = [sequence] * len(flux_qubits) - sweepers = [ - Sweeper( - Parameter.bias, - delta_bias_range, - qubits=[platform.qubits[flux_qubit]], - type=SweeperType.OFFSET, + for q in params.flux_qubits: + flux_channel = platform.qubits[q].flux + offset0 = platform.config(flux_channel).offset + offset_sweepers.append( + Sweeper( + 
parameter=Parameter.offset, + values=offset0 + delta_offset_range, + channels=[flux_channel], + ) ) - for flux_qubit in flux_qubits - ] data = QubitCrosstalkData( resonator_type=platform.resonator_type, matrix_element=matrix_element, offset=offset, - qubit_frequency=qubit_frequency, + qubit_frequency=maximum_frequency, charging_energy=charging_energy, - bias_point=bias_point, + bias_point=params.bias_point, ) - options = ExecutionParameters( + options = dict( nshots=params.nshots, relaxation_time=params.relaxation_time, acquisition_type=AcquisitionType.INTEGRATION, averaging_mode=AveragingMode.CYCLIC, ) - # update bias configuration + + updates = [] for qubit in targets: if qubit in params.bias_point: - platform.qubits[qubit].flux.offset = params.bias_point[qubit] + channel = platform.qubits[qubit].flux + updates.append({channel: {"offset": params.bias_point[qubit]}}) - for flux_qubit, bias_sweeper, sequence in zip(flux_qubits, sweepers, sequences): - results = platform.sweep(sequence, options, bias_sweeper, freq_sweeper) + for flux_qubit, offset_sweeper in zip(params.flux_qubits, offset_sweepers): + results = platform.execute( + [sequence], [[offset_sweeper], freq_sweepers], **options, updates=updates + ) # retrieve the results for every qubit - for qubit in targets: - result = results[ro_pulses[qubit].serial] - if flux_qubit is None: - sweetspot = platform.qubits[qubit].flux.offset - else: - sweetspot = platform.qubits[flux_qubit].flux.offset + for i, qubit in enumerate(targets): + result = results[ro_pulses[qubit].id] data.register_qubit( qubit, flux_qubit, - signal=result.magnitude, - phase=result.phase, - freq=delta_frequency_range + qd_pulses[qubit].frequency, - bias=delta_bias_range + sweetspot, + signal=magnitude(result), + phase=phase(result), + freq=freq_sweepers[i].values, + bias=offset_sweeper.values, ) return data @@ -233,29 +215,11 @@ def _fit(data: QubitCrosstalkData) -> QubitCrosstalkResults: crosstalk_matrix = {qubit: {} for qubit in data.qubit_frequency} fitted_parameters = {} - bias_point = {} - sweetspot = {} matrix_element = {} qubit_frequency = {} qubit_frequency_bias_point = {} offset = {} - diagonal = diagonal_fit(data.diagonal) - for qubit in data.qubits: - condition = qubit in diagonal - bias_point[qubit] = data.bias_point[qubit] - matrix_element[qubit] = ( - diagonal.matrix_element[qubit] if condition else data.matrix_element[qubit] - ) - qubit_frequency[qubit] = ( - diagonal.frequency[qubit] if condition else data.qubit_frequency[qubit] - ) - offset[qubit] = ( - -diagonal.sweetspot[qubit] * diagonal.matrix_element[qubit] - if condition - else data.offset[qubit] - ) - for target_flux_qubit, qubit_data in data.data.items(): frequencies, biases = extract_feature( qubit_data.freq, @@ -265,70 +229,59 @@ def _fit(data: QubitCrosstalkData) -> QubitCrosstalkResults: ) target_qubit, flux_qubit = target_flux_qubit - if target_qubit != flux_qubit: - # fit function needs to be defined here to pass correct parameters - # at runtime - qubit_frequency_bias_point[target_qubit] = utils.transmon_frequency( - xi=bias_point[target_qubit], + qubit_frequency_bias_point[target_qubit] = ( + utils.transmon_frequency( + xi=data.bias_point[target_qubit], xj=0, d=0, - w_max=qubit_frequency[target_qubit] * HZ_TO_GHZ, - offset=offset[qubit], - normalization=matrix_element[target_qubit], + w_max=data.qubit_frequency[target_qubit] * HZ_TO_GHZ, + offset=data.offset[target_qubit], + normalization=data.matrix_element[target_qubit], charging_energy=data.charging_energy[target_qubit] * HZ_TO_GHZ, 
crosstalk_element=1, ) + * GHZ_TO_HZ + ) - def fit_function(x, crosstalk_element, offset): - return utils.transmon_frequency( - xi=bias_point[target_qubit], - xj=x, - d=0, - w_max=qubit_frequency[target_qubit] * HZ_TO_GHZ, - offset=offset, - normalization=matrix_element[target_qubit], - charging_energy=data.charging_energy[target_qubit] * HZ_TO_GHZ, - crosstalk_element=crosstalk_element, - ) - - try: - popt, _ = curve_fit( - fit_function, - biases, - frequencies * HZ_TO_GHZ, - bounds=((-np.inf, -1), (np.inf, 1)), - maxfev=100000, - ) - fitted_parameters[target_qubit, flux_qubit] = dict( - xi=bias_point[target_qubit], - d=0, - w_max=qubit_frequency[target_qubit] * HZ_TO_GHZ, - offset=popt[1], - normalization=matrix_element[target_qubit], - charging_energy=data.charging_energy[target_qubit] * HZ_TO_GHZ, - crosstalk_element=float(popt[0]), - ) - crosstalk_matrix[target_qubit][flux_qubit] = ( - popt[0] * matrix_element[target_qubit] - ) - except RuntimeError as e: - log.error( - f"Off-diagonal flux fit failed for qubit {flux_qubit} due to {e}." - ) + def fit_function(x, crosstalk_element, offset): + return utils.transmon_frequency( + xi=data.bias_point[target_qubit], + xj=x, + d=0, + w_max=data.qubit_frequency[target_qubit] * HZ_TO_GHZ, + offset=offset, + normalization=data.matrix_element[target_qubit], + charging_energy=data.charging_energy[target_qubit] * HZ_TO_GHZ, + crosstalk_element=crosstalk_element, + ) - else: - fitted_parameters[target_qubit, flux_qubit] = diagonal.fitted_parameters[ - target_qubit - ] - crosstalk_matrix[target_qubit][flux_qubit] = diagonal.matrix_element[ - target_qubit - ] + try: + popt, _ = curve_fit( + fit_function, + biases, + frequencies * HZ_TO_GHZ, + bounds=((-np.inf, -1), (np.inf, 1)), + maxfev=100000, + ) + fitted_parameters[target_qubit, flux_qubit] = dict( + xi=data.bias_point[target_qubit], + d=0, + w_max=data.qubit_frequency[target_qubit] * HZ_TO_GHZ, + offset=popt[1], + normalization=data.matrix_element[target_qubit], + charging_energy=data.charging_energy[target_qubit] * HZ_TO_GHZ, + crosstalk_element=float(popt[0]), + ) + crosstalk_matrix[target_qubit][flux_qubit] = ( + popt[0] * data.matrix_element[target_qubit] + ) + except RuntimeError as e: # pragma: no cover + log.error( + f"Off-diagonal flux fit failed for qubit {flux_qubit} due to {e}." 
+ ) return QubitCrosstalkResults( - frequency=qubit_frequency, qubit_frequency_bias_point=qubit_frequency_bias_point, - sweetspot=sweetspot, - matrix_element=matrix_element, crosstalk_matrix=crosstalk_matrix, fitted_parameters=fitted_parameters, ) @@ -341,11 +294,9 @@ def _plot(data: QubitCrosstalkData, fit: QubitCrosstalkResults, target: QubitId) ) if fit is not None: labels = [ - "Qubit Frequency at Sweetspot [Hz]", "Qubit Frequency at Bias point [Hz]", ] values = [ - np.round(fit.frequency[target], 4), np.round(fit.qubit_frequency_bias_point[target], 4), ] for flux_qubit in fit.crosstalk_matrix[target]: @@ -364,7 +315,9 @@ def _plot(data: QubitCrosstalkData, fit: QubitCrosstalkResults, target: QubitId) return figures, fitting_report -def _update(results: QubitCrosstalkResults, platform: Platform, qubit: QubitId): +def _update( + results: QubitCrosstalkResults, platform: CalibrationPlatform, qubit: QubitId +): """Update crosstalk matrix.""" for flux_qubit, element in results.crosstalk_matrix[qubit].items(): diff --git a/src/qibocal/protocols/flux_dependence/qubit_flux_dependence.py b/src/qibocal/protocols/flux_dependence/qubit_flux_dependence.py index d784eb35a..458d3fabe 100644 --- a/src/qibocal/protocols/flux_dependence/qubit_flux_dependence.py +++ b/src/qibocal/protocols/flux_dependence/qubit_flux_dependence.py @@ -3,18 +3,23 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import ( + AcquisitionType, + AveragingMode, + Delay, + Parameter, + PulseSequence, + Sweeper, +) from scipy.optimize import curve_fit -from qibocal import update -from qibocal.auto.operation import Data, Results, Routine +from qibocal.auto.operation import Data, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log -from qibocal.protocols.qubit_spectroscopy_ef import DEFAULT_ANHARMONICITY +from qibocal.result import magnitude, phase +from qibocal.update import replace +from ... import update from ..utils import GHZ_TO_HZ, HZ_TO_GHZ, extract_feature, table_dict, table_html from . import utils from .resonator_flux_dependence import ResonatorFluxParameters @@ -27,8 +32,6 @@ class QubitFluxParameters(ResonatorFluxParameters): drive_amplitude: Optional[float] = None """Drive amplitude (optional). If defined, same amplitude will be used in all qubits. Otherwise the default amplitude defined on the platform runcard will be used""" - transition: Optional[str] = "01" - """Flux spectroscopy transition type ("01" or "02"). 
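# A minimal sketch of how the off-diagonal crosstalk entry is assembled in the fit
# above: the fit keeps xi fixed at the target qubit's bias point and sweeps xj (the
# other qubit's flux bias); curve_fit returns a dimensionless crosstalk_element
# (bounded in (-1, 1)), interpreted here as the ratio M_ij / M_ii, which the protocol
# rescales by the diagonal matrix element stored in the calibration.
# The values below are placeholders, not a real calibration.
diagonal_element = 1.2           # M_ii, e.g. platform.calibration.get_crosstalk_element(q, q)
fitted_crosstalk_element = 0.03  # popt[0] from the off-diagonal fit

M_ij = fitted_crosstalk_element * diagonal_element  # crosstalk_matrix[target][flux_qubit]
print(M_ij)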
Default value is 01""" drive_duration: int = 2000 """Duration of the drive pulse.""" @@ -37,13 +40,13 @@ class QubitFluxParameters(ResonatorFluxParameters): class QubitFluxResults(Results): """QubitFlux outputs.""" - sweetspot: dict[QubitId, float] + sweetspot: dict[QubitId, float] = field(default_factory=dict) """Sweetspot for each qubit.""" - frequency: dict[QubitId, float] + frequency: dict[QubitId, float] = field(default_factory=dict) """Drive frequency for each qubit.""" - fitted_parameters: dict[QubitId, dict[str, float]] + fitted_parameters: dict[QubitId, dict[str, float]] = field(default_factory=dict) """Raw fitting output.""" - matrix_element: dict[QubitId, float] + matrix_element: dict[QubitId, float] = field(default_factory=dict) """V_ii coefficient.""" @@ -80,86 +83,86 @@ def register_qubit(self, qubit, freq, bias, signal, phase): def _acquisition( params: QubitFluxParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> QubitFluxData: """Data acquisition for QubitFlux Experiment.""" - # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel + delta_frequency_range = np.arange( + -params.freq_width / 2, params.freq_width / 2, params.freq_step + ) + delta_offset_range = np.arange( + -params.bias_width / 2, params.bias_width / 2, params.bias_step + ) + sequence = PulseSequence() ro_pulses = {} qd_pulses = {} qubit_frequency = {} - for qubit in targets: - qd_pulses[qubit] = platform.create_qubit_drive_pulse( - qubit, start=0, duration=params.drive_duration - ) - qubit_frequency[qubit] = platform.qubits[qubit].drive_frequency - - if params.transition == "02": - if platform.qubits[qubit].anharmonicity: - qd_pulses[qubit].frequency -= platform.qubits[qubit].anharmonicity / 2 - else: - qd_pulses[qubit].frequency -= DEFAULT_ANHARMONICITY / 2 - + freq_sweepers = [] + offset_sweepers = [] + for q in targets: + natives = platform.natives.single_qubit[q] + qd_channel, qd_pulse = natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] + + qd_pulse = replace(qd_pulse, duration=params.drive_duration) if params.drive_amplitude is not None: - qd_pulses[qubit].amplitude = params.drive_amplitude - - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish + qd_pulse = replace(qd_pulse, amplitude=params.drive_amplitude) + + qd_pulses[q] = qd_pulse + ro_pulses[q] = ro_pulse + qubit_frequency[q] = frequency0 = platform.config(qd_channel).frequency + + sequence.append((qd_channel, qd_pulse)) + sequence.append((ro_channel, Delay(duration=qd_pulse.duration))) + sequence.append((ro_channel, ro_pulse)) + + # define the parameters to sweep and their range: + freq_sweepers.append( + Sweeper( + parameter=Parameter.frequency, + values=frequency0 + delta_frequency_range, + channels=[qd_channel], + ) ) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - # define the parameters to sweep and their range: - delta_frequency_range = np.arange( - -params.freq_width / 2, params.freq_width / 2, params.freq_step - ) - freq_sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - - delta_bias_range = np.arange( - -params.bias_width / 2, params.bias_width / 2, params.bias_step - ) - sweepers = [ - Sweeper( - Parameter.bias, - delta_bias_range, - qubits=[platform.qubits[qubit] for qubit in targets], - type=SweeperType.OFFSET, + flux_channel = platform.qubits[q].flux + offset0 = 
platform.config(flux_channel).offset + offset_sweepers.append( + Sweeper( + parameter=Parameter.offset, + values=offset0 + delta_offset_range, + channels=[flux_channel], + ) ) - ] data = QubitFluxData( resonator_type=platform.resonator_type, charging_energy={ - qubit: -platform.qubits[qubit].anharmonicity for qubit in targets + qubit: platform.calibration.single_qubits[qubit].qubit.charging_energy + for qubit in targets }, qubit_frequency=qubit_frequency, ) - options = ExecutionParameters( + results = platform.execute( + [sequence], + [offset_sweepers, freq_sweepers], nshots=params.nshots, relaxation_time=params.relaxation_time, acquisition_type=AcquisitionType.INTEGRATION, averaging_mode=AveragingMode.CYCLIC, ) - for bias_sweeper in sweepers: - results = platform.sweep(sequence, options, bias_sweeper, freq_sweeper) - # retrieve the results for every qubit - for qubit in targets: - result = results[ro_pulses[qubit].serial] - sweetspot = platform.qubits[qubit].sweetspot - data.register_qubit( - qubit, - signal=result.magnitude, - phase=result.phase, - freq=delta_frequency_range + qd_pulses[qubit].frequency, - bias=delta_bias_range + sweetspot, - ) + + for i, qubit in enumerate(targets): + result = results[ro_pulses[qubit].id] + data.register_qubit( + qubit, + signal=magnitude(result), + phase=phase(result), + freq=freq_sweepers[i].values, + bias=offset_sweepers[i].values, + ) return data @@ -277,9 +280,13 @@ def _plot(data: QubitFluxData, fit: QubitFluxResults, target: QubitId): return figures, "" -def _update(results: QubitFluxResults, platform: Platform, qubit: QubitId): +def _update(results: QubitFluxResults, platform: CalibrationPlatform, qubit: QubitId): update.drive_frequency(results.frequency[qubit], platform, qubit) update.sweetspot(results.sweetspot[qubit], platform, qubit) + update.flux_offset(results.sweetspot[qubit], platform, qubit) + platform.calibration.single_qubits[qubit].qubit.maximum_frequency = int( + results.frequency[qubit] + ) update.crosstalk_matrix(results.matrix_element[qubit], platform, qubit, qubit) diff --git a/src/qibocal/protocols/flux_dependence/qubit_flux_tracking.py b/src/qibocal/protocols/flux_dependence/qubit_flux_tracking.py deleted file mode 100644 index a4f2ef407..000000000 --- a/src/qibocal/protocols/flux_dependence/qubit_flux_tracking.py +++ /dev/null @@ -1,164 +0,0 @@ -from dataclasses import dataclass - -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal.auto.operation import Routine -from qibocal.config import raise_error - -from ..qubit_spectroscopy_ef import DEFAULT_ANHARMONICITY -from . 
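# A condensed sketch of the qibolab 0.2-style sweep pattern adopted by these
# protocols (shown for a readout-only scan, as in the resonator flux experiments
# further below): Sweepers act on channels, the offset and frequency sweepers are
# passed to platform.execute as two parallel-sweeper groups, and results are indexed
# by the readout pulse id.  `platform` is assumed to be an already-created
# CalibrationPlatform and `targets` a list of qubit ids; widths and steps are
# illustrative.
import numpy as np
from qibolab import (
    AcquisitionType,
    AveragingMode,
    Parameter,
    PulseSequence,
    Sweeper,
)
from qibocal.result import magnitude


def readout_vs_flux(platform, targets, freq_width=2e7, freq_step=2e5, bias_width=0.2, bias_step=0.01):
    sequence = PulseSequence()
    ro_pulses, freq_sweepers, offset_sweepers = {}, [], []

    for q in targets:
        ro_sequence = platform.natives.single_qubit[q].MZ()
        ro_pulses[q] = ro_sequence[0][1]
        sequence += ro_sequence

        qubit = platform.qubits[q]
        freq0 = platform.config(qubit.probe).frequency
        offset0 = platform.config(qubit.flux).offset
        freq_sweepers.append(
            Sweeper(
                parameter=Parameter.frequency,
                values=freq0 + np.arange(-freq_width / 2, freq_width / 2, freq_step),
                channels=[qubit.probe],
            )
        )
        offset_sweepers.append(
            Sweeper(
                parameter=Parameter.offset,
                values=offset0 + np.arange(-bias_width / 2, bias_width / 2, bias_step),
                channels=[qubit.flux],
            )
        )

    results = platform.execute(
        [sequence],
        [offset_sweepers, freq_sweepers],
        nshots=1024,
        relaxation_time=50_000,
        acquisition_type=AcquisitionType.INTEGRATION,
        averaging_mode=AveragingMode.CYCLIC,
    )
    return {q: magnitude(results[ro_pulses[q].id]) for q in targets}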
import qubit_flux_dependence, utils - - -@dataclass -class QubitFluxTrackParameters(qubit_flux_dependence.QubitFluxParameters): - """QubitFluxTrack runcard inputs.""" - - -@dataclass -class QubitFluxTrackResults(qubit_flux_dependence.QubitFluxParameters): - """QubitFluxTrack outputs.""" - - -@dataclass -class QubitFluxTrackData(qubit_flux_dependence.QubitFluxData): - """QubitFluxTrack acquisition outputs.""" - - def register_qubit_track(self, qubit, freq, bias, signal, phase): - """Store output for single qubit.""" - # to be able to handle the 1D sweeper case - size = len(freq) - ar = np.empty(size, dtype=qubit_flux_dependence.QubitFluxType) - ar["freq"] = freq - ar["bias"] = [bias] * size - ar["signal"] = signal - ar["phase"] = phase - if qubit in self.data: - self.data[qubit] = np.rec.array(np.concatenate((self.data[qubit], ar))) - else: - self.data[qubit] = np.rec.array(ar) - - -def _acquisition( - params: QubitFluxTrackResults, - platform: Platform, - targets: list[QubitId], -) -> QubitFluxTrackData: - """Data acquisition for QubitFlux Experiment.""" - # create a sequence of pulses for the experiment: - # MZ - - # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel - sequence = PulseSequence() - ro_pulses = {} - qd_pulses = {} - qubit_frequency = {} - for qubit in targets: - qd_pulses[qubit] = platform.create_qubit_drive_pulse( - qubit, start=0, duration=params.drive_duration - ) - qubit_frequency[qubit] = platform.qubits[qubit].drive_frequency - if params.transition == "02": - if platform.qubits[qubit].anharmonicity != 0: - qd_pulses[qubit].frequency -= platform.qubits[qubit].anharmonicity / 2 - else: - qd_pulses[qubit].frequency -= DEFAULT_ANHARMONICITY / 2 - - if params.drive_amplitude is not None: - qd_pulses[qubit].amplitude = params.drive_amplitude - - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish - ) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - - # define the parameters to sweep and their range: - delta_frequency_range = np.arange( - -params.freq_width / 2, params.freq_width / 2, params.freq_step - ) - - delta_bias_range = np.arange( - -params.bias_width / 2, params.bias_width / 2, params.bias_step - ) - - freq_sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - - data = QubitFluxTrackData( - resonator_type=platform.resonator_type, - qubit_frequency=qubit_frequency, - charging_energy={ - qubit: -platform.qubits[qubit].anharmonicity for qubit in targets - }, - ) - - for bias in delta_bias_range: - for qubit in targets: - try: - freq_resonator = utils.transmon_readout_frequency( - xi=bias, - xj=0, - w_max=platform.qubits[qubit].drive_frequency, - d=0, - normalization=platform.qubits[qubit].normalization, - crosstalk_element=1, - offset=-platform.qubits[qubit].sweetspot - * platform.qubits[qubit].normalization, - resonator_freq=platform.qubits[qubit].bare_resonator_frequency, - g=platform.qubits[qubit].coupling, - charging_energy=data.charging_energy[qubit], - ) - # modify qubit resonator frequency - platform.qubits[qubit].readout_frequency = freq_resonator - except: - raise_error - ( - RuntimeError, - "qubit_flux_track: Not enough parameters to estimate the resonator freq for the given bias. 
Please run resonator spectroscopy flux and update the runcard", - ) - - # modify qubit flux - platform.qubits[qubit].flux.offset = bias - - # execute pulse sequence sweeping only qubit resonator - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - freq_sweeper, - ) - - # retrieve the results for every qubit - for qubit in targets: - result = results[ro_pulses[qubit].serial] - data.register_qubit_track( - qubit, - signal=result.magnitude, - phase=result.phase, - freq=delta_frequency_range + qd_pulses[qubit].frequency, - bias=bias + platform.qubits[qubit].sweetspot, - ) - - return data - - -qubit_flux_tracking = Routine( - _acquisition, - qubit_flux_dependence._fit, - qubit_flux_dependence._plot, - qubit_flux_dependence._update, -) -"""QubitFluxTrack Routine object.""" diff --git a/src/qibocal/protocols/flux_dependence/resonator_crosstalk.py b/src/qibocal/protocols/flux_dependence/resonator_crosstalk.py deleted file mode 100644 index 790d61a55..000000000 --- a/src/qibocal/protocols/flux_dependence/resonator_crosstalk.py +++ /dev/null @@ -1,380 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional - -import numpy as np -import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType -from scipy.optimize import curve_fit - -from ... import update -from ...auto.operation import Routine -from ...config import log -from ..utils import HZ_TO_GHZ, extract_feature, table_dict, table_html -from . import utils -from .resonator_flux_dependence import ( - ResFluxType, - ResonatorFluxData, - ResonatorFluxParameters, - ResonatorFluxResults, -) -from .resonator_flux_dependence import _fit as diagonal_fit - - -@dataclass -class ResCrosstalkParameters(ResonatorFluxParameters): - """ResonatorFlux runcard inputs.""" - - bias_point: Optional[dict[QubitId, float]] = field(default_factory=dict) - """Dictionary with {qubit_id: bias_point_qubit_id}.""" - flux_qubits: Optional[list[QubitId]] = None - """IDs of the qubits that we will sweep the flux on. - If ``None`` flux will be swept on all qubits that we are running the routine on in a multiplex fashion. - If given flux will be swept on the given qubits in a sequential fashion (n qubits will result to n different executions). - Multiple qubits may be measured in each execution as specified by the ``qubits`` option in the runcard. 
- """ - - -@dataclass -class ResCrosstalkResults(ResonatorFluxResults): - """ResCrosstalk outputs.""" - - resonator_frequency_bias_point: dict[QubitId, dict[QubitId, float]] = field( - default_factory=dict - ) - """Resonator frequency at bias point.""" - crosstalk_matrix: dict[QubitId, dict[QubitId, float]] = field(default_factory=dict) - """Crosstalk matrix element.""" - fitted_parameters: dict[tuple[QubitId, QubitId], dict] = field(default_factory=dict) - """Fitted parameters for each couple target-flux qubit.""" - - def __contains__(self, key: QubitId): - """Checking if qubit is in crosstalk_matrix attribute.""" - return key in self.crosstalk_matrix - - -@dataclass -class ResCrosstalkData(ResonatorFluxData): - """ResFlux acquisition outputs when ``flux_qubits`` are given.""" - - coupling: dict[QubitId, float] = field(default_factory=dict) - """Coupling parameter g for each qubit.""" - bias_point: dict[QubitId, float] = field(default_factory=dict) - """Voltage provided to each qubit.""" - bare_resonator_frequency: dict[QubitId, float] = field(default_factory=dict) - """Readout resonator frequency for each qubit.""" - resonator_frequency: dict[QubitId, float] = field(default_factory=dict) - """Readout resonator frequency for each qubit.""" - matrix_element: dict[QubitId, float] = field(default_factory=dict) - """Diagonal crosstalk matrix element.""" - offset: dict[QubitId, float] = field(default_factory=dict) - """Diagonal offset.""" - asymmetry: dict[QubitId, float] = field(default_factory=dict) - """Diagonal asymmetry.""" - data: dict[tuple[QubitId, QubitId], npt.NDArray[ResFluxType]] = field( - default_factory=dict - ) - """Raw data acquired for (qubit, qubit_flux) pairs saved in nested dictionaries.""" - - def register_qubit(self, qubit, flux_qubit, freq, bias, signal, phase): - """Store output for single qubit.""" - ar = utils.create_data_array(freq, bias, signal, phase, dtype=ResFluxType) - if (qubit, flux_qubit) in self.data: - self.data[qubit, flux_qubit] = np.rec.array( - np.concatenate((self.data[qubit, flux_qubit], ar)) - ) - else: - self.data[qubit, flux_qubit] = ar - - @property - def diagonal(self) -> ResonatorFluxData: - """Returns diagonal data acquired.""" - instance = ResonatorFluxData( - resonator_type=self.resonator_type, - qubit_frequency=self.qubit_frequency, - bare_resonator_frequency=self.bare_resonator_frequency, - charging_energy=self.charging_energy, - ) - for qubit in self.qubits: - try: - instance.data[qubit] = self.data[qubit, qubit] - except KeyError: - log.info( - f"Diagonal acquisition not found for qubit {qubit}. Runcard values will be used to perform the off-diagonal fit." 
- ) - - return instance - - -def _acquisition( - params: ResCrosstalkParameters, platform: Platform, targets: list[QubitId] -) -> ResCrosstalkData: - """Data acquisition for ResonatorFlux experiment.""" - sequence = PulseSequence() - ro_pulses = {} - bare_resonator_frequency = {} - resonator_frequency = {} - qubit_frequency = {} - coupling = {} - asymmetry = {} - charging_energy = {} - bias_point = {} - offset = {} - matrix_element = {} - for qubit in targets: - charging_energy[qubit] = -platform.qubits[qubit].anharmonicity - bias_point[qubit] = params.bias_point.get( - qubit, platform.qubits[qubit].sweetspot - ) - coupling[qubit] = platform.qubits[qubit].g - asymmetry[qubit] = platform.qubits[qubit].asymmetry - matrix_element[qubit] = platform.qubits[qubit].crosstalk_matrix[qubit] - offset[qubit] = -platform.qubits[qubit].sweetspot * matrix_element[qubit] - bare_resonator_frequency[qubit] = platform.qubits[ - qubit - ].bare_resonator_frequency - qubit_frequency[qubit] = platform.qubits[qubit].drive_frequency - resonator_frequency[qubit] = platform.qubits[qubit].readout_frequency - ro_pulses[qubit] = platform.create_qubit_readout_pulse(qubit, start=0) - sequence.add(ro_pulses[qubit]) - - # define the parameters to sweep and their range: - delta_frequency_range = np.arange( - -params.freq_width / 2, params.freq_width / 2, params.freq_step - ) - freq_sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - [ro_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - - if params.flux_qubits is None: - flux_qubits = list(platform.qubits) - - else: - flux_qubits = params.flux_qubits - - delta_bias_range = np.arange( - -params.bias_width / 2, params.bias_width / 2, params.bias_step - ) - sequences = [sequence] * len(flux_qubits) - sweepers = [ - Sweeper( - Parameter.bias, - delta_bias_range, - qubits=[platform.qubits[flux_qubit]], - type=SweeperType.OFFSET, - ) - for flux_qubit in flux_qubits - ] - data = ResCrosstalkData( - resonator_type=platform.resonator_type, - qubit_frequency=qubit_frequency, - offset=offset, - asymmetry=asymmetry, - resonator_frequency=resonator_frequency, - charging_energy=charging_energy, - bias_point=bias_point, - matrix_element=matrix_element, - coupling=coupling, - bare_resonator_frequency=bare_resonator_frequency, - ) - options = ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ) - for qubit in targets: - if qubit in params.bias_point: - platform.qubits[qubit].flux.offset = params.bias_point[qubit] - - for flux_qubit, bias_sweeper, sequence in zip(flux_qubits, sweepers, sequences): - results = platform.sweep(sequence, options, bias_sweeper, freq_sweeper) - # retrieve the results for every qubit - for qubit in targets: - result = results[ro_pulses[qubit].serial] - if flux_qubit is None: - sweetspot = platform.qubits[qubit].flux.offset - else: - sweetspot = platform.qubits[flux_qubit].flux.offset - data.register_qubit( - qubit, - flux_qubit, - signal=result.magnitude, - phase=result.phase, - freq=delta_frequency_range + ro_pulses[qubit].frequency, - bias=delta_bias_range + sweetspot, - ) - return data - - -def _fit(data: ResCrosstalkData) -> ResCrosstalkResults: - """ "PostProcessing for resonator crosstalk protocol.""" - - # perform first fit where corresponding qubit is moved - diagonal = diagonal_fit(data.diagonal) - - fitted_parameters = {} - crosstalk_matrix = {qubit: {} for qubit in data.qubit_frequency} - offset 
= {} - coupling = {} - matrix_element = {} - asymmetry = {} - resonator_frequency = {} - resonator_frequency_bias_point = {} - - for qubit in data.qubits: - - # retrieve parameters from diagonal fit if performed - condition = qubit in diagonal - coupling[qubit] = ( - diagonal.coupling[qubit] if condition else data.coupling[qubit] - ) - asymmetry[qubit] = ( - diagonal.asymmetry[qubit] if condition else data.asymmetry[qubit] - ) - matrix_element[qubit] = ( - diagonal.matrix_element[qubit] if condition else data.matrix_element[qubit] - ) - resonator_frequency[qubit] = ( - diagonal.frequency[qubit] if condition else data.resonator_frequency[qubit] - ) - offset[qubit] = ( - diagonal.fitted_parameters[qubit]["offset"] - if condition - else data.offset[qubit] - ) - - for target_flux_qubit, qubit_data in data.data.items(): - target_qubit, flux_qubit = target_flux_qubit - frequencies, biases = extract_feature( - qubit_data.freq, - qubit_data.bias, - qubit_data.signal, - "min" if data.resonator_type == "2D" else "max", - ) - - # fit valid only for non-diagonal case - # (the diagonal case was handled before) - if target_qubit != flux_qubit: - resonator_frequency_bias_point[target_qubit] = ( - utils.transmon_readout_frequency( - xi=data.bias_point[target_qubit], - xj=0, - d=asymmetry[target_qubit], - w_max=data.qubit_frequency[target_qubit] * HZ_TO_GHZ, - offset=data.offset[target_qubit], - normalization=matrix_element[target_qubit], - charging_energy=data.charging_energy[target_qubit] * HZ_TO_GHZ, - g=coupling[target_qubit], - resonator_freq=data.bare_resonator_frequency[target_qubit] - * HZ_TO_GHZ, - crosstalk_element=1, - ) - ) - - def fit_function(x, crosstalk_element): - return utils.transmon_readout_frequency( - xi=data.bias_point[target_qubit], - xj=x, - d=0, - w_max=data.qubit_frequency[target_qubit] * HZ_TO_GHZ, - offset=offset[target_qubit], - normalization=data.matrix_element[target_qubit], - charging_energy=data.charging_energy[target_qubit] * HZ_TO_GHZ, - g=coupling[target_qubit], - resonator_freq=data.bare_resonator_frequency[target_qubit] - * HZ_TO_GHZ, - crosstalk_element=crosstalk_element, - ) - - try: - popt, _ = curve_fit( - fit_function, - biases, - frequencies * HZ_TO_GHZ, - bounds=(-1, 1), - ) - fitted_parameters[target_qubit, flux_qubit] = dict( - xi=data.bias_point[qubit], - d=asymmetry[qubit], - w_max=data.qubit_frequency[target_qubit] * HZ_TO_GHZ, - offset=offset[qubit], - normalization=data.matrix_element[target_qubit], - charging_energy=data.charging_energy[target_qubit] * HZ_TO_GHZ, - g=coupling[target_qubit], - resonator_freq=data.bare_resonator_frequency[target_qubit] - * HZ_TO_GHZ, - crosstalk_element=float(popt[0]), - ) - crosstalk_matrix[target_qubit][flux_qubit] = ( - popt[0] * data.matrix_element[target_qubit] - ) - except (ValueError, RuntimeError) as e: - log.error( - f"Off-diagonal flux fit failed for qubit {flux_qubit} due to {e}." 
- ) - else: - fitted_parameters[target_qubit, flux_qubit] = diagonal.fitted_parameters[ - target_qubit - ] - crosstalk_matrix[target_qubit][flux_qubit] = matrix_element[qubit] - - return ResCrosstalkResults( - frequency=resonator_frequency, - asymmetry=asymmetry, - resonator_frequency_bias_point=resonator_frequency_bias_point, - coupling=coupling, - crosstalk_matrix=crosstalk_matrix, - fitted_parameters=fitted_parameters, - ) - - -def _plot(data: ResCrosstalkData, fit: ResCrosstalkResults, target: QubitId): - """Plotting function for ResonatorFlux Experiment.""" - figures, fitting_report = utils.flux_crosstalk_plot( - data, target, fit, fit_function=utils.transmon_readout_frequency - ) - if fit is not None: - labels = [ - "Resonator Frequency at Sweetspot [Hz]", - "Coupling g [MHz]", - "Asymmetry d", - "Resonator Frequency at Bias point [Hz]", - ] - values = [ - np.round(fit.frequency[target], 4), - np.round(fit.coupling[target] * 1e3, 2), - np.round(fit.asymmetry[target], 2), - np.round(fit.resonator_frequency_bias_point[target], 4), - ] - for flux_qubit in fit.crosstalk_matrix[target]: - if flux_qubit != target: - labels.append(f"Crosstalk with qubit {flux_qubit}") - else: - labels.append(f"Flux dependence") - values.append(np.round(fit.crosstalk_matrix[target][flux_qubit], 4)) - - fitting_report = table_html( - table_dict( - target, - labels, - values, - ) - ) - return figures, fitting_report - - -def _update(results: ResCrosstalkResults, platform: Platform, qubit: QubitId): - """Update crosstalk matrix.""" - for flux_qubit, element in results.crosstalk_matrix[qubit].items(): - update.crosstalk_matrix(element, platform, qubit, flux_qubit) - - -resonator_crosstalk = Routine(_acquisition, _fit, _plot, _update) -"""Resonator crosstalk Routine object""" diff --git a/src/qibocal/protocols/flux_dependence/resonator_flux_dependence.py b/src/qibocal/protocols/flux_dependence/resonator_flux_dependence.py index f560409de..5be9e1472 100644 --- a/src/qibocal/protocols/flux_dependence/resonator_flux_dependence.py +++ b/src/qibocal/protocols/flux_dependence/resonator_flux_dependence.py @@ -3,16 +3,15 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, PulseSequence, Sweeper from scipy.optimize import curve_fit +from qibocal.calibration import CalibrationPlatform + from ... import update -from ...auto.operation import Data, Parameters, Results, Routine +from ...auto.operation import Data, Parameters, QubitId, Results, Routine from ...config import log +from ...result import magnitude, phase from ..utils import GHZ_TO_HZ, HZ_TO_GHZ, extract_feature, table_dict, table_html from . 
import utils @@ -85,46 +84,59 @@ def register_qubit(self, qubit, freq, bias, signal, phase): def _acquisition( - params: ResonatorFluxParameters, platform: Platform, targets: list[QubitId] + params: ResonatorFluxParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> ResonatorFluxData: """Data acquisition for ResonatorFlux experiment.""" - sequence = PulseSequence() - ro_pulses = {} - qubit_frequency = {} - bare_resonator_frequency = {} - charging_energy = {} - for qubit in targets: - qubit_frequency[qubit] = platform.qubits[qubit].drive_frequency - bare_resonator_frequency[qubit] = platform.qubits[ - qubit - ].bare_resonator_frequency - charging_energy[qubit] = -platform.qubits[qubit].anharmonicity - ro_pulses[qubit] = platform.create_qubit_readout_pulse(qubit, start=0) - sequence.add(ro_pulses[qubit]) - - # define the parameters to sweep and their range: delta_frequency_range = np.arange( -params.freq_width / 2, params.freq_width / 2, params.freq_step ) - freq_sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - [ro_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - - delta_bias_range = np.arange( + delta_offset_range = np.arange( -params.bias_width / 2, params.bias_width / 2, params.bias_step ) - sweepers = [ - Sweeper( - Parameter.bias, - delta_bias_range, - qubits=[platform.qubits[qubit] for qubit in targets], - type=SweeperType.OFFSET, + # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel + sequence = PulseSequence() + ro_pulses = {} + qubit_frequency = {} + bare_resonator_frequency = {} + charging_energy = {} + matrix_element = {} + offset = {} + freq_sweepers = [] + offset_sweepers = [] + for q in targets: + ro_sequence = platform.natives.single_qubit[q].MZ() + ro_pulses[q] = ro_sequence[0][1] + sequence += ro_sequence + + qubit = platform.qubits[q] + offset0 = platform.config(qubit.flux).offset + freq0 = platform.config(qubit.probe).frequency + + freq_sweepers.append( + Sweeper( + parameter=Parameter.frequency, + values=freq0 + delta_frequency_range, + channels=[qubit.probe], + ) ) - ] + offset_sweepers.append( + Sweeper( + parameter=Parameter.offset, + values=offset0 + delta_offset_range, + channels=[qubit.flux], + ) + ) + + qubit_frequency[q] = platform.config(qubit.drive).frequency + bare_resonator_frequency[q] = platform.calibration.single_qubits[ + q + ].resonator.bare_frequency + matrix_element[q] = platform.calibration.get_crosstalk_element(q, q) + offset[q] = -offset0 * matrix_element[q] + charging_energy[q] = platform.calibration.single_qubits[q].qubit.charging_energy data = ResonatorFluxData( resonator_type=platform.resonator_type, @@ -132,34 +144,24 @@ def _acquisition( bare_resonator_frequency=bare_resonator_frequency, charging_energy=charging_energy, ) - options = ExecutionParameters( + results = platform.execute( + [sequence], + [offset_sweepers, freq_sweepers], nshots=params.nshots, relaxation_time=params.relaxation_time, acquisition_type=AcquisitionType.INTEGRATION, averaging_mode=AveragingMode.CYCLIC, ) - for bias_sweeper in sweepers: - results = platform.sweep(sequence, options, bias_sweeper, freq_sweeper) - # retrieve the results for every qubit - for qubit in targets: - result = results[ro_pulses[qubit].serial] - sweetspot = platform.qubits[qubit].sweetspot - - frequency =delta_frequency_range + ro_pulses[qubit].frequency - - if params.phase_delay is not None: - phase = result.average.phase - phase = np.unwrap(phase)-(frequency-frequency[0])*1e-6*params.phase_delay - 
else: - phase = result.average.phase - - data.register_qubit( - qubit, - signal=result.magnitude, - phase=phase, - freq=frequency, - bias=delta_bias_range + sweetspot, - ) + # retrieve the results for every qubit + for i, qubit in enumerate(targets): + result = results[ro_pulses[qubit].id] + data.register_qubit( + qubit, + signal=magnitude(result), + phase=phase(result), + freq=freq_sweepers[i].values, + bias=offset_sweepers[i].values, + ) return data @@ -291,12 +293,14 @@ def _plot(data: ResonatorFluxData, fit: ResonatorFluxResults, target: QubitId): return figures, "" -def _update(results: ResonatorFluxResults, platform: Platform, qubit: QubitId): +def _update( + results: ResonatorFluxResults, platform: CalibrationPlatform, qubit: QubitId +): + update.dressed_resonator_frequency(results.frequency[qubit], platform, qubit) update.readout_frequency(results.frequency[qubit], platform, qubit) update.coupling(results.coupling[qubit], platform, qubit) - update.asymmetry(results.coupling[qubit], platform, qubit) + update.flux_offset(results.sweetspot[qubit], platform, qubit) update.sweetspot(results.sweetspot[qubit], platform, qubit) - update.crosstalk_matrix(results.matrix_element[qubit], platform, qubit, qubit) resonator_flux = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/flux_dependence/utils.py b/src/qibocal/protocols/flux_dependence/utils.py index 5e56f7733..cf4395446 100644 --- a/src/qibocal/protocols/flux_dependence/utils.py +++ b/src/qibocal/protocols/flux_dependence/utils.py @@ -1,8 +1,6 @@ import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab.platform import Platform -from qibolab.qubits import QubitId from ..utils import HZ_TO_GHZ @@ -174,25 +172,6 @@ def flux_crosstalk_plot(data, qubit, fit, fit_function): row=1, col=col + 1, ) - elif flux_qubit in fit.fitted_parameters: - diagonal_params = fit.fitted_parameters[qubit, qubit] - fig.add_trace( - go.Scatter( - x=fit_function( - qubit_data.bias, - **diagonal_params, - ), - y=qubit_data.bias, - showlegend=not any( - isinstance(trace, go.Scatter) for trace in fig.data - ), - legendgroup="Fit", - name="Fit", - marker=dict(color="green"), - ), - row=1, - col=col + 1, - ) fig.update_xaxes( title_text="Frequency [GHz]", @@ -346,73 +325,3 @@ def qubit_flux_dependence_fit_bounds(qubit_frequency: float): 1, ], ) - - -def crosstalk_matrix(platform: Platform, qubits: list[QubitId]) -> np.ndarray: - """Computing crosstalk matrix for number of qubits selected. - The matrix returns has the following matrix element: - (M)ij = qubits[i].crosstalk_matrix[qubits[j]] - """ - size = len(qubits) - matrix = np.ones((size, size)) - for i in range(size): - for j in range(size): - matrix[i, j] = platform.qubits[qubits[i]].crosstalk_matrix[qubits[j]] - - return matrix - - -def compensation_matrix(platform: Platform, qubits: list[QubitId]) -> np.ndarray: - """Compensation matrix C computed as M C = diag(M') where M is the - crosstalk matrix. - For more details check: https://web.physics.ucsb.edu/~martinisgroup/theses/Chen2018.pdf - 8.2.3 - """ - size = len(qubits) - matrix = np.ones((size, size)) - crosstalk = crosstalk_matrix(platform, qubits) - for i in range(size): - for j in range(size): - if i == j: - matrix[i, j] = 1 - else: - matrix[i, j] = -crosstalk[i, j] / crosstalk[i, i] - - return matrix - - -def invert_transmon_freq(target_freq: float, platform: Platform, qubit: QubitId): - """Return right side of equation matrix * total_flux = f(target_freq). 
- Target frequency shoudl be expressed in GHz. - """ - charging_energy = -platform.qubits[qubit].anharmonicity * HZ_TO_GHZ - offset = ( - -platform.qubits[qubit].sweetspot - * platform.qubits[qubit].crosstalk_matrix[qubit] - ) - w_max = platform.qubits[qubit].drive_frequency * HZ_TO_GHZ - d = platform.qubits[qubit].asymmetry - angle = np.sqrt( - 1 - / (1 - d**2) - * (((target_freq + charging_energy) / (w_max + charging_energy)) ** 4 - d**2) - ) - return 1 / np.pi * np.arccos(angle) - offset - - -def frequency_to_bias( - target_freqs: dict[QubitId, float], platform: Platform -) -> np.ndarray: - """Starting from set of target_freqs computes bias points using the compensation matrix.""" - qubits = list(target_freqs) - inverted_crosstalk_matrix = np.linalg.inv( - crosstalk_matrix(platform, qubits) @ compensation_matrix(platform, qubits) - ) - transmon_freq = np.array( - [ - invert_transmon_freq(freq, platform, qubit) - for qubit, freq in target_freqs.items() - ] - ) - bias_array = inverted_crosstalk_matrix @ transmon_freq - return {qubit: bias_array[index] for index, qubit in enumerate(qubits)} diff --git a/src/qibocal/protocols/qubit_power_spectroscopy.py b/src/qibocal/protocols/qubit_power_spectroscopy.py index 5b77172bb..0380d9295 100644 --- a/src/qibocal/protocols/qubit_power_spectroscopy.py +++ b/src/qibocal/protocols/qubit_power_spectroscopy.py @@ -4,14 +4,20 @@ import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal.auto.operation import Parameters, Results, Routine - +from qibolab import ( + AcquisitionType, + AveragingMode, + Delay, + Parameter, + PulseSequence, + Sweeper, +) + +from qibocal.auto.operation import Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform + +from ..result import magnitude, phase +from ..update import replace from .qubit_spectroscopy import QubitSpectroscopyResults from .resonator_punchout import ResonatorPunchoutData from .utils import HZ_TO_GHZ @@ -25,16 +31,14 @@ class QubitPowerSpectroscopyParameters(Parameters): """Width for frequency sweep relative to the drive frequency [Hz].""" freq_step: int """Frequency step for sweep [Hz].""" - min_amp_factor: float - """Minimum amplitude multiplicative factor.""" - max_amp_factor: float - """Maximum amplitude multiplicative factor.""" - step_amp_factor: float - """Step amplitude multiplicative factor.""" + min_amp: float + """Minimum amplitude.""" + max_amp: float + """Maximum amplitude.""" + step_amp: float + """Step amplitude.""" duration: int """Drive duration.""" - amplitude: Optional[float] = None - """Initial drive amplitude.""" @dataclass @@ -44,7 +48,7 @@ class QubitPowerSpectroscopyData(ResonatorPunchoutData): def _acquisition( params: QubitPowerSpectroscopyParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> QubitPowerSpectroscopyData: """Perform a qubit spectroscopy experiment with different amplitudes. 
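The hunk above renames the multiplicative `min_amp_factor`/`max_amp_factor`/`step_amp_factor` parameters to absolute `min_amp`/`max_amp`/`step_amp` values, and the acquisition hunk that follows builds a pulse-amplitude sweeper and per-channel frequency sweepers from them before handing both to `platform.execute`. A minimal sketch of that sweeper construction under the 0.2-style API used in these hunks, assuming a platform object, a ready `PulseSequence`, and a `qd_pulses` mapping as in the surrounding code (the function name and numeric ranges are placeholders for illustration only):

```python
import numpy as np
from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper


def sweep_amplitude_and_frequency(platform, sequence, qd_pulses, targets, nshots=1024):
    # Drive-amplitude sweep attached to the pulses: absolute values,
    # with no multiplicative factor as in the pre-0.2 parameters.
    amp_sweeper = Sweeper(
        parameter=Parameter.amplitude,
        range=(0.0, 0.5, 0.01),  # placeholder (min_amp, max_amp, step_amp)
        pulses=[qd_pulses[q] for q in targets],
    )
    # Drive-frequency sweeps attached to the channels: absolute frequencies,
    # obtained by offsetting the configured channel frequency.
    delta = np.arange(-5e6, 5e6, 0.5e6)  # placeholder width and step
    freq_sweepers = [
        Sweeper(
            parameter=Parameter.frequency,
            values=platform.config(platform.qubits[q].drive).frequency + delta,
            channels=[platform.qubits[q].drive],
        )
        for q in targets
    ]
    # Each inner list forms one sweep dimension; sweepers within the same
    # inner list run in parallel (here, one frequency sweeper per target).
    return platform.execute(
        [sequence],
        [[amp_sweeper], freq_sweepers],
        nshots=nshots,
        acquisition_type=AcquisitionType.INTEGRATION,
        averaging_mode=AveragingMode.CYCLIC,
    )
```

The returned results are then indexed by readout-pulse `id` rather than by `serial`, as the hunks below do.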
@@ -58,71 +62,61 @@ def _acquisition( sequence = PulseSequence() ro_pulses = {} qd_pulses = {} - amplitudes = {} - for qubit in targets: - qd_pulses[qubit] = platform.create_qubit_drive_pulse( - qubit, start=0, duration=params.duration - ) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish - ) - if params.amplitude is not None: - qd_pulses[qubit].amplitude = params.amplitude - amplitudes[qubit] = qd_pulses[qubit].amplitude - - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - - # define the parameters to sweep and their range: - # drive frequency + freq_sweepers = {} delta_frequency_range = np.arange( -params.freq_width / 2, params.freq_width / 2, params.freq_step ) - freq_sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - # drive amplitude - amplitude_range = np.arange( - params.min_amp_factor, params.max_amp_factor, params.step_amp_factor - ) + for qubit in targets: + natives = platform.natives.single_qubit[qubit] + qd_channel, qd_pulse = natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] + + qd_pulse = replace(qd_pulse, duration=params.duration) + + qd_pulses[qubit] = qd_pulse + ro_pulses[qubit] = ro_pulse + + sequence.append((qd_channel, qd_pulse)) + sequence.append((ro_channel, Delay(duration=qd_pulse.duration))) + sequence.append((ro_channel, ro_pulse)) + + f0 = platform.config(qd_channel).frequency + freq_sweepers[qubit] = Sweeper( + parameter=Parameter.frequency, + values=f0 + delta_frequency_range, + channels=[qd_channel], + ) + amp_sweeper = Sweeper( - Parameter.amplitude, - amplitude_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.FACTOR, + parameter=Parameter.amplitude, + range=(params.min_amp, params.max_amp, params.step_amp), + pulses=[qd_pulses[qubit] for qubit in targets], ) # data data = QubitPowerSpectroscopyData( - amplitudes=amplitudes, resonator_type=platform.resonator_type, ) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - amp_sweeper, - freq_sweeper, + results = platform.execute( + [sequence], + [[amp_sweeper], [freq_sweepers[q] for q in targets]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, ) # retrieve the results for every qubit for qubit, ro_pulse in ro_pulses.items(): # average signal, phase, i and q over the number of shots defined in the runcard - result = results[ro_pulse.serial] + result = results[ro_pulse.id] data.register_qubit( qubit, - signal=result.magnitude, - phase=result.phase, - freq=delta_frequency_range + qd_pulses[qubit].frequency, - amp=amplitude_range * amplitudes[qubit], + signal=magnitude(result), + phase=phase(result), + freq=freq_sweepers[qubit].values, + amp=amp_sweeper.values, ) return data diff --git a/src/qibocal/protocols/qubit_spectroscopy.py b/src/qibocal/protocols/qubit_spectroscopy.py index ae60f93f7..7362de178 100644 --- a/src/qibocal/protocols/qubit_spectroscopy.py +++ b/src/qibocal/protocols/qubit_spectroscopy.py @@ -2,14 +2,14 @@ from typing import Optional import numpy as np -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import 
Delay, Parameter, PulseSequence, Sweeper -from qibocal import update -from qibocal.auto.operation import Parameters, Results, Routine +from qibocal.auto.operation import Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import magnitude, phase +from qibocal.update import replace +from .. import update from .resonator_spectroscopy import ResonatorSpectroscopyData, ResSpecType from .utils import chi2_reduced, lorentzian, lorentzian_fit, spectroscopy_plot @@ -53,67 +53,84 @@ class QubitSpectroscopyData(ResonatorSpectroscopyData): def _acquisition( - params: QubitSpectroscopyParameters, platform: Platform, targets: list[QubitId] + params: QubitSpectroscopyParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> QubitSpectroscopyData: """Data acquisition for qubit spectroscopy.""" # create a sequence of pulses for the experiment: # long drive probing pulse - MZ + delta_frequency_range = np.arange( + -params.freq_width / 2, params.freq_width / 2, params.freq_step + ) + # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel sequence = PulseSequence() ro_pulses = {} qd_pulses = {} amplitudes = {} + sweepers = [] for qubit in targets: - qd_pulses[qubit] = platform.create_qubit_drive_pulse( - qubit, start=0, duration=params.drive_duration - ) - if params.drive_amplitude is not None: - qd_pulses[qubit].amplitude = params.drive_amplitude - - amplitudes[qubit] = qd_pulses[qubit].amplitude + natives = platform.natives.single_qubit[qubit] + qd_channel, qd_pulse = natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish + qd_pulse = replace(qd_pulse, duration=params.drive_duration) + if params.drive_amplitude is not None: + qd_pulse = replace(qd_pulse, amplitude=params.drive_amplitude) + + amplitudes[qubit] = qd_pulse.amplitude + qd_pulses[qubit] = qd_pulse + ro_pulses[qubit] = ro_pulse + + sequence.append((qd_channel, qd_pulse)) + sequence.append((ro_channel, Delay(duration=qd_pulse.duration))) + sequence.append((ro_channel, ro_pulse)) + + f0 = platform.config(qd_channel).frequency + sweepers.append( + Sweeper( + parameter=Parameter.frequency, + values=f0 + delta_frequency_range, + channels=[qd_channel], + ) ) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - - # define the parameter to sweep and its range: - delta_frequency_range = np.arange( - -params.freq_width / 2, params.freq_width / 2, params.freq_step - ) - sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) # Create data structure for data acquisition. 
data = QubitSpectroscopyData( resonator_type=platform.resonator_type, amplitudes=amplitudes ) - results = platform.sweep( - sequence, - params.execution_parameters, - sweeper, + results = platform.execute( + [sequence], + [sweepers], + **params.execution_parameters, ) # retrieve the results for every qubit for qubit, ro_pulse in ro_pulses.items(): - result = results[ro_pulse.serial] + result = results[ro_pulse.id] # store the results + f0 = platform.config(platform.qubits[qubit].drive).frequency + signal = magnitude(result) + _phase = phase(result) + if len(signal.shape) > 1: + error_signal = np.std(signal, axis=0, ddof=1) / np.sqrt(signal.shape[0]) + signal = np.mean(signal, axis=0) + error_phase = np.std(_phase, axis=0, ddof=1) / np.sqrt(_phase.shape[0]) + _phase = np.mean(_phase, axis=0) + else: + error_signal, error_phase = None, None data.register_qubit( ResSpecType, (qubit), dict( - signal=result.average.magnitude, - phase=result.average.phase, - freq=delta_frequency_range + qd_pulses[qubit].frequency, - error_signal=result.average.std, - error_phase=result.phase_std, + signal=signal, + phase=_phase, + freq=delta_frequency_range + f0, + error_signal=error_signal, + error_phase=error_phase, ), ) return data @@ -156,7 +173,12 @@ def _plot(data: QubitSpectroscopyData, target: QubitId, fit: QubitSpectroscopyRe return spectroscopy_plot(data, target, fit) -def _update(results: QubitSpectroscopyResults, platform: Platform, target: QubitId): +def _update( + results: QubitSpectroscopyResults, platform: CalibrationPlatform, target: QubitId +): + platform.calibration.single_qubits[target].qubit.frequency_01 = results.frequency[ + target + ] update.drive_frequency(results.frequency[target], platform, target) diff --git a/src/qibocal/protocols/qubit_spectroscopy_ef.py b/src/qibocal/protocols/qubit_spectroscopy_ef.py index 72ac82f86..c43473f2f 100644 --- a/src/qibocal/protocols/qubit_spectroscopy_ef.py +++ b/src/qibocal/protocols/qubit_spectroscopy_ef.py @@ -1,14 +1,14 @@ from dataclasses import asdict, dataclass, field import numpy as np -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import Delay, Parameter, PulseSequence, Sweeper -from qibocal import update -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.update import replace +from .. import update +from ..result import magnitude, phase from .qubit_spectroscopy import ( QubitSpectroscopyData, QubitSpectroscopyParameters, @@ -18,9 +18,6 @@ from .resonator_spectroscopy import ResSpecType from .utils import spectroscopy_plot, table_dict, table_html -DEFAULT_ANHARMONICITY = 300e6 -"""Initial guess for anharmonicity.""" - @dataclass class QubitSpectroscopyEFParameters(QubitSpectroscopyParameters): @@ -53,7 +50,9 @@ def fit_ef(data: QubitSpectroscopyEFData) -> QubitSpectroscopyEFResults: def _acquisition( - params: QubitSpectroscopyEFParameters, platform: Platform, targets: list[QubitId] + params: QubitSpectroscopyEFParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> QubitSpectroscopyEFData: """Data acquisition for qubit spectroscopy ef protocol. 
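In these hunks (and in the `qubit_spectroscopy_ef` acquisition that follows) the old `start=`-based pulse scheduling is replaced by explicit `Delay` entries: the readout channel is padded with a delay equal to the duration of the preceding drive pulse(s) before the measurement pulse is appended. A minimal single-qubit sketch of that pattern, assuming `platform` and `qubit` are available as in the surrounding code (the helper name is illustrative):

```python
from qibolab import Delay, PulseSequence


def drive_then_measure(platform, qubit):
    natives = platform.natives.single_qubit[qubit]
    qd_channel, qd_pulse = natives.RX()[0]  # (channel, pulse) of the native RX
    ro_channel, ro_pulse = natives.MZ()[0]  # (channel, pulse) of the measurement

    sequence = PulseSequence()
    sequence.append((qd_channel, qd_pulse))
    # Hold the readout channel until the drive pulse has finished,
    # then play the measurement pulse.
    sequence.append((ro_channel, Delay(duration=qd_pulse.duration)))
    sequence.append((ro_channel, ro_pulse))
    return sequence, ro_pulse
```

In the ef variant below, the same idea is repeated on the 1-2 drive channel, with the readout delay covering the combined RX and RX12 durations.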
@@ -71,77 +70,81 @@ def _acquisition( # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel sequence = PulseSequence() ro_pulses = {} - qd_pulses = {} - rx_pulses = {} amplitudes = {} + sweepers = [] drive_frequencies = {} - for qubit in targets: - rx_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - drive_frequencies[qubit] = rx_pulses[qubit].frequency - qd_pulses[qubit] = platform.create_qubit_drive_pulse( - qubit, start=rx_pulses[qubit].finish, duration=params.drive_duration - ) - if platform.qubits[qubit].native_gates.RX12.frequency is None: + delta_frequency_range = np.arange( + -params.freq_width, params.freq_width, params.freq_step + ) + for qubit in targets: + natives = platform.natives.single_qubit[qubit] - qd_pulses[qubit].frequency = ( - rx_pulses[qubit].frequency + DEFAULT_ANHARMONICITY - ) - else: - qd_pulses[qubit].frequency = platform.qubits[ - qubit - ].native_gates.RX12.frequency + qd_channel, qd_pulse = natives.RX()[0] + qd12_channel, qd12_pulse = natives.RX12()[0] + ro_channel, ro_pulse = natives.MZ()[0] + qd12_pulse = replace(qd12_pulse, duration=params.drive_duration) if params.drive_amplitude is not None: - qd_pulses[qubit].amplitude = params.drive_amplitude + qd12_pulse = replace(qd12_pulse, amplitude=params.drive_amplitude) - amplitudes[qubit] = qd_pulses[qubit].amplitude + amplitudes[qubit] = qd12_pulse.amplitude + ro_pulses[qubit] = ro_pulse - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish + sequence.append((qd_channel, qd_pulse)) + sequence.append((qd12_channel, Delay(duration=qd_pulse.duration))) + sequence.append((qd12_channel, qd12_pulse)) + sequence.append( + (ro_channel, Delay(duration=qd_pulse.duration + qd12_pulse.duration)) + ) + sequence.append((ro_channel, ro_pulse)) + + drive_frequencies[qubit] = platform.config(qd_channel).frequency + sweepers.append( + Sweeper( + parameter=Parameter.frequency, + values=platform.config(qd12_channel).frequency + delta_frequency_range, + channels=[qd12_channel], + ) ) - sequence.add(rx_pulses[qubit]) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - - # define the parameter to sweep and its range: - # sweep only before qubit frequency - delta_frequency_range = np.arange( - -params.freq_width, params.freq_width, params.freq_step - ) - sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - # Create data structure for data acquisition. 
data = QubitSpectroscopyEFData( resonator_type=platform.resonator_type, amplitudes=amplitudes, drive_frequencies=drive_frequencies, ) - results = platform.sweep( - sequence, - params.execution_parameters, - sweeper, + results = platform.execute( + [sequence], + [sweepers], + **params.execution_parameters, ) # retrieve the results for every qubit for qubit, ro_pulse in ro_pulses.items(): - result = results[ro_pulse.serial] - # store the results + result = results[ro_pulse.id] + + f0 = platform.config(platform.qubits[qubit].drive_qudits[1, 2]).frequency + + signal = magnitude(result) + _phase = phase(result) + if len(signal.shape) > 1: + error_signal = np.std(signal, axis=0, ddof=1) / np.sqrt(signal.shape[0]) + signal = np.mean(signal, axis=0) + error_phase = np.std(_phase, axis=0, ddof=1) / np.sqrt(_phase.shape[0]) + _phase = np.mean(_phase, axis=0) + else: + error_signal, error_phase = None, None + data.register_qubit( ResSpecType, (qubit), dict( - signal=result.average.magnitude, - phase=result.average.phase, - freq=delta_frequency_range + qd_pulses[qubit].frequency, - error_signal=result.average.std, - error_phase=result.phase_std, + signal=signal, + phase=_phase, + freq=delta_frequency_range + f0, + error_signal=error_signal, + error_phase=error_phase, ), ) return data @@ -194,10 +197,14 @@ def _plot( return figures, report -def _update(results: QubitSpectroscopyEFResults, platform: Platform, target: QubitId): +def _update( + results: QubitSpectroscopyEFResults, platform: CalibrationPlatform, target: QubitId +): """Update w12 frequency""" update.frequency_12_transition(results.frequency[target], platform, target) - update.anharmonicity(results.anharmonicity[target], platform, target) + platform.calibration.single_qubits[target].qubit.frequency_12 = results.frequency[ + target + ] qubit_spectroscopy_ef = Routine(_acquisition, fit_ef, _plot, _update) diff --git a/src/qibocal/protocols/qutrit_classification.py b/src/qibocal/protocols/qutrit_classification.py index fdb3fecaa..45a223d32 100644 --- a/src/qibocal/protocols/qutrit_classification.py +++ b/src/qibocal/protocols/qutrit_classification.py @@ -1,12 +1,10 @@ from dataclasses import dataclass, field from typing import Optional -from qibolab import AcquisitionType, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, PulseSequence -from qibocal.auto.operation import Results, Routine +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.protocols.classification import ( ClassificationType, SingleShotClassificationData, @@ -14,6 +12,9 @@ ) from qibocal.protocols.utils import plot_results +from ..auto.operation import Results +from ..config import log + COLUMNWIDTH = 600 LEGEND_FONT_SIZE = 20 TITLE_SIZE = 25 @@ -41,7 +42,7 @@ class QutritClassificationResults(Results): def _acquisition( params: QutritClassificationParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> QutritClassificationData: """ @@ -62,49 +63,96 @@ def _acquisition( """ # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel - states_sequences = [PulseSequence() for _ in range(3)] - ro_pulses = {} + states = [0, 1, 2] + sequences, all_ro_pulses = [], [] + native = platform.natives.single_qubit + + updates = [] for qubit in targets: - rx_pulse = platform.create_RX_pulse(qubit, start=0) - rx12_pulse = 
platform.create_RX12_pulse(qubit, start=rx_pulse.finish) - drive_pulses = [rx_pulse, rx12_pulse] - ro_pulses[qubit] = [] - for i, sequence in enumerate(states_sequences): - sequence.add(*drive_pulses[:i]) - start = drive_pulses[i - 1].finish if i != 0 else 0 - ro_pulses[qubit].append( - platform.create_qubit_readout_pulse(qubit, start=start) + channel = platform.qubits[qubit].probe + try: + updates.append( + { + channel: { + "frequency": platform.calibration.single_qubits[ + qubit + ].readout.qudits_frequency[1] + } + } ) - sequence.add(ro_pulses[qubit][-1]) + except KeyError: + log.warning(f"No readout frequency for state 1 for qubit {qubit}.") + + for state in states: + ro_pulses = {} + sequence = PulseSequence() + for q in targets: + ro_sequence = native[q].MZ() + ro_pulses[q] = ro_sequence[0][1].id + sequence += ro_sequence + + if state == 1: + rx_sequence = PulseSequence() + for q in targets: + rx_sequence += native[q].RX() + sequence = rx_sequence | sequence + + if state == 2: + rx12_sequence = PulseSequence() + for q in targets: + rx12_sequence += native[q].RX() | native[q].RX12() + sequence = rx12_sequence | sequence + + sequences.append(sequence) + all_ro_pulses.append(ro_pulses) data = QutritClassificationData( nshots=params.nshots, classifiers_list=params.classifiers_list, savedir=params.savedir, ) - states_results = [] - for sequence in states_sequences: - states_results.append( - platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - ), - ) - ) + options = dict( + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + ) + + updates = [] for qubit in targets: - for state, state_result in enumerate(states_results): - result = state_result[ro_pulses[qubit][state].serial] + channel = platform.qubits[qubit].probe + try: + # we readout at the readout frequency of |1> for better discrimination + updates.append( + { + channel: { + "frequency": platform.calibration.single_qubits[ + qubit + ].readout.qudits_frequency[1] + } + } + ) + except KeyError: + log.warning(f"No readout frequency for state 1 for qubit {qubit}.") + + if params.unrolling: + results = platform.execute(sequences, **options, updates=updates) + else: + results = {} + for sequence in sequences: + results.update(platform.execute([sequence], **options, updates=updates)) + + for state, ro_pulses in zip(states, all_ro_pulses): + for qubit in targets: + serial = ro_pulses[qubit] + result = results[serial] data.register_qubit( ClassificationType, (qubit), dict( + i=result[..., 0], + q=result[..., 1], state=[state] * params.nshots, - i=result.voltage_i, - q=result.voltage_q, ), ) @@ -120,7 +168,7 @@ def _plot( target: QubitId, fit: QutritClassificationResults, ): - figures = plot_results(data, target, 3, fit) + figures = plot_results(data, target, 3, None) fitting_report = "" return figures, fitting_report diff --git a/src/qibocal/protocols/rabi/amplitude.py b/src/qibocal/protocols/rabi/amplitude.py index 4a3d4b209..bf38d443c 100644 --- a/src/qibocal/protocols/rabi/amplitude.py +++ b/src/qibocal/protocols/rabi/amplitude.py @@ -2,14 +2,13 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, 
Parameter, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Routine +from qibocal.auto.operation import Data, QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log +from qibocal.result import probability from ..utils import chi2_reduced, fallback_period, guess_period from . import utils @@ -38,6 +37,8 @@ class RabiAmplitudeResults(RabiAmplitudeSignalResults): class RabiAmplitudeData(Data): """RabiAmplitude data acquisition.""" + rx90: bool + """Pi or Pi_half calibration""" durations: dict[QubitId, float] = field(default_factory=dict) """Pulse durations provided by the user.""" data: dict[QubitId, npt.NDArray[RabiAmpType]] = field(default_factory=dict) @@ -45,7 +46,9 @@ class RabiAmplitudeData(Data): def _acquisition( - params: RabiAmplitudeParameters, platform: Platform, targets: list[QubitId] + params: RabiAmplitudeParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> RabiAmplitudeData: r""" Data acquisition for Rabi experiment sweeping amplitude. @@ -53,43 +56,34 @@ def _acquisition( to find the drive pulse amplitude that creates a rotation of a desired angle. """ - sequence, qd_pulses, _, durations = utils.sequence_amplitude( - targets, params, platform - ) - # define the parameter to sweep and its range: - # qubit drive pulse amplitude - qd_pulse_amplitude_range = np.arange( - params.min_amp_factor, - params.max_amp_factor, - params.step_amp_factor, + sequence, qd_pulses, ro_pulses, durations = utils.sequence_amplitude( + targets, params, platform, params.rx90 ) + sweeper = Sweeper( - Parameter.amplitude, - qd_pulse_amplitude_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.FACTOR, + parameter=Parameter.amplitude, + range=(params.min_amp, params.max_amp, params.step_amp), + pulses=[qd_pulses[qubit] for qubit in targets], ) - data = RabiAmplitudeData(durations=durations) + data = RabiAmplitudeData(durations=durations, rx90=params.rx90) # sweep the parameter - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ), - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, ) for qubit in targets: - prob = results[qubit].probability(state=1) + prob = probability(results[ro_pulses[qubit].id], state=1) data.register_qubit( RabiAmpType, (qubit), dict( - amp=qd_pulses[qubit].amplitude * qd_pulse_amplitude_range, + amp=sweeper.values, prob=prob.tolist(), error=np.sqrt(prob * (1 - prob) / params.nshots).tolist(), ), @@ -136,17 +130,21 @@ def _fit(data: RabiAmplitudeData) -> RabiAmplitudeResults: except Exception as e: log.warning(f"Rabi fit failed for qubit {qubit} due to {e}.") - return RabiAmplitudeResults(pi_pulse_amplitudes, durations, fitted_parameters, chi2) + return RabiAmplitudeResults( + pi_pulse_amplitudes, durations, fitted_parameters, data.rx90, chi2 + ) def _plot(data: RabiAmplitudeData, target: QubitId, fit: RabiAmplitudeResults = None): """Plotting function for RabiAmplitude.""" - return utils.plot_probabilities(data, target, fit) + return utils.plot_probabilities(data, target, fit, data.rx90) -def _update(results: RabiAmplitudeResults, platform: Platform, target: QubitId): - update.drive_amplitude(results.amplitude[target], 
platform, target) - update.drive_duration(results.length[target], platform, target) +def _update( + results: RabiAmplitudeResults, platform: CalibrationPlatform, target: QubitId +): + update.drive_amplitude(results.amplitude[target], results.rx90, platform, target) + update.drive_duration(results.length[target], results.rx90, platform, target) rabi_amplitude = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/rabi/amplitude_frequency.py b/src/qibocal/protocols/rabi/amplitude_frequency.py index 1de63c3f2..ec7f03b05 100644 --- a/src/qibocal/protocols/rabi/amplitude_frequency.py +++ b/src/qibocal/protocols/rabi/amplitude_frequency.py @@ -6,12 +6,10 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import ( HZ_TO_GHZ, @@ -22,6 +20,7 @@ table_html, ) +from ...result import probability from .amplitude_frequency_signal import ( RabiAmplitudeFreqSignalData, RabiAmplitudeFrequencySignalParameters, @@ -74,60 +73,52 @@ def register_qubit(self, qubit, freq, amp, prob, error): def _acquisition( - params: RabiAmplitudeFrequencyParameters, platform: Platform, targets: list[QubitId] + params: RabiAmplitudeFrequencyParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> RabiAmplitudeFreqData: """Data acquisition for Rabi experiment sweeping amplitude.""" sequence, qd_pulses, ro_pulses, durations = sequence_amplitude( - targets, params, platform + targets, params, platform, params.rx90 ) - - # qubit drive pulse amplitude - amplitude_range = np.arange( - params.min_amp_factor, - params.max_amp_factor, - params.step_amp_factor, - ) - sweeper_amp = Sweeper( - Parameter.amplitude, - amplitude_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.FACTOR, - ) - - # qubit drive pulse amplitude frequency_range = np.arange( params.min_freq, params.max_freq, params.step_freq, ) - sweeper_freq = Sweeper( - Parameter.frequency, - frequency_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, + freq_sweepers = {} + for qubit in targets: + channel = platform.qubits[qubit].drive + freq_sweepers[qubit] = Sweeper( + parameter=Parameter.frequency, + values=platform.config(channel).frequency + frequency_range, + channels=[channel], + ) + amp_sweeper = Sweeper( + parameter=Parameter.amplitude, + range=(params.min_amp, params.max_amp, params.step_amp), + pulses=[qd_pulses[qubit] for qubit in targets], ) - data = RabiAmplitudeFreqData(durations=durations) + data = RabiAmplitudeFreqData(durations=durations, rx90=params.rx90) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ), - sweeper_amp, - sweeper_freq, + results = platform.execute( + [sequence], + [[amp_sweeper], [freq_sweepers[q] for q in targets]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + 
averaging_mode=AveragingMode.SINGLESHOT, ) + for qubit in targets: - result = results[ro_pulses[qubit].serial] - prob = result.probability(state=1) + result = results[ro_pulses[qubit].id] + prob = probability(result, state=1) data.register_qubit( qubit=qubit, - freq=qd_pulses[qubit].frequency + frequency_range, - amp=qd_pulses[qubit].amplitude * amplitude_range, + freq=freq_sweepers[qubit].values, + amp=amp_sweeper.values, prob=prob.tolist(), error=np.sqrt(prob * (1 - prob) / params.nshots).tolist(), ) @@ -171,11 +162,12 @@ def _fit(data: RabiAmplitudeFreqData) -> RabiAmplitudeFrequencyResults: pguess, sigma=error, signal=False, + x_limits=(x_min, x_max), + y_limits=(y_min, y_max), ) - fitted_frequencies[qubit] = frequency fitted_amplitudes[qubit] = [pi_pulse_parameter, perr[2] / 2] - fitted_parameters[qubit] = popt.tolist() + fitted_parameters[qubit] = popt if isinstance(popt, list) else popt.tolist() chi2[qubit] = ( chi2_reduced( @@ -194,6 +186,7 @@ def _fit(data: RabiAmplitudeFreqData) -> RabiAmplitudeFrequencyResults: fitted_parameters=fitted_parameters, frequency=fitted_frequencies, chi2=chi2, + rx90=data.rx90, ) @@ -221,16 +214,17 @@ def _plot( figures.append(fig) + fig.add_trace( + go.Heatmap( + x=amplitudes, + y=frequencies, + z=qubit_data.prob, + ), + row=1, + col=1, + ) + if fit is not None: - fig.add_trace( - go.Heatmap( - x=amplitudes, - y=frequencies, - z=qubit_data.prob, - ), - row=1, - col=1, - ) fig.add_trace( go.Scatter( x=[min(amplitudes), max(amplitudes)], @@ -241,10 +235,12 @@ def _plot( row=1, col=1, ) + pulse_name = "Pi-half pulse" if data.rx90 else "Pi pulse" + fitting_report = table_html( table_dict( target, - ["Optimal rabi frequency", "Pi-pulse amplitude"], + ["Optimal rabi frequency", f"{pulse_name} amplitude"], [ fit.frequency[target], f"{fit.amplitude[target][0]:.6f} +- {fit.amplitude[target][1]:.6f} [a.u.]", diff --git a/src/qibocal/protocols/rabi/amplitude_frequency_signal.py b/src/qibocal/protocols/rabi/amplitude_frequency_signal.py index 5bd24ed6e..a03b30658 100644 --- a/src/qibocal/protocols/rabi/amplitude_frequency_signal.py +++ b/src/qibocal/protocols/rabi/amplitude_frequency_signal.py @@ -7,13 +7,11 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Parameters, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import ( HZ_TO_GHZ, @@ -23,6 +21,7 @@ table_html, ) +from ...result import magnitude, phase from .amplitude_signal import RabiAmplitudeSignalResults from .utils import fit_amplitude_function, sequence_amplitude @@ -31,18 +30,20 @@ class RabiAmplitudeFrequencySignalParameters(Parameters): """RabiAmplitudeFrequency runcard inputs.""" - min_amp_factor: float - """Minimum amplitude multiplicative factor.""" - max_amp_factor: float - """Maximum amplitude multiplicative factor.""" - step_amp_factor: float - """Step amplitude multiplicative factor.""" + min_amp: float + """Minimum amplitude.""" + max_amp: float + """Maximum amplitude.""" + step_amp: float + """Step amplitude.""" min_freq: int """Minimum frequency as 
an offset.""" max_freq: int """Maximum frequency as an offset.""" step_freq: int """Frequency to use as step for the scan.""" + rx90: bool = False + """Calibration of native pi pulse, if true calibrates pi/2 pulse""" pulse_length: Optional[float] = None """RX pulse duration [ns].""" @@ -53,6 +54,8 @@ class RabiAmplitudeFrequencySignalResults(RabiAmplitudeSignalResults): frequency: dict[QubitId, Union[float, list[float]]] """Drive frequency for each qubit.""" + rx90: bool + """Pi or Pi_half calibration""" RabiAmpFreqSignalType = np.dtype( @@ -70,6 +73,8 @@ class RabiAmplitudeFrequencySignalResults(RabiAmplitudeSignalResults): class RabiAmplitudeFreqSignalData(Data): """RabiAmplitudeFreqSignal data acquisition.""" + rx90: bool + """Pi or Pi_half calibration""" durations: dict[QubitId, float] = field(default_factory=dict) """Pulse durations provided by the user.""" data: dict[QubitId, npt.NDArray[RabiAmpFreqSignalType]] = field( @@ -99,62 +104,52 @@ def frequencies(self, qubit): def _acquisition( params: RabiAmplitudeFrequencySignalParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> RabiAmplitudeFreqSignalData: """Data acquisition for Rabi experiment sweeping amplitude.""" sequence, qd_pulses, ro_pulses, durations = sequence_amplitude( - targets, params, platform + targets, params, platform, params.rx90 ) - # qubit drive pulse amplitude - amplitude_range = np.arange( - params.min_amp_factor, - params.max_amp_factor, - params.step_amp_factor, - ) - sweeper_amp = Sweeper( - Parameter.amplitude, - amplitude_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.FACTOR, - ) - - # qubit drive pulse amplitude frequency_range = np.arange( params.min_freq, params.max_freq, params.step_freq, ) - sweeper_freq = Sweeper( - Parameter.frequency, - frequency_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, + freq_sweepers = {} + for qubit in targets: + channel = platform.qubits[qubit].drive + freq_sweepers[qubit] = Sweeper( + parameter=Parameter.frequency, + values=platform.config(channel).frequency + frequency_range, + channels=[channel], + ) + amp_sweeper = Sweeper( + parameter=Parameter.amplitude, + range=(params.min_amp, params.max_amp, params.step_amp), + pulses=[qd_pulses[qubit] for qubit in targets], ) - data = RabiAmplitudeFreqSignalData(durations=durations) + data = RabiAmplitudeFreqSignalData(durations=durations, rx90=params.rx90) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper_amp, - sweeper_freq, + results = platform.execute( + [sequence], + [[amp_sweeper], [freq_sweepers[q] for q in targets]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, ) for qubit in targets: - result = results[ro_pulses[qubit].serial] + result = results[ro_pulses[qubit].id] data.register_qubit( qubit=qubit, - freq=qd_pulses[qubit].frequency + frequency_range, - amp=qd_pulses[qubit].amplitude * amplitude_range, - signal=result.magnitude, - phase=result.phase, + freq=freq_sweepers[qubit].values, + amp=amp_sweeper.values, + signal=magnitude(result), + phase=phase(result), ) return data @@ -209,6 +204,7 @@ def _fit(data: RabiAmplitudeFreqSignalData) -> RabiAmplitudeFrequencySignalResul length=data.durations, fitted_parameters=fitted_parameters, 
frequency=fitted_frequencies, + rx90=data.rx90, ) @@ -283,10 +279,12 @@ def _plot( row=1, col=2, ) + pulse_name = "Pi-half pulse" if data.rx90 else "Pi pulse" + fitting_report = table_html( table_dict( target, - ["Optimal rabi frequency", "Pi-pulse amplitude"], + ["Optimal rabi frequency", f"{pulse_name} amplitude"], [ fit.frequency[target], f"{fit.amplitude[target]:.6f} [a.u]", @@ -302,10 +300,12 @@ def _plot( def _update( - results: RabiAmplitudeFrequencySignalResults, platform: Platform, target: QubitId + results: RabiAmplitudeFrequencySignalResults, + platform: CalibrationPlatform, + target: QubitId, ): - update.drive_duration(results.length[target], platform, target) - update.drive_amplitude(results.amplitude[target], platform, target) + update.drive_duration(results.length[target], results.rx90, platform, target) + update.drive_amplitude(results.amplitude[target], results.rx90, platform, target) update.drive_frequency(results.frequency[target], platform, target) diff --git a/src/qibocal/protocols/rabi/amplitude_signal.py b/src/qibocal/protocols/rabi/amplitude_signal.py index 31a8eb04a..587e0e19c 100644 --- a/src/qibocal/protocols/rabi/amplitude_signal.py +++ b/src/qibocal/protocols/rabi/amplitude_signal.py @@ -3,15 +3,14 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import fallback_period, guess_period +from qibocal.result import magnitude, phase from . import utils @@ -20,14 +19,16 @@ class RabiAmplitudeSignalParameters(Parameters): """RabiAmplitude runcard inputs.""" - min_amp_factor: float - """Minimum amplitude multiplicative factor.""" - max_amp_factor: float - """Maximum amplitude multiplicative factor.""" - step_amp_factor: float - """Step amplitude multiplicative factor.""" + min_amp: float + """Minimum amplitude.""" + max_amp: float + """Maximum amplitude.""" + step_amp: float + """Step amplitude.""" pulse_length: Optional[float] = None """RX pulse duration [ns].""" + rx90: bool = False + """Calibration of native pi pulse, if true calibrates pi/2 pulse""" @dataclass @@ -40,6 +41,8 @@ class RabiAmplitudeSignalResults(Results): """Drive pulse duration. 
Same for all qubits.""" fitted_parameters: dict[QubitId, dict[str, float]] """Raw fitted parameters.""" + rx90: bool + """Pi or Pi_half calibration""" RabiAmpSignalType = np.dtype( @@ -52,6 +55,8 @@ class RabiAmplitudeSignalResults(Results): class RabiAmplitudeSignalData(Data): """RabiAmplitudeSignal data acquisition.""" + rx90: bool + """Pi or Pi_half calibration""" durations: dict[QubitId, float] = field(default_factory=dict) """Pulse durations provided by the user.""" data: dict[QubitId, npt.NDArray[RabiAmpSignalType]] = field(default_factory=dict) @@ -59,7 +64,9 @@ class RabiAmplitudeSignalData(Data): def _acquisition( - params: RabiAmplitudeSignalParameters, platform: Platform, targets: list[QubitId] + params: RabiAmplitudeSignalParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> RabiAmplitudeSignalData: r""" Data acquisition for Rabi experiment sweeping amplitude. @@ -69,45 +76,35 @@ def _acquisition( # create a sequence of pulses for the experiment sequence, qd_pulses, ro_pulses, durations = utils.sequence_amplitude( - targets, params, platform + targets, params, platform, params.rx90 ) - # define the parameter to sweep and its range: - # qubit drive pulse amplitude - qd_pulse_amplitude_range = np.arange( - params.min_amp_factor, - params.max_amp_factor, - params.step_amp_factor, - ) sweeper = Sweeper( - Parameter.amplitude, - qd_pulse_amplitude_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.FACTOR, + parameter=Parameter.amplitude, + range=(params.min_amp, params.max_amp, params.step_amp), + pulses=[qd_pulses[qubit] for qubit in targets], ) - data = RabiAmplitudeSignalData(durations=durations) + data = RabiAmplitudeSignalData(durations=durations, rx90=params.rx90) # sweep the parameter - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, ) for qubit in targets: - result = results[ro_pulses[qubit].serial] + result = results[ro_pulses[qubit].id] data.register_qubit( RabiAmpSignalType, (qubit), dict( - amp=qd_pulses[qubit].amplitude * qd_pulse_amplitude_range, - signal=result.magnitude, - phase=result.phase, + amp=sweeper.values, + signal=magnitude(result), + phase=phase(result), ), ) return data @@ -151,7 +148,7 @@ def _fit(data: RabiAmplitudeSignalData) -> RabiAmplitudeSignalResults: log.warning(f"Rabi fit failed for qubit {qubit} due to {e}.") return RabiAmplitudeSignalResults( - pi_pulse_amplitudes, data.durations, fitted_parameters + pi_pulse_amplitudes, data.durations, fitted_parameters, data.rx90 ) @@ -161,12 +158,14 @@ def _plot( fit: RabiAmplitudeSignalResults = None, ): """Plotting function for RabiAmplitude.""" - return utils.plot(data, target, fit) + return utils.plot(data, target, fit, data.rx90) -def _update(results: RabiAmplitudeSignalResults, platform: Platform, target: QubitId): - update.drive_amplitude(results.amplitude[target], platform, target) - update.drive_duration(results.length[target], platform, target) +def _update( + results: RabiAmplitudeSignalResults, platform: CalibrationPlatform, target: QubitId +): + update.drive_amplitude(results.amplitude[target], results.rx90, platform, target) + 
update.drive_duration(results.length[target], results.rx90, platform, target) rabi_amplitude_signal = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/rabi/ef.py b/src/qibocal/protocols/rabi/ef.py index 5797daf7c..541a0f16a 100644 --- a/src/qibocal/protocols/rabi/ef.py +++ b/src/qibocal/protocols/rabi/ef.py @@ -1,15 +1,20 @@ from dataclasses import dataclass -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal import update -from qibocal.auto.operation import Routine - +from qibolab import ( + AcquisitionType, + AveragingMode, + Delay, + Parameter, + PulseSequence, + Sweeper, +) + +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.update import replace + +from ... import update +from ...result import magnitude, phase from . import amplitude_signal, utils @@ -29,7 +34,9 @@ class RabiAmplitudeEFData(amplitude_signal.RabiAmplitudeSignalData): def _acquisition( - params: RabiAmplitudeEFParameters, platform: Platform, targets: list[QubitId] + params: RabiAmplitudeEFParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> RabiAmplitudeEFData: r""" Data acquisition for Rabi EF experiment sweeping amplitude. @@ -46,58 +53,59 @@ def _acquisition( ro_pulses = {} rx_pulses = {} durations = {} - for qubit in targets: - rx_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - qd_pulses[qubit] = platform.create_RX_pulse( - qubit, start=rx_pulses[qubit].finish - ) + for q in targets: + natives = platform.natives.single_qubit[q] + qd_channel, qd_pulse = natives.RX()[0] + qd12_channel, qd12_pulse = natives.RX12()[0] + ro_channel, ro_pulse = natives.MZ()[0] + if params.pulse_length is not None: - qd_pulses[qubit].duration = params.pulse_length + qd12_pulse = replace(qd_pulse, duration=params.pulse_length) + + durations[q] = qd12_pulse.duration + qd_pulses[q] = qd12_pulse + ro_pulses[q] = ro_pulse - durations[qubit] = qd_pulses[qubit].duration - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish + sequence.append((qd_channel, qd_pulse)) + sequence.append((qd12_channel, Delay(duration=qd_pulse.duration))) + sequence.append((qd12_channel, qd12_pulse)) + sequence.append( + (qd_channel, Delay(duration=qd_pulse.duration + qd12_pulse.duration)) ) - sequence.add(rx_pulses[qubit]) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - - # define the parameter to sweep and its range: - # qubit drive pulse amplitude - qd_pulse_amplitude_range = np.arange( - params.min_amp_factor, - params.max_amp_factor, - params.step_amp_factor, - ) + sequence.append((qd_channel, qd_pulse)) + sequence.append( + (ro_channel, Delay(duration=2 * qd_pulse.duration + qd12_pulse.duration)) + ) + sequence.append((ro_channel, ro_pulse)) + sweeper = Sweeper( - Parameter.amplitude, - qd_pulse_amplitude_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.FACTOR, + parameter=Parameter.amplitude, + range=(params.min_amp, params.max_amp, params.step_amp), + pulses=[qd_pulses[qubit] for qubit in targets], ) - data = RabiAmplitudeEFData(durations=durations) + assert not params.rx90, "Rabi ef available only for RX pulses." 
+ + data = RabiAmplitudeEFData(durations=durations, rx90=False) # sweep the parameter - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, ) for qubit in targets: - result = results[ro_pulses[qubit].serial] + result = results[ro_pulses[qubit].id] data.register_qubit( amplitude_signal.RabiAmpSignalType, (qubit), dict( - amp=qd_pulses[qubit].amplitude * qd_pulse_amplitude_range, - signal=result.magnitude, - phase=result.phase, + amp=sweeper.values, + signal=magnitude(result), + phase=phase(result), ), ) return data @@ -107,13 +115,15 @@ def _plot( data: RabiAmplitudeEFData, target: QubitId, fit: RabiAmplitudeEFResults = None ): """Plotting function for RabiAmplitude.""" - figures, report = utils.plot(data, target, fit) + figures, report = utils.plot(data, target, fit, data.rx90) if report is not None: report = report.replace("Pi pulse", "Pi pulse 12") return figures, report -def _update(results: RabiAmplitudeEFResults, platform: Platform, target: QubitId): +def _update( + results: RabiAmplitudeEFResults, platform: CalibrationPlatform, target: QubitId +): """Update RX2 amplitude_signal""" update.drive_12_amplitude(results.amplitude[target], platform, target) update.drive_12_duration(results.length[target], platform, target) diff --git a/src/qibocal/protocols/rabi/length.py b/src/qibocal/protocols/rabi/length.py index 1b529322b..7f64770d6 100644 --- a/src/qibocal/protocols/rabi/length.py +++ b/src/qibocal/protocols/rabi/length.py @@ -3,18 +3,17 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper from qibocal import update -from qibocal.auto.operation import Parameters, Routine +from qibocal.auto.operation import Parameters, QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.rabi.length_signal import ( RabiLengthSignalData, RabiLengthSignalResults, ) +from qibocal.result import probability from ..utils import chi2_reduced, fallback_period, guess_period from . import utils @@ -32,6 +31,10 @@ class RabiLengthParameters(Parameters): """Step pi pulse duration [ns].""" pulse_amplitude: Optional[float] = None """Pi pulse amplitude. Same for all qubits.""" + rx90: bool = False + """Calibration of native pi pulse, if true calibrates pi/2 pulse""" + interpolated_sweeper: bool = False + """Use real-time interpolation if supported by instruments.""" @dataclass @@ -56,7 +59,7 @@ class RabiLengthData(RabiLengthSignalData): def _acquisition( - params: RabiLengthParameters, platform: Platform, targets: list[QubitId] + params: RabiLengthParameters, platform: CalibrationPlatform, targets: list[QubitId] ) -> RabiLengthData: r""" Data acquisition for RabiLength Experiment. @@ -64,45 +67,46 @@ def _acquisition( to find the drive pulse length that creates a rotation of a desired angle. 
""" - sequence, qd_pulses, _, amplitudes = utils.sequence_length( - targets, params, platform + sequence, qd_pulses, delays, ro_pulses, amplitudes = utils.sequence_length( + targets, params, platform, params.rx90, use_align=params.interpolated_sweeper ) - # define the parameter to sweep and its range: - # qubit drive pulse duration time - qd_pulse_duration_range = np.arange( + sweep_range = ( params.pulse_duration_start, params.pulse_duration_end, params.pulse_duration_step, ) + if params.interpolated_sweeper: + sweeper = Sweeper( + parameter=Parameter.duration_interpolated, + range=sweep_range, + pulses=[qd_pulses[q] for q in targets], + ) + else: + sweeper = Sweeper( + parameter=Parameter.duration, + range=sweep_range, + pulses=[qd_pulses[q] for q in targets] + [delays[q] for q in targets], + ) - sweeper = Sweeper( - Parameter.duration, - qd_pulse_duration_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.ABSOLUTE, - ) - - data = RabiLengthData(amplitudes=amplitudes) + data = RabiLengthData(amplitudes=amplitudes, rx90=params.rx90) # execute the sweep - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ), - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, ) - for qubit in targets: - prob = results[qubit].probability(state=1) + for q in targets: + prob = probability(results[ro_pulses[q].id], state=1) data.register_qubit( RabiLenType, - (qubit), + (q), dict( - length=qd_pulse_duration_range, + length=sweeper.values, prob=prob, error=np.sqrt(prob * (1 - prob) / params.nshots).tolist(), ), @@ -153,17 +157,17 @@ def _fit(data: RabiLengthData) -> RabiLengthResults: except Exception as e: log.warning(f"Rabi fit failed for qubit {qubit} due to {e}.") - return RabiLengthResults(durations, amplitudes, fitted_parameters, chi2) + return RabiLengthResults(durations, amplitudes, fitted_parameters, data.rx90, chi2) -def _update(results: RabiLengthResults, platform: Platform, target: QubitId): - update.drive_duration(results.length[target], platform, target) - update.drive_amplitude(results.amplitude[target], platform, target) +def _update(results: RabiLengthResults, platform: CalibrationPlatform, target: QubitId): + update.drive_duration(results.length[target], results.rx90, platform, target) + update.drive_amplitude(results.amplitude[target], results.rx90, platform, target) def _plot(data: RabiLengthData, fit: RabiLengthResults, target: QubitId): """Plotting function for RabiLength experiment.""" - return utils.plot_probabilities(data, target, fit) + return utils.plot_probabilities(data, target, fit, data.rx90) rabi_length = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/rabi/length_frequency.py b/src/qibocal/protocols/rabi/length_frequency.py index 28ac4715c..5e2871bf0 100644 --- a/src/qibocal/protocols/rabi/length_frequency.py +++ b/src/qibocal/protocols/rabi/length_frequency.py @@ -6,15 +6,14 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType 
+from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import table_dict, table_html +from ...result import probability from ..utils import HZ_TO_GHZ, chi2_reduced, fallback_period, guess_period from .length_frequency_signal import ( RabiLengthFreqSignalData, @@ -68,60 +67,66 @@ def register_qubit(self, qubit, freq, lens, prob, error): def _acquisition( - params: RabiLengthFrequencyParameters, platform: Platform, targets: list[QubitId] + params: RabiLengthFrequencyParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> RabiLengthFreqData: """Data acquisition for Rabi experiment sweeping length.""" - sequence, qd_pulses, ro_pulses, amplitudes = sequence_length( - targets, params, platform + sequence, qd_pulses, delays, ro_pulses, amplitudes = sequence_length( + targets, params, platform, params.rx90 ) - # qubit drive pulse length - length_range = np.arange( + sweep_range = ( params.pulse_duration_start, params.pulse_duration_end, params.pulse_duration_step, ) - sweeper_len = Sweeper( - Parameter.duration, - length_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.ABSOLUTE, - ) + if params.interpolated_sweeper: + len_sweeper = Sweeper( + parameter=Parameter.duration_interpolated, + range=sweep_range, + pulses=[qd_pulses[q] for q in targets], + ) + else: + len_sweeper = Sweeper( + parameter=Parameter.duration, + range=sweep_range, + pulses=[qd_pulses[q] for q in targets] + [delays[q] for q in targets], + ) - # qubit drive pulse amplitude frequency_range = np.arange( params.min_freq, params.max_freq, params.step_freq, ) - sweeper_freq = Sweeper( - Parameter.frequency, - frequency_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) + freq_sweepers = {} + for qubit in targets: + channel = platform.qubits[qubit].drive + freq_sweepers[qubit] = Sweeper( + parameter=Parameter.frequency, + values=platform.config(channel).frequency + frequency_range, + channels=[channel], + ) - data = RabiLengthFreqData(amplitudes=amplitudes) + data = RabiLengthFreqData(amplitudes=amplitudes, rx90=params.rx90) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ), - sweeper_len, - sweeper_freq, + results = platform.execute( + [sequence], + [[len_sweeper], [freq_sweepers[q] for q in targets]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, ) + for qubit in targets: - result = results[ro_pulses[qubit].serial] - prob = result.probability(state=1) + result = results[ro_pulses[qubit].id] + prob = probability(result, state=1) data.register_qubit( qubit=qubit, - freq=qd_pulses[qubit].frequency + frequency_range, - lens=length_range, + freq=freq_sweepers[qubit].values, + lens=len_sweeper.values, prob=prob.tolist(), error=np.sqrt(prob * (1 - prob) / params.nshots).tolist(), ) @@ -192,6 +197,7 @@ def _fit(data: RabiLengthFreqData) -> RabiLengthFrequencyResults: fitted_parameters=fitted_parameters, frequency=fitted_frequencies, chi2=chi2, + rx90=data.rx90, ) @@ -240,10 +246,12 @@ def _plot( row=1, col=1, ) + pulse_name = "Pi-half pulse" if data.rx90 else "Pi 
pulse" + fitting_report = table_html( table_dict( target, - ["Optimal rabi frequency", "Pi-pulse duration"], + ["Optimal rabi frequency", f"{pulse_name} duration"], [ fit.frequency[target], f"{fit.length[target][0]:.2f} +- {fit.length[target][1]:.2f} ns", diff --git a/src/qibocal/protocols/rabi/length_frequency_signal.py b/src/qibocal/protocols/rabi/length_frequency_signal.py index abf73caa3..00fe5b80b 100644 --- a/src/qibocal/protocols/rabi/length_frequency_signal.py +++ b/src/qibocal/protocols/rabi/length_frequency_signal.py @@ -7,16 +7,15 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Parameters, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import table_dict, table_html +from ...result import magnitude, phase from ..utils import HZ_TO_GHZ, fallback_period, guess_period from .length_signal import RabiLengthSignalResults from .utils import fit_length_function, sequence_length @@ -40,12 +39,18 @@ class RabiLengthFrequencySignalParameters(Parameters): """Frequency to use as step for the scan.""" pulse_amplitude: Optional[float] = None """Pi pulse amplitude. Same for all qubits.""" + rx90: bool = False + """Calibration of native pi pulse, if true calibrates pi/2 pulse""" + interpolated_sweeper: bool = False + """Use real-time interpolation if supported by instruments.""" @dataclass class RabiLengthFrequencySignalResults(RabiLengthSignalResults): """RabiLengthFrequency outputs.""" + rx90: bool + """Pi or Pi_half calibration""" frequency: dict[QubitId, Union[float, list[float]]] """Drive frequency for each qubit.""" @@ -65,6 +70,8 @@ class RabiLengthFrequencySignalResults(RabiLengthSignalResults): class RabiLengthFreqSignalData(Data): """RabiLengthFreqSignal data acquisition.""" + rx90: bool + """Pi or Pi_half calibration""" amplitudes: dict[QubitId, float] = field(default_factory=dict) """Pulse amplitudes provided by the user.""" data: dict[QubitId, npt.NDArray[RabiLenFreqSignalType]] = field( @@ -94,62 +101,65 @@ def frequencies(self, qubit): def _acquisition( params: RabiLengthFrequencySignalParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> RabiLengthFreqSignalData: """Data acquisition for Rabi experiment sweeping length.""" - sequence, qd_pulses, ro_pulses, amplitudes = sequence_length( - targets, params, platform + sequence, qd_pulses, delays, ro_pulses, amplitudes = sequence_length( + targets, params, platform, params.rx90 ) - # qubit drive pulse length - length_range = np.arange( + sweep_range = ( params.pulse_duration_start, params.pulse_duration_end, params.pulse_duration_step, ) - sweeper_len = Sweeper( - Parameter.duration, - length_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.ABSOLUTE, - ) + if params.interpolated_sweeper: + len_sweeper = Sweeper( + parameter=Parameter.duration_interpolated, + range=sweep_range, + pulses=[qd_pulses[q] for q in targets], + ) + else: + len_sweeper = Sweeper( + parameter=Parameter.duration, + range=sweep_range, + pulses=[qd_pulses[q] for 
q in targets] + [delays[q] for q in targets], + ) - # qubit drive pulse amplitude frequency_range = np.arange( params.min_freq, params.max_freq, params.step_freq, ) - sweeper_freq = Sweeper( - Parameter.frequency, - frequency_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) + freq_sweepers = {} + for qubit in targets: + channel = platform.qubits[qubit].drive + freq_sweepers[qubit] = Sweeper( + parameter=Parameter.frequency, + values=platform.config(channel).frequency + frequency_range, + channels=[channel], + ) - data = RabiLengthFreqSignalData(amplitudes=amplitudes) + data = RabiLengthFreqSignalData(amplitudes=amplitudes, rx90=params.rx90) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper_len, - sweeper_freq, + results = platform.execute( + [sequence], + [[len_sweeper], [freq_sweepers[q] for q in targets]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, ) for qubit in targets: - result = results[ro_pulses[qubit].serial] + result = results[ro_pulses[qubit].id] data.register_qubit( qubit=qubit, - freq=qd_pulses[qubit].frequency + frequency_range, - lens=length_range, - signal=result.magnitude, - phase=result.phase, + freq=freq_sweepers[qubit].values, + lens=len_sweeper.values, + signal=magnitude(result), + phase=phase(result), ) return data @@ -203,6 +213,7 @@ def _fit(data: RabiLengthFreqSignalData) -> RabiLengthFrequencySignalResults: amplitude=data.amplitudes, fitted_parameters=fitted_parameters, frequency=fitted_frequencies, + rx90=data.rx90, ) @@ -277,10 +288,12 @@ def _plot( row=1, col=2, ) + pulse_name = "Pi-half pulse" if data.rx90 else "Pi pulse" + fitting_report = table_html( table_dict( target, - ["Optimal rabi frequency", "Pi-pulse duration"], + ["Optimal rabi frequency", f"{pulse_name} duration"], [ fit.frequency[target], f"{fit.length[target]:.2f} ns", @@ -297,10 +310,12 @@ def _plot( def _update( - results: RabiLengthFrequencySignalResults, platform: Platform, target: QubitId + results: RabiLengthFrequencySignalResults, + platform: CalibrationPlatform, + target: QubitId, ): - update.drive_amplitude(results.amplitude[target], platform, target) - update.drive_duration(results.length[target], platform, target) + update.drive_amplitude(results.amplitude[target], results.rx90, platform, target) + update.drive_duration(results.length[target], results.rx90, platform, target) update.drive_frequency(results.frequency[target], platform, target) diff --git a/src/qibocal/protocols/rabi/length_sequences.py b/src/qibocal/protocols/rabi/length_sequences.py deleted file mode 100644 index 3b6259d20..000000000 --- a/src/qibocal/protocols/rabi/length_sequences.py +++ /dev/null @@ -1,75 +0,0 @@ -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Routine - -from .length_signal import ( - RabiLengthSignalData, - RabiLengthSignalParameters, - RabiLenSignalType, - _fit, - _plot, - _update, -) -from .utils import sequence_length - - -def _acquisition( - params: RabiLengthSignalParameters, platform: Platform, targets: list[QubitId] -) -> RabiLengthSignalData: - r""" - Data acquisition for RabiLength Experiment. 
- In the Rabi experiment we apply a pulse at the frequency of the qubit and scan the drive pulse length - to find the drive pulse length that creates a rotation of a desired angle. - """ - - sequence, qd_pulses, ro_pulses, amplitudes = sequence_length( - targets, params, platform - ) - - # define the parameter to sweep and its range: - # qubit drive pulse duration time - qd_pulse_duration_range = np.arange( - params.pulse_duration_start, - params.pulse_duration_end, - params.pulse_duration_step, - ) - - data = RabiLengthSignalData(amplitudes=amplitudes) - - # sweep the parameter - for duration in qd_pulse_duration_range: - for qubit in targets: - qd_pulses[qubit].duration = duration - ro_pulses[qubit].start = qd_pulses[qubit].finish - - # execute the pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - ) - - for qubit in targets: - result = results[ro_pulses[qubit].serial] - data.register_qubit( - RabiLenSignalType, - (qubit), - dict( - length=np.array([duration]), - signal=np.array([result.magnitude]), - phase=np.array([result.phase]), - ), - ) - - return data - - -rabi_length_sequences = Routine(_acquisition, _fit, _plot, _update) -"""RabiLength Routine object.""" diff --git a/src/qibocal/protocols/rabi/length_signal.py b/src/qibocal/protocols/rabi/length_signal.py index fc139197d..3e7db1afa 100644 --- a/src/qibocal/protocols/rabi/length_signal.py +++ b/src/qibocal/protocols/rabi/length_signal.py @@ -3,15 +3,14 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import fallback_period, guess_period +from qibocal.result import magnitude, phase from . import utils @@ -28,6 +27,10 @@ class RabiLengthSignalParameters(Parameters): """Step pi pulse duration [ns].""" pulse_amplitude: Optional[float] = None """Pi pulse amplitude. Same for all qubits.""" + rx90: bool = False + """Calibration of native pi pulse, if true calibrates pi/2 pulse""" + interpolated_sweeper: bool = False + """Use real-time interpolation if supported by instruments.""" @dataclass @@ -40,6 +43,8 @@ class RabiLengthSignalResults(Results): """Pi pulse amplitude. 
Same for all qubits.""" fitted_parameters: dict[QubitId, dict[str, float]] """Raw fitting output.""" + rx90: bool + """Pi or Pi_half calibration""" RabiLenSignalType = np.dtype( @@ -52,6 +57,8 @@ class RabiLengthSignalResults(Results): class RabiLengthSignalData(Data): """RabiLength acquisition outputs.""" + rx90: bool + """Pi or Pi_half calibration""" amplitudes: dict[QubitId, float] = field(default_factory=dict) """Pulse amplitudes provided by the user.""" data: dict[QubitId, npt.NDArray[RabiLenSignalType]] = field(default_factory=dict) @@ -59,7 +66,9 @@ def _acquisition( - params: RabiLengthSignalParameters, platform: Platform, targets: list[QubitId] + params: RabiLengthSignalParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> RabiLengthSignalData: r""" Data acquisition for RabiLength Experiment. @@ -67,47 +76,47 @@ def _acquisition( to find the drive pulse length that creates a rotation of a desired angle. """ - sequence, qd_pulses, ro_pulses, amplitudes = utils.sequence_length( - targets, params, platform + sequence, qd_pulses, delays, ro_pulses, amplitudes = utils.sequence_length( + targets, params, platform, params.rx90, use_align=params.interpolated_sweeper ) - - # define the parameter to sweep and its range: - # qubit drive pulse duration time - qd_pulse_duration_range = np.arange( + sweep_range = ( params.pulse_duration_start, params.pulse_duration_end, params.pulse_duration_step, ) + if params.interpolated_sweeper: + sweeper = Sweeper( + parameter=Parameter.duration_interpolated, + range=sweep_range, + pulses=[qd_pulses[q] for q in targets], + ) + else: + sweeper = Sweeper( + parameter=Parameter.duration, + range=sweep_range, + pulses=[qd_pulses[q] for q in targets] + [delays[q] for q in targets], + ) - sweeper = Sweeper( - Parameter.duration, - qd_pulse_duration_range, - [qd_pulses[qubit] for qubit in targets], - type=SweeperType.ABSOLUTE, - ) - data = RabiLengthSignalData(amplitudes=amplitudes) - - # execute the sweep - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper, + data = RabiLengthSignalData(amplitudes=amplitudes, rx90=params.rx90) + + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, ) - for qubit in targets: - result = results[ro_pulses[qubit].serial] + for q in targets: + result = results[ro_pulses[q].id] data.register_qubit( RabiLenSignalType, - (qubit), + (q), dict( - length=qd_pulse_duration_range, - signal=result.magnitude, - phase=result.phase, + length=sweeper.values, + signal=magnitude(result), + phase=phase(result), ), ) return data @@ -149,17 +158,21 @@ def _fit(data: RabiLengthSignalData) -> RabiLengthSignalResults: except Exception as e: log.warning(f"Rabi fit failed for qubit {qubit} due to {e}.") - return RabiLengthSignalResults(durations, data.amplitudes, fitted_parameters) + return RabiLengthSignalResults( + durations, data.amplitudes, fitted_parameters, data.rx90 + ) -def _update(results: RabiLengthSignalResults, platform: Platform, target: QubitId): - update.drive_duration(results.length[target], platform, target) - update.drive_amplitude(results.amplitude[target], platform, target) +def _update( + results: RabiLengthSignalResults, platform: 
CalibrationPlatform, target: QubitId +): + update.drive_duration(results.length[target], results.rx90, platform, target) + update.drive_amplitude(results.amplitude[target], results.rx90, platform, target) def _plot(data: RabiLengthSignalData, fit: RabiLengthSignalResults, target: QubitId): """Plotting function for RabiLength experiment.""" - return utils.plot(data, target, fit) + return utils.plot(data, target, fit, data.rx90) rabi_length_signal = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/rabi/utils.py b/src/qibocal/protocols/rabi/utils.py index d31a40679..dc609e8c1 100644 --- a/src/qibocal/protocols/rabi/utils.py +++ b/src/qibocal/protocols/rabi/utils.py @@ -1,12 +1,11 @@ import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import Delay, Platform, PulseSequence from scipy.optimize import curve_fit -from qibocal.auto.operation import Parameters +from qibocal.auto.operation import Parameters, QubitId +from qibocal.update import replace from ..utils import COLORBAND, COLORBAND_LINE, table_dict, table_html @@ -33,7 +32,7 @@ def rabi_length_function(x, offset, amplitude, period, phase, t2_inv): ) -def plot(data, qubit, fit): +def plot(data, qubit, fit, rx90): quantity, title, fitting = extract_rabi(data) figures = [] fitting_report = "" @@ -95,11 +94,12 @@ def plot(data, qubit, fit): row=1, col=1, ) + pulse_name = "Pi-half pulse" if rx90 else "Pi pulse" fitting_report = table_html( table_dict( qubit, - ["Pi pulse amplitude [a.u.]", "Pi pulse length [ns]"], + [f"{pulse_name} amplitude [a.u.]", f"{pulse_name} length [ns]"], [np.round(fit.amplitude[qubit], 3), np.round(fit.length[qubit], 3)], ) ) @@ -117,7 +117,7 @@ def plot(data, qubit, fit): return figures, fitting_report -def plot_probabilities(data, qubit, fit): +def plot_probabilities(data, qubit, fit, rx90): quantity, title, fitting = extract_rabi(data) figures = [] fitting_report = "" @@ -166,11 +166,16 @@ def plot_probabilities(data, qubit, fit): marker_color="rgb(255, 130, 67)", ), ) + pulse_name = "Pi-half pulse" if rx90 else "Pi pulse" fitting_report = table_html( table_dict( qubit, - ["Pi pulse amplitude [a.u.]", "Pi pulse length [ns]", "chi2 reduced"], + [ + f"{pulse_name} amplitude [a.u.]", + f"{pulse_name} length [ns]", + "chi2 reduced", + ], [fit.amplitude[qubit], fit.length[qubit], fit.chi2[qubit]], display_error=True, ) @@ -224,49 +229,78 @@ def period_correction_factor(phase: float): def sequence_amplitude( - targets: list[QubitId], params: Parameters, platform: Platform + targets: list[QubitId], + params: Parameters, + platform: Platform, + rx90: bool, ) -> tuple[PulseSequence, dict, dict, dict]: """Return sequence for rabi amplitude.""" + sequence = PulseSequence() qd_pulses = {} ro_pulses = {} durations = {} - for qubit in targets: - qd_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) + for q in targets: + natives = platform.natives.single_qubit[q] + + qd_channel, qd_pulse = natives.RX90()[0] if rx90 else natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] + if params.pulse_length is not None: - qd_pulses[qubit].duration = params.pulse_length + qd_pulse = replace(qd_pulse, duration=params.pulse_length) - durations[qubit] = qd_pulses[qubit].duration - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish - ) - sequence.add(qd_pulses[qubit]) - 
sequence.add(ro_pulses[qubit]) + durations[q] = qd_pulse.duration + qd_pulses[q] = qd_pulse + ro_pulses[q] = ro_pulse + + if rx90: + sequence.append((qd_channel, qd_pulses[q])) + + sequence.append((qd_channel, qd_pulses[q])) + sequence.append((ro_channel, Delay(duration=durations[q]))) + sequence.append((ro_channel, ro_pulse)) return sequence, qd_pulses, ro_pulses, durations def sequence_length( - targets: list[QubitId], params: Parameters, platform: Platform + targets: list[QubitId], + params: Parameters, + platform: Platform, + rx90: bool, + use_align: bool = False, ) -> tuple[PulseSequence, dict, dict, dict]: """Return sequence for rabi length.""" + sequence = PulseSequence() qd_pulses = {} + delays = {} ro_pulses = {} amplitudes = {} - for qubit in targets: - qd_pulses[qubit] = platform.create_qubit_drive_pulse( - qubit, start=0, duration=params.pulse_duration_start - ) + for q in targets: + natives = platform.natives.single_qubit[q] + + qd_channel, qd_pulse = natives.RX90()[0] if rx90 else natives.RX()[0] + ro_channel, ro_pulse = natives.MZ()[0] + if params.pulse_amplitude is not None: - qd_pulses[qubit].amplitude = params.pulse_amplitude - amplitudes[qubit] = qd_pulses[qubit].amplitude + qd_pulse = replace(qd_pulse, amplitude=params.pulse_amplitude) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish - ) - sequence.add(qd_pulses[qubit]) - sequence.add(ro_pulses[qubit]) - return sequence, qd_pulses, ro_pulses, amplitudes + amplitudes[q] = qd_pulse.amplitude + ro_pulses[q] = ro_pulse + qd_pulses[q] = qd_pulse + + if rx90: + sequence.append((qd_channel, qd_pulse)) + + sequence.append((qd_channel, qd_pulse)) + if use_align: + sequence.align([qd_channel, ro_channel]) + else: + delays[q] = Delay(duration=16) + sequence.append((ro_channel, delays[q])) + sequence.append((ro_channel, ro_pulse)) + + return sequence, qd_pulses, delays, ro_pulses, amplitudes def fit_length_function( @@ -328,13 +362,12 @@ def fit_amplitude_function( ) if signal is False: perr = np.sqrt(np.diag(perr)) - else: - popt = [ # Change it according to fit function changes + if None not in y_limits and None not in x_limits: + popt = [ y_limits[0] + (y_limits[1] - y_limits[0]) * popt[0], (y_limits[1] - y_limits[0]) * popt[1], popt[2] * (x_limits[1] - x_limits[0]), popt[3] - 2 * np.pi * x_limits[0] / (x_limits[1] - x_limits[0]) / popt[2], ] pi_pulse_parameter = popt[2] / 2 * period_correction_factor(phase=popt[3]) - return popt, perr, pi_pulse_parameter diff --git a/src/qibocal/protocols/ramsey/ramsey.py b/src/qibocal/protocols/ramsey/ramsey.py index c74d42b56..0b9ca57bc 100644 --- a/src/qibocal/protocols/ramsey/ramsey.py +++ b/src/qibocal/protocols/ramsey/ramsey.py @@ -4,14 +4,12 @@ import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Readout, Sweeper -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log +from qibocal.result import probability from ..utils import chi2_reduced, table_dict, table_html from .ramsey_signal import ( @@ -55,7 +53,7 @@ class RamseyData(RamseySignalData): def _acquisition( params: 
RamseyParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> RamseyData: """Data acquisition for Ramsey Experiment (detuned). @@ -78,50 +76,47 @@ def _acquisition( """ waits = np.arange( - # wait time between RX90 pulses params.delay_between_pulses_start, params.delay_between_pulses_end, params.delay_between_pulses_step, ) - options = ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ) - - sequence = PulseSequence() - data = RamseyData( detuning=params.detuning, qubit_freqs={ - qubit: platform.qubits[qubit].native_gates.RX.frequency for qubit in targets + qubit: platform.config(platform.qubits[qubit].drive).frequency + for qubit in targets }, ) - if not params.unrolling: - sequence = PulseSequence() + updates = [] + if params.detuning is not None: for qubit in targets: - sequence += ramsey_sequence( - platform=platform, qubit=qubit, detuning=params.detuning - ) + channel = platform.qubits[qubit].drive + f0 = platform.config(channel).frequency + updates.append({channel: {"frequency": f0 + params.detuning}}) + if not params.unrolling: + sequence, delays = ramsey_sequence(platform, targets) sweeper = Sweeper( - Parameter.start, - waits, - [sequence.get_qubit_pulses(qubit).qd_pulses[-1] for qubit in targets], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + values=waits, + pulses=delays, ) # execute the sweep - results = platform.sweep( - sequence, - options, - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, + updates=updates, ) for qubit in targets: - probs = results[qubit].probability(state=1) + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[-1] + probs = probability(results[ro_pulse.id], state=1) # The probability errors are the standard errors of the binomial distribution errors = [np.sqrt(prob * (1 - prob) / params.nshots) for prob in probs] data.register_qubit( @@ -133,30 +128,37 @@ def _acquisition( errors=errors, ), ) - - if params.unrolling: + else: sequences, all_ro_pulses = [], [] for wait in waits: - sequence = PulseSequence() - for qubit in targets: - sequence += ramsey_sequence( - platform=platform, qubit=qubit, wait=wait, detuning=params.detuning - ) - + sequence, _ = ramsey_sequence(platform, targets, wait) sequences.append(sequence) - all_ro_pulses.append(sequence.ro_pulses) + all_ro_pulses.append( + { + qubit: [ + pulse + for pulse in list( + sequence.channel(platform.qubits[qubit].acquisition) + ) + if isinstance(pulse, Readout) + ][0] + for qubit in targets + } + ) - results = platform.execute_pulse_sequences(sequences, options) + results = platform.execute( + sequences, + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, + updates=updates, + ) - # We dont need ig as every serial is different - for ig, (wait, ro_pulses) in enumerate(zip(waits, all_ro_pulses)): + for wait, ro_pulses in zip(waits, all_ro_pulses): for qubit in targets: - serial = ro_pulses[qubit].serial - if params.unrolling: - result = results[serial][0] - else: - result = results[ig][serial] - prob = result.probability() + result = results[ro_pulses[qubit].id] + prob = probability(result, state=1) error = np.sqrt(prob 
* (1 - prob) / params.nshots) data.register_qubit( RamseyType, diff --git a/src/qibocal/protocols/ramsey/ramsey_signal.py b/src/qibocal/protocols/ramsey/ramsey_signal.py index 07bb695c4..58dd26ba3 100644 --- a/src/qibocal/protocols/ramsey/ramsey_signal.py +++ b/src/qibocal/protocols/ramsey/ramsey_signal.py @@ -4,16 +4,14 @@ import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibolab import AcquisitionType, AveragingMode, Parameter, Readout, Sweeper + +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log +from qibocal.result import magnitude +from ... import update from ..utils import table_dict, table_html from .utils import fitting, process_fit, ramsey_fit, ramsey_sequence @@ -81,7 +79,7 @@ def waits(self): def _acquisition( params: RamseySignalParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> RamseySignalData: """Data acquisition for Ramsey Experiment (detuned).""" @@ -96,81 +94,88 @@ def _acquisition( params.delay_between_pulses_step, ) - options = ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ) - data = RamseySignalData( detuning=params.detuning, qubit_freqs={ - qubit: platform.qubits[qubit].native_gates.RX.frequency for qubit in targets + qubit: platform.config(platform.qubits[qubit].drive).frequency + for qubit in targets }, ) - if not params.unrolling: - sequence = PulseSequence() + updates = [] + if params.detuning is not None: for qubit in targets: - sequence += ramsey_sequence( - platform=platform, qubit=qubit, detuning=params.detuning - ) + channel = platform.qubits[qubit].drive + f0 = platform.config(channel).frequency + updates.append({channel: {"frequency": f0 + params.detuning}}) + + if not params.unrolling: + sequence, delays = ramsey_sequence(platform, targets) sweeper = Sweeper( - Parameter.start, - waits, - [ - sequence.get_qubit_pulses(qubit).qd_pulses[-1] for qubit in targets - ], # TODO: check if it is correct - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + values=waits, + pulses=delays, ) # execute the sweep - results = platform.sweep( - sequence, - options, - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, + updates=updates, ) for qubit in targets: - result = results[sequence.get_qubit_pulses(qubit).ro_pulses[0].serial] + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[-1] + result = results[ro_pulse.id] # The probability errors are the standard errors of the binomial distribution data.register_qubit( RamseySignalType, (qubit), dict( wait=waits, - signal=result.magnitude, + signal=magnitude(result), ), ) else: sequences, all_ro_pulses = [], [] for wait in waits: - sequence = PulseSequence() - for qubit in targets: - sequence += ramsey_sequence( - platform=platform, qubit=qubit, wait=wait, 
detuning=params.detuning - ) - + sequence, _ = ramsey_sequence(platform, targets, wait) sequences.append(sequence) - all_ro_pulses.append(sequence.ro_pulses) + all_ro_pulses.append( + { + qubit: [ + pulse + for pulse in list( + sequence.channel(platform.qubits[qubit].acquisition) + ) + if isinstance(pulse, Readout) + ][0] + for qubit in targets + } + ) - results = platform.execute_pulse_sequences(sequences, options) + results = platform.execute( + sequences, + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, + updates=updates, + ) - # We dont need ig as everty serial is different - for ig, (wait, ro_pulses) in enumerate(zip(waits, all_ro_pulses)): + for wait, ro_pulses in zip(waits, all_ro_pulses): for qubit in targets: - serial = ro_pulses[qubit].serial - if params.unrolling: - result = results[serial][0] - else: - result = results[ig][serial] + result = results[ro_pulses[qubit].id] data.register_qubit( RamseySignalType, (qubit), dict( wait=np.array([wait]), - signal=np.array([result.magnitude]), + signal=np.array([magnitude(result)]), ), ) @@ -285,11 +290,13 @@ def _plot(data: RamseySignalData, target: QubitId, fit: RamseySignalResults = No return figures, fitting_report -def _update(results: RamseySignalResults, platform: Platform, target: QubitId): +def _update( + results: RamseySignalResults, platform: CalibrationPlatform, target: QubitId +): if results.detuning is not None: update.drive_frequency(results.frequency[target][0], platform, target) else: - update.t2(results.t2[target][0], platform, target) + update.t2(results.t2[target], platform, target) ramsey_signal = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/ramsey/ramsey_zz.py b/src/qibocal/protocols/ramsey/ramsey_zz.py index 90ebe871d..021951e26 100644 --- a/src/qibocal/protocols/ramsey/ramsey_zz.py +++ b/src/qibocal/protocols/ramsey/ramsey_zz.py @@ -4,14 +4,13 @@ import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Readout, Sweeper -from ...auto.operation import Routine +from qibocal.calibration import CalibrationPlatform + +from ...auto.operation import QubitId, Routine from ...config import log +from ...result import probability from ..utils import table_dict, table_html from .ramsey import ( COLORBAND, @@ -60,7 +59,7 @@ class RamseyZZData(RamseySignalData): def _acquisition( params: RamseyZZParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> RamseyZZData: """Data acquisition for RamseyZZ Experiment. 
@@ -75,72 +74,91 @@ def _acquisition( params.delay_between_pulses_step, ) - options = ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.SINGLESHOT, - ) - data = RamseyZZData( detuning=params.detuning, qubit_freqs={ - qubit: platform.qubits[qubit].native_gates.RX.frequency for qubit in targets + qubit: platform.config(platform.qubits[qubit].drive).frequency + for qubit in targets }, target_qubit=params.target_qubit, ) + updates = [] + if params.detuning is not None: + for qubit in targets: + channel = platform.qubits[qubit].drive + f0 = platform.config(channel).frequency + updates.append({channel: {"frequency": f0 + params.detuning}}) + for setup in ["I", "X"]: if not params.unrolling: - sequence = PulseSequence() - for qubit in targets: - sequence += ramsey_sequence( - platform=platform, - qubit=qubit, - detuning=params.detuning, - target_qubit=params.target_qubit if setup == "X" else None, - ) + sequence, delays = ramsey_sequence( + platform=platform, + targets=targets, + target_qubit=params.target_qubit if setup == "X" else None, + ) sweeper = Sweeper( - Parameter.start, - waits, - [sequence.get_qubit_pulses(qubit).qd_pulses[-1] for qubit in targets], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + values=waits, + pulses=delays, ) # execute the sweep - results = platform.sweep( - sequence, - options, - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, + updates=updates, ) for qubit in targets: - probs = results[qubit].probability(state=1) + ro_pulse = list(sequence.channel(platform.qubits[qubit].acquisition))[ + -1 + ] + probs = probability(results[ro_pulse.id], state=1) errors = [np.sqrt(prob * (1 - prob) / params.nshots) for prob in probs] else: sequences, all_ro_pulses = [], [] probs, errors = [], [] for wait in waits: - sequence = PulseSequence() - for qubit in targets: - sequence += ramsey_sequence( - platform=platform, - qubit=qubit, - wait=wait, - detuning=params.detuning, - target_qubit=params.target_qubit if setup == "X" else None, - ) - + sequence, _ = ramsey_sequence( + platform=platform, + targets=targets, + wait=wait, + target_qubit=params.target_qubit if setup == "X" else None, + ) sequences.append(sequence) - all_ro_pulses.append(sequence.ro_pulses) + all_ro_pulses.append( + { + qubit: [ + readout + for readout in sequence.channel( + platform.qubits[qubit].acquisition + ) + if isinstance(readout, Readout) + ][0] + for qubit in targets + } + ) - results = platform.execute_pulse_sequences(sequences, options) + results = platform.execute( + sequences, + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.SINGLESHOT, + updates=updates, + ) for wait, ro_pulses in zip(waits, all_ro_pulses): for qubit in targets: - prob = results[ro_pulses[qubit].serial][0].probability(state=1) + result = results[ro_pulses[qubit].id] + prob = probability(result, state=1) probs.append(prob) errors.append(np.sqrt(prob * (1 - prob) / params.nshots)) diff --git a/src/qibocal/protocols/ramsey/utils.py b/src/qibocal/protocols/ramsey/utils.py index f24eff751..0acc78b20 100644 --- a/src/qibocal/protocols/ramsey/utils.py +++ b/src/qibocal/protocols/ramsey/utils.py @@ -1,11 +1,10 @@ from typing import 
Optional import numpy as np -from qibolab import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import Delay, Platform, PulseSequence from scipy.optimize import curve_fit +from qibocal.auto.operation import QubitId from qibocal.protocols.utils import GHZ_TO_HZ, fallback_period, guess_period POPT_EXCEPTION = [0, 0, 0, 0, 1] @@ -20,9 +19,8 @@ def ramsey_sequence( platform: Platform, - qubit: QubitId, + targets: list[QubitId], wait: int = 0, - detuning: Optional[int] = 0, target_qubit: Optional[QubitId] = None, ): """Pulse sequence used in Ramsey (detuned) experiments. @@ -30,30 +28,38 @@ def ramsey_sequence( The pulse sequence is the following: RX90 -- wait -- RX90 -- MZ - - If detuning is specified the RX90 pulses will be sent to - frequency = drive_frequency + detuning """ - + delays = [] sequence = PulseSequence() - first_pi_half_pulse = platform.create_RX90_pulse(qubit, start=0) - second_pi_half_pulse = platform.create_RX90_pulse( - qubit, start=first_pi_half_pulse.finish + wait - ) - - # apply detuning: - if detuning is not None: - first_pi_half_pulse.frequency += detuning - second_pi_half_pulse.frequency += detuning - readout_pulse = platform.create_qubit_readout_pulse( - qubit, start=second_pi_half_pulse.finish - ) - - sequence.add(first_pi_half_pulse, second_pi_half_pulse, readout_pulse) - if target_qubit is not None: - x_pulse_target_qubit = platform.create_RX_pulse(target_qubit, start=0) - sequence.add(x_pulse_target_qubit) - return sequence + for qubit in targets: + natives = platform.natives.single_qubit[qubit] + + qd_channel, qd_pulse = natives.R(theta=np.pi / 2)[0] + ro_channel, ro_pulse = natives.MZ()[0] + + qd_delay = Delay(duration=wait) + ro_delay = Delay(duration=wait) + + sequence.extend( + [ + (qd_channel, qd_pulse), + (qd_channel, qd_delay), + (qd_channel, qd_pulse), + (ro_channel, Delay(duration=2 * qd_pulse.duration)), + (ro_channel, ro_delay), + (ro_channel, ro_pulse), + ] + ) + + delays.extend([qd_delay, ro_delay]) + if target_qubit is not None: + assert ( + target_qubit not in targets + ), f"Cannot run Ramsey experiment on qubit {target_qubit} if it is already in Ramsey sequence." + natives = platform.natives.single_qubit[target_qubit] + sequence += natives.RX() + + return sequence, delays def ramsey_fit(x, offset, amplitude, delta, phase, decay): diff --git a/src/qibocal/protocols/randomized_benchmarking/filtered_rb.py b/src/qibocal/protocols/randomized_benchmarking/filtered_rb.py index 6c949bb73..4bc1b35b2 100644 --- a/src/qibocal/protocols/randomized_benchmarking/filtered_rb.py +++ b/src/qibocal/protocols/randomized_benchmarking/filtered_rb.py @@ -2,10 +2,9 @@ import numpy as np import plotly.graph_objects as go -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibocal.auto.operation import Results, Routine +from qibocal.auto.operation import QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.protocols.randomized_benchmarking.utils import rb_acquisition from qibocal.protocols.utils import table_dict, table_html @@ -24,7 +23,7 @@ class FilteredRBResult(Results): def _acquisition( params: FilteredRBParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> RBData: """The data acquisition stage of Filtered Randomized Benchmarking. @@ -35,7 +34,7 @@ def _acquisition( Args: params : All parameters in one object. - platform : Platform the experiment is executed on. 
+ platform : CalibrationPlatform the experiment is executed on. target : list of qubits the experiment is executed on. Returns: @@ -123,4 +122,6 @@ def _plot( return [fig], fitting_report +# TODO: add update function (?) + filtered_rb = Routine(_acquisition, _fit, _plot) diff --git a/src/qibocal/protocols/randomized_benchmarking/standard_rb.py b/src/qibocal/protocols/randomized_benchmarking/standard_rb.py index c001b07a5..37833bd0a 100644 --- a/src/qibocal/protocols/randomized_benchmarking/standard_rb.py +++ b/src/qibocal/protocols/randomized_benchmarking/standard_rb.py @@ -3,10 +3,9 @@ import numpy as np import plotly.graph_objects as go -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibocal.auto.operation import Parameters, Results, Routine +from qibocal.auto.operation import Parameters, QubitId, Routine +from qibocal.calibration import CalibrationPlatform from ..utils import table_dict, table_html from .fitting import exp1B_func, fit_exp1B_func @@ -143,7 +142,7 @@ def random_circuits( def _acquisition( params: StandardRBParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> RBData: """The data acquisition stage of Standard Randomized Benchmarking. @@ -154,7 +153,7 @@ def _acquisition( Args: params: All parameters in one object. - platform: Platform the experiment is executed on. + platform: CalibrationPlatform the experiment is executed on. target: list of qubits the experiment is executed on. Returns: @@ -226,7 +225,7 @@ def _plot( Returns: tuple[list[go.Figure], str]: """ - + target = tuple(target) if isinstance(target, list) else target qubit = target fig = go.Figure() fitting_report = "" @@ -331,4 +330,14 @@ def _plot( return [fig], fitting_report -standard_rb = Routine(_acquisition, _fit, _plot) +def _update(results: StandardRBResult, platform: CalibrationPlatform, target: QubitId): + """Write rb fidelity in calibration.""" + + # TODO: shall we use the gate fidelity or the pulse fidelity + platform.calibration.single_qubits[target].rb_fidelity = ( + results.fidelity[target], + results.fit_uncertainties[target][1] / 2, + ) + + +standard_rb = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/randomized_benchmarking/standard_rb_2q.py b/src/qibocal/protocols/randomized_benchmarking/standard_rb_2q.py index d76aa59ec..2160e53a1 100644 --- a/src/qibocal/protocols/randomized_benchmarking/standard_rb_2q.py +++ b/src/qibocal/protocols/randomized_benchmarking/standard_rb_2q.py @@ -1,14 +1,13 @@ from dataclasses import dataclass -from qibolab.platform import Platform -from qibolab.qubits import QubitPairId - -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitPairId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.protocols.randomized_benchmarking.standard_rb import ( StandardRBParameters, _plot, ) +from ...calibration.calibration import TwoQubitCalibration from .utils import RB2QData, StandardRBResult, fit, twoq_rb_acquisition FILE_CLIFFORDS = "2qubitCliffs.json" @@ -27,7 +26,7 @@ class StandardRB2QParameters(StandardRBParameters): def _acquisition( params: StandardRB2QParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitPairId], ) -> RB2QData: """Data acquisition for two qubit Standard Randomized Benchmarking.""" @@ -42,4 +41,21 @@ def _fit(data: RB2QData) -> StandardRBResult: return results -standard_rb_2q = Routine(_acquisition, _fit, _plot) +def _update( + results: StandardRBResult, platform: 
CalibrationPlatform, target: QubitPairId +): + """Write rb fidelity in calibration.""" + # FIXME: error raised by qq fit + if isinstance(target, list): + target = tuple(target) + + if target not in platform.calibration.two_qubits: + platform.calibration.two_qubits[target] = TwoQubitCalibration() + + platform.calibration.two_qubits[target].rb_fidelity = ( + results.fidelity[target], + results.fit_uncertainties[target][1] / 2, + ) + + +standard_rb_2q = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/randomized_benchmarking/standard_rb_2q_inter.py b/src/qibocal/protocols/randomized_benchmarking/standard_rb_2q_inter.py index c6a657e05..332e306f8 100644 --- a/src/qibocal/protocols/randomized_benchmarking/standard_rb_2q_inter.py +++ b/src/qibocal/protocols/randomized_benchmarking/standard_rb_2q_inter.py @@ -1,10 +1,9 @@ from dataclasses import dataclass, fields import numpy as np -from qibolab.platform import Platform -from qibolab.qubits import QubitPairId -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitPairId, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.protocols.randomized_benchmarking.standard_rb import _plot from qibocal.protocols.randomized_benchmarking.standard_rb_2q import ( StandardRB2QParameters, @@ -41,18 +40,18 @@ def __contains__(self, value: QubitPairId): def _acquisition( params: StandardRB2QInterParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitPairId], ) -> RB2QInterData: """Data acquisition for two qubit Interleaved Randomized Benchmarking.""" - data = twoq_rb_acquisition(params, platform, targets, interleave=params.interleave) - fidelity = {} for target in targets: - fidelity[target] = platform.pairs[target].gate_fidelity + assert ( + target in platform.calibration.two_qubits + ), "Pair not calibrated, run standard 2q rb before interleaved version" + fidelity[target] = platform.calibration.two_qubits[target].rb_fidelity data.fidelity = fidelity - return data @@ -69,19 +68,16 @@ def _fit(data: RB2QInterData) -> StandardRB2QInterResult: qubits = data.pairs results = fit(qubits, data) - fidelity_cz = {} for qubit in qubits: - if qubit in data.fidelity and data.fidelity[qubit] is not None: - fid_cz = results.fidelity[qubit] / data.fidelity[qubit][0] - uncertainty_cz = np.sqrt( - 1 - / data.fidelity[qubit][0] ** 2 - * results.fit_uncertainties[qubit][1] ** 2 - + (results.fidelity[qubit] / data.fidelity[qubit][0] ** 2) ** 2 - * data.fidelity[qubit][1] ** 2 - ) - fidelity_cz[qubit] = [fid_cz, uncertainty_cz] + fid_cz = results.fidelity[qubit] / data.fidelity[qubit][0] + # TODO: check this error formula + uncertainty_cz = np.sqrt( + 1 / data.fidelity[qubit][0] ** 2 * results.fit_uncertainties[qubit][1] ** 2 + + (results.fidelity[qubit] / data.fidelity[qubit][0] ** 2) ** 2 + * data.fidelity[qubit][1] ** 2 + ) + fidelity_cz[qubit] = [fid_cz, uncertainty_cz] return StandardRB2QInterResult( results.fidelity, @@ -93,4 +89,15 @@ def _fit(data: RB2QInterData) -> StandardRB2QInterResult: ) -standard_rb_2q_inter = Routine(_acquisition, _fit, _plot) +def _update( + results: StandardRBResult, platform: CalibrationPlatform, target: QubitPairId +): + """Write cz fidelity in calibration.""" + # TODO: shall we use the gate fidelity or the pulse fidelity + target = tuple(target) + platform.calibration.two_qubits[target].cz_fidelity = tuple( + results.fidelity_cz[target] + ) + + +standard_rb_2q_inter = Routine(_acquisition, _fit, _plot, _update) diff --git 
a/src/qibocal/protocols/randomized_benchmarking/utils.py b/src/qibocal/protocols/randomized_benchmarking/utils.py index ef85ba829..0a6541aaa 100644 --- a/src/qibocal/protocols/randomized_benchmarking/utils.py +++ b/src/qibocal/protocols/randomized_benchmarking/utils.py @@ -9,15 +9,14 @@ from qibo import gates from qibo.backends import construct_backend from qibo.models import Circuit -from qibolab.platform import Platform -from qibolab.qubits import QubitId, QubitPairId -from qibocal.auto.operation import Data, Parameters, Results +from qibocal.auto.operation import Data, Parameters, QubitId, QubitPairId, Results from qibocal.auto.transpile import ( dummy_transpiler, execute_transpiled_circuit, execute_transpiled_circuits, ) +from qibocal.calibration import CalibrationPlatform from qibocal.protocols.randomized_benchmarking.dict_utils import ( SINGLE_QUBIT_CLIFFORDS_NAMES, calculate_pulses_clifford, @@ -330,7 +329,7 @@ class StandardRBResult(Results): def setup( params: Parameters, - platform: Platform, + platform: CalibrationPlatform, single_qubit: bool = True, interleave: Optional[str] = None, ): @@ -463,7 +462,7 @@ def execute_circuits(circuits, targets, params, backend, single_qubit=True): def rb_acquisition( params: Parameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], add_inverse_layer: bool = True, interleave: str = None, @@ -510,7 +509,7 @@ def rb_acquisition( def twoq_rb_acquisition( params: Parameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitPairId], add_inverse_layer: bool = True, interleave: str = None, @@ -528,7 +527,7 @@ def twoq_rb_acquisition( RB2QData: The acquired data for two qubit randomized benchmarking. """ targets = [tuple(pair) if isinstance(pair, list) else pair for pair in targets] - data, backend = setup(params, platform, single_qubit=False) + data, backend = setup(params, platform, single_qubit=False, interleave=interleave) circuits, indexes, npulses_per_clifford = get_circuits( params, targets, add_inverse_layer, interleave, single_qubit=False ) diff --git a/src/qibocal/protocols/readout_characterization.py b/src/qibocal/protocols/readout_characterization.py index 04b5928eb..d2f32befd 100644 --- a/src/qibocal/protocols/readout_characterization.py +++ b/src/qibocal/protocols/readout_characterization.py @@ -4,13 +4,11 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, Delay, PulseSequence, Readout from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.protocols.utils import ( effective_qubit_temperature, format_error_single_cell, @@ -19,6 +17,8 @@ table_html, ) +from ..result import unpack + @dataclass class ReadoutCharacterizationParameters(Parameters): @@ -74,14 +74,16 @@ class ReadoutCharacterizationData(Data): def _acquisition( params: ReadoutCharacterizationParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> ReadoutCharacterizationData: """Data acquisition for resonator spectroscopy.""" data = ReadoutCharacterizationData( qubit_frequencies={ - qubit: platform.qubits[qubit].drive_frequency for qubit 
in targets + # TODO: should this be the drive frequency instead? + qubit: float(platform.calibration.single_qubits[qubit].qubit.frequency_01) + for qubit in targets }, delay=float(params.delay), ) @@ -89,55 +91,46 @@ def _acquisition( # FIXME: ADD 1st measurament and post_selection for accurate state preparation ? for state in [0, 1]: - # Define the pulse sequences - if state == 1: - RX_pulses = {} - ro_pulses = {} sequence = PulseSequence() for qubit in targets: - start = 0 + natives = platform.natives.single_qubit[qubit] + ro_channel = natives.MZ()[0][0] if state == 1: - RX_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - sequence.add(RX_pulses[qubit]) - start = RX_pulses[qubit].finish - ro_pulses[qubit] = [] - for _ in range(2): - ro_pulse = platform.create_qubit_readout_pulse(qubit, start=start) - start += ro_pulse.duration + int( - params.delay - ) # device required conversion - sequence.add(ro_pulse) - ro_pulses[qubit].append(ro_pulse) + sequence += natives.RX() + sequence.append((ro_channel, Delay(duration=natives.RX()[0][1].duration))) + sequence += natives.MZ() + sequence.append((ro_channel, Delay(duration=params.delay))) + sequence += natives.MZ() # execute the pulse sequence - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - ), + results = platform.execute( + [sequence], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, ) - results_samples = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - ), + results_samples = platform.execute( + [sequence], + acquisition_type=AcquisitionType.DISCRIMINATION, + nshots=params.nshots, + relaxation_time=params.relaxation_time, ) # Save the data for qubit in targets: - for i, ro_pulse in enumerate(ro_pulses[qubit]): - result = results[ro_pulse.serial] + readouts = [ + pulse + for pulse in sequence.channel(platform.qubits[qubit].acquisition) + if isinstance(pulse, Readout) + ] + for j, ro_pulse in enumerate(readouts): + i, q = unpack(results[ro_pulse.id]) data.register_qubit( ReadoutCharacterizationType, - (qubit, state, i), - dict(i=result.voltage_i, q=result.voltage_q), + (qubit, state, j), + dict(i=i, q=q), ) - result_samples = results_samples[ro_pulse.serial] - data.samples[qubit, state, i] = result_samples.samples.tolist() - + data.samples[qubit, state, j] = results_samples[ro_pulse.id].tolist() return data @@ -321,10 +314,14 @@ def _plot( def _update( - results: ReadoutCharacterizationResults, platform: Platform, target: QubitId + results: ReadoutCharacterizationResults, + platform: CalibrationPlatform, + target: QubitId, ): update.readout_fidelity(results.fidelity[target], platform, target) - update.assignment_fidelity(results.assignment_fidelity[target], platform, target) + platform.calibration.single_qubits[target].readout.effective_temperature = ( + results.effective_temperature[target][0] + ) readout_characterization = Routine(_acquisition, _fit, _plot, _update) diff --git a/src/qibocal/protocols/readout_mitigation_matrix.py b/src/qibocal/protocols/readout_mitigation_matrix.py index 9d12b3f7e..d894b3bd9 100644 --- a/src/qibocal/protocols/readout_mitigation_matrix.py +++ b/src/qibocal/protocols/readout_mitigation_matrix.py @@ -1,5 +1,5 @@ from dataclasses import dataclass, field -from typing import Optional +from typing import Optional, 
Tuple import numpy as np import numpy.typing as npt @@ -7,11 +7,11 @@ from qibo import gates from qibo.backends import construct_backend from qibo.models import Circuit -from qibolab.platform import Platform -from qibolab.qubits import QubitId +from scipy.sparse import lil_matrix -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine from qibocal.auto.transpile import dummy_transpiler, execute_transpiled_circuit +from qibocal.calibration import CalibrationPlatform from qibocal.config import log @@ -41,6 +41,14 @@ class ReadoutMitigationMatrixResults(Results): ) +ReadoutMitigationMatrixId = tuple[Tuple[QubitId, ...], str, str] +"""Data identifier for single list of qubits. + +Tuple[QubitId, ...] is the qubits which have been passed on as parameters. +The two strings represents the expected state and the measured state. +""" + + @dataclass class ReadoutMitigationMatrixData(Data): """ReadoutMitigationMatrix acquisition outputs.""" @@ -49,13 +57,13 @@ class ReadoutMitigationMatrixData(Data): """List of qubit ids""" nshots: int """Number of shots""" - data: dict = field(default_factory=dict) + data: dict[ReadoutMitigationMatrixId, float] = field(default_factory=dict) """Raw data acquited.""" def _acquisition( params: ReadoutMitigationMatrixParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[list[QubitId]], ) -> ReadoutMitigationMatrixData: data = ReadoutMitigationMatrixData( @@ -63,7 +71,7 @@ def _acquisition( ) backend = construct_backend("qibolab", platform=platform) transpiler = dummy_transpiler(backend) - qubit_map = [i for i in range(platform.nqubits)] + for qubits in targets: nqubits = len(qubits) for i in range(2**nqubits): @@ -84,7 +92,7 @@ def _acquisition( for freq in frequencies: data.register_qubit( ReadoutMitigationMatrixType, - (qubits), + (tuple(qubits)), dict( state=np.array([int(state, 2)]), frequency=freq, @@ -151,5 +159,26 @@ def _plot( return figs, fitting_report -readout_mitigation_matrix = Routine(_acquisition, _fit, _plot) +def _update( + results: ReadoutMitigationMatrixData, + platform: CalibrationPlatform, + target: list[QubitId], +): + # create empty matrix if it doesn't exist + if platform.calibration.readout_mitigation_matrix is None: + platform.calibration.readout_mitigation_matrix = lil_matrix( + (2**platform.calibration.nqubits, 2**platform.calibration.nqubits) + ) + + # compute indices + mask = sum(1 << platform.calibration.qubit_index(i) for i in target) + indices = [i for i in range(2**platform.calibration.nqubits) if (i & mask) == i] + + # update matrix + platform.calibration.readout_mitigation_matrix[np.ix_(indices, indices)] = ( + results.readout_mitigation_matrix[tuple(target)] + ) + + +readout_mitigation_matrix = Routine(_acquisition, _fit, _plot, _update) """Readout mitigation matrix protocol.""" diff --git a/src/qibocal/protocols/readout_optimization/resonator_amplitude.py b/src/qibocal/protocols/readout_optimization/resonator_amplitude.py index 6826d2757..ab054220d 100644 --- a/src/qibocal/protocols/readout_optimization/resonator_amplitude.py +++ b/src/qibocal/protocols/readout_optimization/resonator_amplitude.py @@ -5,15 +5,14 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import 
AcquisitionType, Delay, PulseSequence from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.fitting.classifier.qubit_fit import QubitFit from qibocal.protocols.utils import table_dict, table_html +from qibocal.update import replace @dataclass @@ -64,7 +63,7 @@ class ResonatorAmplitudeResults(Results): def _acquisition( params: ResonatorAmplitudeParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> ResonatorAmplitudeData: r""" @@ -75,7 +74,7 @@ def _acquisition( Args: params (:class:`ResonatorAmplitudeParameters`): input parameters - platform (:class:`Platform`): Qibolab's platform + platform (:class:`CalibrationPlatform`): Qibolab's platform targets (list): list of QubitIds to be characterized Returns: @@ -85,45 +84,42 @@ def _acquisition( data = ResonatorAmplitudeData() for qubit in targets: error = 1 - old_amp = platform.qubits[qubit].native_gates.MZ.amplitude + natives = platform.natives.single_qubit[qubit] + + ro_channel, ro_pulse = natives.MZ()[0] new_amp = params.amplitude_start while error > params.error_threshold and new_amp <= params.amplitude_stop: - platform.qubits[qubit].native_gates.MZ.amplitude = new_amp + + new_ro = replace(ro_pulse, amplitude=new_amp) sequence_0 = PulseSequence() sequence_1 = PulseSequence() - qd_pulses = platform.create_RX_pulse(qubit, start=0) - ro_pulses = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses.finish - ) - sequence_0.add(ro_pulses) - sequence_1.add(qd_pulses) - sequence_1.add(ro_pulses) - - state0_results = platform.execute_pulse_sequence( - sequence_0, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - ), + qd_channel, qd_pulse = natives.RX()[0] + + sequence_1.append((qd_channel, qd_pulse)) + sequence_1.append((ro_channel, Delay(duration=qd_pulse.duration))) + sequence_1.append((ro_channel, new_ro)) + + sequence_0.append((ro_channel, new_ro)) + + state0_results = platform.execute( + [sequence_0], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, ) - state1_results = platform.execute_pulse_sequence( - sequence_1, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - ), + state1_results = platform.execute( + [sequence_1], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, ) - result0 = state0_results[ro_pulses.serial] - result1 = state1_results[ro_pulses.serial] + result0 = state0_results[new_ro.id] + result1 = state1_results[new_ro.id] - i_values = np.concatenate((result0.voltage_i, result1.voltage_i)) - q_values = np.concatenate((result0.voltage_q, result1.voltage_q)) - iq_values = np.stack((i_values, q_values), axis=-1) - nshots = int(len(i_values) / 2) + iq_values = np.concatenate((result0, result1)) + nshots = params.nshots states = [0] * nshots + [1] * nshots model = QubitFit() model.fit(iq_values, np.array(states)) @@ -138,7 +134,6 @@ def _acquisition( threshold=np.array([model.threshold]), ), ) - platform.qubits[qubit].native_gates.MZ.amplitude = old_amp new_amp += params.amplitude_step return data @@ -203,7 +198,9 @@ def _plot( return figures, fitting_report -def _update(results: 
ResonatorAmplitudeResults, platform: Platform, target: QubitId): +def _update( + results: ResonatorAmplitudeResults, platform: CalibrationPlatform, target: QubitId +): update.readout_amplitude(results.best_amp[target], platform, target) update.iq_angle(results.best_angle[target], platform, target) update.threshold(results.best_threshold[target], platform, target) diff --git a/src/qibocal/protocols/readout_optimization/resonator_frequency.py b/src/qibocal/protocols/readout_optimization/resonator_frequency.py deleted file mode 100644 index c9a27609f..000000000 --- a/src/qibocal/protocols/readout_optimization/resonator_frequency.py +++ /dev/null @@ -1,236 +0,0 @@ -from dataclasses import dataclass, field - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from plotly.subplots import make_subplots -from qibolab import AcquisitionType, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.fitting.classifier.qubit_fit import QubitFit -from qibocal.protocols.utils import table_dict, table_html - - -@dataclass -class ResonatorFrequencyParameters(Parameters): - """Optimization RO frequency inputs.""" - - freq_width: int - """Width [Hz] for frequency sweep relative to the readout frequency [Hz].""" - freq_step: int - """Frequency step for sweep [Hz].""" - - -@dataclass -class ResonatorFrequencyResults(Results): - """Optimization Resonator frequency outputs.""" - - fidelities: dict[QubitId, list] - """Assignment fidelities.""" - best_freq: dict[QubitId, float] - """Resonator Frequency with the highest assignment fidelity.""" - best_angle: dict[QubitId, float] - """IQ angle that maximes assignment fidelity""" - best_threshold: dict[QubitId, float] - """Threshold that maximes assignment fidelity""" - - -ResonatorFrequencyType = np.dtype( - [ - ("freq", np.float64), - ("assignment_fidelity", np.float64), - ("angle", np.float64), - ("threshold", np.float64), - ] -) -"""Custom dtype for Optimization RO frequency.""" - - -@dataclass -class ResonatorFrequencyData(Data): - """ "Optimization RO frequency acquisition outputs.""" - - resonator_type: str - """Resonator type.""" - data: dict[QubitId, npt.NDArray[ResonatorFrequencyType]] = field( - default_factory=dict - ) - - def unique_freqs(self, qubit: QubitId) -> np.ndarray: - return np.unique(self.data[qubit]["freq"]) - - -def _acquisition( - params: ResonatorFrequencyParameters, platform: Platform, targets: list[QubitId] -) -> ResonatorFrequencyData: - r""" - Data acquisition for readout frequency optimization. - While sweeping the readout frequency, the routine performs a single shot - classification and evaluates the assignement fidelity. - At the end, the readout frequency is updated, choosing the one that has - the highest assignment fidelity. 
- - Args: - params (ResonatorFrequencyParameters): experiment's parameters - platform (Platform): Qibolab platform object - qubits (list): list of target qubits to perform the action - - """ - - # create 2 sequences of pulses for the experiment: - # sequence_0: I - MZ - # sequence_1: RX - MZ - - # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel - sequence_0 = PulseSequence() - sequence_1 = PulseSequence() - ro_pulses = {} - qd_pulses = {} - for qubit in targets: - qd_pulses[qubit] = platform.create_RX_pulse(qubit, start=0) - ro_pulses[qubit] = platform.create_qubit_readout_pulse( - qubit, start=qd_pulses[qubit].finish - ) - sequence_0.add(ro_pulses[qubit]) - sequence_1.add(qd_pulses[qubit]) - sequence_1.add(ro_pulses[qubit]) - - # define the parameter to sweep and its range: - delta_frequency_range = np.arange( - -params.freq_width / 2, params.freq_width / 2, params.freq_step - ) - - data = ResonatorFrequencyData(resonator_type=platform.resonator_type) - sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[ro_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - - results_0 = platform.sweep( - sequence_0, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - ), - sweeper, - ) - - results_1 = platform.sweep( - sequence_1, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - ), - sweeper, - ) - - # retrieve the results for every qubit - for qubit in targets: - for k, freq in enumerate(delta_frequency_range): - i_values = [] - q_values = [] - states = [] - for i, results in enumerate([results_0, results_1]): - result = results[ro_pulses[qubit].serial] - i_values.extend(result.voltage_i[:, k]) - q_values.extend(result.voltage_q[:, k]) - states.extend([i] * len(result.voltage_i[:, k])) - - model = QubitFit() - model.fit(np.stack((i_values, q_values), axis=-1), np.array(states)) - data.register_qubit( - ResonatorFrequencyType, - (qubit), - dict( - freq=np.array([ro_pulses[qubit].frequency + freq]), - assignment_fidelity=np.array([model.assignment_fidelity]), - angle=np.array([model.angle]), - threshold=np.array([model.threshold]), - ), - ) - return data - - -def _fit(data: ResonatorFrequencyData) -> ResonatorFrequencyResults: - """Post-Processing for Optimization RO frequency""" - qubits = data.qubits - best_freq = {} - best_angle = {} - best_threshold = {} - highest_fidelity = {} - for qubit in qubits: - data_qubit = data[qubit] - index_best_fid = np.argmax(data_qubit["assignment_fidelity"]) - highest_fidelity[qubit] = data_qubit["assignment_fidelity"][index_best_fid] - best_freq[qubit] = data_qubit["freq"][index_best_fid] - best_angle[qubit] = data_qubit["angle"][index_best_fid] - best_threshold[qubit] = data_qubit["threshold"][index_best_fid] - - return ResonatorFrequencyResults( - fidelities=highest_fidelity, - best_freq=best_freq, - best_angle=best_angle, - best_threshold=best_threshold, - ) - - -def _plot( - data: ResonatorFrequencyData, fit: ResonatorFrequencyResults, target: QubitId -): - """Plotting function for Optimization RO frequency.""" - figures = [] - freqs = data[target]["freq"] - opacity = 1 - fitting_report = "" - fig = make_subplots( - rows=1, - cols=1, - ) - if fit is not None: - fig.add_trace( - go.Scatter( - x=freqs, - y=data[target]["assignment_fidelity"], - opacity=opacity, - showlegend=True, - ), - row=1, - col=1, - ) - - 
fitting_report = table_html( - table_dict( - target, - "Best Resonator Frequency [Hz]", - np.round(fit.best_freq[target], 4), - ) - ) - - fig.update_layout( - showlegend=True, - xaxis_title="Resonator Frequencies [GHz]", - yaxis_title="Assignment Fidelities", - ) - - figures.append(fig) - - return figures, fitting_report - - -def _update(results: ResonatorFrequencyResults, platform: Platform, target: QubitId): - update.readout_frequency(results.best_freq[target], platform, target) - update.threshold(results.best_threshold[target], platform, target) - update.iq_angle(results.best_angle[target], platform, target) - - -resonator_frequency = Routine(_acquisition, _fit, _plot, _update) -""""Optimization RO frequency Routine object.""" diff --git a/src/qibocal/protocols/readout_optimization/twpa_calibration/__init__.py b/src/qibocal/protocols/readout_optimization/twpa_calibration/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency.py b/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency.py deleted file mode 100644 index f35c5e1e3..000000000 --- a/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency.py +++ /dev/null @@ -1,182 +0,0 @@ -from dataclasses import dataclass, field - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from qibolab.platform import Platform -from qibolab.qubits import QubitId - -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.protocols import classification -from qibocal.protocols.readout_optimization.resonator_frequency import ( - ResonatorFrequencyType, -) -from qibocal.protocols.utils import HZ_TO_GHZ, table_dict, table_html - - -@dataclass -class TwpaFrequencyParameters(Parameters): - """TwpaFrequency runcard inputs.""" - - frequency_width: float - """Relative frequency width [Hz]""" - frequency_step: float - """Frequency step [Hz]""" - - -@dataclass -class TwpaFrequencyData(Data): - """TwpaFrequency acquisition outputs.""" - - data: dict[ - tuple[QubitId, float], npt.NDArray[classification.ClassificationType] - ] = field(default_factory=dict) - """Raw data acquired.""" - frequencies: dict[QubitId, float] = field(default_factory=dict) - """Frequencies for each qubit.""" - - -@dataclass -class TwpaFrequencyResults(Results): - """TwpaFrequency outputs.""" - - best_freqs: dict[QubitId, float] = field(default_factory=dict) - best_fidelities: dict[QubitId, float] = field(default_factory=dict) - best_angles: dict[QubitId, float] = field(default_factory=dict) - best_thresholds: dict[QubitId, float] = field(default_factory=dict) - - -def _acquisition( - params: TwpaFrequencyParameters, - platform: Platform, - targets: list[QubitId], -) -> TwpaFrequencyData: - r""" - Data acquisition for TWPA power optmization. - This protocol perform a classification protocol for twpa frequencies - in the range [twpa_frequency - frequency_width / 2, twpa_frequency + frequency_width / 2] - with step frequency_step. 
- - Args: - params (:class:`TwpaFrequencyParameters`): input parameters - platform (:class:`Platform`): Qibolab's platform - qubits (dict): dict of target :class:`Qubit` objects to be characterized - - Returns: - data (:class:`TwpaFrequencyData`) - """ - - data = TwpaFrequencyData() - - freq_range = np.arange( - -params.frequency_width / 2, params.frequency_width / 2, params.frequency_step - ).astype(int) - - initial_twpa_freq = {} - for qubit in targets: - initial_twpa_freq[qubit] = float( - platform.qubits[qubit].twpa.local_oscillator.frequency - ) - data.frequencies[qubit] = list( - float(platform.qubits[qubit].twpa.local_oscillator.frequency) + freq_range - ) - - for freq in freq_range: - for qubit in targets: - platform.qubits[qubit].twpa.local_oscillator.frequency = ( - initial_twpa_freq[qubit] + freq - ) - - classification_data = classification._acquisition( - classification.SingleShotClassificationParameters.load( - {"nshots": params.nshots} - ), - platform, - targets, - ) - classification_result = classification._fit(classification_data) - for qubit in targets: - data.register_qubit( - ResonatorFrequencyType, - (qubit), - dict( - freq=np.array( - [platform.qubits[qubit].twpa.local_oscillator.frequency], - dtype=np.float64, - ), - assignment_fidelity=np.array( - [classification_result.assignment_fidelity[qubit]], - ), - angle=np.array([classification_result.rotation_angle[qubit]]), - threshold=np.array([classification_result.threshold[qubit]]), - ), - ) - return data - - -def _fit(data: TwpaFrequencyData) -> TwpaFrequencyResults: - """Extract fidelity for each configuration qubit / param. - Where param can be either frequency or power.""" - - qubits = data.qubits - best_freq = {} - best_fidelity = {} - best_angle = {} - best_threshold = {} - for qubit in qubits: - data_qubit = data[qubit] - index_best_err = np.argmax(data_qubit["assignment_fidelity"]) - best_fidelity[qubit] = data_qubit["assignment_fidelity"][index_best_err] - best_freq[qubit] = data_qubit["freq"][index_best_err] - best_angle[qubit] = data_qubit["angle"][index_best_err] - best_threshold[qubit] = data_qubit["threshold"][index_best_err] - - return TwpaFrequencyResults( - best_freq, best_fidelity, best_thresholds=best_threshold, best_angles=best_angle - ) - - -def _plot(data: TwpaFrequencyData, fit: TwpaFrequencyResults, target: QubitId): - """Plotting function that shows the assignment fidelity - for different values of the twpa frequency for a single qubit""" - - figures = [] - fitting_report = "" - if fit is not None: - qubit_data = data.data[target] - fidelities = qubit_data["assignment_fidelity"] - frequencies = qubit_data["freq"] - fitting_report = table_html( - table_dict( - target, - ["Best assignment fidelity", "TWPA Frequency [Hz]"], - [ - np.round(fit.best_fidelities[target], 3), - fit.best_freqs[target], - ], - ) - ) - fig = go.Figure( - [go.Scatter(x=frequencies * HZ_TO_GHZ, y=fidelities, name="Fidelity")] - ) - - fig.update_layout( - showlegend=True, - xaxis_title="TWPA Frequency [GHz]", - yaxis_title="Assignment Fidelity", - ) - - figures.append(fig) - - return figures, fitting_report - - -def _update(results: TwpaFrequencyResults, platform: Platform, target: QubitId): - update.twpa_frequency(results.best_freqs[target], platform, target) - update.iq_angle(results.best_angles[target], platform, target) - update.threshold(results.best_thresholds[target], platform, target) - - -twpa_frequency = Routine(_acquisition, _fit, _plot, _update) -"""Twpa frequency Routine object.""" diff --git 
a/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency_SNR.py b/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency_SNR.py deleted file mode 100644 index 7b1e1e32b..000000000 --- a/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency_SNR.py +++ /dev/null @@ -1,265 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from plotly.subplots import make_subplots -from qibolab.platform import Platform -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.protocols.resonator_spectroscopy import resonator_spectroscopy -from qibocal.protocols.utils import HZ_TO_GHZ, PowerLevel, table_dict, table_html - - -@dataclass -class ResonatorTWPAFrequencyParameters(Parameters): - """ResonatorTWPAFrequency runcard inputs.""" - - freq_width: int - """Width for frequency sweep relative to the readout frequency (Hz).""" - freq_step: int - """Frequency step for sweep (Hz).""" - twpa_freq_width: int - """Width for TPWA frequency sweep (Hz).""" - twpa_freq_step: int - """TPWA frequency step (Hz).""" - power_level: PowerLevel - """resonator Power regime (low or high).""" - nshots: Optional[int] = None - """Number of shots.""" - relaxation_time: Optional[int] = None - """Relaxation time (ns).""" - - def __post_init__(self): - self.power_level = PowerLevel(self.power_level) - - -@dataclass -class ResonatorTWPAFrequencyResults(Results): - """ResonatorTWPAFrequency outputs.""" - - twpa_frequency: dict[QubitId, float] = field(default_factory=dict) - """TWPA frequency [GHz] for each qubit.""" - frequency: Optional[dict[QubitId, float]] = field(default_factory=dict) - """Readout frequency [GHz] for each qubit.""" - bare_frequency: Optional[dict[QubitId, float]] = field(default_factory=dict) - """Bare frequency [GHz] for each qubit.""" - - -ResonatorTWPAFrequencyType = np.dtype( - [ - ("freq", np.float64), - ("twpa_freq", np.float64), - ("signal", np.float64), - ("phase", np.float64), - ] -) -"""Custom dtype for Resonator TWPA Frequency.""" - - -@dataclass -class ResonatorTWPAFrequencyData(Data): - """ResonatorTWPAFrequency data acquisition.""" - - resonator_type: str - """Resonator type.""" - data: dict[QubitId, npt.NDArray[ResonatorTWPAFrequencyType]] = field( - default_factory=dict - ) - """Raw data acquired.""" - power_level: Optional[PowerLevel] = None - """Power regime of the resonator.""" - - @classmethod - def load(cls, path): - obj = super().load(path) - # Instantiate PowerLevel object - if obj.power_level is not None: # pylint: disable=E1101 - obj.power_level = PowerLevel(obj.power_level) # pylint: disable=E1101 - return obj - - -def _acquisition( - params: ResonatorTWPAFrequencyParameters, - platform: Platform, - targets: list[QubitId], -) -> ResonatorTWPAFrequencyData: - r""" - Data acquisition for TWPA frequency optmization using SNR. - This protocol perform a classification protocol for twpa frequencies - in the range [twpa_frequency - frequency_width / 2, twpa_frequency + frequency_width / 2] - with step frequency_step. 
- - Args: - params (:class:`ResonatorTWPAFrequencyParameters`): input parameters - platform (:class:`Platform`): Qibolab's platform - qubits (dict): dict of target :class:`Qubit` objects to be characterized - - Returns: - data (:class:`ResonatorTWPAFrequencyData`) - """ - - data = ResonatorTWPAFrequencyData( - power_level=params.power_level, - resonator_type=platform.resonator_type, - ) - - TWPAFrequency_range = np.arange( - -params.twpa_freq_width // 2, params.twpa_freq_width // 2, params.twpa_freq_step - ) - - initial_twpa_freq = {} - for qubit in targets: - initial_twpa_freq[qubit] = float( - platform.qubits[qubit].twpa.local_oscillator.frequency - ) - - for _freq in TWPAFrequency_range: - for qubit in targets: - platform.qubits[qubit].twpa.local_oscillator.frequency = ( - initial_twpa_freq[qubit] + _freq - ) - - resonator_spectroscopy_data, _ = resonator_spectroscopy.acquisition( - resonator_spectroscopy.parameters_type.load( - { - "freq_width": params.freq_width, - "freq_step": params.freq_step, - "power_level": params.power_level, - "relaxation_time": params.relaxation_time, - "nshots": params.nshots, - } - ), - platform, - targets, - ) - - for qubit in targets: - data.register_qubit( - ResonatorTWPAFrequencyType, - (qubit), - dict( - signal=resonator_spectroscopy_data[qubit].signal, - phase=resonator_spectroscopy_data[qubit].phase, - freq=resonator_spectroscopy_data[qubit].freq, - twpa_freq=_freq + initial_twpa_freq[qubit], - ), - ) - - return data - - -def _fit(data: ResonatorTWPAFrequencyData) -> ResonatorTWPAFrequencyResults: - """Post-processing function for ResonatorTWPASpectroscopy.""" - qubits = data.qubits - bare_frequency = {} - frequency = {} - twpa_frequency = {} - for qubit in qubits: - data_qubit = data[qubit] - if data.resonator_type == "3D": - index_best_freq = np.argmax(data_qubit["signal"]) - else: - index_best_freq = np.argmin(data_qubit["signal"]) - twpa_frequency[qubit] = data_qubit["twpa_freq"][index_best_freq] - - if data.power_level is PowerLevel.high: - bare_frequency[qubit] = data_qubit["freq"][index_best_freq] - else: - frequency[qubit] = data_qubit["freq"][index_best_freq] - - if data.power_level is PowerLevel.high: - return ResonatorTWPAFrequencyResults( - twpa_frequency=twpa_frequency, - bare_frequency=bare_frequency, - ) - else: - return ResonatorTWPAFrequencyResults( - twpa_frequency=twpa_frequency, - frequency=frequency, - ) - - -def _plot(data: ResonatorTWPAFrequencyData, fit: ResonatorTWPAFrequencyResults, target): - """Plotting for ResonatorTWPAFrequency.""" - - figures = [] - fitting_report = "" - fig = make_subplots( - rows=1, - cols=2, - horizontal_spacing=0.1, - vertical_spacing=0.2, - subplot_titles=( - "Signal [a.u.]", - "Phase [rad]", - ), - ) - - fitting_report = "" - qubit_data = data[target] - resonator_frequencies = qubit_data.freq * HZ_TO_GHZ - twpa_frequencies = qubit_data.twpa_freq * HZ_TO_GHZ - - fig.add_trace( - go.Heatmap( - x=resonator_frequencies, - y=twpa_frequencies, - z=qubit_data.signal, - colorbar_x=0.46, - ), - row=1, - col=1, - ) - fig.update_xaxes(title_text="Frequency [GHz]", row=1, col=1) - fig.update_yaxes(title_text="TWPA Frequency [GHz]", row=1, col=1) - fig.add_trace( - go.Heatmap( - x=resonator_frequencies, - y=twpa_frequencies, - z=qubit_data.phase, - colorbar_x=1.01, - ), - row=1, - col=2, - ) - fig.update_xaxes(title_text="Frequency [GHz]", row=1, col=2) - fig.update_yaxes(title_text="TWPA Frequency [GHz]", row=1, col=2) - - if fit is not None: - label_1 = "TWPA Frequency [Hz]" - twpa_frequency = 
np.round(fit.twpa_frequency[target]) - if target in fit.bare_frequency: - label_2 = "High Power Resonator Frequency [Hz]" - resonator_frequency = np.round(fit.bare_frequency[target]) - else: - label_2 = "Low Power Resonator Frequency [Hz]" - resonator_frequency = np.round(fit.frequency[target]) - - summary = table_dict( - target, - [ - label_2, - label_1, - ], - [ - resonator_frequency, - twpa_frequency, - ], - ) - - fitting_report = table_html(summary) - - fig.update_layout( - showlegend=False, - ) - - figures.append(fig) - - return figures, fitting_report - - -twpa_frequency_snr = Routine(_acquisition, _fit, _plot) -"""Resonator TWPA Frequency Routine object.""" diff --git a/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency_power.py b/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency_power.py deleted file mode 100644 index 86f21a4e0..000000000 --- a/src/qibocal/protocols/readout_optimization/twpa_calibration/frequency_power.py +++ /dev/null @@ -1,226 +0,0 @@ -from dataclasses import dataclass, field - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from qibolab.platform import Platform -from qibolab.qubits import QubitId - -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.protocols import classification -from qibocal.protocols.utils import HZ_TO_GHZ, table_dict, table_html - - -@dataclass -class TwpaFrequencyPowerParameters(Parameters): - """Twpa Frequency Power runcard inputs.""" - - frequency_width: float - """Frequency total width.""" - frequency_step: float - """Frequency step to be probed.""" - power_width: float - """Power total width.""" - power_step: float - """Power step to be probed.""" - - -TwpaFrequencyPowerType = np.dtype( - [ - ("freq", np.float64), - ("power", np.float64), - ("assignment_fidelity", np.float64), - ("angle", np.float64), - ("threshold", np.float64), - ] -) - - -@dataclass -class TwpaFrequencyPowerData(Data): - """Twpa Frequency Power acquisition outputs.""" - - data: dict[ - tuple[QubitId, float, float], npt.NDArray[classification.ClassificationType] - ] = field(default_factory=dict) - """Raw data acquired.""" - frequencies: dict[QubitId, float] = field(default_factory=dict) - """Frequencies for each qubit.""" - powers: dict[QubitId, float] = field(default_factory=dict) - """Powers for each qubit.""" - - -@dataclass -class TwpaFrequencyPowerResults(Results): - """Twpa Frequency Power outputs.""" - - best_freqs: dict[QubitId, float] = field(default_factory=dict) - best_powers: dict[QubitId, float] = field(default_factory=dict) - best_fidelities: dict[QubitId, float] = field(default_factory=dict) - best_angles: dict[QubitId, float] = field(default_factory=dict) - best_thresholds: dict[QubitId, float] = field(default_factory=dict) - - -def _acquisition( - params: TwpaFrequencyPowerParameters, - platform: Platform, - targets: list[QubitId], -) -> TwpaFrequencyPowerData: - r""" - Data acquisition for TWPA frequency vs. power optmization. 
- This protocol perform a classification protocol for twpa frequencies - in the range [twpa_frequency - frequency_width / 2, twpa_frequency + frequency_width / 2] - with step frequency_step and powers in the range [twpa_power - power_width / 2, twpa_power + power_width / 2] - - Args: - params (:class:`TwpaFrequencyPowerParameters`): input parameters - platform (:class:`Platform`): Qibolab's platform - targets (list): list of qubit to be characterized - - Returns: - data (:class:`TwpaFrequencyPowerData`) - """ - - data = TwpaFrequencyPowerData() - - freq_range = np.arange( - -params.frequency_width / 2, params.frequency_width / 2, params.frequency_step - ).astype(int) - power_range = np.arange( - -params.power_width / 2, params.power_width / 2, params.power_step - ) - data = TwpaFrequencyPowerData() - - initial_twpa_freq = {} - initial_twpa_power = {} - for qubit in targets: - initial_twpa_freq[qubit] = platform.qubits[ - qubit - ].twpa.local_oscillator.frequency - initial_twpa_power[qubit] = platform.qubits[qubit].twpa.local_oscillator.power - - for freq in freq_range: - platform.qubits[qubit].twpa.local_oscillator.frequency = ( - initial_twpa_freq[qubit] + freq - ) - - for power in power_range: - platform.qubits[qubit].twpa.local_oscillator.power = ( - initial_twpa_power[qubit] + power - ) - - classification_data = classification._acquisition( - classification.SingleShotClassificationParameters.load( - {"nshots": params.nshots} - ), - platform, - targets, - ) - - classification_result = classification._fit(classification_data) - - data.register_qubit( - TwpaFrequencyPowerType, - (qubit), - dict( - freq=np.array( - [platform.qubits[qubit].twpa.local_oscillator.frequency], - dtype=np.float64, - ), - power=np.array( - [platform.qubits[qubit].twpa.local_oscillator.power], - dtype=np.float64, - ), - assignment_fidelity=np.array( - [classification_result.assignment_fidelity[qubit]], - ), - angle=np.array([classification_result.rotation_angle[qubit]]), - threshold=np.array([classification_result.threshold[qubit]]), - ), - ) - return data - - -def _fit(data: TwpaFrequencyPowerData) -> TwpaFrequencyPowerResults: - """Extract fidelity for each configuration qubit / param. 
- Where param can be either frequency or power.""" - - best_freq = {} - best_power = {} - best_fidelity = {} - best_angle = {} - best_threshold = {} - qubits = data.qubits - - for qubit in qubits: - data_qubit = data[qubit] - index_best_err = np.argmax(data_qubit["assignment_fidelity"]) - best_fidelity[qubit] = data_qubit["assignment_fidelity"][index_best_err] - best_freq[qubit] = data_qubit["freq"][index_best_err] - best_power[qubit] = data_qubit["power"][index_best_err] - best_angle[qubit] = data_qubit["angle"][index_best_err] - best_threshold[qubit] = data_qubit["threshold"][index_best_err] - - return TwpaFrequencyPowerResults( - best_freq, - best_power, - best_fidelity, - best_angles=best_angle, - best_thresholds=best_threshold, - ) - - -def _plot( - data: TwpaFrequencyPowerData, fit: TwpaFrequencyPowerResults, target: QubitId -): - """Plotting function that shows the assignment fidelity - for different values of the twpa frequency for a single qubit""" - - figures = [] - fitting_report = "" - if fit is not None: - qubit_data = data.data[target] - fidelities = qubit_data["assignment_fidelity"] - frequencies = qubit_data["freq"] - powers = qubit_data["power"] - fitting_report = table_html( - table_dict( - target, - ["Best assignment fidelity", "TWPA Frequency [Hz]", "TWPA Power [dBm]"], - [ - np.round(fit.best_fidelities[target], 3), - fit.best_freqs[target], - np.round(fit.best_powers[target], 3), - ], - ) - ) - - fig = go.Figure( - [ - go.Heatmap( - x=frequencies * HZ_TO_GHZ, y=powers, z=fidelities, name="Fidelity" - ) - ] - ) - - fig.update_layout( - showlegend=True, - xaxis_title="TWPA Frequency [GHz]", - yaxis_title="TWPA Power [dBm]", - ) - - figures.append(fig) - - return figures, fitting_report - - -def _update(results: TwpaFrequencyPowerResults, platform: Platform, target: QubitId): - update.twpa_frequency(results.best_freqs[target], platform, target) - update.twpa_power(results.best_powers[target], platform, target) - update.iq_angle(results.best_angles[target], platform, target) - update.threshold(results.best_thresholds[target], platform, target) - - -twpa_frequency_power = Routine(_acquisition, _fit, _plot, _update) -"""Twpa frequency Routine object.""" diff --git a/src/qibocal/protocols/readout_optimization/twpa_calibration/power.py b/src/qibocal/protocols/readout_optimization/twpa_calibration/power.py deleted file mode 100644 index 76f0b3c87..000000000 --- a/src/qibocal/protocols/readout_optimization/twpa_calibration/power.py +++ /dev/null @@ -1,184 +0,0 @@ -from dataclasses import dataclass, field - -import numpy as np -import plotly.graph_objects as go -from qibolab.platform import Platform -from qibolab.qubits import QubitId - -from qibocal import update -from qibocal.auto.operation import Parameters, Results, Routine -from qibocal.protocols import classification -from qibocal.protocols.utils import table_dict, table_html - -from . 
import frequency - - -@dataclass -class TwpaPowerParameters(Parameters): - """TwpaPower runcard inputs.""" - - power_width: float - """Power total width.""" - power_step: float - """Power step to be probed.""" - - -TwpaPowerType = np.dtype( - [ - ("power", np.float64), - ("assignment_fidelity", np.float64), - ("angle", np.float64), - ("threshold", np.float64), - ] -) - - -@dataclass -class TwpaPowerData(frequency.TwpaFrequencyData): - """Data class for twpa power protocol.""" - - powers: dict[QubitId, float] = field(default_factory=dict) - """Frequencies for each qubit.""" - - -@dataclass -class TwpaPowerResults(Results): - """Result class for twpa power protocol.""" - - best_powers: dict[QubitId, float] = field(default_factory=dict) - best_fidelities: dict[QubitId, float] = field(default_factory=dict) - best_angles: dict[QubitId, float] = field(default_factory=dict) - best_thresholds: dict[QubitId, float] = field(default_factory=dict) - - -def _acquisition( - params: TwpaPowerParameters, - platform: Platform, - targets: list[QubitId], -) -> TwpaPowerData: - r""" - Data acquisition for TWPA power optmization. - This protocol perform a classification protocol for twpa powers - in the range [twpa_power - power_width / 2, twpa_power + power_width / 2] - with step power_step. - - Args: - params (:class:`TwpaPowerParameters`): input parameters - platform (:class:`Platform`): Qibolab's platform - targets (list): list of QubitId to be characterized - - Returns: - data (:class:`TwpaFrequencyData`) - """ - - data = TwpaPowerData() - - power_range = np.arange( - -params.power_width / 2, params.power_width / 2, params.power_step - ) - - initial_twpa_power = {} - for qubit in targets: - initial_twpa_power[qubit] = platform.qubits[qubit].twpa.local_oscillator.power - data.powers[qubit] = list( - platform.qubits[qubit].twpa.local_oscillator.power + power_range - ) - - for power in power_range: - for qubit in targets: - platform.qubits[qubit].twpa.local_oscillator.power = ( - initial_twpa_power[qubit] + power - ) - - classification_data = classification._acquisition( - classification.SingleShotClassificationParameters.load( - {"nshots": params.nshots} - ), - platform, - targets, - ) - classification_result = classification._fit(classification_data) - - for qubit in targets: - data.register_qubit( - TwpaPowerType, - (qubit), - dict( - power=np.array( - [float(platform.qubits[qubit].twpa.local_oscillator.power)] - ), - assignment_fidelity=np.array( - [classification_result.assignment_fidelity[qubit]] - ), - angle=np.array([classification_result.rotation_angle[qubit]]), - threshold=np.array([classification_result.threshold[qubit]]), - ), - ) - return data - - -def _fit(data: TwpaPowerData) -> TwpaPowerResults: - """Extract fidelity for each configuration qubit / param. 
- Where param can be either frequency or power.""" - qubits = data.qubits - best_power = {} - best_fidelity = {} - best_angle = {} - best_threshold = {} - for qubit in qubits: - data_qubit = data[qubit] - index_best_err = np.argmax(data_qubit["assignment_fidelity"]) - best_fidelity[qubit] = data_qubit["assignment_fidelity"][index_best_err] - best_power[qubit] = data_qubit["power"][index_best_err] - best_angle[qubit] = data_qubit["angle"][index_best_err] - best_threshold[qubit] = data_qubit["threshold"][index_best_err] - - return TwpaPowerResults( - best_power, - best_fidelity, - best_angles=best_angle, - best_thresholds=best_threshold, - ) - - -def _plot(data: TwpaPowerData, fit: TwpaPowerResults, target: QubitId): - """Plotting function that shows the assignment fidelity - for different values of the twpa power for a single qubit.""" - - figures = [] - fitting_report = "" - - if fit is not None: - qubit_data = data.data[target] - fidelities = qubit_data["assignment_fidelity"] - powers = qubit_data["power"] - fitting_report = table_html( - table_dict( - target, - ["Best assignment fidelity", "TWPA Power [dBm]"], - [ - np.round(fit.best_fidelities[target], 3), - np.round(fit.best_powers[target], 3), - ], - ) - ) - fig = go.Figure([go.Scatter(x=powers, y=fidelities, name="Fidelity")]) - figures.append(fig) - - fig.update_layout( - showlegend=True, - xaxis_title="TWPA Power [dB]", - yaxis_title="Assignment Fidelity", - ) - - return figures, fitting_report - - -def _update(results: TwpaPowerResults, platform: Platform, target: QubitId): - update.twpa_power(results.best_powers[target], platform, target) - update.iq_angle(results.best_angles[target], platform, target) - update.threshold(results.best_thresholds[target], platform, target) - - -twpa_power = Routine(_acquisition, _fit, _plot, _update) -"""Twpa power Routine object.""" diff --git a/src/qibocal/protocols/readout_optimization/twpa_calibration/power_SNR.py b/src/qibocal/protocols/readout_optimization/twpa_calibration/power_SNR.py deleted file mode 100644 index 93b22e0e5..000000000 --- a/src/qibocal/protocols/readout_optimization/twpa_calibration/power_SNR.py +++ /dev/null @@ -1,265 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from plotly.subplots import make_subplots -from qibolab.platform import Platform -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.protocols.resonator_spectroscopy import resonator_spectroscopy -from qibocal.protocols.utils import HZ_TO_GHZ, PowerLevel, table_dict, table_html - - -@dataclass -class ResonatorTWPAPowerParameters(Parameters): - """ResonatorTWPAPower runcard inputs.""" - - freq_width: int - """Width for frequency sweep relative to the readout frequency (Hz).""" - freq_step: int - """Frequency step for sweep (Hz).""" - twpa_pow_width: int - """Width for TPWA power sweep (dBm).""" - twpa_pow_step: int - """TPWA power step (dBm).""" - power_level: PowerLevel - """resonator Power regime (low or high).""" - nshots: Optional[int] = None - """Number of shots.""" - relaxation_time: Optional[int] = None - """Relaxation time (ns).""" - - def __post_init__(self): - self.power_level = PowerLevel(self.power_level) - - -@dataclass -class ResonatorTWPAPowerResults(Results): - """ResonatorTWPAPower outputs.""" - - twpa_power: dict[QubitId, float] = field(default_factory=dict) - """TWPA frequency [GHz] for each qubit.""" - 
frequency: Optional[dict[QubitId, float]] = field(default_factory=dict) - """Readout frequency [GHz] for each qubit.""" - bare_frequency: Optional[dict[QubitId, float]] = field(default_factory=dict) - """Bare frequency [GHz] for each qubit.""" - - -ResonatorTWPAPowerType = np.dtype( - [ - ("freq", np.float64), - ("twpa_pow", np.float64), - ("signal", np.float64), - ("phase", np.float64), - ] -) -"""Custom dtype for Resonator TWPA Power.""" - - -@dataclass -class ResonatorTWPAPowerData(Data): - """ResonatorTWPAPower data acquisition.""" - - resonator_type: str - """Resonator type.""" - data: dict[QubitId, npt.NDArray[ResonatorTWPAPowerType]] = field( - default_factory=dict - ) - """Raw data acquired.""" - power_level: Optional[PowerLevel] = None - """Power regime of the resonator.""" - - @classmethod - def load(cls, path): - obj = super().load(path) - # Instantiate PowerLevel object - if obj.power_level is not None: # pylint: disable=E1101 - obj.power_level = PowerLevel(obj.power_level) # pylint: disable=E1101 - return obj - - -def _acquisition( - params: ResonatorTWPAPowerParameters, - platform: Platform, - targets: list[QubitId], -) -> ResonatorTWPAPowerData: - r""" - Data acquisition for TWPA power optmization using SNR. - This protocol perform a classification protocol for twpa powers - in the range [twpa_power - frequency_width / 2, twpa_power + frequency_width / 2] - with step frequency_step. - - Args: - params (:class:`ResonatorTWPAPowerParameters`): input parameters - platform (:class:`Platform`): Qibolab's platform - qubits (dict): dict of target :class:`Qubit` objects to be characterized - - Returns: - data (:class:`ResonatorTWPAPowerData`) - """ - - data = ResonatorTWPAPowerData( - power_level=params.power_level, - resonator_type=platform.resonator_type, - ) - - TWPAPower_range = np.arange( - -params.twpa_pow_width / 2, params.twpa_pow_width / 2, params.twpa_pow_step - ) - - initial_twpa_pow = {} - for qubit in targets: - initial_twpa_pow[qubit] = float( - platform.qubits[qubit].twpa.local_oscillator.power - ) - - for _pow in TWPAPower_range: - for qubit in targets: - platform.qubits[qubit].twpa.local_oscillator.power = ( - initial_twpa_pow[qubit] + _pow - ) - - resonator_spectroscopy_data, _ = resonator_spectroscopy.acquisition( - resonator_spectroscopy.parameters_type.load( - { - "freq_width": params.freq_width, - "freq_step": params.freq_step, - "power_level": params.power_level, - "relaxation_time": params.relaxation_time, - "nshots": params.nshots, - } - ), - platform, - targets, - ) - - for qubit in targets: - data.register_qubit( - ResonatorTWPAPowerType, - (qubit), - dict( - signal=resonator_spectroscopy_data.data[qubit].signal, - phase=resonator_spectroscopy_data.data[qubit].phase, - freq=resonator_spectroscopy_data.data[qubit].freq, - twpa_pow=_pow + initial_twpa_pow[qubit], - ), - ) - - return data - - -def _fit(data: ResonatorTWPAPowerData, fit_type="att") -> ResonatorTWPAPowerResults: - """Post-processing function for ResonatorTWPASpectroscopy.""" - qubits = data.qubits - bare_frequency = {} - frequency = {} - twpa_power = {} - for qubit in qubits: - data_qubit = data[qubit] - if data.resonator_type == "3D": - index_best_pow = np.argmax(data_qubit["signal"]) - else: - index_best_pow = np.argmin(data_qubit["signal"]) - twpa_power[qubit] = data_qubit["twpa_pow"][index_best_pow] - - if data.power_level is PowerLevel.high: - bare_frequency[qubit] = data_qubit["freq"][index_best_pow] - else: - frequency[qubit] = data_qubit["freq"][index_best_pow] - - if data.power_level 
is PowerLevel.high: - return ResonatorTWPAPowerResults( - twpa_power=twpa_power, - bare_frequency=bare_frequency, - ) - else: - return ResonatorTWPAPowerResults( - twpa_power=twpa_power, - frequency=frequency, - ) - - -def _plot(data: ResonatorTWPAPowerData, fit: ResonatorTWPAPowerResults, target): - """Plotting for ResonatorTWPAPower.""" - - figures = [] - fitting_report = "" - fig = make_subplots( - rows=1, - cols=2, - horizontal_spacing=0.1, - vertical_spacing=0.2, - subplot_titles=( - "Signal [a.u.]", - "Phase [rad]", - ), - ) - - fitting_report = "" - qubit_data = data[target] - frequencies = qubit_data.freq * HZ_TO_GHZ - powers = qubit_data.twpa_pow - - fig.add_trace( - go.Heatmap( - x=frequencies, - y=powers, - z=qubit_data.signal, - colorbar_x=0.46, - ), - row=1, - col=1, - ) - fig.update_xaxes(title_text="Frequency [GHz]", row=1, col=1) - fig.update_yaxes(title_text="TWPA Power [dBm]", row=1, col=1) - fig.add_trace( - go.Heatmap( - x=frequencies, - y=powers, - z=qubit_data.phase, - colorbar_x=1.01, - ), - row=1, - col=2, - ) - fig.update_xaxes(title_text="Frequency [GHz]", row=1, col=2) - fig.update_yaxes(title_text="TWPA Power [dBm]", row=1, col=2) - - if fit is not None: - label_1 = "TWPA Power" - twpa_power = np.round(fit.twpa_power[target]) - if target in fit.bare_frequency: - label_2 = "High Power Resonator Frequency [Hz]" - resonator_frequency = np.round(fit.bare_frequency[target]) - else: - label_2 = "Low Power Resonator Frequency [Hz]" - resonator_frequency = np.round(fit.frequency[target]) - - summary = table_dict( - target, - [ - label_2, - label_1, - ], - [ - resonator_frequency, - twpa_power, - ], - ) - - fitting_report = table_html(summary) - - fig.update_layout( - showlegend=False, - ) - - figures.append(fig) - - return figures, fitting_report - - -twpa_power_snr = Routine(_acquisition, _fit, _plot) -"""Resonator TWPA Power Routine object.""" diff --git a/src/qibocal/protocols/resonator_punchout.py b/src/qibocal/protocols/resonator_punchout.py index 3580ee954..771913a0f 100644 --- a/src/qibocal/protocols/resonator_punchout.py +++ b/src/qibocal/protocols/resonator_punchout.py @@ -5,14 +5,12 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, PulseSequence, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import magnitude, phase from .utils import HZ_TO_GHZ, fit_punchout, norm, table_dict, table_html @@ -25,11 +23,11 @@ class ResonatorPunchoutParameters(Parameters): """Width for frequency sweep relative to the readout frequency [Hz].""" freq_step: int """Frequency step for sweep [Hz].""" - min_amp_factor: float + min_amp: float """Minimum amplitude multiplicative factor.""" - max_amp_factor: float + max_amp: float """Maximum amplitude multiplicative factor.""" - step_amp_factor: float + step_amp: float """Step amplitude multiplicative factor.""" amplitude: float = None """Initial readout amplitude.""" @@ -66,7 +64,7 @@ class ResonatorPunchoutData(Data): resonator_type: str """Resonator type.""" - 
amplitudes: dict[QubitId, float] + amplitudes: dict[QubitId, float] = field(default_factory=dict) """Amplitudes provided by the user.""" data: dict[QubitId, npt.NDArray[ResPunchoutType]] = field(default_factory=dict) """Raw data acquired.""" @@ -85,47 +83,44 @@ def register_qubit(self, qubit, freq, amp, signal, phase): def _acquisition( params: ResonatorPunchoutParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> ResonatorPunchoutData: """Data acquisition for Punchout over amplitude.""" # create a sequence of pulses for the experiment: # MZ - # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel - sequence = PulseSequence() - - ro_pulses = {} - amplitudes = {} - for qubit in targets: - ro_pulses[qubit] = platform.create_qubit_readout_pulse(qubit, start=0) - if params.amplitude is not None: - ro_pulses[qubit].amplitude = params.amplitude - - amplitudes[qubit] = ro_pulses[qubit].amplitude - sequence.add(ro_pulses[qubit]) - # define the parameters to sweep and their range: # resonator frequency delta_frequency_range = np.arange( -params.freq_width / 2, params.freq_width / 2, params.freq_step ) - freq_sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - [ro_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) - # amplitude - amplitude_range = np.arange( - params.min_amp_factor, params.max_amp_factor, params.step_amp_factor - ) + # taking advantage of multiplexing, apply the same set of gates to all qubits in parallel + ro_pulses = {} + amplitudes = {} + freq_sweepers = {} + sequence = PulseSequence() + for qubit in targets: + natives = platform.natives.single_qubit[qubit] + ro_channel, ro_pulse = natives.MZ()[0] + + ro_pulses[qubit] = ro_pulse + amplitudes[qubit] = ro_pulse.probe.amplitude + sequence.append((ro_channel, ro_pulse)) + + probe = platform.qubits[qubit].probe + f0 = platform.config(probe).frequency + freq_sweepers[qubit] = Sweeper( + parameter=Parameter.frequency, + values=f0 + delta_frequency_range, + channels=[probe], + ) + amp_sweeper = Sweeper( - Parameter.amplitude, - amplitude_range, - [ro_pulses[qubit] for qubit in targets], - type=SweeperType.FACTOR, + parameter=Parameter.amplitude, + range=(params.min_amp, params.max_amp, params.step_amp), + pulses=[ro_pulses[qubit] for qubit in targets], ) data = ResonatorPunchoutData( @@ -133,16 +128,13 @@ def _acquisition( resonator_type=platform.resonator_type, ) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - amp_sweeper, - freq_sweeper, + results = platform.execute( + [sequence], + [[amp_sweeper], [freq_sweepers[q] for q in targets]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, ) # retrieve the results for every qubit @@ -159,7 +151,7 @@ def _acquisition( signal=result.magnitude, phase=phase, freq=delta_frequency_range + ro_pulse.frequency, - amp=amplitude_range * amplitudes[qubit], + amp=amp_sweeper.values, ) return data @@ -267,9 +259,14 @@ def _plot( return figures, fitting_report -def _update(results: ResonatorPunchoutResults, platform: Platform, target: QubitId): +def _update( + results: ResonatorPunchoutResults, platform: CalibrationPlatform, target: QubitId +): update.readout_frequency(results.readout_frequency[target], platform, target) 
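    # Descriptive note (added): the punchout fit exposes the dressed (readout) and
    # bare resonator frequencies plus a suggested readout amplitude; the calls below
    # write all of them back to the platform calibration, with the readout frequency
    # also stored as the dressed resonator frequency.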
update.bare_resonator_frequency(results.bare_frequency[target], platform, target) + update.dressed_resonator_frequency( + results.readout_frequency[target], platform, target + ) update.readout_amplitude(results.readout_amplitude[target], platform, target) diff --git a/src/qibocal/protocols/resonator_spectroscopy.py b/src/qibocal/protocols/resonator_spectroscopy.py index 56124ac45..752d2c8aa 100644 --- a/src/qibocal/protocols/resonator_spectroscopy.py +++ b/src/qibocal/protocols/resonator_spectroscopy.py @@ -5,13 +5,13 @@ import numpy as np import numpy.typing as npt from _collections_abc import Callable -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, PulseSequence, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import magnitude, phase +from qibocal.update import replace from .utils import ( PowerLevel, @@ -95,9 +95,6 @@ class ResonatorSpectroscopyParameters(Parameters): amplitude: Optional[float] = None """Readout amplitude (optional). If defined, same amplitude will be used in all qubits. Otherwise the default amplitude defined on the platform runcard will be used""" - attenuation: Optional[int] = None - """Readout attenuation (optional). If defined, same attenuation will be used in all qubits. - Otherwise the default attenuation defined on the platform runcard will be used""" hardware_average: bool = True """By default hardware average will be performed.""" phase_delay: float = None @@ -131,10 +128,6 @@ class ResonatorSpectroscopyResults(Results): default_factory=dict, ) """Readout amplitude for each qubit.""" - attenuation: Optional[dict[QubitId, int]] = field( - default_factory=dict, - ) - """Readout attenuation [dB] for each qubit.""" def __contains__(self, key: QubitId): return all( @@ -162,8 +155,6 @@ class ResonatorSpectroscopyData(Data): """Raw data acquired.""" power_level: Optional[PowerLevel] = None """Power regime of the resonator.""" - attenuations: Optional[dict[QubitId, int]] = field(default_factory=dict) - """Readout attenuation [dB] for each qubit""" @classmethod def load(cls, path): @@ -175,7 +166,9 @@ def load(cls, path): def _acquisition( - params: ResonatorSpectroscopyParameters, platform: Platform, targets: list[QubitId] + params: ResonatorSpectroscopyParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> ResonatorSpectroscopyData: """Data acquisition for resonator spectroscopy.""" # create a sequence of pulses for the experiment: @@ -185,73 +178,67 @@ def _acquisition( sequence = PulseSequence() ro_pulses = {} amplitudes = {} - attenuations = {} - for qubit in targets: - ro_pulses[qubit] = platform.create_qubit_readout_pulse(qubit, start=0) + for q in targets: + natives = platform.natives.single_qubit[q] + channel, pulse = natives.MZ()[0] if params.amplitude is not None: - ro_pulses[qubit].amplitude = params.amplitude - - amplitudes[qubit] = ro_pulses[qubit].amplitude + probe = replace(pulse.probe, amplitude=params.amplitude) + pulse = replace(pulse, probe=probe) - if params.attenuation is not None: - platform.qubits[qubit].readout.attenuation = params.attenuation + amplitudes[q] = pulse.probe.amplitude - try: - attenuation = 
platform.qubits[qubit].readout.attenuation - except AttributeError: - attenuation = None - - attenuations[qubit] = attenuation - sequence.add(ro_pulses[qubit]) + ro_pulses[q] = pulse + sequence.append((channel, pulse)) # define the parameter to sweep and its range: delta_frequency_range = np.arange( -params.freq_width / 2, params.freq_width / 2, params.freq_step ) - sweeper = Sweeper( - Parameter.frequency, - delta_frequency_range, - pulses=[ro_pulses[qubit] for qubit in targets], - type=SweeperType.OFFSET, - ) + sweepers = [ + Sweeper( + parameter=Parameter.frequency, + values=platform.config(platform.qubits[q].probe).frequency + + delta_frequency_range, + channels=[platform.qubits[q].probe], + ) + for q in targets + ] + data = ResonatorSpectroscopyData( resonator_type=platform.resonator_type, power_level=params.power_level, amplitudes=amplitudes, - attenuations=attenuations, fit_function=params.fit_function, phase_sign=params.phase_sign, ) - results = platform.sweep( - sequence, - params.execution_parameters, - sweeper, + results = platform.execute( + [sequence], + [sweepers], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.SINGLESHOT, ) # retrieve the results for every qubit - for qubit in targets: - result = results[ro_pulses[qubit].serial] + for q in targets: + result = results[ro_pulses[q].id] # store the results - frequency =delta_frequency_range + ro_pulses[qubit].frequency - - if params.phase_delay is not None: - phase = result.average.phase - phase = np.unwrap(phase)-(frequency-frequency[0])*1e-6*params.phase_delay - else: - phase = result.average.phase - + ro_frequency = platform.config(platform.qubits[q].probe).frequency + signal = magnitude(result) + phase_ = phase(result) data.register_qubit( ResSpecType, - (qubit), + (q), dict( - signal=result.average.magnitude, - phase=phase, - freq=delta_frequency_range + ro_pulses[qubit].frequency, - error_signal=result.average.std, - error_phase=result.phase_std, + signal=signal.mean(axis=0), + phase=phase_.mean(axis=0), + freq=delta_frequency_range + ro_frequency, + error_signal=np.std(signal, axis=0, ddof=1) / np.sqrt(signal.shape[0]), + error_phase=np.std(phase_, axis=0, ddof=1) / np.sqrt(phase_.shape[0]), ), ) return data @@ -313,7 +300,6 @@ def _fit( error_fit_pars=error_fit_pars, chi2_reduced=chi2, amplitude=data.amplitudes, - attenuation=data.attenuations, ) return ResonatorSpectroscopyResults( frequency=frequency, @@ -321,7 +307,6 @@ def _fit( error_fit_pars=error_fit_pars, chi2_reduced=chi2, amplitude=data.amplitudes, - attenuation=data.attenuations, ) @@ -332,15 +317,14 @@ def _plot( return FITS[data.fit_function].plot(data, target, fit) -def _update(results: ResonatorSpectroscopyResults, platform: Platform, target: QubitId): +def _update( + results: ResonatorSpectroscopyResults, + platform: CalibrationPlatform, + target: QubitId, +): update.readout_frequency(results.frequency[target], platform, target) - - # if this condition is satifisfied means that we are in the low power regime - # therefore we update also the readout amplitude if len(results.bare_frequency) == 0: update.readout_amplitude(results.amplitude[target], platform, target) - if results.attenuation[target] is not None: - update.readout_attenuation(results.attenuation[target], platform, target) else: update.bare_resonator_frequency( results.bare_frequency[target], platform, target diff --git a/src/qibocal/protocols/signal_experiments/calibrate_state_discrimination.py 
b/src/qibocal/protocols/signal_experiments/calibrate_state_discrimination.py index 89c02242d..28dba9c42 100644 --- a/src/qibocal/protocols/signal_experiments/calibrate_state_discrimination.py +++ b/src/qibocal/protocols/signal_experiments/calibrate_state_discrimination.py @@ -5,13 +5,11 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, AveragingMode, PulseSequence from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform SAMPLES_FACTOR = 16 @@ -25,6 +23,7 @@ class CalibrateStateDiscriminationParameters(Parameters): """Number of shots.""" relaxation_time: Optional[int] = None """Relaxation time (ns).""" + unrolling: Optional[bool] = False CalibrateStateDiscriminationResType = np.dtype( @@ -49,8 +48,6 @@ class CalibrateStateDiscriminationResults(Results): [ ("i", np.float64), ("q", np.float64), - ("signal", np.float64), - ("phase", np.float64), ] ) """Custom dtype for CalibrateStateDiscrimination.""" @@ -69,7 +66,7 @@ class CalibrateStateDiscriminationData(Data): def _acquisition( params: CalibrateStateDiscriminationParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitId], ) -> CalibrateStateDiscriminationData: r""" @@ -79,67 +76,58 @@ def _acquisition( Args: params (CalibrateStateDiscriminationParameters): experiment's parameters - platform (Platform): Qibolab platform object + platform (CalibrationPlatform): Qibolab platform object qubits (dict): list of target qubits to perform the action """ - # create 2 sequences of pulses for the experiment: - # sequence_0: I - MZ - # sequence_1: RX - MZ - - data = CalibrateStateDiscriminationData(resonator_type=platform.resonator_type) - - # TODO: test if qibolab supports multiplex with raw acquisition - for qubit in targets: - sequence_0 = PulseSequence() - sequence_1 = PulseSequence() - sequence_1.add(platform.create_RX_pulse(qubit, start=0)) - - sequence_0.add( - platform.create_qubit_readout_pulse(qubit, start=sequence_0.finish) - ) - sequence_1.add( - platform.create_qubit_readout_pulse(qubit, start=sequence_1.finish) - ) + native = platform.natives.single_qubit + sequences, all_ro_pulses = [], [] + for state in [0, 1]: + ro_pulses = {} + sequence = PulseSequence() + for q in targets: + ro_sequence = native[q].MZ() + ro_pulses[q] = ro_sequence[0][1].id + sequence += ro_sequence + + if state == 1: + rx_sequence = PulseSequence() + for q in targets: + rx_sequence += native[q].RX() + sequence = rx_sequence | sequence + + sequences.append(sequence) + all_ro_pulses.append(ro_pulses) + + options = dict( + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.RAW, + averaging_mode=AveragingMode.CYCLIC, + ) - results_0 = platform.execute_pulse_sequence( - sequence_0, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.RAW, - averaging_mode=AveragingMode.CYCLIC, - ), - ) + if params.unrolling: + results = platform.execute(sequences, **options) + else: + results = {} + for sequence in sequences: + results.update(platform.execute([sequence], **options)) - 
results_1 = platform.execute_pulse_sequence( - sequence_1, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.RAW, - averaging_mode=AveragingMode.CYCLIC, - ), - ) + data = CalibrateStateDiscriminationData(resonator_type=platform.resonator_type) - for i, experiment in enumerate( - zip([sequence_0, sequence_1], [results_0, results_1]) - ): - sequence, results = experiment - result = results[sequence.ro_pulses[0].serial] - # store the results + for state, ro_pulses in zip([0, 1], all_ro_pulses): + for qubit in targets: + serial = ro_pulses[qubit] + result = results[serial] data.register_qubit( CalibrateStateDiscriminationType, - (qubit, i), + (qubit, state), dict( - signal=result.magnitude, - phase=result.phase, - i=result.voltage_i, - q=result.voltage_q, + i=result[..., 0], + q=result[..., 1], ), ) - return data @@ -200,21 +188,48 @@ def _plot( fig.add_trace( go.Scatter( - x=np.arange(len(fit.data[target])), - y=np.abs(fit.data[target]), + x=data[target, 0].i, + y=data[target, 0].q, opacity=1, - name="kernel state 0", + name="State 0", showlegend=True, - legendgroup="kernel state 0", + legendgroup="State 0", ), row=1, col=1, ) + fig.add_trace( + go.Scatter( + x=data[target, 1].i, + y=data[target, 1].q, + opacity=1, + name="State 1", + showlegend=True, + legendgroup="State 1", + ), + row=1, + col=1, + ) + + # TODO: check which plot we prefer + # fig.add_trace( + # go.Scatter( + # x=np.arange(len(fit.data[target])), + # y=np.abs(fit.data[target]), + # opacity=1, + # name="kernel state 0", + # showlegend=True, + # legendgroup="kernel state 0", + # ), + # row=1, + # col=1, + # ) + fig.update_layout( showlegend=True, - xaxis_title="Kernel samples", - yaxis_title="Kernel absolute value", + xaxis_title="I", + yaxis_title="Q", ) figures.append(fig) @@ -223,7 +238,9 @@ def _plot( def _update( - results: CalibrateStateDiscriminationResults, platform: Platform, qubit: QubitId + results: CalibrateStateDiscriminationResults, + platform: CalibrationPlatform, + qubit: QubitId, ): update.kernel(results.data[qubit], platform, qubit) diff --git a/src/qibocal/protocols/signal_experiments/time_of_flight_readout.py b/src/qibocal/protocols/signal_experiments/time_of_flight_readout.py index 94356618a..351b5c103 100644 --- a/src/qibocal/protocols/signal_experiments/time_of_flight_readout.py +++ b/src/qibocal/protocols/signal_experiments/time_of_flight_readout.py @@ -4,13 +4,13 @@ import numpy as np import numpy.typing as npt import plotly.graph_objects as go -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitId +from qibolab import AcquisitionType, AveragingMode, PulseSequence -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.protocols.utils import S_TO_NS, table_dict, table_html +from qibocal.auto.operation import Data, Parameters, QubitId, Results, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.protocols.utils import table_dict, table_html +from qibocal.result import magnitude +from qibocal.update import replace @dataclass @@ -46,27 +46,29 @@ class TimeOfFlightReadoutData(Data): def _acquisition( - params: TimeOfFlightReadoutParameters, platform: Platform, targets: list[QubitId] + params: TimeOfFlightReadoutParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> TimeOfFlightReadoutData: """Data acquisition for time of flight 
experiment.""" sequence = PulseSequence() ro_pulses = {} + native = platform.natives.single_qubit for qubit in targets: - ro_pulses[qubit] = platform.create_qubit_readout_pulse(qubit, start=0) + ro_channel, ro_pulse = native[qubit].MZ()[0] if params.readout_amplitude is not None: - ro_pulses[qubit].amplitude = params.readout_amplitude - sequence.add(ro_pulses[qubit]) - - results = platform.execute_pulse_sequence( - sequence, - options=ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.RAW, - averaging_mode=AveragingMode.CYCLIC, - ), + ro_pulse = replace(ro_pulse, amplitude=params.readout_amplitude) + ro_pulses[qubit] = ro_pulse + sequence.append((ro_channel, ro_pulse)) + + results = platform.execute( + [sequence], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.RAW, + averaging_mode=AveragingMode.CYCLIC, ) data = TimeOfFlightReadoutData( @@ -75,7 +77,7 @@ def _acquisition( # retrieve and store the results for every qubit for qubit in targets: - samples = results[ro_pulses[qubit].serial].magnitude + samples = magnitude(results[ro_pulses[qubit].id]) # store the results data.register_qubit(TimeOfFlightReadoutType, (qubit), dict(samples=samples)) return data @@ -141,9 +143,7 @@ def _plot( line_color="grey", ) fitting_report = table_html( - table_dict( - target, "Time of flights [ns]", fit.fitted_parameters[target] * S_TO_NS - ) + table_dict(target, "Time of flights [ns]", fit.fitted_parameters[target]) ) fig.update_layout( showlegend=True, diff --git a/src/qibocal/protocols/state_tomography.py b/src/qibocal/protocols/state_tomography.py index 7724a2c08..ab48d4d69 100644 --- a/src/qibocal/protocols/state_tomography.py +++ b/src/qibocal/protocols/state_tomography.py @@ -10,11 +10,10 @@ from qibo import Circuit, gates from qibo.backends import NumpyBackend, construct_backend, matrices from qibo.quantum_info import fidelity, partial_trace -from qibolab.platform import Platform -from qibolab.qubits import QubitId -from qibocal.auto.operation import DATAFILE, Data, Parameters, Results, Routine +from qibocal.auto.operation import DATAFILE, Data, Parameters, QubitId, Results, Routine from qibocal.auto.transpile import dummy_transpiler, execute_transpiled_circuit +from qibocal.calibration import CalibrationPlatform from .utils import table_dict, table_html @@ -96,7 +95,9 @@ class StateTomographyResults(Results): def _acquisition( - params: StateTomographyParameters, platform: Platform, targets: list[QubitId] + params: StateTomographyParameters, + platform: CalibrationPlatform, + targets: list[QubitId], ) -> StateTomographyData: """Acquisition protocol for single qubit state tomography experiment.""" if params.circuit is None: diff --git a/src/qibocal/protocols/two_qubit_interaction/__init__.py b/src/qibocal/protocols/two_qubit_interaction/__init__.py index 37870c41d..723b4d067 100644 --- a/src/qibocal/protocols/two_qubit_interaction/__init__.py +++ b/src/qibocal/protocols/two_qubit_interaction/__init__.py @@ -14,4 +14,3 @@ from .mermin import mermin from .optimize import optimize_two_qubit_gate from .virtual_z_phases import correct_virtual_z_phases -from .virtual_z_phases_signal import correct_virtual_z_phases_signal diff --git a/src/qibocal/protocols/two_qubit_interaction/chevron/chevron.py b/src/qibocal/protocols/two_qubit_interaction/chevron/chevron.py index 59aa00967..bc305b631 100644 --- a/src/qibocal/protocols/two_qubit_interaction/chevron/chevron.py +++ 
b/src/qibocal/protocols/two_qubit_interaction/chevron/chevron.py @@ -1,23 +1,21 @@ """SWAP experiment for two qubit gates, chevron plot.""" from dataclasses import dataclass, field -from typing import Optional +from typing import Literal, Optional import numpy as np import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitPairId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper from scipy.optimize import curve_fit -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import Data, Parameters, QubitPairId, Results, Routine +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import table_dict, table_html +from .... import update from ..utils import fit_flux_amplitude, order_pair from .utils import COLORAXIS, chevron_fit, chevron_sequence @@ -26,11 +24,11 @@ class ChevronParameters(Parameters): """CzFluxTime runcard inputs.""" - amplitude_min_factor: float + amplitude_min: float """Amplitude minimum.""" - amplitude_max_factor: float + amplitude_max: float """Amplitude maximum.""" - amplitude_step_factor: float + amplitude_step: float """Amplitude step.""" duration_min: float """Duration minimum.""" @@ -42,28 +40,8 @@ class ChevronParameters(Parameters): """Time delay between flux pulses and readout.""" parking: bool = True """Wether to park non interacting qubits or not.""" - native: str = "CZ" - """Two qubit interaction to be calibrated. - - iSWAP and CZ are the possible options. - - """ - - @property - def amplitude_range(self): - return np.arange( - self.amplitude_min_factor, - self.amplitude_max_factor, - self.amplitude_step_factor, - ) - - @property - def duration_range(self): - return np.arange( - self.duration_min, - self.duration_max, - self.duration_step, - ) + native: Literal["CZ", "iSWAP"] = "CZ" + """Two qubit interaction to be calibrated.""" @dataclass @@ -74,12 +52,8 @@ class ChevronResults(Results): """CZ angle.""" duration: dict[QubitPairId, int] """Virtual Z phase correction.""" - native: str = "CZ" - """Two qubit interaction to be calibrated. - - iSWAP and CZ are the possible options. - - """ + native: Literal["CZ", "iSWAP"] = "CZ" + """Two qubit interaction to be calibrated.""" ChevronType = np.dtype( @@ -105,8 +79,6 @@ class ChevronData(Data): iSWAP and CZ are the possible options. """ - sweetspot: dict[QubitPairId, float] = field(default_factory=dict) - """Sweetspot value for high frequency qubit.""" data: dict[QubitPairId, npt.NDArray[ChevronType]] = field(default_factory=dict) label: Optional[str] = None @@ -140,14 +112,14 @@ def high_frequency(self, pair): def _aquisition( params: ChevronParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitPairId], ) -> ChevronData: r"""Perform an CZ experiment between pairs of qubits by changing its frequency. Args: - platform: Platform to use. + platform: CalibrationPlatform to use. params: Experiment parameters. targets (list): List of pairs to use sequentially. 
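A minimal sketch of the sweep construction used by the refactored acquisition in the hunks below, with the qibolab 0.2 API (assumed names: `params` is a `ChevronParameters` instance and `flux_pulse` is the flux pulse returned by `chevron_sequence`). Amplitudes are now absolute values rather than factors of the platform amplitude, and the grid comes from the `range` argument instead of the removed `amplitude_range` property:

```python
from qibolab import Parameter, Sweeper

# Absolute-amplitude sweep: the (start, stop, step) range replaces the old
# SweeperType.FACTOR scaling of the platform amplitude.
sweeper_amplitude = Sweeper(
    parameter=Parameter.amplitude,
    range=(params.amplitude_min, params.amplitude_max, params.amplitude_step),
    pulses=[flux_pulse],
)

# The swept grid is exposed by the sweeper itself (this is what gets stored when
# registering data), playing the role of the removed amplitude_range property.
amplitudes = sweeper_amplitude.values
```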
@@ -159,51 +131,51 @@ def _aquisition( data = ChevronData(native=params.native) for pair in targets: # order the qubits so that the low frequency one is the first - sequence = chevron_sequence( + ordered_pair = order_pair(pair, platform) + sequence, flux_pulse, parking_pulses, delays = chevron_sequence( platform=platform, - pair=pair, + ordered_pair=ordered_pair, duration_max=params.duration_max, parking=params.parking, dt=params.dt, native=params.native, ) - ordered_pair = order_pair(pair, platform) - # TODO: move in function to avoid code duplications sweeper_amplitude = Sweeper( - Parameter.amplitude, - params.amplitude_range, - pulses=[sequence.get_qubit_pulses(ordered_pair[1]).qf_pulses[0]], - type=SweeperType.FACTOR, - ) - data.native_amplitude[ordered_pair] = ( - sequence.get_qubit_pulses(ordered_pair[1]).qf_pulses[0].amplitude + parameter=Parameter.amplitude, + range=(params.amplitude_min, params.amplitude_max, params.amplitude_step), + pulses=[flux_pulse], ) - data.sweetspot[ordered_pair] = platform.qubits[ordered_pair[1]].sweetspot sweeper_duration = Sweeper( - Parameter.duration, - params.duration_range, - pulses=[sequence.get_qubit_pulses(ordered_pair[1]).qf_pulses[0]], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + range=(params.duration_min, params.duration_max, params.duration_step), + pulses=[flux_pulse] + delays + parking_pulses, ) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper_duration, - sweeper_amplitude, + ro_high = list(sequence.channel(platform.qubits[ordered_pair[1]].acquisition))[ + -1 + ] + ro_low = list(sequence.channel(platform.qubits[ordered_pair[0]].acquisition))[ + -1 + ] + + data.native_amplitude[ordered_pair] = flux_pulse.amplitude + + results = platform.execute( + [sequence], + [[sweeper_duration], [sweeper_amplitude]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.CYCLIC, ) + data.register_qubit( ordered_pair[0], ordered_pair[1], - params.duration_range, - params.amplitude_range * data.native_amplitude[ordered_pair], - results[ordered_pair[0]].probability(state=1), - results[ordered_pair[1]].probability(state=1), + sweeper_duration.values, + sweeper_amplitude.values, + results[ro_low.id], + results[ro_high.id], ) return data @@ -323,11 +295,10 @@ def _plot(data: ChevronData, fit: ChevronResults, target: QubitPairId): fitting_report = table_html( table_dict( target[1], - [f"{fit.native} amplitude", f"{fit.native} duration", "Bias point"], + [f"{fit.native} amplitude", f"{fit.native} duration"], [ fit.amplitude[target], fit.duration[target], - fit.amplitude[target] + data.sweetspot[target], ], ) ) @@ -335,12 +306,13 @@ def _plot(data: ChevronData, fit: ChevronResults, target: QubitPairId): return [fig], fitting_report -def _update(results: ChevronResults, platform: Platform, target: QubitPairId): +def _update( + results: ChevronResults, platform: CalibrationPlatform, target: QubitPairId +): if isinstance(target, list): target = tuple(target) - if target not in results.duration: - target = (target[1], target[0]) + target = target[::-1] if target not in results.duration else target getattr(update, f"{results.native}_duration")( results.duration[target], platform, target diff --git a/src/qibocal/protocols/two_qubit_interaction/chevron/chevron_signal.py 
b/src/qibocal/protocols/two_qubit_interaction/chevron/chevron_signal.py index ccd21bdc1..003231460 100644 --- a/src/qibocal/protocols/two_qubit_interaction/chevron/chevron_signal.py +++ b/src/qibocal/protocols/two_qubit_interaction/chevron/chevron_signal.py @@ -4,12 +4,11 @@ import numpy as np import numpy.typing as npt -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitPairId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper -from qibocal.auto.operation import Routine +from qibocal.auto.operation import QubitPairId, Routine +from qibocal.calibration import CalibrationPlatform +from qibocal.result import magnitude from ..utils import order_pair from .chevron import ( @@ -74,7 +73,7 @@ def high_frequency(self, pair): def _aquisition( params: ChevronSignalParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitPairId], ) -> ChevronSignalData: r""" @@ -82,7 +81,7 @@ def _aquisition( Args: params: Experiment parameters. - platform: Platform to use. + platform: CalibrationPlatform to use. targets (list): List of pairs to use sequentially. Returns: @@ -94,50 +93,50 @@ def _aquisition( for pair in targets: # order the qubits so that the low frequency one is the first ordered_pair = order_pair(pair, platform) - sequence = chevron_sequence( + sequence, flux_pulse, parking_pulses, delays = chevron_sequence( platform=platform, - pair=pair, + ordered_pair=ordered_pair, duration_max=params.duration_max, parking=params.parking, dt=params.dt, native=params.native, ) - data.native_amplitude[ordered_pair] = ( - sequence.get_qubit_pulses(ordered_pair[1]).qf_pulses[0].amplitude - ) - data.sweetspot[ordered_pair] = platform.qubits[ordered_pair[1]].sweetspot - sweeper_amplitude = Sweeper( - Parameter.amplitude, - params.amplitude_range, - pulses=[sequence.get_qubit_pulses(ordered_pair[1]).qf_pulses[0]], - type=SweeperType.FACTOR, + parameter=Parameter.amplitude, + range=(params.amplitude_min, params.amplitude_max, params.amplitude_step), + pulses=[flux_pulse], ) sweeper_duration = Sweeper( - Parameter.duration, - params.duration_range, - pulses=[sequence.get_qubit_pulses(ordered_pair[1]).qf_pulses[0]], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + range=(params.duration_min, params.duration_max, params.duration_step), + pulses=[flux_pulse] + delays + parking_pulses, ) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper_duration, - sweeper_amplitude, + + ro_high = list(sequence.channel(platform.qubits[ordered_pair[1]].acquisition))[ + -1 + ] + ro_low = list(sequence.channel(platform.qubits[ordered_pair[0]].acquisition))[ + -1 + ] + + data.native_amplitude[ordered_pair] = flux_pulse.amplitude + + results = platform.execute( + [sequence], + [[sweeper_duration], [sweeper_amplitude]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.INTEGRATION, + averaging_mode=AveragingMode.CYCLIC, ) data.register_qubit( ordered_pair[0], ordered_pair[1], - params.duration_range, - params.amplitude_range * data.native_amplitude[ordered_pair], - results[ordered_pair[0]].magnitude, - results[ordered_pair[1]].magnitude, + sweeper_duration.values, + sweeper_amplitude.values, + 
magnitude(results[ro_low.id]), + magnitude(results[ro_high.id]), ) return data diff --git a/src/qibocal/protocols/two_qubit_interaction/chevron/utils.py b/src/qibocal/protocols/two_qubit_interaction/chevron/utils.py index 40ea08213..973a64a1a 100644 --- a/src/qibocal/protocols/two_qubit_interaction/chevron/utils.py +++ b/src/qibocal/protocols/two_qubit_interaction/chevron/utils.py @@ -1,9 +1,11 @@ +from typing import Optional + import numpy as np -from qibolab.platform import Platform -from qibolab.pulses import PulseSequence -from qibolab.qubits import QubitPairId +from qibolab import Delay, PulseSequence, VirtualZ -from ..utils import order_pair +from qibocal.auto.operation import QubitPairId +from qibocal.calibration import CalibrationPlatform +from qibocal.update import replace COLORAXIS = ["coloraxis2", "coloraxis1"] @@ -14,68 +16,75 @@ def chevron_sequence( - platform: Platform, - pair: QubitPairId, - duration_max: int, + platform: CalibrationPlatform, + ordered_pair: QubitPairId, + duration_max: Optional[int] = None, parking: bool = False, native: str = "CZ", dt: int = 0, ): """Chevron pulse sequence.""" - sequence = PulseSequence() - ordered_pair = order_pair(pair, platform) - # initialize in system in 11 state - + low_natives = platform.natives.single_qubit[ordered_pair[0]] + high_natives = platform.natives.single_qubit[ordered_pair[1]] if native == "CZ": - initialize_lowfreq = platform.create_RX_pulse( - ordered_pair[0], start=0, relative_phase=0 - ) - sequence.add(initialize_lowfreq) - - initialize_highfreq = platform.create_RX_pulse( - ordered_pair[1], start=0, relative_phase=0 - ) - sequence.add(initialize_highfreq) - - flux_sequence, _ = getattr(platform, f"create_{native}_pulse_sequence")( - qubits=(ordered_pair[1], ordered_pair[0]), - start=initialize_highfreq.finish, - ) + sequence += low_natives.RX() + sequence += high_natives.RX() - sequence.add(flux_sequence.get_qubit_pulses(ordered_pair[0])) - sequence.add(flux_sequence.get_qubit_pulses(ordered_pair[1])) + drive_duration = sequence.duration + raw_flux_sequence = getattr(platform.natives.two_qubit[ordered_pair], native)() + flux_channel, flux_pulse = [ + (ch, pulse) + for ch, pulse in raw_flux_sequence + if ch == platform.qubits[ordered_pair[1]].flux + ][0] - delay_measurement = duration_max + if duration_max is not None: + flux_pulse = replace(flux_pulse, duration=duration_max) - if platform.couplers: - coupler_pulse = flux_sequence.coupler_pulses( - platform.pairs[tuple(ordered_pair)].coupler.name - ) - sequence.add(coupler_pulse) - delay_measurement = max(duration_max, coupler_pulse.duration) + sequence.append((flux_channel, Delay(duration=drive_duration))) + sequence.append((flux_channel, flux_pulse)) + parking_pulses = [] if parking: - for pulse in flux_sequence: - if pulse.qubit not in ordered_pair: - pulse.start = COUPLER_PULSE_START - pulse.duration = COUPLER_PULSE_DURATION - sequence.add(pulse) + for ch, pulse in raw_flux_sequence: + if not isinstance(pulse, VirtualZ) and ch != flux_channel: + sequence.append((ch, Delay(duration=drive_duration))) + sequence.append((ch, pulse)) + parking_pulses.append(pulse) - # add readout - measure_lowfreq = platform.create_qubit_readout_pulse( - ordered_pair[0], - start=initialize_highfreq.finish + delay_measurement + dt, - ) - measure_highfreq = platform.create_qubit_readout_pulse( - ordered_pair[1], - start=initialize_highfreq.finish + delay_measurement + dt, + flux_duration = max(flux_pulse.duration, raw_flux_sequence.duration) + + ro_low_channel, ro_high_channel = ( + 
platform.qubits[ordered_pair[0]].acquisition, + platform.qubits[ordered_pair[1]].acquisition, ) + ro_low_delay = ro_high_delay = drive_delay = Delay(duration=flux_duration) + dt_delay = Delay(duration=dt) + drive_channel, second_rx = high_natives.RX()[0] + sequence += [ + (ro_low_channel, Delay(duration=drive_duration)), + (ro_high_channel, Delay(duration=drive_duration)), + (ro_low_channel, ro_low_delay), + (ro_high_channel, ro_high_delay), + (ro_low_channel, dt_delay), + (ro_high_channel, dt_delay), + (drive_channel, drive_delay), + (drive_channel, dt_delay), + (ro_low_channel, Delay(duration=second_rx.duration)), + (ro_high_channel, Delay(duration=second_rx.duration)), + (drive_channel, second_rx), + ] - sequence.add(measure_lowfreq) - sequence.add(measure_highfreq) + # add readout + sequence += low_natives.MZ() + high_natives.MZ() - return sequence + return ( + sequence, + flux_pulse, + parking_pulses, + [ro_low_delay, ro_high_delay, drive_delay], + ) # fitting function for single row in chevron plot (rabi-like curve) diff --git a/src/qibocal/protocols/two_qubit_interaction/chsh/__init__.py b/src/qibocal/protocols/two_qubit_interaction/chsh/__init__.py deleted file mode 100644 index 61fe5789b..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/chsh/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .protocol import chsh_circuits, chsh_pulses diff --git a/src/qibocal/protocols/two_qubit_interaction/chsh/circuits.py b/src/qibocal/protocols/two_qubit_interaction/chsh/circuits.py deleted file mode 100644 index 898ab8536..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/chsh/circuits.py +++ /dev/null @@ -1,95 +0,0 @@ -"""Auxiliary functions to run CHSH using circuits.""" - -import numpy as np -from qibo import gates -from qibo.models import Circuit - -from .utils import READOUT_BASIS - - -def create_bell_circuit(theta=np.pi / 4, bell_state=0): - """Creates the circuit to generate the bell states and with a theta-measurement - bell_state chooses the initial bell state for the test: - 0 -> |00>+|11> - 1 -> |00>-|11> - 2 -> |10>-|01> - 3 -> |10>+|01> - Native defaults to only using GPI2 and GPI gates. - """ - p = [0, 0] - c = Circuit(2) - c.add(gates.H(0)) - c.add(gates.H(1)) - c.add(gates.CZ(0, 1)) - c.add(gates.H(1)) - if bell_state == 1: - c.add(gates.Z(0)) - elif bell_state == 2: - c.add(gates.Z(0)) - c.add(gates.X(0)) - elif bell_state == 3: - c.add(gates.X(0)) - - c.add(gates.RY(0, theta)) - return c, p - - -def create_bell_circuit_native(theta=np.pi / 4, bell_state=0): - """Creates the circuit to generate the bell states and with a theta-measurement - bell_state chooses the initial bell state for the test: - 0 -> |00>+|11> - 1 -> |00>-|11> - 2 -> |10>-|01> - 3 -> |10>+|01> - Native defaults to only using GPI2 and GPI gates. - """ - - c = Circuit(2) - p = [0, 0] - c.add(gates.GPI2(0, np.pi / 2)) - c.add(gates.GPI2(1, np.pi / 2)) - c.add(gates.CZ(0, 1)) - c.add(gates.GPI2(1, -np.pi / 2)) - if bell_state == 0: - p[0] += np.pi - elif bell_state == 1: - p[0] += 0 - elif bell_state == 2: - p[0] += 0 - c.add(gates.GPI2(0, p[0])) - c.add(gates.GPI2(0, p[0])) - elif bell_state == 3: - p[0] += np.pi - c.add(gates.GPI2(0, p[0])) - c.add(gates.GPI2(0, p[0])) - - c.add(gates.GPI2(0, p[0])) - p[0] += theta - c.add(gates.GPI2(0, p[0] + np.pi)) - - return c, p - - -def create_chsh_circuits( - theta=np.pi / 4, - bell_state=0, - native=True, - readout_basis=READOUT_BASIS, -): - """Creates the circuits needed for the 4 measurement settings for chsh. 
- Native defaults to only using GPI2 and GPI gates. - rerr adds a readout bitflip error to the simulation. - """ - create_bell = create_bell_circuit_native if native else create_bell_circuit - chsh_circuits = {} - for basis in readout_basis: - c, p = create_bell(theta, bell_state) - for i, base in enumerate(basis): - if base == "X": - if native: - c.add(gates.GPI2(i, p[i] + np.pi / 2)) - else: - c.add(gates.H(i)) - c.add(gates.M(0, 1)) - chsh_circuits[basis] = c - return chsh_circuits diff --git a/src/qibocal/protocols/two_qubit_interaction/chsh/protocol.py b/src/qibocal/protocols/two_qubit_interaction/chsh/protocol.py deleted file mode 100644 index b5beb46c6..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/chsh/protocol.py +++ /dev/null @@ -1,381 +0,0 @@ -"""Protocol for CHSH experiment using both circuits and pulses.""" - -import json -from dataclasses import dataclass, field -from pathlib import Path -from typing import Optional - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from qibo.backends import construct_backend -from qibolab import ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId, QubitPairId - -from qibocal.auto.operation import Data, Parameters, Results, Routine -from qibocal.auto.transpile import dummy_transpiler, execute_transpiled_circuit -from qibocal.config import log - -from ...readout_mitigation_matrix import ( - ReadoutMitigationMatrixParameters as mitigation_params, -) -from ...readout_mitigation_matrix import _acquisition as mitigation_acquisition -from ...readout_mitigation_matrix import _fit as mitigation_fit -from ...utils import calculate_frequencies -from .circuits import create_chsh_circuits -from .pulses import create_chsh_sequences -from .utils import READOUT_BASIS, compute_chsh - -COMPUTATIONAL_BASIS = ["00", "01", "10", "11"] - -CLASSICAL_BOUND = 2 -"""Classical limit of CHSH,""" -QUANTUM_BOUND = 2 * np.sqrt(2) -"""Quantum limit of CHSH.""" - - -MITIGATION_MATRIX_FILE = "mitigation_matrix" -"""File where readout mitigation matrix is stored.""" - - -@dataclass -class CHSHParameters(Parameters): - """CHSH runcard inputs.""" - - bell_states: list - """List with Bell states to compute CHSH. 
- The following notation it is used: - 0 -> |00>+|11> - 1 -> |00>-|11> - 2 -> |10>-|01> - 3 -> |10>+|01> - """ - ntheta: int - """Number of angles probed linearly between 0 and 2 pi.""" - native: Optional[bool] = False - """If True a circuit will be created using only GPI2 and CZ gates.""" - apply_error_mitigation: Optional[bool] = False - """Error mitigation model""" - - -@dataclass -class CHSHData(Data): - """CHSH Data structure.""" - - bell_states: list - """Bell states list.""" - thetas: list - """Angles probed.""" - data: dict[QubitId, QubitId, int, tuple, str] = field(default_factory=dict) - """Raw data acquired.""" - mitigation_matrix: dict[tuple[QubitId, ...], npt.NDArray] = field( - default_factory=dict - ) - """Mitigation matrix computed using the readout_mitigation_matrix protocol.""" - - def save(self, path: Path): - """Saving data including mitigation matrix.""" - if self.mitigation_matrix: - np.savez( - path / f"{MITIGATION_MATRIX_FILE}.npz", - **{ - json.dumps((control, target)): self.mitigation_matrix[ - control, target - ] - for control, target, _, _, _ in self.data - }, - ) - super().save(path=path) - - @classmethod - def load(cls, path: Path): - """Custom loading to acco modate mitigation matrix""" - instance = super().load(path=path) - # load readout mitigation matrix - mitigation_matrix = super().load_data( - path=path, filename=MITIGATION_MATRIX_FILE - ) - instance.mitigation_matrix = mitigation_matrix - return instance - - def register_basis(self, pair, bell_state, basis, frequencies): - """Store output for single qubit.""" - - # Add zero is state do not appear in state - # could be removed by using high number of shots - for i in COMPUTATIONAL_BASIS: - if i not in frequencies: - frequencies[i] = 0 - - for state, freq in frequencies.items(): - if (pair[0], pair[1], bell_state, basis, state) in self.data: - self.data[pair[0], pair[1], bell_state, basis, state] = np.concatenate( - ( - self.data[pair[0], pair[1], bell_state, basis, state], - np.array([freq]), - ) - ) - else: - self.data[pair[0], pair[1], bell_state, basis, state] = np.array([freq]) - - def merge_frequencies(self, pair, bell_state): - """Merge frequencies with different measurement basis.""" - freqs = [] - bell_data = { - (index[3], index[4]): value - for index, value in self.data.items() - if index[:3] == (pair[0], pair[1], bell_state) - } - - freqs = [] - for i in READOUT_BASIS: - freqs.append( - {state[1]: value for state, value in bell_data.items() if state[0] == i} - ) - - return freqs - - @property - def params(self): - """Convert non-arrays attributes into dict.""" - data_dict = super().params - data_dict.pop("mitigation_matrix") - - return data_dict - - -@dataclass -class CHSHResults(Results): - """CHSH Results class.""" - - chsh: dict[tuple[QubitPairId, int], float] = field(default_factory=dict) - """Raw CHSH value.""" - chsh_mitigated: dict[tuple[QubitPairId, int], float] = field(default_factory=dict) - """Mitigated CHSH value.""" - - def __contains__(self, key: QubitPairId): - """Check if key is in class. - - While key is a QubitPairId both chsh and chsh_mitigated contain - an additional key which represents the basis chosen. 
- - """ - - return key in [(target, control) for target, control, _ in self.chsh] - - -def _acquisition_pulses( - params: CHSHParameters, - platform: Platform, - targets: list[list[QubitId]], -) -> CHSHData: - r"""Data acquisition for CHSH protocol using pulse sequences.""" - thetas = np.linspace(0, 2 * np.pi, params.ntheta) - data = CHSHData(bell_states=params.bell_states, thetas=thetas.tolist()) - - if params.apply_error_mitigation: - mitigation_data = mitigation_acquisition( - mitigation_params(nshots=params.nshots), platform, targets - ) - mitigation_results = mitigation_fit(mitigation_data) - - platform.connect() - for pair in targets: - if params.apply_error_mitigation: - try: - data.mitigation_matrix[pair] = ( - mitigation_results.readout_mitigation_matrix[pair] - ) - except KeyError: - log.warning( - f"Skipping error mitigation for qubits {pair} due to error." - ) - - for bell_state in params.bell_states: - for theta in thetas: - chsh_sequences = create_chsh_sequences( - platform=platform, - qubits=pair, - theta=theta, - bell_state=bell_state, - ) - for basis, sequence in chsh_sequences.items(): - results = platform.execute_pulse_sequence( - sequence, - ExecutionParameters( - nshots=params.nshots, relaxation_time=params.relaxation_time - ), - ) - frequencies = calculate_frequencies(results, list(pair)) - data.register_basis(pair, bell_state, basis, frequencies) - return data - - -def _acquisition_circuits( - params: CHSHParameters, - platform: Platform, - targets: list[QubitPairId], -) -> CHSHData: - """Data acquisition for CHSH protocol using circuits.""" - thetas = np.linspace(0, 2 * np.pi, params.ntheta) - data = CHSHData( - bell_states=params.bell_states, - thetas=thetas.tolist(), - ) - backend = construct_backend("qibolab", platform=platform) - transpiler = dummy_transpiler(backend) - if params.apply_error_mitigation: - mitigation_data = mitigation_acquisition( - mitigation_params(nshots=params.nshots), platform, targets - ) - mitigation_results = mitigation_fit(mitigation_data) - for pair in targets: - if params.apply_error_mitigation: - try: - data.mitigation_matrix[pair] = ( - mitigation_results.readout_mitigation_matrix[pair] - ) - except KeyError: - log.warning( - f"Skipping error mitigation for qubits {pair} due to error." 
- ) - for bell_state in params.bell_states: - for theta in thetas: - chsh_circuits = create_chsh_circuits( - bell_state=bell_state, - theta=theta, - native=params.native, - ) - for basis, circuit in chsh_circuits.items(): - _, result = execute_transpiled_circuit( - circuit, - nshots=params.nshots, - transpiler=transpiler, - backend=backend, - qubit_map=pair, - ) - frequencies = result.frequencies() - data.register_basis(pair, bell_state, basis, frequencies) - - return data - - -def _plot(data: CHSHData, fit: CHSHResults, target: QubitPairId): - """Plotting function for CHSH protocol.""" - figures = [] - - for bell_state in data.bell_states: - fig = go.Figure(layout_yaxis_range=[-3, 3]) - if fit is not None: - fig.add_trace( - go.Scatter( - x=data.thetas, - y=fit.chsh[target[0], target[1], bell_state], - name="Bare", - ) - ) - if fit.chsh_mitigated: - fig.add_trace( - go.Scatter( - x=data.thetas, - y=fit.chsh_mitigated[target[0], target[1], bell_state], - name="Mitigated", - ) - ) - - fig.add_trace( - go.Scatter( - mode="lines", - x=data.thetas, - y=[+CLASSICAL_BOUND] * len(data.thetas), - line_color="gray", - name="Classical limit", - line_dash="dash", - legendgroup="classic", - ) - ) - - fig.add_trace( - go.Scatter( - mode="lines", - x=data.thetas, - y=[-CLASSICAL_BOUND] * len(data.thetas), - line_color="gray", - name="Classical limit", - legendgroup="classic", - line_dash="dash", - showlegend=False, - ) - ) - - fig.add_trace( - go.Scatter( - mode="lines", - x=data.thetas, - y=[+QUANTUM_BOUND] * len(data.thetas), - line_color="gray", - name="Quantum limit", - legendgroup="quantum", - ) - ) - - fig.add_trace( - go.Scatter( - mode="lines", - x=data.thetas, - y=[-QUANTUM_BOUND] * len(data.thetas), - line_color="gray", - name="Quantum limit", - legendgroup="quantum", - showlegend=False, - ) - ) - - fig.update_layout( - xaxis_title="Theta [rad]", - yaxis_title="CHSH value", - xaxis=dict(range=[min(data.thetas), max(data.thetas)]), - ) - figures.append(fig) - - return figures, "" - - -def _fit(data: CHSHData) -> CHSHResults: - """Fitting for CHSH protocol.""" - results = {} - mitigated_results = {} - for pair in data.pairs: - for bell_state in data.bell_states: - freq = data.merge_frequencies(pair, bell_state) - if data.mitigation_matrix: - matrix = data.mitigation_matrix[pair] - - mitigated_freq_list = [] - for freq_basis in freq: - mitigated_freq = {format(i, f"0{2}b"): [] for i in range(4)} - for i in range(len(data.thetas)): - freq_array = np.zeros(4) - for k, v in freq_basis.items(): - freq_array[int(k, 2)] = v[i] - freq_array = freq_array.reshape(-1, 1) - for j, val in enumerate(matrix @ freq_array): - mitigated_freq[format(j, f"0{2}b")].append(float(val)) - mitigated_freq_list.append(mitigated_freq) - results[pair[0], pair[1], bell_state] = [ - compute_chsh(freq, bell_state, l) for l in range(len(data.thetas)) - ] - - if data.mitigation_matrix: - mitigated_results[pair[0], pair[1], bell_state] = [ - compute_chsh(mitigated_freq_list, bell_state, l) - for l in range(len(data.thetas)) - ] - return CHSHResults(chsh=results, chsh_mitigated=mitigated_results) - - -chsh_circuits = Routine(_acquisition_circuits, _fit, _plot, two_qubit_gates=True) -"""CHSH experiment using circuits.""" -chsh_pulses = Routine(_acquisition_pulses, _fit, _plot, two_qubit_gates=True) -"""CHSH experiment using pulses.""" diff --git a/src/qibocal/protocols/two_qubit_interaction/chsh/pulses.py b/src/qibocal/protocols/two_qubit_interaction/chsh/pulses.py deleted file mode 100644 index 3602d88bd..000000000 --- 
a/src/qibocal/protocols/two_qubit_interaction/chsh/pulses.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Auxialiary functions to run CHSH using pulses.""" - -from collections import defaultdict - -import numpy as np -from qibolab.pulses import PulseSequence - -from .utils import READOUT_BASIS - - -def create_bell_sequence(platform, qubits, theta=np.pi / 4, bell_state=0): - """Creates the pulse sequence to generate the bell states and with a theta-measurement - bell_state chooses the initial bell state for the test: - 0 -> |00>+|11> - 1 -> |00>-|11> - 2 -> |10>-|01> - 3 -> |10>+|01> - """ - - virtual_z_phases = defaultdict(int) - - sequence = PulseSequence() - sequence.add( - platform.create_RX90_pulse(qubits[0], start=0, relative_phase=np.pi / 2) - ) - sequence.add( - platform.create_RX90_pulse(qubits[1], start=0, relative_phase=np.pi / 2) - ) - - (cz_sequence, cz_virtual_z_phases) = platform.create_CZ_pulse_sequence( - qubits, sequence.finish - ) - sequence.add(cz_sequence) - for qubit in cz_virtual_z_phases: - virtual_z_phases[qubit] += cz_virtual_z_phases[qubit] - - t = sequence.finish - - sequence.add( - platform.create_RX90_pulse( - qubits[1], - start=t, - relative_phase=virtual_z_phases[qubits[1]] - np.pi / 2, - ) - ) - - if bell_state == 0: - virtual_z_phases[qubits[0]] += np.pi - elif bell_state == 1: - virtual_z_phases[qubits[0]] += 0 - elif bell_state == 2: - virtual_z_phases[qubits[0]] += 0 - sequence.add( - platform.create_RX_pulse( - qubits[0], start=t, relative_phase=virtual_z_phases[qubits[0]] - ) - ) - elif bell_state == 3: - virtual_z_phases[qubits[0]] += np.pi - sequence.add( - platform.create_RX_pulse( - qubits[0], start=t, relative_phase=virtual_z_phases[qubits[0]] - ) - ) - - t = sequence.finish - sequence.add( - platform.create_RX90_pulse( - qubits[0], start=t, relative_phase=virtual_z_phases[qubits[0]] - ) - ) - virtual_z_phases[qubits[0]] += theta - sequence.add( - platform.create_RX90_pulse( - qubits[0], - start=sequence.finish, - relative_phase=virtual_z_phases[qubits[0]] + np.pi, - ) - ) - - return sequence, virtual_z_phases - - -def create_chsh_sequences( - platform, qubits, theta=np.pi / 4, bell_state=0, readout_basis=READOUT_BASIS -): - """Creates the pulse sequences needed for the 4 measurement settings for chsh.""" - - chsh_sequences = {} - - for basis in readout_basis: - sequence, virtual_z_phases = create_bell_sequence( - platform, qubits, theta, bell_state - ) - t = sequence.finish - for i, base in enumerate(basis): - if base == "X": - sequence.add( - platform.create_RX90_pulse( - qubits[i], - start=t, - relative_phase=virtual_z_phases[qubits[i]] + np.pi / 2, - ) - ) - measurement_start = sequence.finish - for qubit in qubits: - MZ_pulse = platform.create_MZ_pulse(qubit, start=measurement_start) - sequence.add(MZ_pulse) - chsh_sequences[basis] = sequence - - return chsh_sequences diff --git a/src/qibocal/protocols/two_qubit_interaction/chsh/utils.py b/src/qibocal/protocols/two_qubit_interaction/chsh/utils.py deleted file mode 100644 index 0f38a3589..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/chsh/utils.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Auxiliary functions to run CHSH protocol.""" - -from qibo.config import log - -READOUT_BASIS = ["ZZ", "ZX", "XZ", "XX"] - - -def compute_chsh(frequencies, basis, i): - """Computes the chsh inequality out of the frequencies of the 4 circuits executed.""" - chsh = 0 - aux = 0 - for freq in frequencies: - for outcome in freq: - if aux == 1 + 2 * ( - basis % 2 - ): # This value sets where the minus sign is in the 
CHSH inequality - chsh -= (-1) ** (int(outcome[0]) + int(outcome[1])) * freq[outcome][i] - else: - chsh += (-1) ** (int(outcome[0]) + int(outcome[1])) * freq[outcome][i] - aux += 1 - nshots = sum(freq[x][i] for x in freq) - try: - return chsh / nshots - except ZeroDivisionError: - log.warning("Zero number of shots, returning zero.") - return 0 diff --git a/src/qibocal/protocols/two_qubit_interaction/mermin/__init__.py b/src/qibocal/protocols/two_qubit_interaction/mermin/__init__.py deleted file mode 100644 index 5a4e9488d..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/mermin/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .protocol import mermin diff --git a/src/qibocal/protocols/two_qubit_interaction/mermin/protocol.py b/src/qibocal/protocols/two_qubit_interaction/mermin/protocol.py deleted file mode 100644 index a4f764d02..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/mermin/protocol.py +++ /dev/null @@ -1,264 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional - -import numpy as np -import numpy.typing as npt -import plotly.graph_objects as go -from qibolab import ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId - -from qibocal.auto.operation import Data, Parameters, Results, Routine - -from ...readout_mitigation_matrix import readout_mitigation_matrix -from ...utils import STRING_TYPE, calculate_frequencies -from .pulses import create_mermin_sequences -from .utils import ( - compute_mermin, - get_mermin_coefficients, - get_mermin_polynomial, - get_readout_basis, -) - -PLOT_PADDING = 0.2 - - -@dataclass -class MerminParameters(Parameters): - """Mermin experiment input parameters.""" - - ntheta: int - """Number of angles probed linearly between 0 and 2 pi.""" - native: Optional[bool] = False - """If True a circuit will be created using only GPI2 and CZ gates.""" - apply_error_mitigation: Optional[bool] = False - """Error mitigation model""" - - -MerminType = np.dtype( - [ - ("theta", float), - ("basis", STRING_TYPE), - ("state", int), - ("frequency", int), - ] -) - - -@dataclass -class MerminData(Data): - """Mermin Data structure.""" - - thetas: list - """Angles probed.""" - data: dict[list[QubitId], npt.NDArray[MerminType]] = field(default_factory=dict) - """Raw data acquired.""" - mitigation_matrix: dict[list[QubitId], npt.NDArray[np.float64]] = field( - default_factory=dict - ) - """Mitigation matrix computed using the readout_mitigation_matrix protocol.""" - - @property - def targets(self): - return list(self.data) - - -@dataclass -class MerminResults(Results): - """Mermin Results class.""" - - mermin: dict[tuple[QubitId, ...], npt.NDArray[np.float64]] = field( - default_factory=dict - ) - """Raw Mermin value.""" - - mermin_mitigated: dict[tuple[QubitId, ...], npt.NDArray[np.float64]] = field( - default_factory=dict - ) - """Mitigated Mermin value.""" - - -def _acquisition( - params: MerminParameters, - platform: Platform, - targets: list[list[QubitId]], -) -> MerminData: - """Data acquisition for Mermin protocol using pulse sequences.""" - - thetas = np.linspace(0, 2 * np.pi, params.ntheta) - data = MerminData(thetas=thetas.tolist()) - if params.apply_error_mitigation: - mitigation_data, _ = readout_mitigation_matrix.acquisition( - readout_mitigation_matrix.parameters_type.load(dict(nshots=params.nshots)), - platform, - targets, - ) - - mitigation_results, _ = readout_mitigation_matrix.fit(mitigation_data) - data.mitigation_matrix = mitigation_results.readout_mitigation_matrix - 
platform.connect() - for qubits in targets: - mermin_polynomial = get_mermin_polynomial(len(qubits)) - readout_basis = get_readout_basis(mermin_polynomial) - - for theta in thetas: - mermin_sequences = create_mermin_sequences( - platform, qubits, readout_basis=readout_basis, theta=theta - ) - options = ExecutionParameters(nshots=params.nshots) - # TODO: use unrolling - for basis, sequence in mermin_sequences.items(): - results = platform.execute_pulse_sequence(sequence, options=options) - frequencies = calculate_frequencies(results, qubits) - for state, frequency in enumerate(frequencies.values()): - data.register_qubit( - MerminType, - tuple(qubits), - dict( - theta=np.array([theta]), - basis=np.array([basis]), - state=np.array([state]), - frequency=np.array([frequency]), - ), - ) - return data - - -def _fit(data: MerminData) -> MerminResults: - """Fitting for Mermin protocol.""" - targets = data.targets - results = {qubits: [] for qubits in targets} - mitigated_results = {qubits: [] for qubits in targets} - basis = np.unique(data.data[targets[0]].basis) - for qubits in targets: - mermin_polynomial = get_mermin_polynomial(len(qubits)) - mermin_coefficients = get_mermin_coefficients(mermin_polynomial) - - for theta in data.thetas: - qubit_data = data.data[qubits] - outputs = [] - mitigated_outputs = [] - for base in basis: - frequencies = np.zeros(2 ** len(qubits)) - data_filter = (qubit_data.basis == base) & (qubit_data.theta == theta) - filtered_data = qubit_data[data_filter] - state_freq = qubit_data[data_filter].frequency - for state, freq in zip(filtered_data.state, filtered_data.frequency): - frequencies[state] = freq - - outputs.append( - { - format(i, f"0{len(qubits)}b"): freq - for i, freq in enumerate(state_freq) - } - ) - - if data.mitigation_matrix: - mitigated_output = np.dot( - data.mitigation_matrix[qubits], - frequencies, - ) - mitigated_outputs.append( - { - format(i, f"0{len(qubits)}b"): freq - for i, freq in enumerate(mitigated_output) - } - ) - if data.mitigation_matrix: - mitigated_results[tuple(qubits)].append( - compute_mermin(mitigated_outputs, mermin_coefficients) - ) - results[tuple(qubits)].append(compute_mermin(outputs, mermin_coefficients)) - return MerminResults( - mermin=results, - mermin_mitigated=mitigated_results, - ) - - -def _plot(data: MerminData, fit: MerminResults, target): - """Plotting function for Mermin protocol.""" - figures = [] - - n_qubits = len(target) - classical_bound = 2 ** (n_qubits // 2) - quantum_bound = 2 ** ((n_qubits - 1) / 2) * (2 ** (n_qubits // 2)) - - fig = go.Figure( - layout_yaxis_range=[-quantum_bound - PLOT_PADDING, quantum_bound + PLOT_PADDING] - ) - if fit is not None: - fig.add_trace( - go.Scatter( - x=data.thetas, - y=fit.mermin[tuple(target)], - name="Bare", - ) - ) - if fit.mermin_mitigated: - fig.add_trace( - go.Scatter( - x=data.thetas, - y=fit.mermin_mitigated[tuple(target)], - name="Mitigated", - ) - ) - - fig.add_trace( - go.Scatter( - mode="lines", - x=data.thetas, - y=[+classical_bound] * len(data.thetas), - line_color="gray", - name="Classical limit", - line_dash="dash", - legendgroup="classic", - ) - ) - - fig.add_trace( - go.Scatter( - mode="lines", - x=data.thetas, - y=[-classical_bound] * len(data.thetas), - line_color="gray", - name="Classical limit", - legendgroup="classic", - line_dash="dash", - showlegend=False, - ) - ) - - fig.add_trace( - go.Scatter( - mode="lines", - x=data.thetas, - y=[+quantum_bound] * len(data.thetas), - line_color="gray", - name="Quantum limit", - legendgroup="quantum", - ) - ) - 
- fig.add_trace( - go.Scatter( - mode="lines", - x=data.thetas, - y=[-quantum_bound] * len(data.thetas), - line_color="gray", - name="Quantum limit", - legendgroup="quantum", - showlegend=False, - ) - ) - - fig.update_layout( - xaxis_title="Theta [rad]", - yaxis_title="Mermin polynomial value", - xaxis=dict(range=[min(data.thetas), max(data.thetas)]), - ) - figures.append(fig) - - return figures, "" - - -mermin = Routine(_acquisition, _fit, _plot) diff --git a/src/qibocal/protocols/two_qubit_interaction/mermin/pulses.py b/src/qibocal/protocols/two_qubit_interaction/mermin/pulses.py deleted file mode 100644 index 239f81d67..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/mermin/pulses.py +++ /dev/null @@ -1,84 +0,0 @@ -from collections import defaultdict - -import numpy as np -from qibolab.pulses import PulseSequence - - -def create_mermin_sequence(platform, qubits, theta=None): - """Creates the pulse sequence to generate the bell states and with a theta-measurement""" - - nqubits = len(qubits) - if theta is None: - theta = ((nqubits - 1) * 0.25 * np.pi) % (2 * np.pi) - - virtual_z_phases = defaultdict(int) - sequence = PulseSequence() - - for qubit in qubits: - sequence.add( - platform.create_RX90_pulse( - qubit, start=0, relative_phase=virtual_z_phases[qubit] + np.pi / 2 - ) - ) - - # TODO: Not hardcode topology - - # qubits[0] needs to be the center qubit where everything is connected - for i in range(1, len(qubits)): - (cz_sequence1, cz_virtual_z_phases) = platform.create_CZ_pulse_sequence( - [qubits[0]] + [qubits[i]], sequence.finish + 8 # TODO: ask for the 8 - ) - sequence.add(cz_sequence1) - for qubit in cz_virtual_z_phases: - virtual_z_phases[qubit] += cz_virtual_z_phases[qubit] - - t = sequence.finish + 8 - - for i in range(1, len(qubits)): - sequence.add( - platform.create_RX90_pulse( - qubits[i], - start=t, - relative_phase=virtual_z_phases[qubits[i]] - np.pi / 2, - ) - ) - - virtual_z_phases[qubits[0]] -= theta - - return sequence, virtual_z_phases - - -def create_mermin_sequences(platform, qubits, readout_basis, theta): - """Creates the pulse sequences needed for the 4 measurement settings for chsh.""" - - mermin_sequences = {} - - for basis in readout_basis: - sequence, virtual_z_phases = create_mermin_sequence( - platform, qubits, theta=theta - ) - # t = sequence.finish - for i, base in enumerate(basis): - if base == "X": - sequence.add( - platform.create_RX90_pulse( - qubits[i], - start=sequence.finish, - relative_phase=virtual_z_phases[qubits[i]] + np.pi / 2, - ) - ) - if base == "Y": - sequence.add( - platform.create_RX90_pulse( - qubits[i], - start=sequence.finish, - relative_phase=virtual_z_phases[qubits[i]], - ) - ) - measurement_start = sequence.finish - - for qubit in qubits: - sequence.add(platform.create_MZ_pulse(qubit, start=measurement_start)) - - mermin_sequences[basis] = sequence - return mermin_sequences diff --git a/src/qibocal/protocols/two_qubit_interaction/mermin/utils.py b/src/qibocal/protocols/two_qubit_interaction/mermin/utils.py deleted file mode 100644 index e0afdca28..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/mermin/utils.py +++ /dev/null @@ -1,44 +0,0 @@ -from qibo.hamiltonians import SymbolicHamiltonian -from qibo.symbols import X, Y - - -def compute_mermin(frequencies, mermin_coefficients): - """Computes the chsh inequality out of the frequencies of the 4 circuits executed.""" - assert len(frequencies) == len(mermin_coefficients) - m = 0 - for j, freq in enumerate(frequencies): - for key in freq: - m += ( - 
mermin_coefficients[j] - * freq[key] - * (-1) ** (sum([int(key[k]) for k in range(len(key))])) - ) - nshots = sum(freq[x] for x in freq) - if nshots != 0: - return float(m / nshots) - - return 0 - - -def get_mermin_polynomial(n): - assert n > 1 - m0 = X(0) - m0p = Y(0) - for i in range(1, n): - mn = m0 * (X(i) + Y(i)) + m0p * (X(i) - Y(i)) - mnp = m0 * (Y(i) - X(i)) + m0p * (X(i) + Y(i)) - m0 = mn.expand() - m0p = mnp.expand() - m = m0 / 2 ** ((n - 1) // 2) - return SymbolicHamiltonian(m.expand()) - - -def get_readout_basis(mermin_polynomial): - return [ - "".join([factor.name[0] for factor in term.factors]) - for term in mermin_polynomial.terms - ] - - -def get_mermin_coefficients(mermin_polynomial): - return [term.coefficient.real for term in mermin_polynomial.terms] diff --git a/src/qibocal/protocols/two_qubit_interaction/optimize.py b/src/qibocal/protocols/two_qubit_interaction/optimize.py index dcf4845b9..7ea214032 100644 --- a/src/qibocal/protocols/two_qubit_interaction/optimize.py +++ b/src/qibocal/protocols/two_qubit_interaction/optimize.py @@ -7,13 +7,18 @@ import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitId, QubitPairId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import AcquisitionType, AveragingMode, Parameter, Sweeper from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import ( + Data, + Parameters, + QubitId, + QubitPairId, + Results, + Routine, +) +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import table_dict, table_html @@ -43,10 +48,8 @@ class OptimizeTwoQubitGateParameters(Parameters): """Maximum duration of flux pulse swept.""" duration_step: int """Step duration of flux pulse swept.""" - dt: Optional[float] = 20 + dt: Optional[float] = 0 """Time delay between flux pulses and readout.""" - parking: bool = True - """Wether to park non interacting qubits or not.""" native: str = "CZ" """Two qubit interaction to be calibrated. 
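For reference, a minimal sketch of the execution pattern the refactored acquisition below relies on (assumed names: `sequence`, `sweeper_duration`, `sweeper_amplitude`, `sweeper_theta`, `params` and `target_q`, built as in the following hunk). With qibolab 0.2 the results are keyed by the readout pulse id, recovered from the qubit's acquisition channel, rather than by qubit name:

```python
from qibolab import AcquisitionType, AveragingMode

# The last pulse on the target qubit's acquisition channel is its readout pulse.
ro_target = list(sequence.channel(platform.qubits[target_q].acquisition))[-1]

# Nested sweeper lists nest the duration, amplitude and phase sweeps.
results = platform.execute(
    [sequence],
    [[sweeper_duration], [sweeper_amplitude], [sweeper_theta]],
    nshots=params.nshots,
    relaxation_time=params.relaxation_time,
    acquisition_type=AcquisitionType.DISCRIMINATION,
    averaging_mode=AveragingMode.CYCLIC,
)

# Discriminated excited-state probabilities for the target qubit.
prob_target = results[ro_target.id]
```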
@@ -141,7 +144,7 @@ def register_qubit( def _acquisition( params: OptimizeTwoQubitGateParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitPairId], ) -> OptimizeTwoQubitGateData: r""" @@ -154,97 +157,81 @@ def _acquisition( ) for pair in targets: # order the qubits so that the low frequency one is the first - ord_pair = order_pair(pair, platform) + ordered_pair = order_pair(pair, platform) for target_q, control_q in ( - (ord_pair[0], ord_pair[1]), - (ord_pair[1], ord_pair[0]), + (ordered_pair[0], ordered_pair[1]), + (ordered_pair[1], ordered_pair[0]), ): for setup in ("I", "X"): ( sequence, flux_pulse, theta_pulse, + ro_delays, ) = create_sequence( platform, setup, target_q, control_q, - ord_pair, + ordered_pair, params.native, params.dt, - params.parking, flux_pulse_max_duration=params.duration_max, ) - theta = np.arange( - params.theta_start, - params.theta_end, - params.theta_step, - dtype=float, - ) - - amplitude_range = np.arange( - params.flux_pulse_amplitude_min, - params.flux_pulse_amplitude_max, - params.flux_pulse_amplitude_step, - dtype=float, - ) - - duration_range = np.arange( - params.duration_min, - params.duration_max, - params.duration_step, - dtype=float, - ) - - data.amplitudes[ord_pair] = amplitude_range.tolist() - data.durations[ord_pair] = duration_range.tolist() sweeper_theta = Sweeper( - Parameter.relative_phase, - theta, + parameter=Parameter.relative_phase, + range=(params.theta_start, params.theta_end, params.theta_step), pulses=[theta_pulse], - type=SweeperType.ABSOLUTE, ) sweeper_amplitude = Sweeper( - Parameter.amplitude, - amplitude_range / flux_pulse.amplitude, + parameter=Parameter.amplitude, + range=( + params.flux_pulse_amplitude_min, + params.flux_pulse_amplitude_max, + params.flux_pulse_amplitude_step, + ), pulses=[flux_pulse], - type=SweeperType.FACTOR, ) sweeper_duration = Sweeper( - Parameter.duration, - duration_range, - pulses=[flux_pulse], - type=SweeperType.ABSOLUTE, + parameter=Parameter.duration, + range=( + params.duration_min, + params.duration_max, + params.duration_step, + ), + pulses=[flux_pulse] + ro_delays, ) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper_duration, - sweeper_amplitude, - sweeper_theta, + ro_target = list( + sequence.channel(platform.qubits[target_q].acquisition) + )[-1] + ro_control = list( + sequence.channel(platform.qubits[control_q].acquisition) + )[-1] + results = platform.execute( + [sequence], + [[sweeper_duration], [sweeper_amplitude], [sweeper_theta]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.CYCLIC, ) - result_target = results[target_q].probability(1) - result_control = results[control_q].probability(1) + data.amplitudes[ordered_pair] = sweeper_amplitude.values.tolist() + data.durations[ordered_pair] = sweeper_duration.values.tolist() data.register_qubit( target_q, control_q, setup, - theta, - data.amplitudes[ord_pair], - data.durations[ord_pair], - result_control, - result_target, + sweeper_theta.values, + sweeper_amplitude.values, + sweeper_duration.values, + results[ro_control.id], + results[ro_target.id], ) return data @@ -374,7 +361,6 @@ def _plot( """Plot routine for OptimizeTwoQubitGate.""" fitting_report = "" qubits = next(iter(data.amplitudes))[:2] - fig = make_subplots( rows=2, 
cols=2, @@ -402,7 +388,6 @@ def _plot( leakage.append(fit.leakages[qubits[0], qubits[1], i, j][control_q]) condition = [target_q, control_q] == list(target) - fig.add_trace( go.Heatmap( x=durs, @@ -458,7 +443,9 @@ def _plot( def _update( - results: OptimizeTwoQubitGateResults, platform: Platform, target: QubitPairId + results: OptimizeTwoQubitGateResults, + platform: CalibrationPlatform, + target: QubitPairId, ): # FIXME: quick fix for qubit order target = tuple(sorted(target)) diff --git a/src/qibocal/protocols/two_qubit_interaction/utils.py b/src/qibocal/protocols/two_qubit_interaction/utils.py index 06c912a30..b5eec8e99 100644 --- a/src/qibocal/protocols/two_qubit_interaction/utils.py +++ b/src/qibocal/protocols/two_qubit_interaction/utils.py @@ -1,18 +1,18 @@ import numpy as np -from qibolab.platform import Platform -from qibolab.qubits import QubitId, QubitPairId +from qibolab import Platform + +from qibocal.auto.operation import QubitId, QubitPairId from ..utils import fallback_period, guess_period def order_pair(pair: QubitPairId, platform: Platform) -> tuple[QubitId, QubitId]: """Order a pair of qubits by drive frequency.""" - if ( - platform.qubits[pair[0]].drive_frequency - > platform.qubits[pair[1]].drive_frequency - ): - return pair[1], pair[0] - return pair[0], pair[1] + q0, q1 = pair + + drive0 = platform.config(platform.qubits[q0].drive) + drive1 = platform.config(platform.qubits[q1].drive) + return (q1, q0) if drive0.frequency > drive1.frequency else (q0, q1) def fit_flux_amplitude(matrix, amps, times): diff --git a/src/qibocal/protocols/two_qubit_interaction/virtual_z_phases.py b/src/qibocal/protocols/two_qubit_interaction/virtual_z_phases.py index d450f6068..fafbc9e3a 100644 --- a/src/qibocal/protocols/two_qubit_interaction/virtual_z_phases.py +++ b/src/qibocal/protocols/two_qubit_interaction/virtual_z_phases.py @@ -1,24 +1,38 @@ """CZ virtual correction experiment for two qubit gates, tune landscape.""" from dataclasses import dataclass, field -from typing import Optional +from typing import Literal, Optional import numpy as np import numpy.typing as npt import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.pulses import Pulse, PulseSequence -from qibolab.qubits import QubitId, QubitPairId -from qibolab.sweeper import Parameter, Sweeper, SweeperType +from qibolab import ( + AcquisitionType, + AveragingMode, + Delay, + Parameter, + Pulse, + PulseSequence, + Sweeper, + VirtualZ, +) from scipy.optimize import curve_fit -from qibocal import update -from qibocal.auto.operation import Data, Parameters, Results, Routine +from qibocal.auto.operation import ( + Data, + Parameters, + QubitId, + QubitPairId, + Results, + Routine, +) +from qibocal.calibration import CalibrationPlatform from qibocal.config import log from qibocal.protocols.utils import table_dict, table_html +from ... import update +from ...update import replace from .utils import order_pair @@ -40,8 +54,6 @@ class VirtualZPhasesParameters(Parameters): """ dt: Optional[float] = 20 """Time delay between flux pulses and readout.""" - parking: bool = True - """Wether to park non interacting qubits or not.""" @dataclass @@ -64,7 +76,6 @@ def __contains__(self, key: QubitPairId): While key is a QubitPairId both chsh and chsh_mitigated contain an additional key which represents the basis chosen. 
""" - # TODO: fix this (failing only for qq report) return key in [ (target, control) for target, control, _ in self.fitted_parameters ] @@ -90,13 +101,12 @@ def __getitem__(self, pair): def create_sequence( - platform: Platform, - setup: str, + platform: CalibrationPlatform, + setup: Literal["I", "X"], target_qubit: QubitId, control_qubit: QubitId, ordered_pair: list[QubitId, QubitId], - native: str, - parking: bool, + native: Literal["CZ", "iSWAP"], dt: float, flux_pulse_max_duration: float = None, ) -> tuple[ @@ -106,66 +116,112 @@ def create_sequence( ]: """Create the experiment PulseSequence.""" + target_natives = platform.natives.single_qubit[target_qubit] + control_natives = platform.natives.single_qubit[control_qubit] + sequence = PulseSequence() + # Y90 + sequence += target_natives.R(theta=np.pi / 2, phi=np.pi / 2) + # X + if setup == "X": + sequence += control_natives.RX() + else: + sequence.append( + (platform.qubits[control_qubit].drive, Delay(duration=sequence.duration)) + ) - Y90_pulse = platform.create_RX90_pulse( - target_qubit, start=0, relative_phase=np.pi / 2 - ) - RX_pulse_start = platform.create_RX_pulse(control_qubit, start=0, relative_phase=0) + drive_duration = sequence.duration - flux_sequence, _ = getattr(platform, f"create_{native}_pulse_sequence")( - (ordered_pair[1], ordered_pair[0]), - start=max(Y90_pulse.finish, RX_pulse_start.finish), - ) + # CZ + flux_sequence = getattr(platform.natives.two_qubit[ordered_pair], native)() + flux_pulses = [ + (ch, pulse) for ch, pulse in flux_sequence if not isinstance(pulse, VirtualZ) + ] - flux_pulse = flux_sequence.get_qubit_pulses(ordered_pair[1])[0] if flux_pulse_max_duration is not None: - flux_pulse.duration = flux_pulse_max_duration + flux_pulses[0] = ( + flux_pulses[0][0], + replace(flux_pulses[0][1], duration=flux_pulse_max_duration), + ) - theta_pulse = platform.create_RX90_pulse( - target_qubit, - start=flux_sequence.finish + dt, - relative_phase=0, - ) - RX_pulse_end = platform.create_RX_pulse( - control_qubit, - start=flux_sequence.finish + dt, - relative_phase=0, - ) - measure_target = platform.create_qubit_readout_pulse( - target_qubit, start=theta_pulse.finish - ) - measure_control = platform.create_qubit_readout_pulse( - control_qubit, start=theta_pulse.finish - ) + _, flux_pulse = flux_pulses[0] + flux_sequence = PulseSequence(flux_pulses) + sequence |= flux_sequence - sequence.add( - Y90_pulse, - flux_sequence.get_qubit_pulses(ordered_pair[1]), - flux_sequence.cf_pulses, - theta_pulse, - measure_target, - measure_control, - ) + flux_duration = flux_sequence.duration + dt_delay = Delay(duration=dt) + theta_sequence = PulseSequence( + [ + ( + platform.qubits[target_qubit].drive, + dt_delay, + ), + ( + platform.qubits[control_qubit].drive, + dt_delay, + ), + ( + platform.qubits[target_qubit].drive, + Delay(duration=flux_duration), + ), + ( + platform.qubits[control_qubit].drive, + Delay(duration=flux_duration), + ), + ] + ) + # R90 (angle to be swept) + theta_sequence += target_natives.R(theta=np.pi / 2, phi=0) + theta_pulse = theta_sequence[-1][1] + # X if setup == "X": - sequence.add( - RX_pulse_start, - RX_pulse_end, - ) + theta_sequence += control_natives.RX() + + sequence += theta_sequence + + ro_target_delay = Delay(duration=flux_duration) + ro_control_delay = Delay(duration=flux_duration) + # M + ro_sequence = PulseSequence( + [ + ( + platform.qubits[target_qubit].acquisition, + Delay(duration=drive_duration), + ), + ( + platform.qubits[control_qubit].acquisition, + Delay(duration=drive_duration), + ), 
+ ( + platform.qubits[target_qubit].acquisition, + ro_target_delay, + ), + ( + platform.qubits[control_qubit].acquisition, + ro_control_delay, + ), + ( + platform.qubits[target_qubit].acquisition, + Delay(duration=theta_sequence.duration), + ), + ( + platform.qubits[control_qubit].acquisition, + Delay(duration=theta_sequence.duration), + ), + target_natives.MZ()[0], + control_natives.MZ()[0], + ] + ) - if parking: - for pulse in flux_sequence: - if pulse.qubit not in ordered_pair: - pulse.duration = theta_pulse.finish - sequence.add(pulse) + sequence += ro_sequence - return sequence, flux_pulse, theta_pulse + return sequence, flux_pulse, theta_pulse, [ro_target_delay, ro_control_delay] def _acquisition( params: VirtualZPhasesParameters, - platform: Platform, + platform: CalibrationPlatform, targets: list[QubitPairId], ) -> VirtualZPhasesData: r""" @@ -189,52 +245,49 @@ def _acquisition( data = VirtualZPhasesData(thetas=theta_absolute.tolist(), native=params.native) for pair in targets: # order the qubits so that the low frequency one is the first - ord_pair = order_pair(pair, platform) + ordered_pair = order_pair(pair, platform) for target_q, control_q in ( - (ord_pair[0], ord_pair[1]), - (ord_pair[1], ord_pair[0]), + (ordered_pair[0], ordered_pair[1]), + (ordered_pair[1], ordered_pair[0]), ): for setup in ("I", "X"): ( sequence, _, theta_pulse, + _, ) = create_sequence( platform, setup, target_q, control_q, - ord_pair, + ordered_pair, params.native, params.dt, - params.parking, - ) - theta = np.arange( - params.theta_start, - params.theta_end, - params.theta_step, - dtype=float, ) sweeper = Sweeper( - Parameter.relative_phase, - theta, + parameter=Parameter.relative_phase, + range=(params.theta_start, params.theta_end, params.theta_step), pulses=[theta_pulse], - type=SweeperType.ABSOLUTE, ) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.DISCRIMINATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper, + results = platform.execute( + [sequence], + [[sweeper]], + nshots=params.nshots, + relaxation_time=params.relaxation_time, + acquisition_type=AcquisitionType.DISCRIMINATION, + averaging_mode=AveragingMode.CYCLIC, ) - result_target = results[target_q].probability(1) - result_control = results[control_q].probability(1) + ro_target = list( + sequence.channel(platform.qubits[target_q].acquisition) + )[-1] + ro_control = list( + sequence.channel(platform.qubits[control_q].acquisition) + )[-1] + result_target = results[ro_target.id] + result_control = results[ro_control.id] data.register_qubit( VirtualZPhasesType, @@ -445,7 +498,9 @@ def _plot(data: VirtualZPhasesData, fit: VirtualZPhasesResults, target: QubitPai return [fig1, fig2], "".join(fitting_report) # target and control qubit -def _update(results: VirtualZPhasesResults, platform: Platform, target: QubitPairId): +def _update( + results: VirtualZPhasesResults, platform: CalibrationPlatform, target: QubitPairId +): # FIXME: quick fix for qubit order target = tuple(sorted(target)) update.virtual_phases( diff --git a/src/qibocal/protocols/two_qubit_interaction/virtual_z_phases_signal.py b/src/qibocal/protocols/two_qubit_interaction/virtual_z_phases_signal.py deleted file mode 100644 index 35db5dc37..000000000 --- a/src/qibocal/protocols/two_qubit_interaction/virtual_z_phases_signal.py +++ /dev/null @@ -1,149 +0,0 @@ -"""CZ virtual correction experiment for two qubit gates, tune landscape.""" - -from dataclasses import 
dataclass - -import numpy as np -from qibolab import AcquisitionType, AveragingMode, ExecutionParameters -from qibolab.platform import Platform -from qibolab.qubits import QubitPairId -from qibolab.sweeper import Parameter, Sweeper, SweeperType - -from qibocal.auto.operation import Routine - -from .utils import order_pair -from .virtual_z_phases import ( - VirtualZPhasesData, - VirtualZPhasesParameters, - VirtualZPhasesResults, - VirtualZPhasesType, - _fit, -) -from .virtual_z_phases import _plot as _plot_prob -from .virtual_z_phases import ( - _update, - create_sequence, -) - - -@dataclass -class VirtualZPhasesSignalParameters(VirtualZPhasesParameters): - """VirtualZ runcard inputs.""" - - -@dataclass -class VirtualZPhasesSignalResults(VirtualZPhasesResults): - """VirtualZ outputs when fitting will be done.""" - - -VirtualZPhasesType = np.dtype([("target", np.float64), ("control", np.float64)]) - - -@dataclass -class VirtualZPhasesSignalData(VirtualZPhasesData): - """VirtualZPhases data.""" - - -def _acquisition( - params: VirtualZPhasesSignalParameters, - platform: Platform, - targets: list[QubitPairId], -) -> VirtualZPhasesSignalData: - r""" - Acquisition for VirtualZPhases. See https://arxiv.org/pdf/1904.06560.pdf - - Check the two-qubit landscape created by a flux pulse of a given duration - and amplitude. - The system is initialized with a Y90 pulse on the low frequency qubit and either - an Id or an X gate on the high frequency qubit. Then the flux pulse is applied to - the high frequency qubit in order to perform a two-qubit interaction. The Id/X gate - is undone in the high frequency qubit and a theta90 pulse is applied to the low - frequency qubit before measurement. That is, a pi-half pulse around the relative phase - parametereized by the angle theta. - Measurements on the low frequency qubit yield the 2Q-phase of the gate and the - remnant single qubit Z phase aquired during the execution to be corrected. - Population of the high frequency qubit yield the leakage to the non-computational states - during the execution of the flux pulse. 
- """ - - theta_absolute = np.arange(params.theta_start, params.theta_end, params.theta_step) - data = VirtualZPhasesData(native=params.native, thetas=theta_absolute.tolist()) - for pair in targets: - # order the qubits so that the low frequency one is the first - ord_pair = order_pair(pair, platform) - - for target_q, control_q in ( - (ord_pair[0], ord_pair[1]), - (ord_pair[1], ord_pair[0]), - ): - for setup in ("I", "X"): - ( - sequence, - _, - theta_pulse, - ) = create_sequence( - platform, - setup, - target_q, - control_q, - ord_pair, - params.native, - params.dt, - params.parking, - ) - theta = np.arange( - params.theta_start, - params.theta_end, - params.theta_step, - dtype=float, - ) - sweeper = Sweeper( - Parameter.relative_phase, - theta, - pulses=[theta_pulse], - type=SweeperType.ABSOLUTE, - ) - results = platform.sweep( - sequence, - ExecutionParameters( - nshots=params.nshots, - relaxation_time=params.relaxation_time, - acquisition_type=AcquisitionType.INTEGRATION, - averaging_mode=AveragingMode.CYCLIC, - ), - sweeper, - ) - - result_target = results[target_q].magnitude - result_control = results[control_q].magnitude - - data.register_qubit( - VirtualZPhasesType, - (target_q, control_q, setup), - dict( - target=result_target, - control=result_control, - ), - ) - return data - - -def _plot( - data: VirtualZPhasesSignalData, - fit: VirtualZPhasesSignalResults, - target: QubitPairId, -): - """Plot routine for VirtualZPhases.""" - figs, fitting_report = _plot_prob(data, fit, target) - - for fig in figs: - fig.update_layout( - yaxis_title="Signal [a.u.]", - ) - - return figs, fitting_report - - -correct_virtual_z_phases_signal = Routine( - _acquisition, _fit, _plot, _update, two_qubit_gates=True -) -"""Virtual Z correction routine.""" diff --git a/src/qibocal/protocols/two_qubit_state_tomography.py b/src/qibocal/protocols/two_qubit_state_tomography.py index 58da7ae15..4914363bc 100644 --- a/src/qibocal/protocols/two_qubit_state_tomography.py +++ b/src/qibocal/protocols/two_qubit_state_tomography.py @@ -12,11 +12,17 @@ from qibo.backends import NumpyBackend, construct_backend from qibo.quantum_info import fidelity, partial_trace from qibo.result import QuantumState -from qibolab.platform import Platform -from qibolab.qubits import QubitId, QubitPairId -from qibocal.auto.operation import DATAFILE, Data, Results, Routine +from qibocal.auto.operation import ( + DATAFILE, + Data, + QubitId, + QubitPairId, + Results, + Routine, +) from qibocal.auto.transpile import dummy_transpiler, execute_transpiled_circuit +from qibocal.calibration import CalibrationPlatform from .state_tomography import StateTomographyParameters, plot_reconstruction from .utils import table_dict, table_html @@ -84,7 +90,9 @@ class StateTomographyResults(Results): def _acquisition( - params: StateTomographyParameters, platform: Platform, targets: list[QubitPairId] + params: StateTomographyParameters, + platform: CalibrationPlatform, + targets: list[QubitPairId], ) -> StateTomographyData: """Acquisition protocol for two qubit state tomography experiment.""" qubits = [q for pair in targets for q in pair] diff --git a/src/qibocal/protocols/utils.py b/src/qibocal/protocols/utils.py index 026a44240..474e6cfca 100644 --- a/src/qibocal/protocols/utils.py +++ b/src/qibocal/protocols/utils.py @@ -7,12 +7,11 @@ import plotly.graph_objects as go from numpy.typing import NDArray from plotly.subplots import make_subplots -from qibolab.qubits import QubitId from scipy import constants from scipy.optimize import curve_fit from 
scipy.signal import find_peaks -from qibocal.auto.operation import Data, Results +from qibocal.auto.operation import Data, QubitId, Results from qibocal.config import log from qibocal.fitting.classifier import run from qibocal.protocols.resonator_utils import ( @@ -81,7 +80,7 @@ def effective_qubit_temperature( return temp, error -def calculate_frequencies(results, qubit_list): +def calculate_frequencies(results, ro_pulses): """Calculates outcome frequencies from individual shots. Args: results (dict): return of execute_pulse_sequence @@ -90,9 +89,8 @@ def calculate_frequencies(results, qubit_list): Returns: dictionary containing frequencies. """ - shots = np.stack([results[i].samples for i in qubit_list]).T + shots = np.stack([results[ro_pulses[qubit].id] for qubit in ro_pulses]).T values, counts = np.unique(shots, axis=0, return_counts=True) - return {"".join(str(int(i)) for i in v): cnt for v, cnt in zip(values, counts)} @@ -368,26 +366,6 @@ def spectroscopy_plot(data, qubit, fit: Results = None): label = "Qubit Frequency [Hz]" freq = fit.frequency - if data.attenuations: - if data.attenuations[qubit] is not None: - if show_error_bars: - labels = [label, "Amplitude", "Attenuation", "Chi2 Reduced"] - values = [ - ( - freq[qubit], - fit.error_fit_pars[qubit][1], - ), - (data.amplitudes[qubit], 0), - (data.attenuations[qubit], 0), - fit.chi2_reduced[qubit], - ] - else: - labels = [label, "Amplitude", "Attenuation"] - values = [ - freq[qubit], - data.amplitudes[qubit], - data.attenuations[qubit], - ] if data.amplitudes[qubit] is not None: if show_error_bars: labels = [label, "Amplitude", "Chi2 reduced"] @@ -517,11 +495,10 @@ def s21_spectroscopy_plot(data, qubit, fit: Results = None): row=1, col=1, ) - fig_raw.add_trace( go.Scatter( x=np.concatenate((frequencies, frequencies[::-1])), - y=np.concatenate((phase + errors_phase), (phase - errors_phase[::-1])), + y=np.concatenate((phase + errors_phase, (phase - errors_phase)[::-1])), fill="toself", fillcolor=COLORBAND, line=dict(color=COLORBAND_LINE), @@ -712,12 +689,11 @@ def s21_spectroscopy_plot(data, qubit, fit: Results = None): row=1, col=1, ) - fig_calibrated.add_trace( go.Scatter( x=np.concatenate((frequencies, frequencies[::-1])), y=np.concatenate( - (phase + errors_phase), (phase - errors_phase[::-1]) + (phase + errors_phase, (phase - errors_phase)[::-1]) ), fill="toself", fillcolor=COLORBAND, diff --git a/src/qibocal/result.py b/src/qibocal/result.py new file mode 100644 index 000000000..675b8091e --- /dev/null +++ b/src/qibocal/result.py @@ -0,0 +1,88 @@ +"""Common result operations.""" + +import numpy as np +import numpy.typing as npt + +IQ = npt.NDArray[np.float64] +"""An array of I and Q values. + +It is assumed that the I and Q component are discriminated by the +innermost dimension of the array. +""" + + +def _lift(values: IQ) -> npt.NDArray: + """Transpose the innermost dimension to the outermost.""" + return np.moveaxis(values, -1, 0) + + +def _sink(values: npt.NDArray) -> IQ: + """Transpose the outermost dimension to the innermost. + + Inverse of :func:`_lift`. + """ + return np.moveaxis(values, 0, -1) + + +def collect(i: npt.NDArray, q: npt.NDArray) -> IQ: + """Collect I and Q components in a single array.""" + return _sink(np.stack([i, q])) + + +def unpack(iq: IQ) -> tuple[npt.NDArray, npt.NDArray]: + """Unpack I and Q components from single array. + + Inverse of :func:`collect`. + """ + i, q = tuple(_lift(iq)) + return i, q + + +def magnitude(iq: IQ): + """Signal magnitude. 
+ + It is supposed to be a tension, possibly in arbitrary units. + """ + iq_ = _lift(iq) + return np.sqrt(iq_[0] ** 2 + iq_[1] ** 2) + + +def average(values: npt.NDArray) -> tuple[npt.NDArray, npt.NDArray]: + """Perform the values average. + + It returns both the average estimator itself, and its standard + deviation estimator. + + Use this also for I and Q values in the *standard layout*, cf. :class:`IQ`. + """ + mean = np.mean(values, axis=0) + std = np.std(values, axis=0, ddof=1) / np.sqrt(values.shape[0]) + return mean, std + + +def average_iq(i: npt.NDArray, q: npt.NDArray) -> tuple[npt.NDArray, npt.NDArray]: + """Perform the average over I and Q. + + Convenience wrapper over :func:`average` for separate i and q samples arrays. + """ + return average(collect(i, q)) + + +def phase(iq: npt.NDArray): + """Signal phase in radians. + + It is assumed that the I and Q component are discriminated by the + innermost dimension of the array. + """ + iq_ = _lift(iq) + return np.unwrap(np.arctan2(iq_[0], iq_[1])) + + +def probability(values: npt.NDArray, state: int = 0): + """Return the statistical frequency of the specified state. + + The only accepted values `state` are `0` and `1`. + """ + # The absolute value is only needed to make sure the result is always positive, even + # when extremely close to zero + return abs(1 - state - np.mean(values, axis=0)) diff --git a/src/qibocal/update.py b/src/qibocal/update.py index 376cad2c3..78f865df1 100644 --- a/src/qibocal/update.py +++ b/src/qibocal/update.py @@ -4,10 +4,10 @@ from typing import Union import numpy as np -from qibolab import pulses -from qibolab.native import VirtualZPulse -from qibolab.platform import Platform -from qibolab.qubits import QubitId, QubitPairId +from pydantic import BaseModel +from qibolab import Platform, PulseSequence, VirtualZ + +from qibocal.auto.operation import QubitId, QubitPairId CLASSIFICATION_PARAMS = [ "threshold", @@ -18,29 +18,30 @@ ] +def replace(model: BaseModel, **update): + """Replace interface for pydantic models.""" + return model.model_copy(update=update) + + def readout_frequency(freq: float, platform: Platform, qubit: QubitId): """Update readout frequency value in platform for specific qubit.""" - mz = platform.qubits[qubit].native_gates.MZ - freq_hz = int(freq) - mz.frequency = freq_hz - if mz.if_frequency is not None: - mz.if_frequency = freq_hz - platform.qubits[qubit].readout.lo_frequency - platform.qubits[qubit].readout_frequency = freq_hz + ro_channel = platform.qubits[qubit].probe + platform.update({f"configs.{ro_channel}.frequency": freq}) def bare_resonator_frequency(freq: float, platform: Platform, qubit: QubitId): """Update rbare frequency value in platform for specific qubit.""" - platform.qubits[qubit].bare_resonator_frequency = int(freq) + platform.calibration.single_qubits[qubit].resonator.bare_frequency = int(freq) -def readout_amplitude(amp: float, platform: Platform, qubit: QubitId): - """Update readout amplitude value in platform for specific qubit.""" - platform.qubits[qubit].native_gates.MZ.amplitude = float(amp) +def dressed_resonator_frequency(freq: float, platform: Platform, qubit: QubitId): + """Update rbare frequency value in platform for specific qubit.""" + platform.calibration.single_qubits[qubit].resonator.dressed_frequency = int(freq) -def readout_attenuation(att: int, platform: Platform, qubit: QubitId): - """Update readout attenuation value in platform for specific qubit.""" - platform.qubits[qubit].readout.attenuation = int(att) +def readout_amplitude(amp: float, 
platform: Platform, qubit: QubitId): + """Update readout amplitude value in platform for specific qubit.""" + platform.update({f"native_gates.single_qubit.{qubit}.MZ.0.1.probe.amplitude": amp}) def drive_frequency( @@ -49,183 +50,180 @@ def drive_frequency( """Update drive frequency value in platform for specific qubit.""" if isinstance(freq, Iterable): freq = freq[0] - freq = int(freq) - platform.qubits[qubit].native_gates.RX.frequency = int(freq) - platform.qubits[qubit].drive_frequency = int(freq) + drive_channel = platform.qubits[qubit].drive + platform.update({f"configs.{drive_channel}.frequency": freq}) -def drive_amplitude(amp: Union[float, tuple, list], platform: Platform, qubit: QubitId): +def drive_amplitude( + amp: Union[float, tuple, list], rx90: bool, platform: Platform, qubit: QubitId +): """Update drive frequency value in platform for specific qubit.""" if isinstance(amp, Iterable): amp = amp[0] - platform.qubits[qubit].native_gates.RX.amplitude = float(amp) + if rx90: + platform.update({f"native_gates.single_qubit.{qubit}.RX90.0.1.amplitude": amp}) + else: + platform.update({f"native_gates.single_qubit.{qubit}.RX.0.1.amplitude": amp}) def drive_duration( - duration: Union[int, tuple, list], platform: Platform, qubit: QubitId + duration: Union[int, tuple, list], rx90: bool, platform: Platform, qubit: QubitId ): """Update drive duration value in platform for specific qubit.""" if isinstance(duration, Iterable): duration = duration[0] - platform.qubits[qubit].native_gates.RX.duration = int(duration) + if rx90: + platform.update( + {f"native_gates.single_qubit.{qubit}.RX90.0.1.duration": int(duration)} + ) + else: + platform.update( + {f"native_gates.single_qubit.{qubit}.RX.0.1.duration": int(duration)} + ) def crosstalk_matrix( matrix_element: float, platform: Platform, qubit: QubitId, flux_qubit: QubitId ): """Update crosstalk_matrix element.""" - platform.qubits[qubit].crosstalk_matrix[flux_qubit] = float(matrix_element) + if platform.calibration.flux_crosstalk_matrix is None: + platform.calibration.flux_crosstalk_matrix = np.zeros( + (platform.calibration.nqubits, platform.calibration.nqubits) + ) + platform.calibration.set_crosstalk_element(qubit, flux_qubit, matrix_element) def iq_angle(angle: float, platform: Platform, qubit: QubitId): """Update iq angle value in platform for specific qubit.""" - platform.qubits[qubit].iq_angle = float(angle) + ro_channel = platform.qubits[qubit].acquisition + platform.update({f"configs.{ro_channel}.iq_angle": angle}) def threshold(threshold: float, platform: Platform, qubit: QubitId): - platform.qubits[qubit].threshold = float(threshold) + ro_channel = platform.qubits[qubit].acquisition + platform.update({f"configs.{ro_channel}.threshold": threshold}) -def mean_gnd_states(gnd_state: list, platform: Platform, qubit: QubitId): +def mean_gnd_states(ground_state: list, platform: Platform, qubit: QubitId): """Update mean ground state value in platform for specific qubit.""" - platform.qubits[qubit].mean_gnd_states = gnd_state + platform.calibration.single_qubits[qubit].readout.ground_state = ground_state -def mean_exc_states(exc_state: list, platform: Platform, qubit: QubitId): +def mean_exc_states(excited_state: list, platform: Platform, qubit: QubitId): """Update mean excited state value in platform for specific qubit.""" - platform.qubits[qubit].mean_exc_states = exc_state + platform.calibration.single_qubits[qubit].readout.excited_state = excited_state def readout_fidelity(fidelity: float, platform: Platform, qubit: QubitId): """Update 
fidelity of single shot classification."""
-    platform.qubits[qubit].readout_fidelity = float(fidelity)
-
-
-def assignment_fidelity(fidelity: float, platform: Platform, qubit: QubitId):
-    """Update fidelity of single shot classification."""
-    platform.qubits[qubit].assignment_fidelity = float(fidelity)
+    platform.calibration.single_qubits[qubit].readout.fidelity = float(fidelity)
 
 
 def virtual_phases(
     phases: dict[QubitId, float], native: str, platform: Platform, pair: QubitPairId
 ):
-    """Update virtual phases for given qubits in pair in results."""
-    virtual_z_pulses = {
-        pulse.qubit.name: pulse
-        for pulse in getattr(platform.pairs[pair].native_gates, native).pulses
-        if isinstance(pulse, VirtualZPulse)
-    }
-    for qubit_id, phase in phases.items():
-        if qubit_id in virtual_z_pulses:
-            virtual_z_pulses[qubit_id].phase = phase
-        else:
-            virtual_z_pulses[qubit_id] = VirtualZPulse(
-                phase=phase, qubit=platform.qubits[qubit_id]
-            )
-            getattr(platform.pairs[pair].native_gates, native).pulses.append(
-                virtual_z_pulses[qubit_id]
-            )
+    """Update virtual phases for given qubits in pair."""
+    native_sequence = getattr(platform.natives.two_qubit[pair], native)
+    new_native = PulseSequence()
+    if len(native_sequence) > 1:
+        new_native.append(native_sequence[0])
+    else:  # pragma: no cover
+        new_native = native_sequence
+    for qubit, phase in phases.items():
+        new_native.append((platform.qubits[qubit].drive, VirtualZ(phase=phase)))
+
+    platform.update(
+        {f"native_gates.two_qubit.{f'{pair[0]}-{pair[1]}'}.{native}": new_native}
+    )
 
 
 def CZ_duration(duration: int, platform: Platform, pair: QubitPairId):
     """Update CZ duration for specific pair."""
-    for pulse in platform.pairs[pair].native_gates.CZ.pulses:
-        if pulse.qubit.name == pair[1]:
-            pulse.duration = int(duration)
+    platform.update(
+        {f"native_gates.two_qubit.{f'{pair[0]}-{pair[1]}'}.CZ.0.1.duration": duration}
+    )
 
 
 def CZ_amplitude(amp: float, platform: Platform, pair: QubitPairId):
     """Update CZ amplitude for specific pair."""
-    for pulse in platform.pairs[pair].native_gates.CZ.pulses:
-        if pulse.qubit.name == pair[1]:
-            pulse.amplitude = float(amp)
+    platform.update(
+        {f"native_gates.two_qubit.{f'{pair[0]}-{pair[1]}'}.CZ.0.1.amplitude": amp}
+    )
 
 
 def iSWAP_duration(duration: int, platform: Platform, pair: QubitPairId):
     """Update iSWAP duration for specific pair."""
-    for pulse in platform.pairs[pair].native_gates.iSWAP.pulses:
-        if pulse.qubit.name == pair[1]:
-            pulse.duration = int(duration)
+    platform.update(
+        {f"native_gates.two_qubit.{f'{pair[0]}-{pair[1]}'}.iSWAP.0.1.duration": duration}
+    )
 
 
 def iSWAP_amplitude(amp: float, platform: Platform, pair: QubitPairId):
     """Update iSWAP amplitude for specific pair."""
-    for pulse in platform.pairs[pair].native_gates.iSWAP.pulses:
-        if pulse.qubit.name == pair[1]:
-            pulse.amplitude = float(amp)
+    platform.update(
+        {f"native_gates.two_qubit.{f'{pair[0]}-{pair[1]}'}.iSWAP.0.1.amplitude": amp}
+    )
 
 
 def t1(t1: int, platform: Platform, qubit: QubitId):
     """Update t1 value in platform for specific qubit."""
-    if isinstance(t1, Iterable):
-        platform.qubits[qubit].T1 = int(t1[0])
-    else:
-        platform.qubits[qubit].T1 = int(t1)
+    platform.calibration.single_qubits[qubit].t1 = tuple(t1)
 
 
 def t2(t2: int, platform: Platform, qubit: QubitId):
     """Update t2 value in platform for specific qubit."""
-    if isinstance(t2, Iterable):
-        platform.qubits[qubit].T2 = int(t2[0])
-    else:
-        platform.qubits[qubit].T2 = int(t2)
+    platform.calibration.single_qubits[qubit].t2 = tuple(t2)
 
 
 def t2_spin_echo(t2_spin_echo: float, platform: Platform, qubit: QubitId):
     """Update t2 echo 
value in platform for specific qubit.""" - if isinstance(t2_spin_echo, Iterable): - platform.qubits[qubit].T2_spin_echo = int(t2_spin_echo[0]) - else: - platform.qubits[qubit].T2_spin_echo = int(t2_spin_echo) + platform.calibration.single_qubits[qubit].t2_spin_echo = tuple(t2_spin_echo) def drag_pulse_beta(beta: float, platform: Platform, qubit: QubitId): """Update beta parameter value in platform for specific qubit.""" - pulse = platform.qubits[qubit].native_gates.RX.pulse(start=0) - rel_sigma = pulse.shape.rel_sigma - drag_pulse = pulses.Drag(rel_sigma=rel_sigma, beta=beta) - platform.qubits[qubit].native_gates.RX.shape = repr(drag_pulse) + platform.update( + { + f"native_gates.single_qubit.{qubit}.RX.0.1.envelope.kind": "drag", + f"native_gates.single_qubit.{qubit}.RX.0.1.envelope.beta": beta, + } + ) def sweetspot(sweetspot: float, platform: Platform, qubit: QubitId): """Update sweetspot parameter in platform for specific qubit.""" - platform.qubits[qubit].sweetspot = float(sweetspot) + platform.calibration.single_qubits[qubit].qubit.sweetspot = float(sweetspot) + + +def flux_offset(offset: float, platform: Platform, qubit: QubitId): + """Update flux offset parameter in platform for specific qubit.""" + platform.update({f"configs.{platform.qubits[qubit].flux}.offset": offset}) def frequency_12_transition(frequency: int, platform: Platform, qubit: QubitId): - platform.qubits[qubit].native_gates.RX12.frequency = int(frequency) + channel = platform.qubits[qubit].drive_qudits[1, 2] + platform.update({f"configs.{channel}.frequency": frequency}) + platform.calibration.single_qubits[qubit].qubit.frequency_12 = int(frequency) def drive_12_amplitude(amplitude: float, platform: Platform, qubit: QubitId): - platform.qubits[qubit].native_gates.RX12.amplitude = float(amplitude) + platform.update( + {f"native_gates.single_qubit.{qubit}.RX12.0.1.amplitude": amplitude} + ) def drive_12_duration( duration: Union[int, tuple, list], platform: Platform, qubit: QubitId ): """Update drive duration value in platform for specific qubit.""" - platform.qubits[qubit].native_gates.RX12.duration = int(duration) - - -def twpa_frequency(frequency: int, platform: Platform, qubit: QubitId): - platform.qubits[qubit].twpa.local_oscillator.frequency = int(frequency) - - -def twpa_power(power: float, platform: Platform, qubit: QubitId): - platform.qubits[qubit].twpa.local_oscillator.power = float(power) - - -def anharmonicity(anharmonicity: float, platform: Platform, qubit: QubitId): - platform.qubits[qubit].anharmonicity = int(anharmonicity) - - -def asymmetry(asymmetry: float, platform: Platform, qubit: QubitId): - platform.qubits[qubit].asymmetry = float(asymmetry) + platform.update( + {f"native_gates.single_qubit.{qubit}.RX12.0.1.duration": int(duration)} + ) def coupling(g: float, platform: Platform, qubit: QubitId): - platform.qubits[qubit].g = float(g) + platform.calibration.single_qubits[qubit].readout.coupling = float(g) def kernel(kernel: np.ndarray, platform: Platform, qubit: QubitId): - platform.qubits[qubit].kernel = kernel + ro_channel = platform.qubits[qubit].acquisition + platform.update({f"configs.{ro_channel}.kernel": kernel}) diff --git a/src/qibocal/web/compared_report.py b/src/qibocal/web/compared_report.py index 562416c22..80af8be44 100644 --- a/src/qibocal/web/compared_report.py +++ b/src/qibocal/web/compared_report.py @@ -7,9 +7,9 @@ import pandas as pd import plotly.graph_objects as go from plotly.subplots import make_subplots -from qibolab.qubits import QubitId, QubitPairId from 
qibocal.auto.history import History +from qibocal.auto.operation import QubitId, QubitPairId from qibocal.auto.output import Output from qibocal.auto.task import Completed, TaskId from qibocal.cli.report import generate_figures_and_report diff --git a/src/qibocal/web/report.py b/src/qibocal/web/report.py index 64a54ffaa..591a19319 100644 --- a/src/qibocal/web/report.py +++ b/src/qibocal/web/report.py @@ -7,12 +7,18 @@ WEB_DIR = pathlib.Path(__file__).parent STYLES = WEB_DIR / "static" / "styles.css" +SCRIPT = WEB_DIR / "script.js" TEMPLATES = WEB_DIR / "templates" -def report_css_styles(styles_path: pathlib.Path): +def report_css_styles(path: pathlib.Path): """HTML string containing path of css file.""" - return f"" + return f"" + + +def report_script(path: pathlib.Path): + """HTML string containing path of js file.""" + return f"" @dataclass diff --git a/src/qibocal/web/script.js b/src/qibocal/web/script.js new file mode 100644 index 000000000..b61595653 --- /dev/null +++ b/src/qibocal/web/script.js @@ -0,0 +1,42 @@ +function redirectToRuncard() { + window.location.href = './action.yml'; +} + +function redirectToPlatform() { + window.location.href = './new_platform/parameters.json'; +} + + +// To Download PDF +var exportPDFButton = document.getElementById("export-pdf"); + +exportPDFButton.addEventListener("click", function() { + +document.body.classList.add("impresion"); + +var doc = new jsPDF({orientation: 'landscape',}); + +var iframes = document.querySelectorAll("iframe.gh-fit") +source = "" +for(var id = 0; id < iframes.length; id++) { + var win = iframes[id].contentWindow + var doc = win.document + var html = doc.documentElement + var body = doc.body + var ifrm = iframes[id] // or win.frameElement + source = source + html +} + +print(source) + +doc.fromHTML(source, 0, 0, { +width: 210, +margins: { + left: 10, + right: 10, + top: 10, + bottom: 10 +} +}); +doc.save("report.pdf"); +}); diff --git a/src/qibocal/web/static/styles.css b/src/qibocal/web/static/styles.css index 67964f6ac..7b48bba07 100644 --- a/src/qibocal/web/static/styles.css +++ b/src/qibocal/web/static/styles.css @@ -155,11 +155,10 @@ font-family: system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue","Noto Sans","Liberation Sans",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji"; } -.button-export -{ - background:#6400FF; - color:white; - font-family: system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue","Noto Sans","Liberation Sans",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji"; +.button-container button { + background: #6400FF; + color: white; + font-family: system-ui, -apple-system, "Segoe UI", Roboto, "Helvetica Neue", "Noto Sans", "Liberation Sans", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; font-size: 0.8em; } diff --git a/src/qibocal/web/templates/template.html b/src/qibocal/web/templates/template.html index 414dd1696..5f375a020 100644 --- a/src/qibocal/web/templates/template.html +++ b/src/qibocal/web/templates/template.html @@ -32,11 +32,18 @@
@@ -46,10 +53,10 @@
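The new `src/qibocal/result.py` module introduced above defines the I/Q array layout only in docstrings, so a short usage sketch may help. It is not part of the patch; the array shapes and random values are made up purely for illustration, while the function names and semantics follow the module as added.

```python
import numpy as np

from qibocal.result import collect, magnitude, phase, probability, unpack

# Fake acquisition data: 100 shots on 5 qubits, I and Q components kept separate.
i = np.random.normal(size=(100, 5))
q = np.random.normal(size=(100, 5))

iq = collect(i, q)           # standard layout: I/Q on the innermost axis -> shape (100, 5, 2)
i_back, q_back = unpack(iq)  # inverse of collect
assert np.allclose(i_back, i) and np.allclose(q_back, q)

signal = magnitude(iq)       # sqrt(I**2 + Q**2), shape (100, 5)
angles = phase(iq)           # unwrapped arctan2(I, Q), shape (100, 5)

# Discriminated shots: statistical frequency of state |1> per qubit.
shots = np.random.randint(0, 2, size=(1000, 5))
p1 = probability(shots, state=1)
```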