diff --git a/dev_tools/bloq-method-overrides-report.py b/dev_tools/bloq-method-overrides-report.py index 6f106bfc2..2135afaef 100644 --- a/dev_tools/bloq-method-overrides-report.py +++ b/dev_tools/bloq-method-overrides-report.py @@ -43,7 +43,7 @@ def _call_graph(bc: Type[Bloq]): if annot['ssa'] != 'SympySymbolAllocator': print(f"{bc}.build_call_graph `ssa: 'SympySymbolAllocator'`") if annot['return'] != Set[ForwardRef('BloqCountT')]: # type: ignore[misc] - print(f"{bc}.build_call_graph -> 'BloqCountT'") + print(f"{bc}.build_call_graph -> Set['BloqCountT'], not {annot['return']}") def report_call_graph_methods(): diff --git a/dev_tools/costing-report-card.ipynb b/dev_tools/costing-report-card.ipynb new file mode 100644 index 000000000..73fc9724b --- /dev/null +++ b/dev_tools/costing-report-card.ipynb @@ -0,0 +1,241 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "1c2b1422-5a3b-401c-88fb-0c0b723c6633", + "metadata": {}, + "source": [ + "# Timing Cost Computation\n", + "\n", + "This notebook goes through each bloq example and calls `report_on_cost_timings`, which currently times how long it takes to do the `QubitCount` cost key. This uses the `ExecuteWithTimeout` fixture." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ae691c8-a5f8-4b66-a99c-7d82ad8ffc2e", + "metadata": {}, + "outputs": [], + "source": [ + "from qualtran_dev_tools.execute_with_timeout import ExecuteWithTimeout\n", + "from qualtran_dev_tools.bloq_report_card import report_on_cost_timings\n", + "from qualtran_dev_tools.bloq_finder import get_bloq_examples" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3fcfb561-6cb9-4891-8157-547dbfa5502b", + "metadata": {}, + "outputs": [], + "source": [ + "bes = get_bloq_examples()\n", + "\n", + "# Imports to exclude certain bloqs, see following comment\n", + "from qualtran.bloqs.multiplexers.apply_gate_to_lth_target import ApplyGateToLthQubit" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cea586d9-55b3-4ee8-b255-ea88bdff75f4", + "metadata": {}, + "outputs": [], + "source": [ + "exec = ExecuteWithTimeout(timeout=20., max_workers=4)\n", + "for i, be in enumerate(bes):\n", + "\n", + " if be.bloq_cls == ApplyGateToLthQubit:\n", + " # This bloq uses a lambda function as one of its attributes, which\n", + " # can't be pickled and used with multiprocessing.\n", + " continue\n", + " \n", + " exec.submit(report_on_cost_timings, kwargs=dict(name=be.name, cls_name=be.bloq_cls.__name__, bloq=be.make()))\n", + "\n", + "records = []\n", + "while exec.work_to_be_done:\n", + " kwargs, record = exec.next_result()\n", + " print('\\r', f'{exec.work_to_be_done:5d} remaining', end='', flush=True)\n", + " \n", + " if record is None:\n", + " records.append({\n", + " 'name': kwargs['name'],\n", + " 'cls': kwargs['cls_name'],\n", + " 'err': 'Timeout',\n", + " })\n", + " else:\n", + " records.append(record)\n", + "\n", + "import pandas as pd\n", + "df = pd.DataFrame(records)" + ] + }, + { + "cell_type": "markdown", + "id": "abf0d3f1-bc5f-48a6-9d9b-b9fa1bbeb8d1", + "metadata": {}, + "source": [ + "## Slowest\n", + "\n", + "This prints the total number of bloq examples considered and then summarizes the 5 slowest-to-compute bloq examples." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "86f259b0-ec70-4860-b76c-0df993ca7934", + "metadata": {}, + "outputs": [], + "source": [ + "print(len(df))\n", + "df.sort_values(by='qubitcount_dur', ascending=False).head()" + ] + }, + { + "cell_type": "markdown", + "id": "a30190dd-550d-45d4-a572-94fdaa4a2f10", + "metadata": {}, + "source": [ + "## Errors and timeouts\n", + "\n", + "These bloq examples either time-out or encounter errors in the qubit computation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "513a3421-8f80-43ae-9449-89c8063da242", + "metadata": {}, + "outputs": [], + "source": [ + "df[df['qubitcount_dur'].isna()]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "605e8869-e77e-40b3-8455-07204b7b872c", + "metadata": {}, + "outputs": [], + "source": [ + "for i, row in df[df['qubitcount_dur'].isna()].iterrows():\n", + " print(\"### `{}`\".format(row['name']))\n", + " print(\"{}\\n\".format(row[\"err\"]))" + ] + }, + { + "cell_type": "markdown", + "id": "bceb65d1-d107-4c0d-8662-8ea80c00de16", + "metadata": {}, + "source": [ + "## Timeouts\n", + "\n", + "These examples specifically time out. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "043c3d7a-f4d4-486c-aa99-eb871e23df3b", + "metadata": {}, + "outputs": [], + "source": [ + "df[df['err'] == 'Timeout']" + ] + }, + { + "cell_type": "markdown", + "id": "c7b87a8d-c4eb-4b7b-abce-66dfa31a0699", + "metadata": {}, + "source": [ + "## Investigation\n", + "\n", + "Individual bloq examples can be investigated. Strangely, hubbard_time_evolution_by_gqsp times out when run through the fixture but appears reasonably quick when run directly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d0f486e8-8cbf-4907-a84d-9ff43cfcd43e", + "metadata": {}, + "outputs": [], + "source": [ + "def get_bloq_example(name):\n", + " results = [be for be in bes if be.name == name]\n", + " \n", + " if len(results) == 1:\n", + " return results[0]\n", + " if len(results) > 1:\n", + " raise ValueError(\"Found more than one result for the query\")\n", + " if len(results) == 0:\n", + " raise KeyError(f\"The bloq example {name} was not found\") " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d01d939-fa0d-4934-8a56-24f1087cd232", + "metadata": {}, + "outputs": [], + "source": [ + "from qualtran.drawing import show_call_graph\n", + "be = get_bloq_example('modexp_small')\n", + "bloq = be.make()\n", + "show_call_graph(bloq, max_depth=1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b27557c-4806-4041-b9ae-7eb28b908bce", + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "logging.basicConfig(level=logging.INFO)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e402cd50-8015-4f14-9c6f-1a7755bf4936", + "metadata": {}, + "outputs": [], + "source": [ + "%%timeit\n", + "from qualtran.resource_counting import get_cost_value, QubitCount\n", + "\n", + "get_cost_value(bloq, QubitCount())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "56391b2c-9381-480e-b204-e881fe0828ae", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": 
"python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/dev_tools/qualtran_dev_tools/bloq_report_card.py b/dev_tools/qualtran_dev_tools/bloq_report_card.py index 4d26c7ffb..9a5d292fa 100644 --- a/dev_tools/qualtran_dev_tools/bloq_report_card.py +++ b/dev_tools/qualtran_dev_tools/bloq_report_card.py @@ -19,6 +19,8 @@ import pandas.io.formats.style from qualtran import Bloq, BloqExample +from qualtran.resource_counting import get_cost_value, QubitCount +from qualtran.simulation.tensor import cbloq_to_quimb from qualtran.testing import ( BloqCheckResult, check_bloq_example_decompose, @@ -134,3 +136,44 @@ def summarize_results(report_card: pd.DataFrame) -> pd.DataFrame: ) summary.columns = [v.name.lower() for v in summary.columns] return summary + + +def report_on_tensors(name: str, cls_name: str, bloq: Bloq, cxn) -> None: + """Get timing information for tensor functionality. + + This should be used with `ExecuteWithTimeout`. The resultant + record dictionary is sent over `cxn`. + """ + record: Dict[str, Any] = {'name': name, 'cls': cls_name} + + try: + start = time.perf_counter() + flat = bloq.as_composite_bloq().flatten() + record['flat_dur'] = time.perf_counter() - start + + start = time.perf_counter() + tn = cbloq_to_quimb(flat) + record['tn_dur'] = time.perf_counter() - start + + start = time.perf_counter() + record['width'] = tn.contraction_width() + record['width_dur'] = time.perf_counter() - start + + except Exception as e: # pylint: disable=broad-exception-caught + record['err'] = str(e) + + cxn.send(record) + + +def report_on_cost_timings(name: str, cls_name: str, bloq: Bloq, cxn) -> None: + record: Dict[str, Any] = {'name': name, 'cls': cls_name} + + try: + start = time.perf_counter() + _ = get_cost_value(bloq, QubitCount()) + record['qubitcount_dur'] = time.perf_counter() - start + + except Exception as e: # pylint: disable=broad-exception-caught + record['err'] = str(e) + + cxn.send(record) diff --git a/dev_tools/qualtran_dev_tools/execute_with_timeout.py b/dev_tools/qualtran_dev_tools/execute_with_timeout.py new file mode 100644 index 000000000..842d1bd75 --- /dev/null +++ b/dev_tools/qualtran_dev_tools/execute_with_timeout.py @@ -0,0 +1,123 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import multiprocessing.connection +import time +from typing import Any, Callable, Dict, List, Optional, Tuple + +from attrs import define + + +@define +class _Pending: + """Helper dataclass to track currently executing processes in `ExecuteWithTimeout`.""" + + p: multiprocessing.Process + recv: multiprocessing.connection.Connection + start_time: float + kwargs: Dict[str, Any] + + +class ExecuteWithTimeout: + """Execute tasks in processes where each task will be killed if it exceeds `timeout`. + + Seemingly all the existing "timeout" parameters in the various built-in concurrency + primitives in Python won't actually terminate the process. This one does. 
+ """ + + def __init__(self, timeout: float, max_workers: int): + self.timeout = timeout + self.max_workers = max_workers + + self.queued: List[Tuple[Callable, Dict[str, Any]]] = [] + self.pending: List[_Pending] = [] + + @property + def work_to_be_done(self) -> int: + """The number of tasks currently executing or queued.""" + return len(self.queued) + len(self.pending) + + def submit(self, func: Callable, kwargs: Dict[str, Any]) -> None: + """Add a task to the queue. + + `func` must be a callable that can accept `kwargs` in addition to + a keyword argument `cxn` which is a multiprocessing `Connection` object that forms + the sending-half of a `mp.Pipe`. The callable must call `cxn.send(...)` + to return a result. + """ + self.queued.append((func, kwargs)) + + def _submit_from_queue(self): + # helper method that takes an item from the queue, launches a process, + # and records it in the `pending` attribute. This must only be called + # if we're allowed to spawn a new process. + func, kwargs = self.queued.pop(0) + recv, send = multiprocessing.Pipe(duplex=False) + kwargs['cxn'] = send + p = multiprocessing.Process(target=func, kwargs=kwargs) + start_time = time.time() + p.start() + self.pending.append(_Pending(p=p, recv=recv, start_time=start_time, kwargs=kwargs)) + + def _scan_pendings(self) -> Optional[_Pending]: + # helper method that goes through the currently pending tasks, terminates the ones + # that have been going on too long, and accounts for ones that have finished. + # Returns the `_Pending` of the killed or completed job or `None` if each pending + # task is still running but none have exceeded the timeout. + for i in range(len(self.pending)): + pen = self.pending[i] + + if not pen.p.is_alive(): + self.pending.pop(i) + pen.p.join() + return pen + + if time.time() - pen.start_time > self.timeout: + pen.p.terminate() + self.pending.pop(i) + return pen + + return None + + def next_result(self) -> Tuple[Dict[str, Any], Optional[Any]]: + """Get the next available result. + + This call is blocking, but should never take longer than `self.timeout`. This should + be called in a loop to make sure the queue continues to be processed. + + Returns: + task kwargs: The keyword arguments used to submit the task. + result: If the process finished successfully, this is the object that was + sent through the multiprocessing pipe as the result. Otherwise, the result + is None. + """ + while len(self.queued) > 0 and len(self.pending) < self.max_workers: + self._submit_from_queue() + + while True: + finished = self._scan_pendings() + if finished is not None: + break + + if finished.p.exitcode == 0: + result = finished.recv.recv() + else: + result = None + + finished.recv.close() + + while len(self.queued) > 0 and len(self.pending) < self.max_workers: + self._submit_from_queue() + + return (finished.kwargs, result) diff --git a/dev_tools/qualtran_dev_tools/execute_with_timeout_test.py b/dev_tools/qualtran_dev_tools/execute_with_timeout_test.py new file mode 100644 index 000000000..5df2c92f8 --- /dev/null +++ b/dev_tools/qualtran_dev_tools/execute_with_timeout_test.py @@ -0,0 +1,38 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import time + +from .execute_with_timeout import ExecuteWithTimeout + + +def a_long_function(n_seconds: int, cxn) -> None: + time.sleep(n_seconds) + cxn.send("Done") + + +def test_execute_with_timeout(): + exe = ExecuteWithTimeout(timeout=1, max_workers=1) + + for ns in [0.1, 100]: + exe.submit(a_long_function, {'n_seconds': ns}) + + results = [] + while exe.work_to_be_done: + kwargs, result = exe.next_result() + if result is None: + results.append('Timeout') + else: + results.append(result) + + assert set(results) == {'Done', 'Timeout'} diff --git a/dev_tools/tensor-report-card.ipynb b/dev_tools/tensor-report-card.ipynb new file mode 100644 index 000000000..49d08b1f9 --- /dev/null +++ b/dev_tools/tensor-report-card.ipynb @@ -0,0 +1,262 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "9a9335f1", + "metadata": {}, + "source": [ + "# Tensor Report Card\n", + "\n", + "Not all bloq examples support tensor simulation. This report card automatically determines which bloq examples should be tensor simulable.\n", + "\n", + " - State vector simulation uses $2^n$ numbers to simulate a quantum state. The tensor protocol uses quimb to try to find more efficient contraction orderings. Quimb reports the contraction width, which is the log2 of the size of the largest intermediate tensor encountered in the contraction. The simulation uses $2^w$ numbers, where $w$ is the width. We consider a width under 25 qubits as simulable.\n", + " - Qualtran requires \"flattening\" out the bloq to turn it into an efficient tensor network. This may take too much time itself for large algorithms with many levels of abstraction. If the process of turning a bloq into a quimb tensor network and finding a contraction ordering takes longer than 8 seconds, we don't consider the bloq simulable.\n", + " - The flattened structure needs to have explicit tensors. For bloqs with symbolic parameters, we either can't decompose & flatten them, or the tensors would be symbolic, which we don't support."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4d2bf2bc-1b55-4f68-b0f4-68f62dd68bae", + "metadata": {}, + "outputs": [], + "source": [ + "from qualtran_dev_tools.bloq_finder import get_bloq_examples\n", + "from qualtran_dev_tools.tensor_report_card import report_on_tensors, ExecuteWithTimeout" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "86168342-d740-446a-bc88-06eaec8ae3a8", + "metadata": {}, + "outputs": [], + "source": [ + "bes = get_bloq_examples()\n", + "\n", + "# Imports to exclude certain bloqs, see following comment\n", + "from qualtran.bloqs.multiplexers.apply_gate_to_lth_target import ApplyGateToLthQubit" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51781740-6417-4b3b-b5d8-86f51340a016", + "metadata": {}, + "outputs": [], + "source": [ + "exec = ExecuteWithTimeout(timeout=8., max_workers=4)\n", + "for i, be in enumerate(bes):\n", + "\n", + " if be.bloq_cls == ApplyGateToLthQubit:\n", + " # This bloq uses a lambda function as one of its attributes, which\n", + " # can't be pickled and used with multiprocessing.\n", + " continue\n", + " \n", + " exec.submit(report_on_tensors, kwargs=dict(name=be.name, cls_name=be.bloq_cls.__name__, bloq=be.make()))\n", + "\n", + "records = []\n", + "while exec.work_to_be_done:\n", + " kwargs, record = exec.next_result()\n", + " #print(kwargs['name'], end=' ', flush=True)\n", + " print('\\r', f'{exec.work_to_be_done:5d} remaining', end='', flush=True)\n", + " \n", + " if record is None:\n", + " records.append({\n", + " 'name': kwargs['name'],\n", + " 'cls': kwargs['cls_name'],\n", + " 'err': 'Timeout',\n", + " })\n", + " else:\n", + " records.append(record)\n", + "\n", + "import pandas as pd\n", + "df = pd.DataFrame(records)" + ] + }, + { + "cell_type": "markdown", + "id": "393a5aad-7a78-4167-8dd5-407d8a0b3241", + "metadata": {}, + "source": [ + "## Number of bloq examples considered" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5d674962-27d7-40d1-9916-f0aba3457934", + "metadata": {}, + "outputs": [], + "source": [ + "print(len(df))" + ] + }, + { + "cell_type": "markdown", + "id": "6a59804e-afc6-4227-b74f-cbf82f14b773", + "metadata": {}, + "source": [ + "## Number of bloq examples successfully flattened" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b048464-083a-4e04-8135-176f449450ca", + "metadata": {}, + "outputs": [], + "source": [ + "print(len(df[df['flat_dur'] > 0]))" + ] + }, + { + "cell_type": "markdown", + "id": "a4194f99-1551-4a5e-b93d-fb043cc56f9e", + "metadata": {}, + "source": [ + "## Number of bloq examples with tensors" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cdfa4e3-e1db-4869-832c-63695a3fd3f4", + "metadata": {}, + "outputs": [], + "source": [ + "print(len(df[df['width'] > 0]))" + ] + }, + { + "cell_type": "markdown", + "id": "05bffa4b-6bbe-4901-b1de-aba1abebe552", + "metadata": {}, + "source": [ + "## Bloqs that are tensor simulable" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "578fd2b5-9f7e-4edc-81b2-a8aad989be68", + "metadata": {}, + "outputs": [], + "source": [ + "print(len(df[df['width'] <= 25]))\n", + "df[df['width'] <= 25]" + ] + }, + { + "cell_type": "markdown", + "id": "4ef49322-cffb-4ff9-8cb1-6e5a87749492", + "metadata": {}, + "source": [ + "## Bloqs whose tensor network is too big" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d529d144-b3c5-45e5-ae14-f6d3fb86f153", + "metadata": {}, + "outputs": [], + 
"source": [ + "df[df['width'] > 25].sort_values(by='width')" + ] + }, + { + "cell_type": "markdown", + "id": "41141f6f-4377-4cee-9ca7-d2981c94d2e4", + "metadata": {}, + "source": [ + "## Bloqs without tensors\n", + "\n", + "Due to errors encountered in flattening or if the bloq's callees don't support tensor simulation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c3386091-491a-404f-b13d-72b401d2c267", + "metadata": {}, + "outputs": [], + "source": [ + "df[df['width'].isna()]" + ] + }, + { + "cell_type": "markdown", + "id": "46a79e7f-0715-4007-8c11-d959f8f2255b", + "metadata": {}, + "source": [ + "## Slowest to flatten\n", + "\n", + "Within the overall timeout" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "df522d3f-4392-46ea-851e-321d0fdfd52f", + "metadata": {}, + "outputs": [], + "source": [ + "df.sort_values(by='flat_dur', ascending=False).head()" + ] + }, + { + "cell_type": "markdown", + "id": "48f419ef-2723-42ea-8670-81a729164cf7", + "metadata": {}, + "source": [ + "## Flattening is the rate-limiting step.\n", + "\n", + "For bloqs that have been successfully flattened, the maximum tensor-network-construction and tensor-contraction-ordering durations are less than 0.5s. Note: the contraction finding code uses the fast, naive approach. One can choose more expensive approaches where the contraciton-ordering-finding is more expensive." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eae32349-ad93-4072-9704-044ef390e59e", + "metadata": {}, + "outputs": [], + "source": [ + "# Slowest tn_dur\n", + "df.sort_values(by='tn_dur', ascending=False).head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9249f813-1d42-47f4-bd1a-f198b79298f6", + "metadata": {}, + "outputs": [], + "source": [ + "# Slowest width_dur\n", + "df.sort_values(by='width_dur', ascending=False).head()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/qualtran/_infra/bloq.py b/qualtran/_infra/bloq.py index 5fda9d70c..e9d48121f 100644 --- a/qualtran/_infra/bloq.py +++ b/qualtran/_infra/bloq.py @@ -52,9 +52,14 @@ def _decompose_from_build_composite_bloq(bloq: 'Bloq') -> 'CompositeBloq': from qualtran import BloqBuilder - bb, initial_soqs = BloqBuilder.from_signature(bloq.signature, add_registers_allowed=False) - out_soqs = bloq.build_composite_bloq(bb=bb, **initial_soqs) - return bb.finalize(**out_soqs) + try: + bb, initial_soqs = BloqBuilder.from_signature(bloq.signature, add_registers_allowed=False) + out_soqs = bloq.build_composite_bloq(bb=bb, **initial_soqs) + return bb.finalize(**out_soqs) + except (DecomposeTypeError, DecomposeNotImplementedError) as ex: + raise ex + except Exception as ex: + raise RuntimeError(f"Unexpected error when decomposing {bloq}: {ex}") from ex class DecomposeNotImplementedError(NotImplementedError): diff --git a/qualtran/bloqs/arithmetic/comparison.py b/qualtran/bloqs/arithmetic/comparison.py index ff4986262..290649b32 100644 --- a/qualtran/bloqs/arithmetic/comparison.py +++ b/qualtran/bloqs/arithmetic/comparison.py @@ -1080,6 +1080,8 @@ def build_composite_bloq( a = bb.add(SignExtend(self.dtype, 
QInt(self.dtype.bitsize + 1)), x=a) b = bb.add(SignExtend(self.dtype, QInt(self.dtype.bitsize + 1)), x=b) else: + if self.dtype.is_symbolic(): + raise DecomposeTypeError(f"Cannot decompose symbolic {self}") a = bb.join(np.concatenate([[bb.allocate(1)], bb.split(a)])) b = bb.join(np.concatenate([[bb.allocate(1)], bb.split(b)])) diff --git a/qualtran/bloqs/arithmetic/controlled_addition.py b/qualtran/bloqs/arithmetic/controlled_addition.py index cfe4bcc5d..da75a1e6e 100644 --- a/qualtran/bloqs/arithmetic/controlled_addition.py +++ b/qualtran/bloqs/arithmetic/controlled_addition.py @@ -23,6 +23,7 @@ bloq_example, BloqBuilder, BloqDocSpec, + DecomposeTypeError, QBit, QInt, QUInt, @@ -134,6 +135,9 @@ def wire_symbol(self, soq: 'Soquet') -> 'WireSymbol': def build_composite_bloq( self, bb: 'BloqBuilder', ctrl: 'Soquet', a: 'Soquet', b: 'Soquet' ) -> Dict[str, 'SoquetT']: + if self.a_dtype.is_symbolic() or self.b_dtype.is_symbolic(): + raise DecomposeTypeError(f"Cannot support symbolic {self}") + a_arr = bb.split(a) ctrl_q = bb.split(ctrl)[0] ancilla_arr = [] diff --git a/qualtran/bloqs/data_loading/select_swap_qrom.py b/qualtran/bloqs/data_loading/select_swap_qrom.py index 52952719e..c98e45c25 100644 --- a/qualtran/bloqs/data_loading/select_swap_qrom.py +++ b/qualtran/bloqs/data_loading/select_swap_qrom.py @@ -22,7 +22,15 @@ import sympy from numpy.typing import ArrayLike -from qualtran import bloq_example, BloqDocSpec, BQUInt, GateWithRegisters, Register, Signature +from qualtran import ( + bloq_example, + BloqDocSpec, + BQUInt, + DecomposeTypeError, + GateWithRegisters, + Register, + Signature, +) from qualtran.bloqs.arithmetic.bitwise import Xor from qualtran.bloqs.bookkeeping import Partition from qualtran.bloqs.data_loading.qrom import QROM @@ -394,7 +402,7 @@ def build_composite_bloq(self, bb: 'BloqBuilder', **soqs: 'SoquetT') -> Dict[str target = [soqs.pop(reg.name) for reg in self.target_registers] # Allocate intermediate clean/dirty ancilla for the underlying QROM call. if is_symbolic(*self.block_sizes): - raise ValueError( + raise DecomposeTypeError( f"Cannot decompose SelectSwapQROM bloq with symbolic block sizes. Found {self.block_sizes=}" ) block_sizes = cast(Tuple[int, ...], self.block_sizes) diff --git a/qualtran/bloqs/factoring/mod_exp.py b/qualtran/bloqs/factoring/mod_exp.py index cabc6e0bd..7e2ab73c9 100644 --- a/qualtran/bloqs/factoring/mod_exp.py +++ b/qualtran/bloqs/factoring/mod_exp.py @@ -145,7 +145,7 @@ def _generalize_k(b: Bloq) -> Optional[Bloq]: @bloq_example(generalizer=(ignore_split_join, _generalize_k)) def _modexp_small() -> ModExp: - modexp_small = ModExp(base=3, mod=15, exp_bitsize=3, x_bitsize=2048) + modexp_small = ModExp(base=4, mod=15, exp_bitsize=3, x_bitsize=2048) return modexp_small diff --git a/qualtran/bloqs/mcmt/and_bloq.py b/qualtran/bloqs/mcmt/and_bloq.py index 29f8d4188..0d8cc24cc 100644 --- a/qualtran/bloqs/mcmt/and_bloq.py +++ b/qualtran/bloqs/mcmt/and_bloq.py @@ -386,6 +386,8 @@ def decompose_from_registers( yield self._decompose_via_tree(control, self.concrete_cvs, ancilla, *target) def decompose_bloq(self) -> 'CompositeBloq': + if is_symbolic(self.cvs): + raise DecomposeTypeError(f"Cannot decompose symbolic {self}.") return decompose_from_cirq_style_method(self) def wire_symbol(self, reg: Optional[Register], idx: Tuple[int, ...] 
= tuple()) -> 'WireSymbol': diff --git a/qualtran/bloqs/mod_arithmetic/mod_addition.py b/qualtran/bloqs/mod_arithmetic/mod_addition.py index 31f66a847..4d4d31638 100644 --- a/qualtran/bloqs/mod_arithmetic/mod_addition.py +++ b/qualtran/bloqs/mod_arithmetic/mod_addition.py @@ -23,6 +23,7 @@ Bloq, bloq_example, BloqDocSpec, + DecomposeTypeError, GateWithRegisters, QBit, QMontgomeryUInt, @@ -89,7 +90,7 @@ def on_classical_vals( def build_composite_bloq(self, bb: 'BloqBuilder', x: Soquet, y: Soquet) -> Dict[str, 'SoquetT']: if is_symbolic(self.bitsize): - raise NotImplementedError(f'symbolic decomposition is not supported for {self}') + raise DecomposeTypeError(f'Symbolic decomposition is not supported for {self}') # Allocate ancilla bits for use in addition. junk_bit = bb.allocate(n=1) sign = bb.allocate(n=1) @@ -390,6 +391,9 @@ def on_classical_vals( def build_composite_bloq( self, bb: 'BloqBuilder', ctrl, x: Soquet, y: Soquet ) -> Dict[str, 'SoquetT']: + if self.dtype.is_symbolic(): + raise DecomposeTypeError(f"Cannot decompose symbolic {self}") + y_arr = bb.split(y) ancilla = bb.allocate(1) x = bb.add(Cast(self.dtype, QUInt(self.dtype.bitsize)), reg=x) diff --git a/qualtran/cirq_interop/_cirq_to_bloq.py b/qualtran/cirq_interop/_cirq_to_bloq.py index 447227a6b..3ffc18773 100644 --- a/qualtran/cirq_interop/_cirq_to_bloq.py +++ b/qualtran/cirq_interop/_cirq_to_bloq.py @@ -602,8 +602,12 @@ def decompose_from_registers( return cirq_optree_to_cbloq( decomposed_optree, signature=bloq.signature, in_quregs=in_quregs, out_quregs=out_quregs ) + except (DecomposeNotImplementedError, DecomposeTypeError) as exc: + raise exc except ValueError as exc: if "Only gate operations are supported" in str(exc): raise DecomposeNotImplementedError(str(exc)) from exc else: - raise exc + raise RuntimeError(f"Unexpected error when decomposing {bloq}: {exc}") from exc + except Exception as exc: + raise RuntimeError(f"Unexpected error when decomposing {bloq}: {exc}") from exc
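
A minimal sketch of how the new `ExecuteWithTimeout` helper and `report_on_cost_timings` fit together, restating the driver loop from `dev_tools/costing-report-card.ipynb` as a plain script. The wrapper name `collect_qubit_count_timings` and the timeout/worker values are illustrative choices, not part of the change itself:

# Sketch: collect QubitCount timing records for every bloq example, killing any
# task that runs longer than `timeout` seconds. Mirrors costing-report-card.ipynb.
import pandas as pd

from qualtran.bloqs.multiplexers.apply_gate_to_lth_target import ApplyGateToLthQubit
from qualtran_dev_tools.bloq_finder import get_bloq_examples
from qualtran_dev_tools.bloq_report_card import report_on_cost_timings
from qualtran_dev_tools.execute_with_timeout import ExecuteWithTimeout


def collect_qubit_count_timings(timeout: float = 20.0, max_workers: int = 4) -> pd.DataFrame:
    exe = ExecuteWithTimeout(timeout=timeout, max_workers=max_workers)
    for be in get_bloq_examples():
        if be.bloq_cls is ApplyGateToLthQubit:
            # Skipped: its lambda attribute can't be pickled for multiprocessing.
            continue
        # Each task receives an extra `cxn` keyword (the sending half of a Pipe)
        # from ExecuteWithTimeout and reports its record through it.
        exe.submit(
            report_on_cost_timings,
            kwargs=dict(name=be.name, cls_name=be.bloq_cls.__name__, bloq=be.make()),
        )

    records = []
    while exe.work_to_be_done:
        kwargs, record = exe.next_result()
        if record is None:
            # The worker was terminated after `timeout` seconds (or died without reporting).
            record = {'name': kwargs['name'], 'cls': kwargs['cls_name'], 'err': 'Timeout'}
        records.append(record)
    return pd.DataFrame(records)


if __name__ == '__main__':
    # Guard recommended: multiprocessing's 'spawn' start method re-imports this module.
    print(collect_qubit_count_timings().sort_values(by='qubitcount_dur', ascending=False).head())

The same pattern, with `report_on_tensors` and an 8-second timeout, drives `dev_tools/tensor-report-card.ipynb`.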