diff --git a/include/cudaq/Optimizer/CodeGen/Pipelines.h b/include/cudaq/Optimizer/CodeGen/Pipelines.h index afb39831ba..08ef770c1b 100644 --- a/include/cudaq/Optimizer/CodeGen/Pipelines.h +++ b/include/cudaq/Optimizer/CodeGen/Pipelines.h @@ -30,35 +30,18 @@ void commonPipelineConvertToQIR(mlir::PassManager &pm, mlir::StringRef codeGenFor = "qir", mlir::StringRef passConfigAs = "qir"); -/// \deprecated{Only for Python, since it can't use the new QIR codegen.} -void commonPipelineConvertToQIR_PythonWorkaround( - mlir::PassManager &pm, const std::optional &convertTo); - /// \brief Pipeline builder to convert Quake to QIR. /// Does not specify a particular QIR profile. inline void addPipelineConvertToQIR(mlir::PassManager &pm) { commonPipelineConvertToQIR(pm); } -/// \deprecated{Only for Python, since it can't use the new QIR codegen.} -inline void addPipelineConvertToQIR_PythonWorkaround(mlir::PassManager &pm) { - commonPipelineConvertToQIR_PythonWorkaround(pm, std::nullopt); -} - /// \brief Pipeline builder to convert Quake to QIR. /// Specifies a particular QIR profile in \p convertTo. /// \p pm Pass manager to append passes to /// \p convertTo name of QIR profile (e.g., `qir-base`, `qir-adaptive`, ...) void addPipelineConvertToQIR(mlir::PassManager &pm, mlir::StringRef convertTo); -/// \deprecated{Only for Python, since it can't use the new QIR codegen.} -inline void -addPipelineConvertToQIR_PythonWorkaround(mlir::PassManager &pm, - mlir::StringRef convertTo) { - commonPipelineConvertToQIR_PythonWorkaround(pm, convertTo); - addQIRProfilePipeline(pm, convertTo); -} - void addLowerToCCPipeline(mlir::OpPassManager &pm); void addPipelineTranslateToOpenQASM(mlir::PassManager &pm); diff --git a/lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp b/lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp index bc79b54899..fb48d7c753 100644 --- a/lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp +++ b/lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp @@ -1092,7 +1092,7 @@ struct QuantumGatePattern : public OpConversionPattern { // Process the controls, sorting them by type. for (auto pr : llvm::zip(op.getControls(), adaptor.getControls())) { - if (isa(std::get<0>(pr).getType())) { + if (isaVeqArgument(std::get<0>(pr).getType())) { numArrayCtrls++; auto sizeCall = rewriter.create( loc, i64Ty, cudaq::opt::QIRArrayGetSize, @@ -1155,6 +1155,18 @@ struct QuantumGatePattern : public OpConversionPattern { return forwardOrEraseOp(); } + static bool isaVeqArgument(Type ty) { + // TODO: Need a way to identify arrays when using the opaque pointer + // variant. (In Python, the arguments may already be converted.) + auto alreadyConverted = [](Type ty) { + if (auto ptrTy = dyn_cast(ty)) + if (auto strTy = dyn_cast(ptrTy.getElementType())) + return strTy.isIdentified() && strTy.getName() == "Array"; + return false; + }; + return isa(ty) || alreadyConverted(ty); + } + static bool conformsToIntendedCall(std::size_t numControls, Value ctrl, OP op, StringRef qirFunctionName) { if (numControls != 1) @@ -1819,9 +1831,7 @@ struct QuakeToQIRAPIPrepPass } void guaranteeMzIsLabeled(quake::MzOp mz, int &counter, OpBuilder &builder) { - if (mz.getRegisterNameAttr() && - /* FIXME: issue 2538: the name should never be empty. 
*/ - !mz.getRegisterNameAttr().getValue().empty()) { + if (mz.getRegisterNameAttr()) { mz->setAttr(cudaq::opt::MzAssignedNameAttrName, builder.getUnitAttr()); return; } diff --git a/lib/Optimizer/CodeGen/Pipelines.cpp b/lib/Optimizer/CodeGen/Pipelines.cpp index 393dad5c65..596d7adcf3 100644 --- a/lib/Optimizer/CodeGen/Pipelines.cpp +++ b/lib/Optimizer/CodeGen/Pipelines.cpp @@ -51,37 +51,6 @@ void cudaq::opt::commonPipelineConvertToQIR(PassManager &pm, pm.addPass(createCCToLLVM()); } -void cudaq::opt::commonPipelineConvertToQIR_PythonWorkaround( - PassManager &pm, const std::optional &convertTo) { - pm.addNestedPass(createApplyControlNegations()); - addAggressiveEarlyInlining(pm); - pm.addNestedPass(createCanonicalizerPass()); - pm.addNestedPass(createUnwindLoweringPass()); - pm.addNestedPass(createCanonicalizerPass()); - pm.addPass(createApplyOpSpecializationPass()); - pm.addNestedPass(createExpandMeasurementsPass()); - pm.addNestedPass(createClassicalMemToReg()); - pm.addNestedPass(createCanonicalizerPass()); - pm.addNestedPass(createCSEPass()); - pm.addNestedPass(createQuakeAddDeallocs()); - pm.addNestedPass(createQuakeAddMetadata()); - pm.addNestedPass(createLoopNormalize()); - LoopUnrollOptions luo; - luo.allowBreak = convertTo && (*convertTo == "qir-adaptive"); - pm.addNestedPass(createLoopUnroll(luo)); - pm.addNestedPass(createCanonicalizerPass()); - pm.addNestedPass(createCSEPass()); - pm.addNestedPass(createLowerToCFGPass()); - pm.addNestedPass(createCombineQuantumAllocations()); - pm.addNestedPass(createCanonicalizerPass()); - pm.addNestedPass(createCSEPass()); - if (convertTo && (*convertTo == "qir-base")) - pm.addNestedPass(createDelayMeasurementsPass()); - pm.addPass(createConvertMathToFuncs()); - pm.addPass(createSymbolDCEPass()); - pm.addPass(createConvertToQIR()); -} - void cudaq::opt::addPipelineTranslateToOpenQASM(PassManager &pm) { pm.addNestedPass(createCanonicalizerPass()); pm.addNestedPass(createCSEPass()); diff --git a/lib/Optimizer/Dialect/Quake/QuakeOps.cpp b/lib/Optimizer/Dialect/Quake/QuakeOps.cpp index ad35594fbb..b5697b789b 100644 --- a/lib/Optimizer/Dialect/Quake/QuakeOps.cpp +++ b/lib/Optimizer/Dialect/Quake/QuakeOps.cpp @@ -517,38 +517,41 @@ void quake::WrapOp::getCanonicalizationPatterns(RewritePatternSet &patterns, //===----------------------------------------------------------------------===// // Common verification for measurement operations. 
-static LogicalResult verifyMeasurements(Operation *const op, - TypeRange targetsType, - const Type bitsType) { +template +LogicalResult verifyMeasurements(MEAS op, TypeRange targetsType, + const Type bitsType) { if (failed(verifyWireResultsAreLinear(op))) return failure(); bool mustBeStdvec = targetsType.size() > 1 || (targetsType.size() == 1 && isa(targetsType[0])); if (mustBeStdvec) { - if (!isa(op->getResult(0).getType())) - return op->emitOpError("must return `!cc.stdvec`, when " - "measuring a qreg, a series of qubits, or both"); + if (!isa(op.getMeasOut().getType())) + return op.emitOpError("must return `!cc.stdvec`, when " + "measuring a qreg, a series of qubits, or both"); } else { - if (!isa(op->getResult(0).getType())) + if (!isa(op.getMeasOut().getType())) return op->emitOpError( "must return `!quake.measure` when measuring exactly one qubit"); } + if (op.getRegisterName()) + if (op.getRegisterName()->empty()) + return op->emitError("quake measurement name cannot be empty."); return success(); } LogicalResult quake::MxOp::verify() { - return verifyMeasurements(getOperation(), getTargets().getType(), + return verifyMeasurements(*this, getTargets().getType(), getMeasOut().getType()); } LogicalResult quake::MyOp::verify() { - return verifyMeasurements(getOperation(), getTargets().getType(), + return verifyMeasurements(*this, getTargets().getType(), getMeasOut().getType()); } LogicalResult quake::MzOp::verify() { - return verifyMeasurements(getOperation(), getTargets().getType(), + return verifyMeasurements(*this, getTargets().getType(), getMeasOut().getType()); } diff --git a/lib/Optimizer/Transforms/GlobalizeArrayValues.cpp b/lib/Optimizer/Transforms/GlobalizeArrayValues.cpp index b9728ed067..84dd992f3d 100644 --- a/lib/Optimizer/Transforms/GlobalizeArrayValues.cpp +++ b/lib/Optimizer/Transforms/GlobalizeArrayValues.cpp @@ -87,6 +87,23 @@ convertArrayAttrToGlobalConstant(MLIRContext *ctx, Location loc, } namespace { + +// This pattern replaces a cc.const_array with a global constant. It can +// recognize a couple of usage patterns and will generate efficient IR in those +// cases. +// +// Pattern 1: The entire constant array is stored to a stack variable(s). Here +// we can eliminate the stack allocation and use the global constant. +// +// Pattern 2: Individual elements at dynamic offsets are extracted from the +// constant array and used. This can be replaced with a compute pointer +// operation using the global constant and a load of the element at the computed +// offset. +// +// Default: If the usage is not recognized, the constant array value is replaced +// with a load of the entire global variable. In this case, LLVM's optimizations +// are counted on to help demote the (large?) sequence value to primitive memory +// address arithmetic. 
struct ConstantArrayPattern : public OpRewritePattern { explicit ConstantArrayPattern(MLIRContext *ctx, ModuleOp module, @@ -95,21 +112,30 @@ struct ConstantArrayPattern LogicalResult matchAndRewrite(cudaq::cc::ConstantArrayOp conarr, PatternRewriter &rewriter) const override { + auto func = conarr->getParentOfType(); + if (!func) + return failure(); + SmallVector allocas; SmallVector stores; + SmallVector extracts; + bool loadAsValue = false; for (auto *usr : conarr->getUsers()) { auto store = dyn_cast(usr); - if (!store) - return failure(); - auto alloca = store.getPtrvalue().getDefiningOp(); - if (!alloca) - return failure(); - stores.push_back(store); - allocas.push_back(alloca); + auto extract = dyn_cast(usr); + if (store) { + auto alloca = store.getPtrvalue().getDefiningOp(); + if (alloca) { + stores.push_back(store); + allocas.push_back(alloca); + continue; + } + } else if (extract) { + extracts.push_back(extract); + continue; + } + loadAsValue = true; } - auto func = conarr->getParentOfType(); - if (!func) - return failure(); std::string globalName = func.getName().str() + ".rodata_" + std::to_string(counter++); auto *ctx = rewriter.getContext(); @@ -118,12 +144,39 @@ struct ConstantArrayPattern if (failed(convertArrayAttrToGlobalConstant(ctx, conarr.getLoc(), valueAttr, module, globalName, eleTy))) return failure(); - for (auto alloca : allocas) - rewriter.replaceOpWithNewOp( - alloca, alloca.getType(), globalName); - for (auto store : stores) - rewriter.eraseOp(store); - rewriter.eraseOp(conarr); + auto loc = conarr.getLoc(); + if (!extracts.empty()) { + auto base = rewriter.create( + loc, cudaq::cc::PointerType::get(conarr.getType()), globalName); + auto elePtrTy = cudaq::cc::PointerType::get(eleTy); + for (auto extract : extracts) { + SmallVector args; + unsigned i = 0; + for (auto arg : extract.getRawConstantIndices()) { + if (arg == cudaq::cc::ExtractValueOp::getDynamicIndexValue()) + args.push_back(extract.getDynamicIndices()[i++]); + else + args.push_back(arg); + } + OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(extract); + auto addrVal = + rewriter.create(loc, elePtrTy, base, args); + rewriter.replaceOpWithNewOp(extract, addrVal); + } + } + if (!stores.empty()) { + for (auto alloca : allocas) + rewriter.replaceOpWithNewOp( + alloca, alloca.getType(), globalName); + for (auto store : stores) + rewriter.eraseOp(store); + } + if (loadAsValue) { + auto base = rewriter.create( + loc, cudaq::cc::PointerType::get(conarr.getType()), globalName); + rewriter.replaceOpWithNewOp(conarr, base); + } return success(); } diff --git a/python/cudaq/kernel/ast_bridge.py b/python/cudaq/kernel/ast_bridge.py index 06f77857c7..3463b4a1e0 100644 --- a/python/cudaq/kernel/ast_bridge.py +++ b/python/cudaq/kernel/ast_bridge.py @@ -1749,9 +1749,11 @@ def bodyBuilder(iterVal): self.ctx) if len(qubits) == 1 and quake.RefType.isinstance( qubits[0].type) else cc.StdvecType.get( self.ctx, quake.MeasureType.get(self.ctx)) - measureResult = opCtor(measTy, [], - qubits, - registerName=registerName).result + label = registerName + if not label: + label = None + measureResult = opCtor(measTy, [], qubits, + registerName=label).result if pushResultToStack: self.pushValue( quake.DiscriminateOp(resTy, measureResult).result) @@ -3152,6 +3154,73 @@ def bodyBuilder(iterVar): isDecrementing=isDecrementing) return + # We can simplify `for i,j in enumerate(L)` MLIR code immensely + # by just building a for loop over the iterable object L and using + # the index into that iterable and the 
element. + if isinstance(node.iter, ast.Call): + if node.iter.func.id == 'enumerate': + [self.visit(arg) for arg in node.iter.args] + if len(self.valueStack) == 2: + iterable = self.popValue() + self.popValue() + else: + assert len(self.valueStack) == 1 + iterable = self.popValue() + iterable = self.ifPointerThenLoad(iterable) + totalSize = None + extractFunctor = None + varNames = [] + for elt in node.target.elts: + varNames.append(elt.id) + + beEfficient = False + if quake.VeqType.isinstance(iterable.type): + totalSize = quake.VeqSizeOp(self.getIntegerType(), + iterable).result + + def functor(seq, idx): + q = quake.ExtractRefOp(self.getRefType(), + seq, + -1, + index=idx).result + return [idx, q] + + extractFunctor = functor + beEfficient = True + elif cc.StdvecType.isinstance(iterable.type): + totalSize = cc.StdvecSizeOp(self.getIntegerType(), + iterable).result + + def functor(seq, idx): + vecTy = cc.StdvecType.getElementType(seq.type) + dataTy = cc.PointerType.get(self.ctx, vecTy) + arrTy = vecTy + if not cc.ArrayType.isinstance(arrTy): + arrTy = cc.ArrayType.get(self.ctx, vecTy) + dataArrTy = cc.PointerType.get(self.ctx, arrTy) + data = cc.StdvecDataOp(dataArrTy, seq).result + v = cc.ComputePtrOp( + dataTy, data, [idx], + DenseI32ArrayAttr.get([kDynamicPtrIndex], + context=self.ctx)).result + return [idx, v] + + extractFunctor = functor + beEfficient = True + + if beEfficient: + + def bodyBuilder(iterVar): + self.symbolTable.pushScope() + values = extractFunctor(iterable, iterVar) + for i, v in enumerate(values): + self.symbolTable[varNames[i]] = v + [self.visit(b) for b in node.body] + self.symbolTable.popScope() + + self.createInvariantForLoop(totalSize, bodyBuilder) + return + self.visit(node.iter) assert len(self.valueStack) > 0 and len(self.valueStack) < 3 diff --git a/python/cudaq/kernel/kernel_builder.py b/python/cudaq/kernel/kernel_builder.py index f77b130830..ebba9803e0 100644 --- a/python/cudaq/kernel/kernel_builder.py +++ b/python/cudaq/kernel/kernel_builder.py @@ -1076,10 +1076,12 @@ def mz(self, target, regName=None): if quake.VeqType.isinstance(target.mlirValue.type): retTy = stdvecTy measTy = cc.StdvecType.get(self.ctx, measTy) - res = quake.MzOp( - measTy, [], [target.mlirValue], - registerName=StringAttr.get(regName, context=self.ctx) - if regName is not None else '') + if regName is not None: + res = quake.MzOp(measTy, [], [target.mlirValue], + registerName=StringAttr.get(regName, + context=self.ctx)) + else: + res = quake.MzOp(measTy, [], [target.mlirValue]) disc = quake.DiscriminateOp(retTy, res) return self.__createQuakeValue(disc.result) @@ -1120,10 +1122,12 @@ def mx(self, target, regName=None): if quake.VeqType.isinstance(target.mlirValue.type): retTy = stdvecTy measTy = cc.StdvecType.get(self.ctx, measTy) - res = quake.MxOp( - measTy, [], [target.mlirValue], - registerName=StringAttr.get(regName, context=self.ctx) - if regName is not None else '') + if regName is not None: + res = quake.MxOp(measTy, [], [target.mlirValue], + registerName=StringAttr.get(regName, + context=self.ctx)) + else: + res = quake.MxOp(measTy, [], [target.mlirValue]) disc = quake.DiscriminateOp(retTy, res) return self.__createQuakeValue(disc.result) @@ -1165,10 +1169,12 @@ def my(self, target, regName=None): if quake.VeqType.isinstance(target.mlirValue.type): retTy = stdvecTy measTy = cc.StdvecType.get(self.ctx, measTy) - res = quake.MyOp( - measTy, [], [target.mlirValue], - registerName=StringAttr.get(regName, context=self.ctx) - if regName is not None else '') + if regName is not 
None: + res = quake.MyOp(measTy, [], [target.mlirValue], + registerName=StringAttr.get(regName, + context=self.ctx)) + else: + res = quake.MyOp(measTy, [], [target.mlirValue]) disc = quake.DiscriminateOp(retTy, res) return self.__createQuakeValue(disc.result) diff --git a/python/runtime/cudaq/platform/py_alt_launch_kernel.cpp b/python/runtime/cudaq/platform/py_alt_launch_kernel.cpp index 9a6e48203f..3232e26b11 100644 --- a/python/runtime/cudaq/platform/py_alt_launch_kernel.cpp +++ b/python/runtime/cudaq/platform/py_alt_launch_kernel.cpp @@ -104,7 +104,7 @@ jitAndCreateArgs(const std::string &name, MlirModule module, {.startingArgIdx = startingArgIdx})); pm.addPass(cudaq::opt::createLambdaLiftingPass()); pm.addPass(createSymbolDCEPass()); - cudaq::opt::addPipelineConvertToQIR_PythonWorkaround(pm); + cudaq::opt::addPipelineConvertToQIR(pm); DefaultTimingManager tm; tm.setEnabled(cudaq::isTimingTagEnabled(cudaq::TIMING_JIT_PASSES)); @@ -596,9 +596,9 @@ std::string getQIR(const std::string &name, MlirModule module, PassManager pm(context); pm.addPass(cudaq::opt::createLambdaLiftingPass()); if (profile.empty()) - cudaq::opt::addPipelineConvertToQIR_PythonWorkaround(pm); + cudaq::opt::addPipelineConvertToQIR(pm); else - cudaq::opt::addPipelineConvertToQIR_PythonWorkaround(pm, profile); + cudaq::opt::addPipelineConvertToQIR(pm, profile); DefaultTimingManager tm; tm.setEnabled(cudaq::isTimingTagEnabled(cudaq::TIMING_JIT_PASSES)); auto timingScope = tm.getRootScope(); // starts the timer diff --git a/python/tests/mlir/adjoint.py b/python/tests/mlir/adjoint.py index 34a9e775f5..8a492ef9b7 100644 --- a/python/tests/mlir/adjoint.py +++ b/python/tests/mlir/adjoint.py @@ -241,7 +241,7 @@ def test_sample_adjoint_qubit(): # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!quake.ref) -> () # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (!quake.ref) -> () -# CHECK: %[[VAL_1:.*]] = quake.mz %[[VAL_0]] name "" : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_1:.*]] = quake.mz %[[VAL_0]] : (!quake.ref) -> !quake.measure # CHECK: return # CHECK: } @@ -302,7 +302,7 @@ def test_sample_adjoint_qreg(): # CHECK: } {invariant} # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_3]]) : (!quake.veq) -> () # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_3]] : (!quake.veq) -> () -# CHECK: %[[VAL_13:.*]] = quake.mz %0 name "" : (!quake.veq) -> !cc.stdvec +# CHECK: %[[VAL_13:.*]] = quake.mz %0 : (!quake.veq) -> !cc.stdvec # CHECK: return # CHECK: } diff --git a/python/tests/mlir/ast_elif.py b/python/tests/mlir/ast_elif.py index 45c028b69d..83f703685d 100644 --- a/python/tests/mlir/ast_elif.py +++ b/python/tests/mlir/ast_elif.py @@ -30,58 +30,80 @@ def cost(thetas: np.ndarray): # can pass 1D ndarray or list # CHECK-LABEL: func.func @__nvqpp__mlirgen__cost( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint", "cudaq-kernel"} { -# CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i64 -# CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : i64 -# CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i64 -# CHECK-DAG: %[[VAL_4:.*]] = arith.constant 4 : i64 -# CHECK: %[[VAL_5:.*]] = quake.alloca !quake.veq<4> -# CHECK: %[[VAL_6:.*]] = cc.stdvec_size %[[VAL_0]] : (!cc.stdvec) -> i64 -# CHECK: %[[VAL_7:.*]] = cc.alloca !cc.struct<{i64, f64}>{{\[}}%[[VAL_6]] : i64] -# CHECK: %[[VAL_8:.*]] = cc.loop while ((%[[VAL_9:.*]] = %[[VAL_3]]) -> (i64)) { -# CHECK: %[[VAL_10:.*]] = 
arith.cmpi slt, %[[VAL_9]], %[[VAL_6]] : i64 -# CHECK: cc.condition %[[VAL_10]](%[[VAL_9]] : i64) +# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint", "cudaq-kernel"} { +# CHECK: %[[VAL_1:.*]] = arith.constant 4 : i64 +# CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq{{\[}}%[[VAL_1]] : i64] +# CHECK: %[[VAL_3:.*]] = cc.stdvec_size %[[VAL_0]] : (!cc.stdvec) -> i64 +# CHECK: %[[VAL_4:.*]] = arith.constant 0 : i64 +# CHECK: %[[VAL_5:.*]] = arith.constant 1 : i64 +# CHECK: %[[VAL_6:.*]] = cc.loop while ((%[[VAL_7:.*]] = %[[VAL_4]]) -> (i64)) { +# CHECK: %[[VAL_8:.*]] = arith.cmpi slt, %[[VAL_7]], %[[VAL_3]] : i64 +# CHECK: cc.condition %[[VAL_8]](%[[VAL_7]] : i64) # CHECK: } do { -# CHECK: ^bb0(%[[VAL_11:.*]]: i64): -# CHECK: %[[VAL_12:.*]] = cc.undef !cc.struct<{i64, f64}> -# CHECK: %[[VAL_13:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec) -> !cc.ptr> -# CHECK: %[[VAL_14:.*]] = cc.compute_ptr %[[VAL_13]][%[[VAL_11]]] : (!cc.ptr>, i64) -> !cc.ptr -# CHECK: %[[VAL_15:.*]] = cc.load %[[VAL_14]] : !cc.ptr -# CHECK: %[[VAL_16:.*]] = cc.compute_ptr %[[VAL_7]]{{\[}}%[[VAL_11]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_17:.*]] = cc.insert_value %[[VAL_12]][0], %[[VAL_11]] : (!cc.struct<{i64, f64}>, i64) -> !cc.struct<{i64, f64}> -# CHECK: %[[VAL_18:.*]] = cc.insert_value %[[VAL_17]][1], %[[VAL_15]] : (!cc.struct<{i64, f64}>, f64) -> !cc.struct<{i64, f64}> -# CHECK: cc.store %[[VAL_18]], %[[VAL_16]] : !cc.ptr> -# CHECK: cc.continue %[[VAL_11]] : i64 +# CHECK: ^bb0(%[[VAL_9:.*]]: i64): +# CHECK: %[[VAL_10:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec) -> !cc.ptr> +# CHECK: %[[VAL_11:.*]] = cc.compute_ptr %[[VAL_10]]{{\[}}%[[VAL_9]]] : (!cc.ptr>, i64) -> !cc.ptr +# CHECK: %[[VAL_12:.*]] = arith.constant 2.000000e+00 : f64 +# CHECK: %[[VAL_13:.*]] = arith.fptosi %[[VAL_12]] : f64 to i64 +# CHECK: %[[VAL_14:.*]] = arith.remui %[[VAL_9]], %[[VAL_13]] : i64 +# CHECK: %[[VAL_15:.*]] = arith.constant 0 : i64 +# CHECK: %[[VAL_16:.*]] = arith.cmpi ne, %[[VAL_14]], %[[VAL_15]] : i64 +# CHECK: cc.if(%[[VAL_16]]) { +# CHECK: %[[VAL_17:.*]] = cc.load %[[VAL_11]] : !cc.ptr +# CHECK: %[[VAL_18:.*]] = arith.constant 4 : i64 +# CHECK: %[[VAL_19:.*]] = arith.remui %[[VAL_9]], %[[VAL_18]] : i64 +# CHECK: %[[VAL_20:.*]] = quake.extract_ref %[[VAL_2]]{{\[}}%[[VAL_19]]] : (!quake.veq, i64) -> !quake.ref +# CHECK: quake.ry (%[[VAL_17]]) %[[VAL_20]] : (f64, !quake.ref) -> () +# CHECK: } else { +# CHECK: %[[VAL_21:.*]] = cc.load %[[VAL_11]] : !cc.ptr +# CHECK: %[[VAL_22:.*]] = arith.constant 4 : i64 +# CHECK: %[[VAL_23:.*]] = arith.remui %[[VAL_9]], %[[VAL_22]] : i64 +# CHECK: %[[VAL_24:.*]] = quake.extract_ref %[[VAL_2]]{{\[}}%[[VAL_23]]] : (!quake.veq, i64) -> !quake.ref +# CHECK: quake.rx (%[[VAL_21]]) %[[VAL_24]] : (f64, !quake.ref) -> () +# CHECK: } +# CHECK: cc.continue %[[VAL_9]] : i64 # CHECK: } step { -# CHECK: ^bb0(%[[VAL_19:.*]]: i64): -# CHECK: %[[VAL_20:.*]] = arith.addi %[[VAL_19]], %[[VAL_2]] : i64 -# CHECK: cc.continue %[[VAL_20]] : i64 +# CHECK: ^bb0(%[[VAL_25:.*]]: i64): +# CHECK: %[[VAL_26:.*]] = arith.addi %[[VAL_25]], %[[VAL_5]] : i64 +# CHECK: cc.continue %[[VAL_26]] : i64 # CHECK: } {invariant} -# CHECK: %[[VAL_21:.*]] = cc.loop while ((%[[VAL_22:.*]] = %[[VAL_3]]) -> (i64)) { -# CHECK: %[[VAL_23:.*]] = arith.cmpi slt, %[[VAL_22]], %[[VAL_6]] : i64 -# CHECK: cc.condition %[[VAL_23]](%[[VAL_22]] : i64) +# CHECK: return +# CHECK: } + +# CHECK-LABEL: func.func @__nvqpp__mlirgen__cost( +# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint", "cudaq-kernel"} { 
+# CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i64 +# CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : i64 +# CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i64 +# CHECK-DAG: %[[VAL_4:.*]] = arith.constant 4 : i64 +# CHECK-DAG: %[[VAL_5:.*]] = quake.alloca !quake.veq<4> +# CHECK: %[[VAL_6:.*]] = cc.stdvec_size %[[VAL_0]] : (!cc.stdvec) -> i64 +# CHECK: %[[VAL_7:.*]] = cc.loop while ((%[[VAL_8:.*]] = %[[VAL_3]]) -> (i64)) { +# CHECK: %[[VAL_9:.*]] = arith.cmpi slt, %[[VAL_8]], %[[VAL_6]] : i64 +# CHECK: cc.condition %[[VAL_9]](%[[VAL_8]] : i64) # CHECK: } do { -# CHECK: ^bb0(%[[VAL_24:.*]]: i64): -# CHECK: %[[VAL_25:.*]] = cc.compute_ptr %[[VAL_7]]{{\[}}%[[VAL_24]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_26:.*]] = cc.load %[[VAL_25]] : !cc.ptr> -# CHECK: %[[VAL_27:.*]] = cc.extract_value %[[VAL_26]][0] : (!cc.struct<{i64, f64}>) -> i64 -# CHECK: %[[VAL_28:.*]] = cc.extract_value %[[VAL_26]][1] : (!cc.struct<{i64, f64}>) -> f64 -# CHECK: %[[VAL_29:.*]] = arith.remui %[[VAL_27]], %[[VAL_1]] : i64 -# CHECK: %[[VAL_30:.*]] = arith.cmpi ne, %[[VAL_29]], %[[VAL_3]] : i64 -# CHECK: cc.if(%[[VAL_30]]) { -# CHECK: %[[VAL_31:.*]] = arith.remui %[[VAL_27]], %[[VAL_4]] : i64 -# CHECK: %[[VAL_32:.*]] = quake.extract_ref %[[VAL_5]]{{\[}}%[[VAL_31]]] : (!quake.veq<4>, i64) -> !quake.ref -# CHECK: quake.ry (%[[VAL_28]]) %[[VAL_32]] : (f64, !quake.ref) -> () +# CHECK: ^bb0(%[[VAL_10:.*]]: i64): +# CHECK: %[[VAL_11:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec) -> !cc.ptr> +# CHECK: %[[VAL_12:.*]] = cc.compute_ptr %[[VAL_11]]{{\[}}%[[VAL_10]]] : (!cc.ptr>, i64) -> !cc.ptr +# CHECK: %[[VAL_13:.*]] = arith.remui %[[VAL_10]], %[[VAL_1]] : i64 +# CHECK: %[[VAL_14:.*]] = arith.cmpi ne, %[[VAL_13]], %[[VAL_3]] : i64 +# CHECK: cc.if(%[[VAL_14]]) { +# CHECK: %[[VAL_15:.*]] = cc.load %[[VAL_12]] : !cc.ptr +# CHECK: %[[VAL_16:.*]] = arith.remui %[[VAL_10]], %[[VAL_4]] : i64 +# CHECK: %[[VAL_17:.*]] = quake.extract_ref %[[VAL_5]]{{\[}}%[[VAL_16]]] : (!quake.veq<4>, i64) -> !quake.ref +# CHECK: quake.ry (%[[VAL_15]]) %[[VAL_17]] : (f64, !quake.ref) -> () # CHECK: } else { -# CHECK: %[[VAL_33:.*]] = arith.remui %[[VAL_27]], %[[VAL_4]] : i64 -# CHECK: %[[VAL_34:.*]] = quake.extract_ref %[[VAL_5]]{{\[}}%[[VAL_33]]] : (!quake.veq<4>, i64) -> !quake.ref -# CHECK: quake.rx (%[[VAL_28]]) %[[VAL_34]] : (f64, !quake.ref) -> () +# CHECK: %[[VAL_18:.*]] = cc.load %[[VAL_12]] : !cc.ptr +# CHECK: %[[VAL_19:.*]] = arith.remui %[[VAL_10]], %[[VAL_4]] : i64 +# CHECK: %[[VAL_20:.*]] = quake.extract_ref %[[VAL_5]]{{\[}}%[[VAL_19]]] : (!quake.veq<4>, i64) -> !quake.ref +# CHECK: quake.rx (%[[VAL_18]]) %[[VAL_20]] : (f64, !quake.ref) -> () # CHECK: } -# CHECK: cc.continue %[[VAL_24]] : i64 +# CHECK: cc.continue %[[VAL_10]] : i64 # CHECK: } step { -# CHECK: ^bb0(%[[VAL_35:.*]]: i64): -# CHECK: %[[VAL_36:.*]] = arith.addi %[[VAL_35]], %[[VAL_2]] : i64 -# CHECK: cc.continue %[[VAL_36]] : i64 +# CHECK: ^bb0(%[[VAL_21:.*]]: i64): +# CHECK: %[[VAL_22:.*]] = arith.addi %[[VAL_21]], %[[VAL_2]] : i64 +# CHECK: cc.continue %[[VAL_22]] : i64 # CHECK: } {invariant} # CHECK: return # CHECK: } + diff --git a/python/tests/mlir/ast_list_init.py b/python/tests/mlir/ast_list_init.py index 5cf923a107..b676365b0d 100644 --- a/python/tests/mlir/ast_list_init.py +++ b/python/tests/mlir/ast_list_init.py @@ -34,56 +34,36 @@ def kernel(): # CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1.000000e+00 : f64 # CHECK-DAG: %[[VAL_7:.*]] = quake.alloca !quake.veq<6> # CHECK-DAG: %[[VAL_8:.*]] = cc.alloca !cc.array -# CHECK: %[[VAL_91:.*]] = cc.cast %[[VAL_8]] : 
(!cc.ptr>) -> !cc.ptr> -# CHECK: %[[VAL_9:.*]] = cc.cast %[[VAL_8]] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_6]], %[[VAL_9]] : !cc.ptr -# CHECK: %[[VAL_10:.*]] = cc.compute_ptr %[[VAL_8]][1] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_5]], %[[VAL_10]] : !cc.ptr -# CHECK: %[[VAL_11:.*]] = cc.compute_ptr %[[VAL_8]][2] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_4]], %[[VAL_11]] : !cc.ptr -# CHECK: %[[VAL_12:.*]] = cc.compute_ptr %[[VAL_8]][3] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_3]], %[[VAL_12]] : !cc.ptr -# CHECK: %[[VAL_13:.*]] = cc.stdvec_init %[[VAL_91]], %[[VAL_2]] : (!cc.ptr>, i64) -> !cc.stdvec -# CHECK: %[[VAL_14:.*]] = cc.alloca !cc.stdvec -# CHECK: cc.store %[[VAL_13]], %[[VAL_14]] : !cc.ptr> -# CHECK: %[[VAL_15:.*]] = cc.load %[[VAL_14]] : !cc.ptr> -# CHECK: %[[VAL_16:.*]] = cc.stdvec_size %[[VAL_15]] : (!cc.stdvec) -> i64 -# CHECK: %[[VAL_17:.*]] = cc.alloca !cc.struct<{i64, f64}>[%[[VAL_16]] : i64] +# CHECK: %[[VAL_9:.*]] = cc.cast %[[VAL_8]] : (!cc.ptr>) -> !cc.ptr> +# CHECK: %[[VAL_10:.*]] = cc.cast %[[VAL_8]] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_6]], %[[VAL_10]] : !cc.ptr +# CHECK: %[[VAL_11:.*]] = cc.compute_ptr %[[VAL_8]][1] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_5]], %[[VAL_11]] : !cc.ptr +# CHECK: %[[VAL_12:.*]] = cc.compute_ptr %[[VAL_8]][2] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_4]], %[[VAL_12]] : !cc.ptr +# CHECK: %[[VAL_13:.*]] = cc.compute_ptr %[[VAL_8]][3] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_3]], %[[VAL_13]] : !cc.ptr +# CHECK: %[[VAL_14:.*]] = cc.stdvec_init %[[VAL_9]], %[[VAL_2]] : (!cc.ptr>, i64) -> !cc.stdvec +# CHECK: %[[VAL_15:.*]] = cc.alloca !cc.stdvec +# CHECK: cc.store %[[VAL_14]], %[[VAL_15]] : !cc.ptr> +# CHECK: %[[VAL_16:.*]] = cc.load %[[VAL_15]] : !cc.ptr> +# CHECK: %[[VAL_17:.*]] = cc.stdvec_size %[[VAL_16]] : (!cc.stdvec) -> i64 # CHECK: %[[VAL_18:.*]] = cc.loop while ((%[[VAL_19:.*]] = %[[VAL_1]]) -> (i64)) { -# CHECK: %[[VAL_20:.*]] = arith.cmpi slt, %[[VAL_19]], %[[VAL_16]] : i64 +# CHECK: %[[VAL_20:.*]] = arith.cmpi slt, %[[VAL_19]], %[[VAL_17]] : i64 # CHECK: cc.condition %[[VAL_20]](%[[VAL_19]] : i64) # CHECK: } do { # CHECK: ^bb0(%[[VAL_21:.*]]: i64): -# CHECK: %[[VAL_22:.*]] = cc.undef !cc.struct<{i64, f64}> -# CHECK: %[[VAL_23:.*]] = cc.stdvec_data %[[VAL_15]] : (!cc.stdvec) -> !cc.ptr> -# CHECK: %[[VAL_24:.*]] = cc.compute_ptr %[[VAL_23]][%[[VAL_21]]] : (!cc.ptr>, i64) -> !cc.ptr -# CHECK: %[[VAL_25:.*]] = cc.load %[[VAL_24]] : !cc.ptr -# CHECK: %[[VAL_26:.*]] = cc.compute_ptr %[[VAL_17]][%[[VAL_21]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_27:.*]] = cc.insert_value %[[VAL_22]][0], %[[VAL_21]] : (!cc.struct<{i64, f64}>, i64) -> !cc.struct<{i64, f64}> -# CHECK: %[[VAL_28:.*]] = cc.insert_value %[[VAL_27]][1], %[[VAL_25]] : (!cc.struct<{i64, f64}>, f64) -> !cc.struct<{i64, f64}> -# CHECK: cc.store %[[VAL_28]], %[[VAL_26]] : !cc.ptr> +# CHECK: %[[VAL_22:.*]] = cc.stdvec_data %[[VAL_16]] : (!cc.stdvec) -> !cc.ptr> +# CHECK: %[[VAL_23:.*]] = cc.compute_ptr %[[VAL_22]]{{\[}}%[[VAL_21]]] : (!cc.ptr>, i64) -> !cc.ptr +# CHECK: %[[VAL_24:.*]] = cc.load %[[VAL_23]] : !cc.ptr +# CHECK: %[[VAL_25:.*]] = quake.extract_ref %[[VAL_7]]{{\[}}%[[VAL_21]]] : (!quake.veq<6>, i64) -> !quake.ref +# CHECK: quake.ry (%[[VAL_24]]) %[[VAL_25]] : (f64, !quake.ref) -> () # CHECK: cc.continue %[[VAL_21]] : i64 # CHECK: } step { -# CHECK: ^bb0(%[[VAL_29:.*]]: i64): -# CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_29]], %[[VAL_0]] : i64 -# CHECK: cc.continue %[[VAL_30]] : i64 -# 
CHECK: } {invariant} -# CHECK: %[[VAL_31:.*]] = cc.loop while ((%[[VAL_32:.*]] = %[[VAL_1]]) -> (i64)) { -# CHECK: %[[VAL_33:.*]] = arith.cmpi slt, %[[VAL_32]], %[[VAL_16]] : i64 -# CHECK: cc.condition %[[VAL_33]](%[[VAL_32]] : i64) -# CHECK: } do { -# CHECK: ^bb0(%[[VAL_34:.*]]: i64): -# CHECK: %[[VAL_35:.*]] = cc.compute_ptr %[[VAL_17]]{{\[}}%[[VAL_34]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_36:.*]] = cc.load %[[VAL_35]] : !cc.ptr> -# CHECK: %[[VAL_37:.*]] = cc.extract_value %[[VAL_36]][0] : (!cc.struct<{i64, f64}>) -> i64 -# CHECK: %[[VAL_38:.*]] = cc.extract_value %[[VAL_36]][1] : (!cc.struct<{i64, f64}>) -> f64 -# CHECK: %[[VAL_39:.*]] = quake.extract_ref %[[VAL_7]]{{\[}}%[[VAL_37]]] : (!quake.veq<6>, i64) -> !quake.ref -# CHECK: quake.ry (%[[VAL_38]]) %[[VAL_39]] : (f64, !quake.ref) -> () -# CHECK: cc.continue %[[VAL_34]] : i64 -# CHECK: } step { -# CHECK: ^bb0(%[[VAL_40:.*]]: i64): -# CHECK: %[[VAL_41:.*]] = arith.addi %[[VAL_40]], %[[VAL_0]] : i64 -# CHECK: cc.continue %[[VAL_41]] : i64 +# CHECK: ^bb0(%[[VAL_26:.*]]: i64): +# CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_26]], %[[VAL_0]] : i64 +# CHECK: cc.continue %[[VAL_27]] : i64 # CHECK: } {invariant} # CHECK: return # CHECK: } + diff --git a/python/tests/mlir/ast_list_int.py b/python/tests/mlir/ast_list_int.py index 752170b69e..fd3c371560 100644 --- a/python/tests/mlir/ast_list_int.py +++ b/python/tests/mlir/ast_list_int.py @@ -24,55 +24,34 @@ def oracle(register: cudaq.qview, auxillary_qubit: cudaq.qubit, print(oracle) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__oracle( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq, -# CHECK-SAME: %[[VAL_1:.*]]: !quake.ref, -# CHECK-SAME: %[[VAL_2:.*]]: !cc.stdvec) -# CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 -# CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : i64 +# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq, +# CHECK-SAME: %[[VAL_1:.*]]: !quake.ref, +# CHECK-SAME: %[[VAL_2:.*]]: !cc.stdvec) attributes {"cudaq-kernel"} { +# CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 +# CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_5:.*]] = cc.stdvec_size %[[VAL_2]] : (!cc.stdvec) -> i64 # CHECK: %[[VAL_6:.*]] = cc.alloca i64 # CHECK: cc.store %[[VAL_5]], %[[VAL_6]] : !cc.ptr -# CHECK: %[[VAL_7:.*]] = cc.alloca !cc.struct<{i64, i64}>{{\[}}%[[VAL_5]] : i64] -# CHECK: %[[VAL_8:.*]] = cc.loop while ((%[[VAL_9:.*]] = %[[VAL_4]]) -> (i64)) { -# CHECK: %[[VAL_10:.*]] = arith.cmpi slt, %[[VAL_9]], %[[VAL_5]] : i64 -# CHECK: cc.condition %[[VAL_10]](%[[VAL_9]] : i64) +# CHECK: %[[VAL_7:.*]] = cc.loop while ((%[[VAL_8:.*]] = %[[VAL_4]]) -> (i64)) { +# CHECK: %[[VAL_9:.*]] = arith.cmpi slt, %[[VAL_8]], %[[VAL_5]] : i64 +# CHECK: cc.condition %[[VAL_9]](%[[VAL_8]] : i64) # CHECK: } do { -# CHECK: ^bb0(%[[VAL_11:.*]]: i64): -# CHECK: %[[VAL_12:.*]] = cc.undef !cc.struct<{i64, i64}> -# CHECK: %[[VAL_13:.*]] = cc.stdvec_data %[[VAL_2]] : (!cc.stdvec) -> !cc.ptr> -# CHECK: %[[VAL_14:.*]] = cc.compute_ptr %[[VAL_13]][%[[VAL_11]]] : (!cc.ptr>, i64) -> !cc.ptr -# CHECK: %[[VAL_15:.*]] = cc.load %[[VAL_14]] : !cc.ptr -# CHECK: %[[VAL_16:.*]] = cc.compute_ptr %[[VAL_7]]{{\[}}%[[VAL_11]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_17:.*]] = cc.insert_value %[[VAL_12]][0], %[[VAL_11]] : (!cc.struct<{i64, i64}>, i64) -> !cc.struct<{i64, i64}> -# CHECK: %[[VAL_18:.*]] = cc.insert_value %[[VAL_17]][1], %[[VAL_15]] : (!cc.struct<{i64, i64}>, i64) -> !cc.struct<{i64, i64}> -# CHECK: cc.store %[[VAL_18]], %[[VAL_16]] : !cc.ptr> -# CHECK: cc.continue %[[VAL_11]] : i64 -# CHECK: } step { -# 
CHECK: ^bb0(%[[VAL_19:.*]]: i64): -# CHECK: %[[VAL_20:.*]] = arith.addi %[[VAL_19]], %[[VAL_3]] : i64 -# CHECK: cc.continue %[[VAL_20]] : i64 -# CHECK: } {invariant} -# CHECK: %[[VAL_21:.*]] = cc.loop while ((%[[VAL_22:.*]] = %[[VAL_4]]) -> (i64)) { -# CHECK: %[[VAL_23:.*]] = arith.cmpi slt, %[[VAL_22]], %[[VAL_5]] : i64 -# CHECK: cc.condition %[[VAL_23]](%[[VAL_22]] : i64) -# CHECK: } do { -# CHECK: ^bb0(%[[VAL_24:.*]]: i64): -# CHECK: %[[VAL_25:.*]] = cc.compute_ptr %[[VAL_7]]{{\[}}%[[VAL_24]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_26:.*]] = cc.load %[[VAL_25]] : !cc.ptr> -# CHECK: %[[VAL_27:.*]] = cc.extract_value %[[VAL_26]][0] : (!cc.struct<{i64, i64}>) -> i64 -# CHECK: %[[VAL_28:.*]] = cc.extract_value %[[VAL_26]][1] : (!cc.struct<{i64, i64}>) -> i64 -# CHECK: %[[VAL_29:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_3]] : i64 -# CHECK: cc.if(%[[VAL_29]]) { -# CHECK: %[[VAL_30:.*]] = quake.extract_ref %[[VAL_0]]{{\[}}%[[VAL_27]]] : (!quake.veq, i64) -> !quake.ref -# CHECK: quake.x {{\[}}%[[VAL_30]]] %[[VAL_1]] : (!quake.ref, !quake.ref) -> () +# CHECK: ^bb0(%[[VAL_10:.*]]: i64): +# CHECK: %[[VAL_11:.*]] = cc.stdvec_data %[[VAL_2]] : (!cc.stdvec) -> !cc.ptr> +# CHECK: %[[VAL_12:.*]] = cc.compute_ptr %[[VAL_11]]{{\[}}%[[VAL_10]]] : (!cc.ptr>, i64) -> !cc.ptr +# CHECK: %[[VAL_13:.*]] = cc.load %[[VAL_12]] : !cc.ptr +# CHECK: %[[VAL_14:.*]] = arith.cmpi eq, %[[VAL_13]], %[[VAL_3]] : i64 +# CHECK: cc.if(%[[VAL_14]]) { +# CHECK: %[[VAL_15:.*]] = quake.extract_ref %[[VAL_0]]{{\[}}%[[VAL_10]]] : (!quake.veq, i64) -> !quake.ref +# CHECK: quake.x {{\[}}%[[VAL_15]]] %[[VAL_1]] : (!quake.ref, !quake.ref) -> () # CHECK: } -# CHECK: cc.continue %[[VAL_24]] : i64 +# CHECK: cc.continue %[[VAL_10]] : i64 # CHECK: } step { -# CHECK: ^bb0(%[[VAL_31:.*]]: i64): -# CHECK: %[[VAL_32:.*]] = arith.addi %[[VAL_31]], %[[VAL_3]] : i64 -# CHECK: cc.continue %[[VAL_32]] : i64 +# CHECK: ^bb0(%[[VAL_16:.*]]: i64): +# CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_16]], %[[VAL_3]] : i64 +# CHECK: cc.continue %[[VAL_17]] : i64 # CHECK: } {invariant} # CHECK: return # CHECK: } + diff --git a/python/tests/mlir/ast_qreg_slice.py b/python/tests/mlir/ast_qreg_slice.py index 9de9e5dbe7..04dbbcb583 100644 --- a/python/tests/mlir/ast_qreg_slice.py +++ b/python/tests/mlir/ast_qreg_slice.py @@ -49,16 +49,15 @@ def slice(): print(slice) slice() - # CHECK-LABEL: func.func @__nvqpp__mlirgen__slice() attributes {"cudaq-entrypoint", "cudaq-kernel"} { -# CHECK-DAG: %[[VAL_0:.*]] = arith.constant 3 : i64 -# CHECK-DAG: %[[VAL_1:.*]] = arith.constant 4 : i64 -# CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 -# CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 -# CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3.1415926535897931 : f64 -# CHECK-DAG: %[[VAL_5:.*]] = arith.constant 5 : i64 -# CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : i64 -# CHECK: %[[VAL_7:.*]] = quake.alloca !quake.veq<4> +# CHECK-DAG: %[[VAL_0:.*]] = arith.constant 3 : i64 +# CHECK-DAG: %[[VAL_1:.*]] = arith.constant 4 : i64 +# CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 +# CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 +# CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3.1415926535897931 : f64 +# CHECK-DAG: %[[VAL_5:.*]] = arith.constant 5 : i64 +# CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : i64 +# CHECK-DAG: %[[VAL_7:.*]] = quake.alloca !quake.veq<4> # CHECK: %[[VAL_8:.*]] = quake.subveq %[[VAL_7]], 2, 3 : (!quake.veq<4>) -> !quake.veq<2> # CHECK: %[[VAL_9:.*]] = quake.extract_ref %[[VAL_8]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: quake.x %[[VAL_9]] : 
(!quake.ref) -> () @@ -75,68 +74,48 @@ def slice(): # CHECK: %[[VAL_16:.*]] = quake.extract_ref %[[VAL_14]][1] : (!quake.veq<2>) -> !quake.ref # CHECK: quake.z %[[VAL_16]] : (!quake.ref) -> () # CHECK: %[[VAL_17:.*]] = cc.alloca !cc.array -# CHECK: %[[VAL_172:.*]] = cc.cast %[[VAL_17]] : (!cc.ptr>) -> !cc.ptr> -# CHECK: %[[VAL_18:.*]] = cc.cast %[[VAL_17]] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_3]], %[[VAL_18]] : !cc.ptr -# CHECK: %[[VAL_19:.*]] = cc.compute_ptr %[[VAL_17]][1] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_2]], %[[VAL_19]] : !cc.ptr -# CHECK: %[[VAL_20:.*]] = cc.compute_ptr %[[VAL_17]][2] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_0]], %[[VAL_20]] : !cc.ptr -# CHECK: %[[VAL_21:.*]] = cc.compute_ptr %[[VAL_17]][3] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_1]], %[[VAL_21]] : !cc.ptr -# CHECK: %[[VAL_22:.*]] = cc.compute_ptr %[[VAL_17]][4] : (!cc.ptr>) -> !cc.ptr -# CHECK: cc.store %[[VAL_5]], %[[VAL_22]] : !cc.ptr -# CHECK: %[[VAL_23:.*]] = cc.stdvec_init %[[VAL_172]], %[[VAL_5]] : (!cc.ptr>, i64) -> !cc.stdvec -# CHECK: %[[VAL_24:.*]] = cc.alloca !cc.stdvec -# CHECK: cc.store %[[VAL_23]], %[[VAL_24]] : !cc.ptr> -# CHECK: %[[VAL_25:.*]] = cc.load %[[VAL_24]] : !cc.ptr> -# CHECK: %[[VAL_26:.*]] = cc.stdvec_data %[[VAL_25]] : (!cc.stdvec) -> !cc.ptr> -# CHECK: %[[VAL_27:.*]] = cc.compute_ptr %[[VAL_26]][2] : (!cc.ptr>) -> !cc.ptr -# CHECK: %[[VAL_28:.*]] = cc.stdvec_init %[[VAL_27]], %[[VAL_2]] : (!cc.ptr, i64) -> !cc.stdvec -# CHECK: %[[VAL_29:.*]] = cc.alloca !cc.stdvec -# CHECK: cc.store %[[VAL_28]], %[[VAL_29]] : !cc.ptr> -# CHECK: %[[VAL_30:.*]] = cc.load %[[VAL_29]] : !cc.ptr> -# CHECK: %[[VAL_31:.*]] = cc.stdvec_size %[[VAL_30]] : (!cc.stdvec) -> i64 -# CHECK: %[[VAL_32:.*]] = cc.alloca !cc.struct<{i64, i64}>{{\[}}%[[VAL_31]] : i64] +# CHECK: %[[VAL_18:.*]] = cc.cast %[[VAL_17]] : (!cc.ptr>) -> !cc.ptr> +# CHECK: %[[VAL_19:.*]] = cc.cast %[[VAL_17]] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_3]], %[[VAL_19]] : !cc.ptr +# CHECK: %[[VAL_20:.*]] = cc.compute_ptr %[[VAL_17]][1] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_2]], %[[VAL_20]] : !cc.ptr +# CHECK: %[[VAL_21:.*]] = cc.compute_ptr %[[VAL_17]][2] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_0]], %[[VAL_21]] : !cc.ptr +# CHECK: %[[VAL_22:.*]] = cc.compute_ptr %[[VAL_17]][3] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_1]], %[[VAL_22]] : !cc.ptr +# CHECK: %[[VAL_23:.*]] = cc.compute_ptr %[[VAL_17]][4] : (!cc.ptr>) -> !cc.ptr +# CHECK: cc.store %[[VAL_5]], %[[VAL_23]] : !cc.ptr +# CHECK: %[[VAL_24:.*]] = cc.stdvec_init %[[VAL_18]], %[[VAL_5]] : (!cc.ptr>, i64) -> !cc.stdvec +# CHECK: %[[VAL_25:.*]] = cc.alloca !cc.stdvec +# CHECK: cc.store %[[VAL_24]], %[[VAL_25]] : !cc.ptr> +# CHECK: %[[VAL_26:.*]] = cc.load %[[VAL_25]] : !cc.ptr> +# CHECK: %[[VAL_27:.*]] = cc.stdvec_data %[[VAL_26]] : (!cc.stdvec) -> !cc.ptr> +# CHECK: %[[VAL_28:.*]] = cc.compute_ptr %[[VAL_27]][2] : (!cc.ptr>) -> !cc.ptr +# CHECK: %[[VAL_29:.*]] = cc.stdvec_init %[[VAL_28]], %[[VAL_2]] : (!cc.ptr, i64) -> !cc.stdvec +# CHECK: %[[VAL_30:.*]] = cc.alloca !cc.stdvec +# CHECK: cc.store %[[VAL_29]], %[[VAL_30]] : !cc.ptr> +# CHECK: %[[VAL_31:.*]] = cc.load %[[VAL_30]] : !cc.ptr> +# CHECK: %[[VAL_32:.*]] = cc.stdvec_size %[[VAL_31]] : (!cc.stdvec) -> i64 # CHECK: %[[VAL_33:.*]] = cc.loop while ((%[[VAL_34:.*]] = %[[VAL_6]]) -> (i64)) { -# CHECK: %[[VAL_35:.*]] = arith.cmpi slt, %[[VAL_34]], %[[VAL_31]] : i64 +# CHECK: %[[VAL_35:.*]] = arith.cmpi slt, %[[VAL_34]], %[[VAL_32]] : i64 # CHECK: cc.condition 
%[[VAL_35]](%[[VAL_34]] : i64) # CHECK: } do { # CHECK: ^bb0(%[[VAL_36:.*]]: i64): -# CHECK: %[[VAL_37:.*]] = cc.undef !cc.struct<{i64, i64}> -# CHECK: %[[VAL_38:.*]] = cc.stdvec_data %[[VAL_30]] : (!cc.stdvec) -> !cc.ptr> -# CHECK: %[[VAL_39:.*]] = cc.compute_ptr %[[VAL_38]][%[[VAL_36]]] : (!cc.ptr>, i64) -> !cc.ptr -# CHECK: %[[VAL_40:.*]] = cc.load %[[VAL_39]] : !cc.ptr -# CHECK: %[[VAL_41:.*]] = cc.compute_ptr %[[VAL_32]]{{\[}}%[[VAL_36]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_42:.*]] = cc.insert_value %[[VAL_37]][0], %[[VAL_36]] : (!cc.struct<{i64, i64}>, i64) -> !cc.struct<{i64, i64}> -# CHECK: %[[VAL_43:.*]] = cc.insert_value %[[VAL_42]][1], %[[VAL_40]] : (!cc.struct<{i64, i64}>, i64) -> !cc.struct<{i64, i64}> -# CHECK: cc.store %[[VAL_43]], %[[VAL_41]] : !cc.ptr> +# CHECK: %[[VAL_37:.*]] = cc.stdvec_data %[[VAL_31]] : (!cc.stdvec) -> !cc.ptr> +# CHECK: %[[VAL_38:.*]] = cc.compute_ptr %[[VAL_37]]{{\[}}%[[VAL_36]]] : (!cc.ptr>, i64) -> !cc.ptr +# CHECK: %[[VAL_39:.*]] = cc.load %[[VAL_38]] : !cc.ptr +# CHECK: %[[VAL_40:.*]] = arith.remui %[[VAL_36]], %[[VAL_1]] : i64 +# CHECK: %[[VAL_41:.*]] = quake.extract_ref %[[VAL_7]]{{\[}}%[[VAL_40]]] : (!quake.veq<4>, i64) -> !quake.ref +# CHECK: %[[VAL_42:.*]] = arith.sitofp %[[VAL_39]] : i64 to f64 +# CHECK: quake.ry (%[[VAL_42]]) %[[VAL_41]] : (f64, !quake.ref) -> () # CHECK: cc.continue %[[VAL_36]] : i64 # CHECK: } step { -# CHECK: ^bb0(%[[VAL_44:.*]]: i64): -# CHECK: %[[VAL_45:.*]] = arith.addi %[[VAL_44]], %[[VAL_3]] : i64 -# CHECK: cc.continue %[[VAL_45]] : i64 +# CHECK: ^bb0(%[[VAL_43:.*]]: i64): +# CHECK: %[[VAL_44:.*]] = arith.addi %[[VAL_43]], %[[VAL_3]] : i64 +# CHECK: cc.continue %[[VAL_44]] : i64 # CHECK: } {invariant} -# CHECK: %[[VAL_46:.*]] = cc.loop while ((%[[VAL_47:.*]] = %[[VAL_6]]) -> (i64)) { -# CHECK: %[[VAL_48:.*]] = arith.cmpi slt, %[[VAL_47]], %[[VAL_31]] : i64 -# CHECK: cc.condition %[[VAL_48]](%[[VAL_47]] : i64) -# CHECK: } do { -# CHECK: ^bb0(%[[VAL_49:.*]]: i64): -# CHECK: %[[VAL_50:.*]] = cc.compute_ptr %[[VAL_32]]{{\[}}%[[VAL_49]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_51:.*]] = cc.load %[[VAL_50]] : !cc.ptr> -# CHECK: %[[VAL_52:.*]] = cc.extract_value %[[VAL_51]][0] : (!cc.struct<{i64, i64}>) -> i64 -# CHECK: %[[VAL_53:.*]] = cc.extract_value %[[VAL_51]][1] : (!cc.struct<{i64, i64}>) -> i64 -# CHECK: %[[VAL_54:.*]] = arith.remui %[[VAL_52]], %[[VAL_1]] : i64 -# CHECK: %[[VAL_55:.*]] = quake.extract_ref %[[VAL_7]]{{\[}}%[[VAL_54]]] : (!quake.veq<4>, i64) -> !quake.ref -# CHECK: %[[VAL_56:.*]] = arith.sitofp %[[VAL_53]] : i64 to f64 -# CHECK: quake.ry (%[[VAL_56]]) %[[VAL_55]] : (f64, !quake.ref) -> () -# CHECK: cc.continue %[[VAL_49]] : i64 -# CHECK: } step { -# CHECK: ^bb0(%[[VAL_57:.*]]: i64): -# CHECK: %[[VAL_58:.*]] = arith.addi %[[VAL_57]], %[[VAL_3]] : i64 -# CHECK: cc.continue %[[VAL_58]] : i64 -# CHECK: } {invariant} -# CHECK: %[[VAL_59:.*]] = quake.extract_ref %[[VAL_7]][3] : (!quake.veq<4>) -> !quake.ref -# CHECK: quake.rz (%[[VAL_4]]) %[[VAL_59]] : (f64, !quake.ref) -> () +# CHECK: %[[VAL_45:.*]] = quake.extract_ref %[[VAL_7]][3] : (!quake.veq<4>) -> !quake.ref +# CHECK: quake.rz (%[[VAL_4]]) %[[VAL_45]] : (f64, !quake.ref) -> () # CHECK: return # CHECK: } + diff --git a/python/tests/mlir/conditional.py b/python/tests/mlir/conditional.py index d2d46e2934..1578c759c9 100644 --- a/python/tests/mlir/conditional.py +++ b/python/tests/mlir/conditional.py @@ -59,7 +59,7 @@ def test_function(): # CHECK: %[[VAL_9:.*]] = quake.discriminate %[[VAL_8]] : (!quake.measure) -> i1 # 
CHECK: cc.if(%[[VAL_9]]) { # CHECK: quake.x %[[VAL_7]] : (!quake.ref) -> () -# CHECK: %[[VAL_10:.*]] = quake.mz %[[VAL_7]] name "" : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_10:.*]] = quake.mz %[[VAL_7]] : (!quake.ref) -> !quake.measure # CHECK: } # CHECK: %[[VAL_11:.*]] = cc.loop while ((%[[VAL_12:.*]] = %[[VAL_2]]) -> (i64)) { # CHECK: %[[VAL_13:.*]] = arith.cmpi slt, %[[VAL_12]], %[[VAL_0]] : i64 @@ -76,7 +76,7 @@ def test_function(): # CHECK: } {invariant} # CHECK: cc.if(%[[VAL_9]]) { # CHECK: quake.x %[[VAL_7]] : (!quake.ref) -> () -# CHECK: %[[VAL_18:.*]] = quake.mz %[[VAL_7]] name "" : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_18:.*]] = quake.mz %[[VAL_7]] : (!quake.ref) -> !quake.measure # CHECK: } # CHECK: return # CHECK: } @@ -117,7 +117,7 @@ def then_function(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () -# CHECK: %[[VAL_1:.*]] = quake.mz %[[VAL_0]] name "" : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_1:.*]] = quake.mz %[[VAL_0]] : (!quake.ref) -> !quake.measure # CHECK: %[[VAL_2:.*]] = quake.discriminate %[[VAL_1]] : (!quake.measure) -> i1 # CHECK: cc.if(%[[VAL_2]]) { # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () diff --git a/python/tests/mlir/control.py b/python/tests/mlir/control.py index feace59c9f..68e4bb2310 100644 --- a/python/tests/mlir/control.py +++ b/python/tests/mlir/control.py @@ -288,7 +288,7 @@ def test_sample_control_qubit_args(): # CHECK: quake.h %{{[0-2]}} : (!quake.ref) -> () # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}[%{{[0-2]}}] %{{[0-2]}} : (!quake.ref, !quake.ref) -> () # CHECK: quake.h %{{[0-2]}} : (!quake.ref) -> () -# CHECK: %[[VAL_2:.*]] = quake.mz %{{[0-2]}} name "" : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_2:.*]] = quake.mz %{{[0-2]}} : (!quake.ref) -> !quake.measure # CHECK: return # CHECK: } @@ -345,7 +345,7 @@ def test_sample_control_qreg_args(): # CHECK: quake.x %[[VAL_7]] : (!quake.ref) -> () # CHECK: quake.x %[[VAL_6]] : (!quake.ref) -> () # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}[%[[VAL_5]]] %[[VAL_6]] : (!quake.veq<2>, !quake.ref) -> () -# CHECK: %[[VAL_19:.*]] = quake.mz %[[VAL_6]] name "" : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_19:.*]] = quake.mz %[[VAL_6]] : (!quake.ref) -> !quake.measure # CHECK: return # CHECK: } @@ -403,7 +403,7 @@ def test_sample_apply_call_control(): # CHECK: quake.h %{{[0-2]}} : (!quake.ref) -> () # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} [%{{[0-2]}}] %{{[0-2]}} : (!quake.ref, !quake.ref) -> () # CHECK: quake.h %{{[0-2]}} : (!quake.ref) -> () -# CHECK: %[[VAL_2:.*]] = quake.mz %{{[0-2]}} name "" : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_2:.*]] = quake.mz %{{[0-2]}} : (!quake.ref) -> !quake.measure # CHECK: return # CHECK: } diff --git a/python/tests/mlir/ghz.py b/python/tests/mlir/ghz.py index 28cf62a1da..8064bb5913 100644 --- a/python/tests/mlir/ghz.py +++ b/python/tests/mlir/ghz.py @@ -22,35 +22,35 @@ def ghz(N: int): print(ghz) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__ghz( - # CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { - # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 - # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 - # CHECK: %[[VAL_3:.*]] = cc.alloca i64 - # CHECK: cc.store %[[VAL_0]], %[[VAL_3]] : !cc.ptr - # CHECK: %[[VAL_4:.*]] = cc.load %[[VAL_3]] : !cc.ptr - # CHECK: 
%[[VAL_5:.*]] = quake.alloca !quake.veq{{\[}}%[[VAL_4]] : i64] - # CHECK: %[[VAL_6:.*]] = quake.extract_ref %[[VAL_5]][0] : (!quake.veq) -> !quake.ref - # CHECK: quake.h %[[VAL_6]] : (!quake.ref) -> () - # CHECK: %[[VAL_7:.*]] = cc.load %[[VAL_3]] : !cc.ptr - # CHECK: %[[VAL_8:.*]] = arith.subi %[[VAL_7]], %[[VAL_1]] : i64 - # CHECK: %[[VAL_9:.*]] = cc.loop while ((%[[VAL_10:.*]] = %[[VAL_2]]) -> (i64)) { - # CHECK: %[[VAL_11:.*]] = arith.cmpi slt, %[[VAL_10]], %[[VAL_8]] : i64 - # CHECK: cc.condition %[[VAL_11]](%[[VAL_10]] : i64) - # CHECK: } do { - # CHECK: ^bb0(%[[VAL_12:.*]]: i64): - # CHECK: %[[VAL_13:.*]] = quake.extract_ref %[[VAL_5]]{{\[}}%[[VAL_12]]] : (!quake.veq, i64) -> !quake.ref - # CHECK: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_1]] : i64 - # CHECK: %[[VAL_15:.*]] = quake.extract_ref %[[VAL_5]]{{\[}}%[[VAL_14]]] : (!quake.veq, i64) -> !quake.ref - # CHECK: quake.x {{\[}}%[[VAL_13]]] %[[VAL_15]] : (!quake.ref, !quake.ref) -> () - # CHECK: cc.continue %[[VAL_12]] : i64 - # CHECK: } step { - # CHECK: ^bb0(%[[VAL_16:.*]]: i64): - # CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_16]], %[[VAL_1]] : i64 - # CHECK: cc.continue %[[VAL_17]] : i64 - # CHECK: } {invariant} - # CHECK: return - # CHECK: } +# CHECK-LABEL: func.func @__nvqpp__mlirgen__ghz( +# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { +# CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 +# CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 +# CHECK-DAG: %[[VAL_3:.*]] = cc.alloca i64 +# CHECK: cc.store %[[VAL_0]], %[[VAL_3]] : !cc.ptr +# CHECK: %[[VAL_4:.*]] = cc.load %[[VAL_3]] : !cc.ptr +# CHECK: %[[VAL_5:.*]] = quake.alloca !quake.veq{{\[}}%[[VAL_4]] : i64] +# CHECK: %[[VAL_6:.*]] = quake.extract_ref %[[VAL_5]][0] : (!quake.veq) -> !quake.ref +# CHECK: quake.h %[[VAL_6]] : (!quake.ref) -> () +# CHECK: %[[VAL_7:.*]] = cc.load %[[VAL_3]] : !cc.ptr +# CHECK: %[[VAL_8:.*]] = arith.subi %[[VAL_7]], %[[VAL_1]] : i64 +# CHECK: %[[VAL_9:.*]] = cc.loop while ((%[[VAL_10:.*]] = %[[VAL_2]]) -> (i64)) { +# CHECK: %[[VAL_11:.*]] = arith.cmpi slt, %[[VAL_10]], %[[VAL_8]] : i64 +# CHECK: cc.condition %[[VAL_11]](%[[VAL_10]] : i64) +# CHECK: } do { +# CHECK: ^bb0(%[[VAL_12:.*]]: i64): +# CHECK: %[[VAL_13:.*]] = quake.extract_ref %[[VAL_5]]{{\[}}%[[VAL_12]]] : (!quake.veq, i64) -> !quake.ref +# CHECK: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_1]] : i64 +# CHECK: %[[VAL_15:.*]] = quake.extract_ref %[[VAL_5]]{{\[}}%[[VAL_14]]] : (!quake.veq, i64) -> !quake.ref +# CHECK: quake.x {{\[}}%[[VAL_13]]] %[[VAL_15]] : (!quake.ref, !quake.ref) -> () +# CHECK: cc.continue %[[VAL_12]] : i64 +# CHECK: } step { +# CHECK: ^bb0(%[[VAL_16:.*]]: i64): +# CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_16]], %[[VAL_1]] : i64 +# CHECK: cc.continue %[[VAL_17]] : i64 +# CHECK: } {invariant} +# CHECK: return +# CHECK: } @cudaq.kernel def simple(numQubits: int): @@ -61,13 +61,12 @@ def simple(numQubits: int): print(simple) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__simple( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { -# CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i64 -# CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : i64 -# CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i64 -# CHECK: %[[VAL_4:.*]] = cc.alloca i64 +# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { +# CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i64 +# CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : i64 +# CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i64 +# CHECK-DAG: %[[VAL_4:.*]] = 
cc.alloca i64 # CHECK: cc.store %[[VAL_0]], %[[VAL_4]] : !cc.ptr # CHECK: %[[VAL_5:.*]] = cc.load %[[VAL_4]] : !cc.ptr # CHECK: %[[VAL_6:.*]] = quake.alloca !quake.veq{{\[}}%[[VAL_5]] : i64] @@ -77,41 +76,20 @@ def simple(numQubits: int): # CHECK: %[[VAL_9:.*]] = arith.subi %[[VAL_8]], %[[VAL_1]] : i64 # CHECK: %[[VAL_10:.*]] = quake.subveq %[[VAL_6]], 0, %[[VAL_9]] : (!quake.veq, i64) -> !quake.veq # CHECK: %[[VAL_11:.*]] = quake.veq_size %[[VAL_10]] : (!quake.veq) -> i64 -# CHECK: %[[VAL_12:.*]] = cc.alloca !cc.struct<{i64, !quake.ref}>{{\[}}%[[VAL_11]] : i64] -# CHECK: %[[VAL_13:.*]] = cc.loop while ((%[[VAL_14:.*]] = %[[VAL_3]]) -> (i64)) { -# CHECK: %[[VAL_15:.*]] = arith.cmpi slt, %[[VAL_14]], %[[VAL_11]] : i64 -# CHECK: cc.condition %[[VAL_15]](%[[VAL_14]] : i64) -# CHECK: } do { -# CHECK: ^bb0(%[[VAL_16:.*]]: i64): -# CHECK: %[[VAL_17:.*]] = cc.undef !cc.struct<{i64, !quake.ref}> -# CHECK: %[[VAL_18:.*]] = quake.extract_ref %[[VAL_10]]{{\[}}%[[VAL_16]]] : (!quake.veq, i64) -> !quake.ref -# CHECK: %[[VAL_19:.*]] = cc.compute_ptr %[[VAL_12]]{{\[}}%[[VAL_16]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_20:.*]] = cc.insert_value %[[VAL_17]][0], %[[VAL_16]] : (!cc.struct<{i64, !quake.ref}>, i64) -> !cc.struct<{i64, !quake.ref}> -# CHECK: %[[VAL_21:.*]] = cc.insert_value %[[VAL_20]][1], %[[VAL_18]] : (!cc.struct<{i64, !quake.ref}>, !quake.ref) -> !cc.struct<{i64, !quake.ref}> -# CHECK: cc.store %[[VAL_21]], %[[VAL_19]] : !cc.ptr> -# CHECK: cc.continue %[[VAL_16]] : i64 -# CHECK: } step { -# CHECK: ^bb0(%[[VAL_22:.*]]: i64): -# CHECK: %[[VAL_23:.*]] = arith.addi %[[VAL_22]], %[[VAL_2]] : i64 -# CHECK: cc.continue %[[VAL_23]] : i64 -# CHECK: } {invariant} -# CHECK: %[[VAL_24:.*]] = cc.loop while ((%[[VAL_25:.*]] = %[[VAL_3]]) -> (i64)) { -# CHECK: %[[VAL_26:.*]] = arith.cmpi slt, %[[VAL_25]], %[[VAL_11]] : i64 -# CHECK: cc.condition %[[VAL_26]](%[[VAL_25]] : i64) +# CHECK: %[[VAL_12:.*]] = cc.loop while ((%[[VAL_13:.*]] = %[[VAL_3]]) -> (i64)) { +# CHECK: %[[VAL_14:.*]] = arith.cmpi slt, %[[VAL_13]], %[[VAL_11]] : i64 +# CHECK: cc.condition %[[VAL_14]](%[[VAL_13]] : i64) # CHECK: } do { -# CHECK: ^bb0(%[[VAL_27:.*]]: i64): -# CHECK: %[[VAL_28:.*]] = cc.compute_ptr %[[VAL_12]]{{\[}}%[[VAL_27]]] : (!cc.ptr x ?>>, i64) -> !cc.ptr> -# CHECK: %[[VAL_29:.*]] = cc.load %[[VAL_28]] : !cc.ptr> -# CHECK: %[[VAL_30:.*]] = cc.extract_value %[[VAL_29]][0] : (!cc.struct<{i64, !quake.ref}>) -> i64 -# CHECK: %[[VAL_31:.*]] = cc.extract_value %[[VAL_29]][1] : (!cc.struct<{i64, !quake.ref}>) -> !quake.ref -# CHECK: %[[VAL_32:.*]] = arith.addi %[[VAL_30]], %[[VAL_2]] : i64 -# CHECK: %[[VAL_33:.*]] = quake.extract_ref %[[VAL_6]]{{\[}}%[[VAL_32]]] : (!quake.veq, i64) -> !quake.ref -# CHECK: quake.x {{\[}}%[[VAL_31]]] %[[VAL_33]] : (!quake.ref, !quake.ref) -> () -# CHECK: cc.continue %[[VAL_27]] : i64 +# CHECK: ^bb0(%[[VAL_15:.*]]: i64): +# CHECK: %[[VAL_16:.*]] = quake.extract_ref %[[VAL_10]]{{\[}}%[[VAL_15]]] : (!quake.veq, i64) -> !quake.ref +# CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_15]], %[[VAL_2]] : i64 +# CHECK: %[[VAL_18:.*]] = quake.extract_ref %[[VAL_6]]{{\[}}%[[VAL_17]]] : (!quake.veq, i64) -> !quake.ref +# CHECK: quake.x {{\[}}%[[VAL_16]]] %[[VAL_18]] : (!quake.ref, !quake.ref) -> () +# CHECK: cc.continue %[[VAL_15]] : i64 # CHECK: } step { -# CHECK: ^bb0(%[[VAL_34:.*]]: i64): -# CHECK: %[[VAL_35:.*]] = arith.addi %[[VAL_34]], %[[VAL_2]] : i64 -# CHECK: cc.continue %[[VAL_35]] : i64 +# CHECK: ^bb0(%[[VAL_19:.*]]: i64): +# CHECK: %[[VAL_20:.*]] = arith.addi %[[VAL_19]], %[[VAL_2]] : i64 +# 
CHECK: cc.continue %[[VAL_20]] : i64 # CHECK: } {invariant} # CHECK: return # CHECK: } diff --git a/python/tests/mlir/measure.py b/python/tests/mlir/measure.py index c1bb76b332..3dc2a8c4bb 100644 --- a/python/tests/mlir/measure.py +++ b/python/tests/mlir/measure.py @@ -41,12 +41,12 @@ def test_kernel_measure_1q(): # CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_3:.*]] = quake.extract_ref %[[VAL_2]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: %[[VAL_4:.*]] = quake.extract_ref %[[VAL_2]][1] : (!quake.veq<2>) -> !quake.ref -# CHECK: %[[VAL_5:.*]] = quake.mx %[[VAL_3]] name "" : (!quake.ref) -> !quake.measure -# CHECK: %[[VAL_6:.*]] = quake.mx %[[VAL_4]] name "" : (!quake.ref) -> !quake.measure -# CHECK: %[[VAL_7:.*]] = quake.my %[[VAL_3]] name "" : (!quake.ref) -> !quake.measure -# CHECK: %[[VAL_8:.*]] = quake.my %[[VAL_4]] name "" : (!quake.ref) -> !quake.measure -# CHECK: %[[VAL_9:.*]] = quake.mz %[[VAL_3]] name "" : (!quake.ref) -> !quake.measure -# CHECK: %[[VAL_10:.*]] = quake.mz %[[VAL_4]] name "" : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_5:.*]] = quake.mx %[[VAL_3]] : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_6:.*]] = quake.mx %[[VAL_4]] : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_7:.*]] = quake.my %[[VAL_3]] : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_8:.*]] = quake.my %[[VAL_4]] : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_9:.*]] = quake.mz %[[VAL_3]] : (!quake.ref) -> !quake.measure +# CHECK: %[[VAL_10:.*]] = quake.mz %[[VAL_4]] : (!quake.ref) -> !quake.measure # CHECK: return # CHECK: } @@ -70,9 +70,9 @@ def test_kernel_measure_qreg(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<3> -# CHECK: %[[VAL_1:.*]] = quake.mx %[[VAL_0]] name "" : (!quake.veq<3>) -> !cc.stdvec -# CHECK: %[[VAL_2:.*]] = quake.my %[[VAL_0]] name "" : (!quake.veq<3>) -> !cc.stdvec -# CHECK: %[[VAL_3:.*]] = quake.mz %[[VAL_0]] name "" : (!quake.veq<3>) -> !cc.stdvec +# CHECK: %[[VAL_1:.*]] = quake.mx %[[VAL_0]] : (!quake.veq<3>) -> !cc.stdvec +# CHECK: %[[VAL_2:.*]] = quake.my %[[VAL_0]] : (!quake.veq<3>) -> !cc.stdvec +# CHECK: %[[VAL_3:.*]] = quake.mz %[[VAL_0]] : (!quake.veq<3>) -> !cc.stdvec # CHECK: return # CHECK: } diff --git a/python/tests/mlir/swap.py b/python/tests/mlir/swap.py index aeae75340b..20f27daae1 100644 --- a/python/tests/mlir/swap.py +++ b/python/tests/mlir/swap.py @@ -39,7 +39,7 @@ def test_swap_2q(): # CHECK: %[[VAL_2:.*]] = quake.extract_ref %[[VAL_0]][1] : (!quake.veq<2>) -> !quake.ref # CHECK: quake.x %[[VAL_1]] : (!quake.ref) -> () # CHECK: quake.swap %[[VAL_1]], %[[VAL_2]] : (!quake.ref, !quake.ref) -> () -# CHECK: %[[VAL_3:.*]] = quake.mz %[[VAL_0]] name "" : (!quake.veq<2>) -> !cc.stdvec +# CHECK: %[[VAL_3:.*]] = quake.mz %[[VAL_0]] : (!quake.veq<2>) -> !cc.stdvec # CHECK: return # CHECK: } diff --git a/python/tests/mlir/test_output_qir.py b/python/tests/mlir/test_output_qir.py index 224308de1f..f054c97db7 100644 --- a/python/tests/mlir/test_output_qir.py +++ b/python/tests/mlir/test_output_qir.py @@ -25,64 +25,68 @@ def ghz(numQubits: int): print(cudaq.translate(ghz_synth, format='qir-base')) -# CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_2:.*]]) -# CHECK: %[[VAL_3:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -# CHECK: %[[VAL_4:.*]] = bitcast i8* %[[VAL_3]] to %[[VAL_5:.*]]** -# CHECK: %[[VAL_6:.*]] = load %[[VAL_5]]*, 
%[[VAL_5]]** %[[VAL_4]], align 8 -# CHECK: tail call void @__quantum__qis__h(%[[VAL_5]]* %[[VAL_6]]) -# CHECK: %[[VAL_7:.*]] = add i64 %[[VAL_2]], -1 -# CHECK: %[[VAL_8:.*]] = tail call i64 @llvm.abs.i64(i64 %[[VAL_7]], i1 false) -# CHECK: %[[VAL_9:.*]] = alloca i64, i64 %[[VAL_8]], align 8 -# CHECK: %[[VAL_10:.*]] = icmp sgt i64 %[[VAL_7]], 0 -# CHECK: br i1 %[[VAL_10]], label %[[VAL_11:.*]], label %[[VAL_12:.*]] -# CHECK: .lr.ph: ; preds = %[[VAL_13:.*]], %[[VAL_11]] -# CHECK: %[[VAL_14:.*]] = phi i64 [ %[[VAL_15:.*]], %[[VAL_11]] ], [ 0, %[[VAL_13]] ] -# CHECK: %[[VAL_16:.*]] = getelementptr i64, i64* %[[VAL_9]], i64 %[[VAL_14]] -# CHECK: store i64 %[[VAL_14]], i64* %[[VAL_16]], align 8 -# CHECK: %[[VAL_15]] = add nuw nsw i64 %[[VAL_14]], 1 -# CHECK: %[[VAL_17:.*]] = icmp slt i64 %[[VAL_15]], %[[VAL_7]] -# CHECK: br i1 %[[VAL_17]], label %[[VAL_11]], label %[[VAL_21:.*]] -# CHECK: ._crit_edge: ; preds = %[[VAL_11]] -# CHECK: %[[VAL_18:.*]] = alloca { i64, i64 }, i64 %[[VAL_7]], align 8 -# CHECK: br i1 %[[VAL_10]], label %[[VAL_20:.*]], label %[[VAL_21]] -# CHECK: .preheader: ; preds = %[[VAL_20]] -# CHECK: br i1 %[[VAL_10]], label %[[VAL_22:.*]], label %[[VAL_21]] -# CHECK: .lr.ph10: ; preds = %[[VAL_21]], %[[VAL_20]] -# CHECK: %[[VAL_23:.*]] = phi i64 [ %[[VAL_24:.*]], %[[VAL_20]] ], [ 0, %[[VAL_21]] ] -# CHECK: %[[VAL_25:.*]] = getelementptr i64, i64* %[[VAL_9]], i64 %[[VAL_23]] -# CHECK: %[[VAL_26:.*]] = load i64, i64* %[[VAL_25]], align 8 -# CHECK: %[[VAL_27:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_23]], i32 0 -# CHECK: store i64 %[[VAL_23]], i64* %[[VAL_27]], align 8 -# CHECK: %[[VAL_28:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_23]], i32 1 -# CHECK: store i64 %[[VAL_26]], i64* %[[VAL_28]], align 8 -# CHECK: %[[VAL_24]] = add nuw nsw i64 %[[VAL_23]], 1 -# CHECK: %[[VAL_29:.*]] = icmp slt i64 %[[VAL_24]], %[[VAL_7]] -# CHECK: br i1 %[[VAL_29]], label %[[VAL_20]], label %[[VAL_30:.*]] -# CHECK: .lr.ph11: ; preds = %[[VAL_30]], %[[VAL_22]] -# CHECK: %[[VAL_31:.*]] = phi i64 [ %[[VAL_32:.*]], %[[VAL_22]] ], [ 0, %[[VAL_30]] ] -# CHECK: %[[VAL_33:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_31]], i32 0 -# CHECK: %[[VAL_34:.*]] = load i64, i64* %[[VAL_33]], align 8 -# CHECK: %[[VAL_35:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_31]], i32 1 -# CHECK: %[[VAL_36:.*]] = load i64, i64* %[[VAL_35]], align 8 -# CHECK: %[[VAL_37:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 %[[VAL_34]]) -# CHECK: %[[VAL_38:.*]] = bitcast i8* %[[VAL_37]] to %[[VAL_5]]** -# CHECK: %[[VAL_39:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_38]], align 8 -# CHECK: %[[VAL_40:.*]] = add i64 %[[VAL_36]], 1 -# CHECK: %[[VAL_41:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 %[[VAL_40]]) -# CHECK: %[[VAL_42:.*]] = bitcast i8* %[[VAL_41]] to %[[VAL_5]]** -# CHECK: %[[VAL_43:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_42]], align 8 -# CHECK: tail call void (i64, void (%[[VAL_1]]*, %[[VAL_5]]*)*, ...) 
@invokeWithControlQubits(i64 1, void (%[[VAL_1]]*, %[[VAL_5]]*)* nonnull @__quantum__qis__x__ctl, %[[VAL_5]]* %[[VAL_39]], %[[VAL_5]]* %[[VAL_43]]) -# CHECK: %[[VAL_32]] = add nuw nsw i64 %[[VAL_31]], 1 -# CHECK: %[[VAL_44:.*]] = icmp slt i64 %[[VAL_32]], %[[VAL_7]] -# CHECK: br i1 %[[VAL_44]], label %[[VAL_22]], label %[[VAL_21]] -# CHECK: ._crit_edge12: ; preds = %[[VAL_22]], %[[VAL_13]], %[[VAL_21]], %[[VAL_30]] -# CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +# CHECK-LABEL: define void @__nvqpp__mlirgen__ghz(i64 +# CHECK-SAME: %[[VAL_0:.*]]) local_unnamed_addr { +# CHECK: %[[VAL_1:.*]] = tail call %[[VAL_2:.*]]* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_0]]) +# CHECK: %[[VAL_3:.*]] = tail call %[[VAL_4:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_2]]* %[[VAL_1]], i64 0) +# CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]] +# CHECK: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_5]]) +# CHECK: %[[VAL_6:.*]] = add i64 %[[VAL_0]], -1 +# CHECK: %[[VAL_7:.*]] = tail call i64 @llvm.abs.i64(i64 %[[VAL_6]], i1 false) +# CHECK: %[[VAL_8:.*]] = alloca i64, i64 %[[VAL_7]] +# CHECK: %[[VAL_9:.*]] = icmp sgt i64 %[[VAL_6]], 0 +# CHECK: br i1 %[[VAL_9]], label %[[VAL_10:.*]], label %[[VAL_11:.*]] +# CHECK: : ; preds = %[[VAL_12:.*]], %[[VAL_10]] +# CHECK: %[[VAL_13:.*]] = phi i64 [ %[[VAL_14:.*]], %[[VAL_10]] ], [ 0, %[[VAL_12]] ] +# CHECK: %[[VAL_15:.*]] = getelementptr i64, i64* %[[VAL_8]], i64 %[[VAL_13]] +# CHECK: store i64 %[[VAL_13]], i64* %[[VAL_15]] +# CHECK: %[[VAL_14]] = add nuw nsw i64 %[[VAL_13]], 1 +# CHECK: %[[VAL_16:.*]] = icmp slt i64 %[[VAL_14]], %[[VAL_6]] +# CHECK: br i1 %[[VAL_16]], label %[[VAL_10]], label %[[VAL_17:.*]] +# CHECK: : ; preds = %[[VAL_10]] +# CHECK: %[[VAL_18:.*]] = alloca { i64, i64 }, i64 %[[VAL_6]] +# CHECK: br i1 %[[VAL_9]], label %[[VAL_19:.*]], label %[[VAL_11]] +# CHECK: : ; preds = %[[VAL_19]] +# CHECK: br i1 %[[VAL_9]], label %[[VAL_20:.*]], label %[[VAL_11]] +# CHECK: : ; preds = %[[VAL_17]], %[[VAL_19]] +# CHECK: %[[VAL_21:.*]] = phi i64 [ %[[VAL_22:.*]], %[[VAL_19]] ], [ 0, %[[VAL_17]] ] +# CHECK: %[[VAL_23:.*]] = getelementptr i64, i64* %[[VAL_8]], i64 %[[VAL_21]] +# CHECK: %[[VAL_24:.*]] = load i64, i64* %[[VAL_23]] +# CHECK: %[[VAL_25:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_21]], i32 0 +# CHECK: store i64 %[[VAL_21]], i64* %[[VAL_25]] +# CHECK: %[[VAL_26:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_21]], i32 1 +# CHECK: store i64 %[[VAL_24]], i64* %[[VAL_26]] +# CHECK: %[[VAL_22]] = add nuw nsw i64 %[[VAL_21]], 1 +# CHECK: %[[VAL_27:.*]] = icmp slt i64 %[[VAL_22]], %[[VAL_6]] +# CHECK: br i1 %[[VAL_27]], label %[[VAL_19]], label %[[VAL_28:.*]] +# CHECK: : ; preds = %[[VAL_28]], %[[VAL_20]] +# CHECK: %[[VAL_29:.*]] = phi i64 [ %[[VAL_30:.*]], %[[VAL_20]] ], [ 0, %[[VAL_28]] ] +# CHECK: %[[VAL_31:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_29]], i32 0 +# CHECK: %[[VAL_32:.*]] = load i64, i64* %[[VAL_31]] +# CHECK: %[[VAL_33:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_29]], i32 1 +# CHECK: %[[VAL_34:.*]] = load i64, i64* %[[VAL_33]] +# CHECK: %[[VAL_35:.*]] = tail call %[[VAL_4]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_2]]* %[[VAL_1]], i64 %[[VAL_32]]) +# CHECK: %[[VAL_36:.*]] = bitcast %[[VAL_4]]** %[[VAL_35]] to i8** +# CHECK: %[[VAL_37:.*]] = load i8*, i8** %[[VAL_36]] +# CHECK: %[[VAL_38:.*]] = add i64 %[[VAL_34]], 1 +# CHECK: %[[VAL_39:.*]] = tail 
call %[[VAL_4]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_2]]* %[[VAL_1]], i64 %[[VAL_38]]) +# CHECK: %[[VAL_40:.*]] = bitcast %[[VAL_4]]** %[[VAL_39]] to i8** +# CHECK: %[[VAL_41:.*]] = load i8*, i8** %[[VAL_40]] +# CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%[[VAL_2]]*, %[[VAL_4]]*)* @__quantum__qis__x__ctl to i8*), i8* %[[VAL_37]], i8* %[[VAL_41]]) +# CHECK: %[[VAL_30]] = add nuw nsw i64 %[[VAL_29]], 1 +# CHECK: %[[VAL_42:.*]] = icmp slt i64 %[[VAL_30]], %[[VAL_6]] +# CHECK: br i1 %[[VAL_42]], label %[[VAL_20]], label %[[VAL_11]] +# CHECK: : ; preds = %[[VAL_20]], %[[VAL_12]], %[[VAL_17]], %[[VAL_28]] +# CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_2]]* %[[VAL_1]]) # CHECK: ret void +# CHECK: } -# CHECK: tail call void @__quantum__qis__h__body( -# CHECK: %[[VAL_0:.*]]* null) +# CHECK-LABEL: define void @__nvqpp__mlirgen__ghz() local_unnamed_addr #0 { +# CHECK: tail call void @__quantum__qis__h__body(%[[VAL_0:.*]]* null) # CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* null, %[[VAL_0]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*)) # CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*), %[[VAL_0]]* nonnull inttoptr (i64 2 to %[[VAL_0]]*)) # CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* nonnull inttoptr (i64 2 to %[[VAL_0]]*), %[[VAL_0]]* nonnull inttoptr (i64 3 to %[[VAL_0]]*)) # CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* nonnull inttoptr (i64 3 to %[[VAL_0]]*), %[[VAL_0]]* nonnull inttoptr (i64 4 to %[[VAL_0]]*)) # CHECK: ret void +# CHECK: } + diff --git a/python/tests/mlir/test_output_translate_qir.py b/python/tests/mlir/test_output_translate_qir.py index 40f78ab7e4..b0e2b6e661 100644 --- a/python/tests/mlir/test_output_translate_qir.py +++ b/python/tests/mlir/test_output_translate_qir.py @@ -25,64 +25,68 @@ def ghz(numQubits: int): print(cudaq.translate(ghz_synth, format='qir-base')) -# CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_2:.*]]) -# CHECK: %[[VAL_3:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -# CHECK: %[[VAL_4:.*]] = bitcast i8* %[[VAL_3]] to %[[VAL_5:.*]]** -# CHECK: %[[VAL_6:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_4]], align 8 -# CHECK: tail call void @__quantum__qis__h(%[[VAL_5]]* %[[VAL_6]]) -# CHECK: %[[VAL_7:.*]] = add i64 %[[VAL_2]], -1 -# CHECK: %[[VAL_8:.*]] = tail call i64 @llvm.abs.i64(i64 %[[VAL_7]], i1 false) -# CHECK: %[[VAL_9:.*]] = alloca i64, i64 %[[VAL_8]], align 8 -# CHECK: %[[VAL_10:.*]] = icmp sgt i64 %[[VAL_7]], 0 -# CHECK: br i1 %[[VAL_10]], label %[[VAL_11:.*]], label %[[VAL_12:.*]] -# CHECK: .lr.ph: ; preds = %[[VAL_13:.*]], %[[VAL_11]] -# CHECK: %[[VAL_14:.*]] = phi i64 [ %[[VAL_15:.*]], %[[VAL_11]] ], [ 0, %[[VAL_13]] ] -# CHECK: %[[VAL_16:.*]] = getelementptr i64, i64* %[[VAL_9]], i64 %[[VAL_14]] -# CHECK: store i64 %[[VAL_14]], i64* %[[VAL_16]], align 8 -# CHECK: %[[VAL_15]] = add nuw nsw i64 %[[VAL_14]], 1 -# CHECK: %[[VAL_17:.*]] = icmp slt i64 %[[VAL_15]], %[[VAL_7]] -# CHECK: br i1 %[[VAL_17]], label %[[VAL_11]], label %[[VAL_21:.*]] -# CHECK: ._crit_edge: ; preds = %[[VAL_11]] -# CHECK: %[[VAL_18:.*]] = alloca { i64, i64 }, i64 %[[VAL_7]], align 8 -# CHECK: br i1 %[[VAL_10]], label %[[VAL_20:.*]], label %[[VAL_21]] -# CHECK: .preheader: ; preds = %[[VAL_20]] -# CHECK: br i1 %[[VAL_10]], label %[[VAL_22:.*]], 
label %[[VAL_21]] -# CHECK: .lr.ph10: ; preds = %[[VAL_21]], %[[VAL_20]] -# CHECK: %[[VAL_23:.*]] = phi i64 [ %[[VAL_24:.*]], %[[VAL_20]] ], [ 0, %[[VAL_21]] ] -# CHECK: %[[VAL_25:.*]] = getelementptr i64, i64* %[[VAL_9]], i64 %[[VAL_23]] -# CHECK: %[[VAL_26:.*]] = load i64, i64* %[[VAL_25]], align 8 -# CHECK: %[[VAL_27:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_23]], i32 0 -# CHECK: store i64 %[[VAL_23]], i64* %[[VAL_27]], align 8 -# CHECK: %[[VAL_28:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_23]], i32 1 -# CHECK: store i64 %[[VAL_26]], i64* %[[VAL_28]], align 8 -# CHECK: %[[VAL_24]] = add nuw nsw i64 %[[VAL_23]], 1 -# CHECK: %[[VAL_29:.*]] = icmp slt i64 %[[VAL_24]], %[[VAL_7]] -# CHECK: br i1 %[[VAL_29]], label %[[VAL_20]], label %[[VAL_30:.*]] -# CHECK: .lr.ph11: ; preds = %[[VAL_30]], %[[VAL_22]] -# CHECK: %[[VAL_31:.*]] = phi i64 [ %[[VAL_32:.*]], %[[VAL_22]] ], [ 0, %[[VAL_30]] ] -# CHECK: %[[VAL_33:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_31]], i32 0 -# CHECK: %[[VAL_34:.*]] = load i64, i64* %[[VAL_33]], align 8 -# CHECK: %[[VAL_35:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_31]], i32 1 -# CHECK: %[[VAL_36:.*]] = load i64, i64* %[[VAL_35]], align 8 -# CHECK: %[[VAL_37:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 %[[VAL_34]]) -# CHECK: %[[VAL_38:.*]] = bitcast i8* %[[VAL_37]] to %[[VAL_5]]** -# CHECK: %[[VAL_39:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_38]], align 8 -# CHECK: %[[VAL_40:.*]] = add i64 %[[VAL_36]], 1 -# CHECK: %[[VAL_41:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 %[[VAL_40]]) -# CHECK: %[[VAL_42:.*]] = bitcast i8* %[[VAL_41]] to %[[VAL_5]]** -# CHECK: %[[VAL_43:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_42]], align 8 -# CHECK: tail call void (i64, void (%[[VAL_1]]*, %[[VAL_5]]*)*, ...) 
@invokeWithControlQubits(i64 1, void (%[[VAL_1]]*, %[[VAL_5]]*)* nonnull @__quantum__qis__x__ctl, %[[VAL_5]]* %[[VAL_39]], %[[VAL_5]]* %[[VAL_43]]) -# CHECK: %[[VAL_32]] = add nuw nsw i64 %[[VAL_31]], 1 -# CHECK: %[[VAL_44:.*]] = icmp slt i64 %[[VAL_32]], %[[VAL_7]] -# CHECK: br i1 %[[VAL_44]], label %[[VAL_22]], label %[[VAL_21]] -# CHECK: ._crit_edge12: ; preds = %[[VAL_22]], %[[VAL_13]], %[[VAL_21]], %[[VAL_30]] -# CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +# CHECK-LABEL: define void @__nvqpp__mlirgen__ghz(i64 +# CHECK-SAME: %[[VAL_0:.*]]) local_unnamed_addr { +# CHECK: %[[VAL_1:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_0]]) +# CHECK: %[[VAL_3:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_1]], i64 0) +# CHECK: %[[VAL_5:.*]] = load %Qubit*, %Qubit** %[[VAL_3]] +# CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_5]]) +# CHECK: %[[VAL_6:.*]] = add i64 %[[VAL_0]], -1 +# CHECK: %[[VAL_7:.*]] = tail call i64 @llvm.abs.i64(i64 %[[VAL_6]], i1 false) +# CHECK: %[[VAL_8:.*]] = alloca i64, i64 %[[VAL_7]] +# CHECK: %[[VAL_9:.*]] = icmp sgt i64 %[[VAL_6]], 0 +# CHECK: br i1 %[[VAL_9]], label %[[VAL_10:.*]], label %[[VAL_11:.*]] +# CHECK: : ; preds = %[[VAL_12:.*]], %[[VAL_10]] +# CHECK: %[[VAL_13:.*]] = phi i64 [ %[[VAL_14:.*]], %[[VAL_10]] ], [ 0, %[[VAL_12]] ] +# CHECK: %[[VAL_15:.*]] = getelementptr i64, i64* %[[VAL_8]], i64 %[[VAL_13]] +# CHECK: store i64 %[[VAL_13]], i64* %[[VAL_15]] +# CHECK: %[[VAL_14]] = add nuw nsw i64 %[[VAL_13]], 1 +# CHECK: %[[VAL_16:.*]] = icmp slt i64 %[[VAL_14]], %[[VAL_6]] +# CHECK: br i1 %[[VAL_16]], label %[[VAL_10]], label %[[VAL_17:.*]] +# CHECK: : ; preds = %[[VAL_10]] +# CHECK: %[[VAL_18:.*]] = alloca { i64, i64 }, i64 %[[VAL_6]] +# CHECK: br i1 %[[VAL_9]], label %[[VAL_19:.*]], label %[[VAL_11]] +# CHECK: : ; preds = %[[VAL_19]] +# CHECK: br i1 %[[VAL_9]], label %[[VAL_20:.*]], label %[[VAL_11]] +# CHECK: : ; preds = %[[VAL_17]], %[[VAL_19]] +# CHECK: %[[VAL_21:.*]] = phi i64 [ %[[VAL_22:.*]], %[[VAL_19]] ], [ 0, %[[VAL_17]] ] +# CHECK: %[[VAL_23:.*]] = getelementptr i64, i64* %[[VAL_8]], i64 %[[VAL_21]] +# CHECK: %[[VAL_24:.*]] = load i64, i64* %[[VAL_23]] +# CHECK: %[[VAL_25:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_21]], i32 0 +# CHECK: store i64 %[[VAL_21]], i64* %[[VAL_25]] +# CHECK: %[[VAL_26:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_21]], i32 1 +# CHECK: store i64 %[[VAL_24]], i64* %[[VAL_26]] +# CHECK: %[[VAL_22]] = add nuw nsw i64 %[[VAL_21]], 1 +# CHECK: %[[VAL_27:.*]] = icmp slt i64 %[[VAL_22]], %[[VAL_6]] +# CHECK: br i1 %[[VAL_27]], label %[[VAL_19]], label %[[VAL_28:.*]] +# CHECK: : ; preds = %[[VAL_28]], %[[VAL_20]] +# CHECK: %[[VAL_29:.*]] = phi i64 [ %[[VAL_30:.*]], %[[VAL_20]] ], [ 0, %[[VAL_28]] ] +# CHECK: %[[VAL_31:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_29]], i32 0 +# CHECK: %[[VAL_32:.*]] = load i64, i64* %[[VAL_31]] +# CHECK: %[[VAL_33:.*]] = getelementptr { i64, i64 }, { i64, i64 }* %[[VAL_18]], i64 %[[VAL_29]], i32 1 +# CHECK: %[[VAL_34:.*]] = load i64, i64* %[[VAL_33]] +# CHECK: %[[VAL_35:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_1]], i64 %[[VAL_32]]) +# CHECK: %[[VAL_36:.*]] = bitcast %Qubit** %[[VAL_35]] to i8** +# CHECK: %[[VAL_37:.*]] = load i8*, i8** %[[VAL_36]] +# CHECK: %[[VAL_38:.*]] = add i64 %[[VAL_34]], 1 +# CHECK: %[[VAL_39:.*]] = tail call %Qubit** 
@__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_1]], i64 %[[VAL_38]]) +# CHECK: %[[VAL_40:.*]] = bitcast %Qubit** %[[VAL_39]] to i8** +# CHECK: %[[VAL_41:.*]] = load i8*, i8** %[[VAL_40]] +# CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), i8* %[[VAL_37]], i8* %[[VAL_41]]) +# CHECK: %[[VAL_30]] = add nuw nsw i64 %[[VAL_29]], 1 +# CHECK: %[[VAL_42:.*]] = icmp slt i64 %[[VAL_30]], %[[VAL_6]] +# CHECK: br i1 %[[VAL_42]], label %[[VAL_20]], label %[[VAL_11]] +# CHECK: ._crit_edge13: ; preds = %[[VAL_20]], %[[VAL_12]], %[[VAL_17]], %[[VAL_28]] +# CHECK: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_1]]) # CHECK: ret void +# CHECK: } -# CHECK: tail call void @__quantum__qis__h__body( -# CHECK: %[[VAL_0:.*]]* null) +# CHECK-LABEL: define void @__nvqpp__mlirgen__ghz() local_unnamed_addr #0 { +# CHECK: tail call void @__quantum__qis__h__body(%[[VAL_0:.*]]* null) # CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* null, %[[VAL_0]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*)) # CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*), %[[VAL_0]]* nonnull inttoptr (i64 2 to %[[VAL_0]]*)) # CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* nonnull inttoptr (i64 2 to %[[VAL_0]]*), %[[VAL_0]]* nonnull inttoptr (i64 3 to %[[VAL_0]]*)) # CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* nonnull inttoptr (i64 3 to %[[VAL_0]]*), %[[VAL_0]]* nonnull inttoptr (i64 4 to %[[VAL_0]]*)) # CHECK: ret void +# CHECK: } + diff --git a/runtime/common/Trace.cpp b/runtime/common/Trace.cpp index be60099b1c..d309feecfa 100644 --- a/runtime/common/Trace.cpp +++ b/runtime/common/Trace.cpp @@ -10,11 +10,10 @@ #include #include -namespace cudaq { - -void Trace::appendInstruction(std::string_view name, std::vector params, - std::vector controls, - std::vector targets) { +void cudaq::Trace::appendInstruction(std::string_view name, + std::vector params, + std::vector controls, + std::vector targets) { assert(!targets.empty() && "An instruction must have at least one target"); auto findMaxID = [](const std::vector &qudits) -> std::size_t { return std::max_element(qudits.cbegin(), qudits.cend(), @@ -27,5 +26,3 @@ void Trace::appendInstruction(std::string_view name, std::vector params, numQudits = std::max(numQudits, maxID + 1); instructions.emplace_back(name, params, controls, targets); } - -} // namespace cudaq diff --git a/runtime/cudaq/builder/kernel_builder.cpp b/runtime/cudaq/builder/kernel_builder.cpp index c5a8a15c82..e1d409430e 100644 --- a/runtime/cudaq/builder/kernel_builder.cpp +++ b/runtime/cudaq/builder/kernel_builder.cpp @@ -776,16 +776,28 @@ QuakeValue applyMeasure(ImplicitLocOpBuilder &builder, Value value, cudaq::info("kernel_builder apply measurement"); - auto strAttr = builder.getStringAttr(regName); + // FIXME: regName cannot be empty, but the prototypes give an empty string as + // the default. This is a workaround to clear out the empty string so we don't + // build broken IR. 
+ StringAttr strAttr; + if (!regName.empty()) + strAttr = builder.getStringAttr(regName); + Type resTy = builder.getI1Type(); Type measTy = quake::MeasureType::get(builder.getContext()); if (!type.isa()) { resTy = cc::StdvecType::get(resTy); measTy = cc::StdvecType::get(measTy); } - Value measureResult = - builder.template create(measTy, value, strAttr) - .getMeasOut(); + Value measureResult; + if (strAttr) + measureResult = + builder.template create(measTy, value, strAttr) + .getMeasOut(); + else + measureResult = + builder.template create(measTy, value).getMeasOut(); + Value bits = builder.create(resTy, measureResult); return QuakeValue(builder, bits); } diff --git a/runtime/nvqir/NVQIR.cpp b/runtime/nvqir/NVQIR.cpp index f06c967e11..9be1571ed6 100644 --- a/runtime/nvqir/NVQIR.cpp +++ b/runtime/nvqir/NVQIR.cpp @@ -1029,7 +1029,7 @@ void generalizedInvokeWithRotationsControlsTargets( controls[i] = va_arg(args, Qubit *); } for (i = 0; i < numControlQubitOperands; ++i) { - arrayAndLength[i] = 0; + arrayAndLength[numControlArrayOperands + i] = 0; controls[numControlArrayOperands + i] = va_arg(args, Qubit *); } for (i = 0; i < numTargetOperands; ++i) diff --git a/test/Quake/OpenQASM/bugReport_641.qke b/test/Quake/OpenQASM/bugReport_641.qke index 6a452037d7..6d5b6efade 100644 --- a/test/Quake/OpenQASM/bugReport_641.qke +++ b/test/Quake/OpenQASM/bugReport_641.qke @@ -22,7 +22,7 @@ module { quake.h %arg0 : (!quake.ref) -> () call @__nvqpp__mlirgen____nvqppBuilderKernel_093606261879(%arg0) : (!quake.ref) -> () quake.h %arg0 : (!quake.ref) -> () - %bits = quake.mz %arg0 name "" : (!quake.ref) -> !quake.measure + %bits = quake.mz %arg0 : (!quake.ref) -> !quake.measure return } func.func @__nvqpp__mlirgen____nvqppBuilderKernel_093606261879(%arg0: !quake.ref) { diff --git a/test/Quake/OpenQASM/callOp_380.qke b/test/Quake/OpenQASM/callOp_380.qke index 70881fd12d..fab728e3c2 100644 --- a/test/Quake/OpenQASM/callOp_380.qke +++ b/test/Quake/OpenQASM/callOp_380.qke @@ -12,7 +12,7 @@ module { func.func @__nvqpp__mlirgen____nvqppBuilderKernel_093606261879() attributes {"cudaq-entrypoint"} { %0 = quake.alloca !quake.ref call @__nvqpp__mlirgen____nvqppBuilderKernel_367535629127(%0) : (!quake.ref) -> () - %1 = quake.mz %0 name "" : (!quake.ref) -> !quake.measure + %1 = quake.mz %0 : (!quake.ref) -> !quake.measure return } func.func @__nvqpp__mlirgen____nvqppBuilderKernel_367535629127(%arg0: !quake.ref) { diff --git a/test/Quake/OpenQASM/topologicalSort_603.qke b/test/Quake/OpenQASM/topologicalSort_603.qke index e0b9f67e13..00d9e32e8a 100644 --- a/test/Quake/OpenQASM/topologicalSort_603.qke +++ b/test/Quake/OpenQASM/topologicalSort_603.qke @@ -18,7 +18,7 @@ module { quake.h %arg0 : (!quake.ref) -> () call @__nvqpp__mlirgen____nvqppBuilderKernel_367535629127(%arg0) : (!quake.ref) -> () quake.h %arg0 : (!quake.ref) -> () - %0 = quake.mz %arg0 name "" : (!quake.ref) -> !quake.measure + %0 = quake.mz %arg0 : (!quake.ref) -> !quake.measure return } func.func @__nvqpp__mlirgen____nvqppBuilderKernel_367535629127(%arg0: !quake.ref) {
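The kernel_builder.cpp hunk above explains why the empty default register name has to be dropped before building the MzOp, and the updated Python CHECK lines (no more `name ""` on quake.mx/my/mz) are the visible effect of that change. Below is a small usage sketch, assuming the public C++ builder API (make_kernel, qalloc, mz) and an optional register-name argument on mz that defaults to the empty string, as the FIXME suggests; the exact overload set is not shown in this diff, so treat the named call as hypothetical.

// Sketch only: shows which measurements end up with a register-name attribute
// after this change. Assumes the standard builder API (make_kernel, qalloc, mz)
// and an optional register-name parameter on mz (default "").
#include <cudaq.h>
#include <iostream>

int main() {
  auto kernel = cudaq::make_kernel();
  auto q = kernel.qalloc(2);
  kernel.h(q[0]);
  kernel.x<cudaq::ctrl>(q[0], q[1]);

  // No name supplied: applyMeasure sees an empty regName, skips building the
  // StringAttr, and the printed IR shows `quake.mz %... :` with no name.
  kernel.mz(q);

  // Hypothetical explicit name: a non-empty regName would still be attached
  // to the MzOp as a `name` attribute.
  // kernel.mz(q, "result");

  std::cout << kernel.to_quake() << std::endl;
  return 0;
}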
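The NVQIR.cpp change fixes the index used when tagging individual control qubits in generalizedInvokeWithRotationsControlsTargets: the zero entries for single-qubit controls must be written after the entries that hold the control-array lengths, otherwise those lengths are clobbered whenever a gate mixes array and single-qubit controls. The following is a minimal, self-contained sketch of that parallel-array layout; the variable names mirror the diff, but the surrounding varargs handling is omitted, so this is an illustration rather than the runtime code itself.

// Simplified model of the control bookkeeping fixed in NVQIR.cpp.
// arrayAndLength[k] holds a control array's length, or 0 when controls[k]
// refers to a single qubit. Array entries come first; single qubits follow.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
  const std::size_t numControlArrayOperands = 2; // e.g. two veq controls
  const std::size_t numControlQubitOperands = 3; // e.g. three ref controls
  std::vector<std::int64_t> arrayAndLength(numControlArrayOperands +
                                           numControlQubitOperands);

  // First loop (unchanged by the patch): record each control array's length.
  arrayAndLength[0] = 4;
  arrayAndLength[1] = 2;

  // Second loop, as fixed: tag the single-qubit controls with 0 at an offset
  // of numControlArrayOperands. The old code wrote arrayAndLength[i] here,
  // overwriting the lengths stored just above.
  for (std::size_t i = 0; i < numControlQubitOperands; ++i)
    arrayAndLength[numControlArrayOperands + i] = 0;

  // The array lengths survive, so the invoke helper can still expand them.
  assert(arrayAndLength[0] == 4 && arrayAndLength[1] == 2);
  return 0;
}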