Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion python/runtime/common/py_ExecutionContext.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@ void bindExecutionContext(py::module &mod) {
.def("setSpinOperator", [](cudaq::ExecutionContext &ctx,
cudaq::spin_op &spin) { ctx.spin = &spin; })
.def("getExpectationValue",
[](cudaq::ExecutionContext &ctx) { return ctx.expectationValue; });
[](cudaq::ExecutionContext &ctx) { return ctx.expectationValue; })
.def_readwrite("noiseModel", &cudaq::ExecutionContext::noiseModel);
mod.def(
"setExecutionContext",
[](cudaq::ExecutionContext &ctx) {
Expand Down
1 change: 1 addition & 0 deletions runtime/cudaq/platform/default/rest/helpers/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,4 @@ add_subdirectory(oqc)
add_subdirectory(ionq)
add_subdirectory(quantinuum)
add_subdirectory(iqm)
add_subdirectory(equal1)
20 changes: 20 additions & 0 deletions runtime/cudaq/platform/default/rest/helpers/equal1/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# ============================================================================ #
# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #

message(STATUS "Building Equal1 REST QPU")

# Generate the equal1 backend configuration from equal1.yml in this directory.
add_target_config(equal1)

# The server helper is built twice: compiled directly into the common REST QPU
# target, and as a stand-alone shared library (presumably loaded by name at
# runtime via the plugin registry — see CUDAQ_REGISTER_TYPE in the .cpp).
target_sources(cudaq-rest-qpu PRIVATE Equal1ServerHelper.cpp)
add_library(cudaq-serverhelper-equal1 SHARED Equal1ServerHelper.cpp)
target_link_libraries(cudaq-serverhelper-equal1
PUBLIC
cudaq-common
fmt::fmt-header-only
)
install(TARGETS cudaq-serverhelper-equal1 DESTINATION lib)
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
/*******************************************************************************
* Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. *
* All rights reserved. *
* *
* This source code and the accompanying materials are made available under *
* the terms of the Apache License 2.0 which accompanies this distribution. *
******************************************************************************/
#include "common/Logger.h"
#include "common/MeasureCounts.h"
#include "common/RestClient.h"
#include "common/ServerHelper.h"
#include "cudaq/utils/cudaq_utils.h"

#include "nlohmann/json.hpp"

#include <fstream>
#include <iostream>
#include <llvm/ADT/STLExtras.h>
#include <llvm/ADT/StringRef.h>
#include <llvm/Support/Process.h>
#include <optional>
#include <regex>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace cudaq {

/// @brief ServerHelper implementation for the Equal1 remote REST QPU.
///
/// Translates CUDA-Q kernel executions into REST job payloads, extracts job
/// ids from server responses, polls for completion, and converts returned
/// data into a cudaq::sample_result.
class Equal1ServerHelper : public ServerHelper {
  /// Fallback server URL used when EQUAL1_SERVER_URL is not set.
  static constexpr const llvm::StringLiteral DEFAULT_URL = "http://localhost/";

public:
  /// @brief Return the name of this server helper ("equal1").
  const std::string name() const override { return "equal1"; }

  /// @brief Initialize this helper from the backend configuration.
  void initialize(BackendConfig config) override;

  /// @brief Return the REST headers sent with every request.
  RestHeaders getHeaders() override;

  /// @brief Create the job payload (URL, headers, messages) from the kernels.
  ServerJobPayload
  createJob(std::vector<KernelExecution> &circuitNodes) override;

  /// @brief Pull the server-assigned job id out of the POST response.
  std::string extractJobId(ServerMessage &postResponse) override;

  /// @brief Build the GET path for a job from the original POST response.
  std::string constructGetJobPath(ServerMessage &postResponse) override;

  /// @brief Build the GET path for a job from its id.
  std::string constructGetJobPath(std::string &jobId) override;

  /// @brief Return true if the job has finished; throws if it failed.
  bool jobIsDone(ServerMessage &getJobResponse) override;

  /// @brief Convert the server's results payload into a sample_result.
  cudaq::sample_result processResults(ServerMessage &postJobResponse,
                                      std::string &jobId) override;

private:
  /// Base URL of the Equal1 REST server (expected to end with '/').
  std::string equal1ServerURL;

  /// @brief Read an environment variable, returning std::nullopt when unset.
  /// Copies the name into a std::string first: llvm::StringRef::data() is not
  /// guaranteed to be null-terminated, so passing it to std::getenv directly
  /// is unsafe in general.
  std::optional<std::string> getEnv(llvm::StringRef envVar) {
    const std::string varName = envVar.str();
    if (const char *val = std::getenv(varName.c_str()))
      return std::string(val);
    return std::nullopt;
  }
};

void Equal1ServerHelper::initialize(BackendConfig config) {
  cudaq::debug("{}ServerHelper::initialize", name());

  // Honor an EQUAL1_SERVER_URL override from the environment; otherwise fall
  // back to the built-in local default endpoint.
  equal1ServerURL = getEnv("EQUAL1_SERVER_URL").value_or(DEFAULT_URL.str());
}

RestHeaders Equal1ServerHelper::getHeaders() {
  cudaq::debug("{}ServerHelper::getHeaders", name());

  // Fixed header set for all Equal1 REST requests.
  return RestHeaders{{"Content-Type", "application/json"},
                     {"Connection", "keep-alive"},
                     {"Accept", "*/*"}};
}

ServerJobPayload
Equal1ServerHelper::createJob(std::vector<KernelExecution> &circuitNodes) {
  cudaq::debug("{}ServerHelper::createJob", name());

  assert(circuitNodes.size() == 1 && "Currently only supporting one message");

  std::vector<ServerMessage> messages;
  messages.reserve(circuitNodes.size());
  for (const auto &code : circuitNodes) {
    ServerMessage m;
    m["target"] = name();
    m["format"] = "QIR";
    m["program"] = code.code;
    m["programName"] = code.name;
    // Use the shot count configured on the base ServerHelper instead of a
    // hard-coded value, so the user's requested shots reach the server.
    m["shots"] = shots;

    messages.push_back(m);
  }

  // Jobs are POSTed to <server>/jobs.
  return std::make_tuple(equal1ServerURL + "jobs", getHeaders(), messages);
}

std::string Equal1ServerHelper::extractJobId(ServerMessage &postResponse) {
  cudaq::debug("{}ServerHelper::extractJobId", name());

  // The POST response carries the server-assigned identifier under "job_id".
  return postResponse["job_id"].get<std::string>();
}

std::string
Equal1ServerHelper::constructGetJobPath(ServerMessage &postResponse) {
  // Fixed: log message previously said "extractJobId" (copy-paste error).
  cudaq::debug("{}ServerHelper::constructGetJobPath", name());
  // Poll URL is <server>/jobs/<job_id>, with the id taken from the response.
  return equal1ServerURL + "jobs/" + extractJobId(postResponse);
}

std::string Equal1ServerHelper::constructGetJobPath(std::string &jobId) {
  // Fixed: log message previously said "extractJobId" (copy-paste error).
  cudaq::debug("{}ServerHelper::constructGetJobPath", name());
  // Poll URL is <server>/jobs/<job_id>.
  return equal1ServerURL + "jobs/" + jobId;
}

bool Equal1ServerHelper::jobIsDone(ServerMessage &getJobResponse) {
  // Status reported by the server; observed values handled here are
  // "failed", "waiting", "executing", and "done".
  const auto status = getJobResponse["status"].get<std::string>();

  if (status == "failed") {
    // Surface the server-provided error text when present.
    std::string msg = "";
    if (getJobResponse.count("error"))
      msg = getJobResponse["error"]["text"].get<std::string>();
    throw std::runtime_error("Job failed to execute msg = [" + msg + "]");
  }

  // Jobs still in flight are not done; anything else is done only when the
  // server explicitly reports "done".
  if (status == "waiting" || status == "executing")
    return false;
  return status == "done";
}


/// @brief Rebuild a sample_result from the server's "results" payload.
/// The server returns the serialized sample_result as a flat vector of
/// integers (see the mock server's results.serialize()).
cudaq::sample_result
Equal1ServerHelper::processResults(ServerMessage &postJobResponse,
                                   std::string &jobId) {
  // Fixed: previously logged via cudaq::info with the message
  // "postJobResponse: {}"; now consistent with the other methods.
  cudaq::debug("{}ServerHelper::processResults", name());

  auto serialized = postJobResponse["results"].get<std::vector<std::size_t>>();
  cudaq::sample_result sample;
  sample.deserialize(serialized);
  return sample;
} // note: stray trailing ';' after this definition removed

} // namespace cudaq

// Register the Equal1 server helper in the CUDA-Q server helper factory
CUDAQ_REGISTER_TYPE(cudaq::ServerHelper, cudaq::Equal1ServerHelper, equal1)
27 changes: 27 additions & 0 deletions runtime/cudaq/platform/default/rest/helpers/equal1/equal1.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# ============================================================================ #
# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #

name: equal1
description:
"CUDA-Q target for Equal1 Platforms"

config:
platform-qpu: "remote_rest"
# Tell NVQ++ to generate glue code to set the target backend name
gen-target-backend: true
# Add the equal1 server helper library to the link list
link-libs: ["-lcudaq-equal1-qpu"]

codegen-emission: qir-adaptive

target-arguments:
- key: machine
required: false
type: string
platform-arg: machine
help-string: "Specify the Equal1 QPU family."
132 changes: 132 additions & 0 deletions utils/mock_qpu/equal1/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
# ============================================================================ #
# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #

import cudaq
from fastapi import FastAPI, HTTPException, Header, Request
from typing import Union
import uvicorn, uuid, base64, ctypes
from pydantic import BaseModel
from llvmlite import binding as llvm

# Define the REST Server App
app = FastAPI()

# Jobs look like the following type
class Job(BaseModel):
    """Request body for POST /jobs, mirroring what Equal1ServerHelper::createJob sends."""
    # Backend name (the helper sends its own name, "equal1")
    target: str
    # Program format; the helper always sends "QIR"
    format: str
    # Base64-encoded QIR bitcode (decoded and JIT-compiled in postJob)
    program: str
    # Kernel name used as the job's display name
    programName: str
    # Requested shot count — NOTE(review): currently ignored by postJob
    shots: int

# Keep track of Job Ids to their Names
createdJobs = {}

# Count how many times the client has requested the Job
countJobGetRequests = 0

# Save how many qubits were needed for each test (emulates real backend)
numQubitsRequired = 0

# One-time LLVM JIT setup: an MCJIT engine targeting the host machine, used
# by postJob to compile and execute posted QIR programs in-process.
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
target = llvm.Target.from_default_triple()
targetMachine = target.create_target_machine()
backing_mod = llvm.parse_assembly("")
engine = llvm.create_mcjit_compiler(backing_mod, targetMachine)


def getKernelFunction(module):
    """Return the first function in `module` that has a body, or None if
    every function is only a declaration."""
    defined = (fn for fn in module.functions if not fn.is_declaration)
    return next(defined, None)


def getNumRequiredQubits(function):
    """Extract the qubit count from the kernel's "requiredQubits" attribute.

    CUDA-Q emits the count as a string-valued LLVM function attribute that
    stringifies as "requiredQubits"="<N>".  Returns the count as an int, or
    None when no such attribute is present.
    """
    import re  # local import keeps this helper self-contained
    for attr in function.attributes:
        # Regex extraction instead of the previous split/replace chain, which
        # raised ValueError on any attribute merely containing the substring.
        match = re.search(r'"requiredQubits"="?(\d+)', str(attr))
        if match:
            return int(match.group(1))
    return None

# Here we expose a way to post jobs,
# Must have a Access Token, Job Program must be Adaptive Profile
# with entry_point tag

@app.post("/jobs")
async def postJob(job: Job):
global createdJobs, shots, numQubitsRequired

print('Posting job with shots = ', job.shots)
newId = str(uuid.uuid4())
shots = 1500
program = job.program
decoded = base64.b64decode(program)
m = llvm.module.parse_bitcode(decoded)
mstr = str(m)
assert ('entry_point' in mstr)
print(mstr)
# Get the function, number of qubits, and kernel name
function = getKernelFunction(m)
if function == None:
raise Exception("Could not find kernel function")
numQubitsRequired = getNumRequiredQubits(function)
kernelFunctionName = function.name

print("Kernel name = ", kernelFunctionName)
print("Requires {} qubits".format(numQubitsRequired))

# JIT Compile and get Function Pointer
engine.add_module(m)
engine.finalize_object()
engine.run_static_constructors()
funcPtr = engine.get_function_address(kernelFunctionName)
kernel = ctypes.CFUNCTYPE(None)(funcPtr)

# Invoke the Kernel
cudaq.testing.toggleDynamicQubitManagement()
qubits, context = cudaq.testing.initialize(numQubitsRequired, shots)

kernel()
results = cudaq.testing.finalize(qubits, context)
results.dump()
createdJobs[newId] = (job.programName, results)

engine.remove_module(m)

# Job "created", return the id
return {"job_id": newId}

# Retrieve the job, simulate having to wait by counting to 3
# until we return the job results
@app.get("/jobs/{jobId}")
async def getResults(jobId: str):
global countJobGetRequests, createdJobs, shots

# Simulate asynchronous execution
if countJobGetRequests < 3:
countJobGetRequests += 1
return {"status": "executing"}

countJobGetRequests = 0
name, results = createdJobs[jobId]

print(results)
res = {"status": "done", "results": results.serialize() }
return res



def startServer(port):
    """Run the mock Equal1 REST server on 0.0.0.0:<port> (blocks forever)."""
    uvicorn.run(app,
                port=port,
                host='0.0.0.0',
                log_level="debug",
                access_log=True)


# Stand-alone entry point: serve the mock QPU on a fixed local port.
if __name__ == '__main__':
    startServer(62444)