@@ -7,29 +7,23 @@
******************************************************************************/
#include "common/Logger.h"
#include "common/MeasureCounts.h"
#include "common/RestClient.h"
#include "common/ServerHelper.h"
#include "cudaq/utils/cudaq_utils.h"

#include "nlohmann/json.hpp"

#include <fstream>
#include <iostream>
#include <llvm/ADT/STLExtras.h>
#include <llvm/ADT/StringRef.h>
#include <llvm/Support/Process.h>

#include <cstdlib>
#include <optional>
#include <regex>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace cudaq {

class Equal1ServerHelper : public ServerHelper {
-static constexpr const llvm::StringLiteral DEFAULT_URL = "http://localhost/";
+static constexpr const llvm::StringLiteral DEFAULT_URL = "http://localhost:62444";

public:

@@ -53,19 +47,46 @@ class Equal1ServerHelper : public ServerHelper {

private:
std::string equal1ServerURL;
std::string machine;
std::string optimizationLevel;

-inline std::optional<std::string> getEnv(llvm::StringRef envVar) {
+inline std::optional<std::string> getEnv(llvm::StringRef envVar) const {
const char* val = std::getenv(envVar.data());
if(!val)
return std::nullopt;
return std::string(val);
}

inline std::string getConfig(const std::string& envVarName, const std::string& configName, const std::string& defaultValue) {
if(!envVarName.empty()) {
auto env = getEnv(envVarName);
if(env.has_value())
return env.value();
}

auto iter = backendConfig.find(configName);
if(iter != backendConfig.end())
return iter->second;

return defaultValue;
}
};

void Equal1ServerHelper::initialize(BackendConfig config) {
cudaq::debug("{}ServerHelper::initialize", name());

-equal1ServerURL = getEnv("EQUAL1_SERVER_URL").value_or(DEFAULT_URL.str());
backendConfig = config;

+equal1ServerURL = getConfig("EQUAL1_SERVER_URL", "url", DEFAULT_URL.str());

if(!equal1ServerURL.ends_with("/"))
equal1ServerURL += "/";

machine = getConfig("EQUAL1_TARGET_MACHINE", "machine", "default");

optimizationLevel = getConfig("EQUAL1_OPTIMIZATION_LEVEL", "opt", "1");

parseConfigForCommonParams(config);

return;
}
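For context, getConfig() gives the EQUAL1_SERVER_URL, EQUAL1_TARGET_MACHINE, and EQUAL1_OPTIMIZATION_LEVEL environment variables priority over the matching backend-config entries, falling back to the hard-coded defaults. A minimal Python-side sketch of driving the helper purely through the environment (assuming the equal1 target is registered in this build):

import os
import cudaq

# Read by getConfig() before any backend-config entry is consulted.
os.environ["EQUAL1_SERVER_URL"] = "http://localhost:62444"
os.environ["EQUAL1_TARGET_MACHINE"] = "default"
os.environ["EQUAL1_OPTIMIZATION_LEVEL"] = "1"

cudaq.set_target("equal1")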
@@ -77,6 +98,8 @@ RestHeaders Equal1ServerHelper::getHeaders() {
headers["Content-Type"] = "application/json";
headers["Connection"] = "keep-alive";
headers["Accept"] = "*/*";
headers["User-Agent"] = "cudaq/" + name();

// Return the headers
return headers;
}
@@ -90,10 +113,11 @@ ServerJobPayload Equal1ServerHelper::createJob(std::vector<KernelExecution> &cir
for(const auto& code : circuitNodes) {
ServerMessage m;
m["target"] = name();
m["machine"] = machine;
m["format"] = "QIR";
m["program"] = code.code;
m["programName"] = code.name;
m["shots"] = "1000";
m["shots"] = shots;

messages.push_back(m);
}
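For reference, each message appended above serializes to a JSON object of roughly this shape (a sketch with illustrative values; program carries the QIR payload emitted by codegen):

message = {
    "target": "equal1",
    "machine": "default",
    "format": "QIR",
    "program": "<base64-encoded QIR bitcode>",
    "programName": "bell_kernel",
    "shots": 1000,
}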
@@ -105,7 +129,7 @@ std::string Equal1ServerHelper::extractJobId(ServerMessage &postResponse) {
cudaq::debug("{}ServerHelper::extractJobId", name());

std::string jobToken =
postResponse["job_id"]
postResponse["jobId"]
.get<std::string>();
return jobToken;
}
@@ -124,13 +148,9 @@ bool Equal1ServerHelper::jobIsDone(ServerMessage &getJobResponse) {
auto status = getJobResponse["status"]
.get<std::string>(); // All job get and post responses are an
// array of [resdata, httpstatuscode]
-if (status == "failed") {
-std::string msg = "";
-if (getJobResponse.count("error"))
-msg = getJobResponse["error"]["text"].get<std::string>();
+if (status == "error") {
+std::string msg = getJobResponse["message"].get<std::string>();
throw std::runtime_error("Job failed to execute msg = [" + msg + "]");
} else if (status == "waiting") {
return false;
} else if (status == "executing") {
return false;
} else
15 changes: 14 additions & 1 deletion runtime/cudaq/platform/default/rest/helpers/equal1/equal1.yml
@@ -15,13 +15,26 @@ config:
# Tell NVQ++ to generate glue code to set the target backend name
gen-target-backend: true
# Add the cudaq-rest-qpu library to the link list
link-libs: ["-lcudaq-equal1-qpu"]
link-libs: ["-lcudaq-rest-qpu"]

codegen-emission: qir-adaptive

library-mode: false

target-arguments:
- key: machine
required: false
type: string
platform-arg: machine
help-string: "Specify the Equal1 QPU family."
- key: url
required: false
type: string
platform-arg: url
help-string: "Specify the URL."
- key: opt
required: false
type: string
platform-arg: opt
help-string: "Specify the optimization levels to be applied to the circuit"

25 changes: 16 additions & 9 deletions utils/mock_qpu/equal1/__init__.py
@@ -7,23 +7,32 @@
# ============================================================================ #

import cudaq

from fastapi import FastAPI, HTTPException, Header, Request
-from typing import Union
+from typing import Union, Optional
import uvicorn, uuid, base64, ctypes
from pydantic import BaseModel
from llvmlite import binding as llvm

# Define the REST Server App
app = FastAPI()

# Jobs look like the following type
# Define the request and response APIs
class Job(BaseModel):
target: str
machine: str
format: str
program: str
programName: str
shots: int

class Response(BaseModel):
status: str
jobId: str
results: Optional[list] = []
logs: Optional[str] = ""
message: Optional[str] = ""
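For reference, the payloads the endpoints below return can be sketched like this (illustrative values; results holds whatever results.serialize() produces, and .model_dump() assumes pydantic v2; use .dict() on v1):

pending = Response(status="executing", jobId="0b6e3c0d-...")
done = Response(status="done",
                jobId="0b6e3c0d-...",
                results=[],  # would carry the results.serialize() output
                logs="",
                message="")
print(done.model_dump())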

# Keep track of Job Ids to their Names
createdJobs = {}

@@ -66,7 +75,7 @@ async def postJob(job: Job):

print('Posting job with shots = ', job.shots)
newId = str(uuid.uuid4())
-shots = 1500
+shots = job.shots
program = job.program
decoded = base64.b64decode(program)
m = llvm.module.parse_bitcode(decoded)
@@ -97,12 +106,12 @@ async def postJob(job: Job):
kernel()
results = cudaq.testing.finalize(qubits, context)
results.dump()
-createdJobs[newId] = (job.programName, results)

+createdJobs[newId] = (job.programName, results)
engine.remove_module(m)

# Job "created", return the id
return {"job_id": newId}
return Response(status="executing", jobId=newId)

# Retrieve the job, simulate having to wait by counting to 3
# until we return the job results
@@ -113,15 +122,13 @@ async def getResults(jobId: str):
# Simulate asynchronous execution
if countJobGetRequests < 3:
countJobGetRequests += 1
return {"status": "executing"}
return Response(status="executing", jobId=jobId)

countJobGetRequests = 0
name, results = createdJobs[jobId]

print(results)
res = {"status": "done", "results": results.serialize() }
return res

return Response(status="done", jobId=jobId, results=results.serialize())


def startServer(port):