Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 4 additions & 6 deletions docs/modules/classes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ Quantum Kernel Core
kernel.matrix.FidelityKernel
kernel.matrix.ProjectedQuantumKernel

.. automodule:: squlearn.kernel.optimization
.. automodule:: squlearn.kernel.loss
:no-members:
:no-inherited-members:

Expand All @@ -173,9 +173,8 @@ Quantum Kernel Core
:toctree: generated/
:template: class.rst

kernel.optimization.kernel_optimizer.KernelOptimizer
kernel.optimization.negative_log_likelihood.NLL
kernel.optimization.target_alignment.TargetAlignment
kernel.loss.negative_log_likelihood.NLL
kernel.loss.target_alignment.TargetAlignment

QNN Core
------------------------------------
Expand Down Expand Up @@ -280,8 +279,7 @@ Base Classes
observables.observable_base.ObservableBase
encoding_circuit.encoding_circuit_base.EncodingCircuitBase
kernel.matrix.kernel_matrix_base.KernelMatrixBase
kernel.optimization.kernel_loss_base.KernelLossBase
kernel.optimization.kernel_optimization_base.KernelOptimizerBase
kernel.loss.kernel_loss_base.KernelLossBase
optimizers.optimizer_base.OptimizerBase
qnn.base_qnn.BaseQNN
qnn.loss.LossBase
Expand Down
75 changes: 23 additions & 52 deletions docs/user_guide/kernel_methods.rst
Original file line number Diff line number Diff line change
Expand Up @@ -222,8 +222,10 @@ parameters are obtained from classical optimization loops which attempt to minim
function.

sQUlearn implements the kernel target alignment procedure as well as the Negative-Log-Likelihood.
At the same time it provides several optimizers such as :code:`Adam` and :code:`SLSQP`; cf.
:class:`squlearn.kernel.optimization.kernel_optimizer.KernelOptimizer` for details.
At the same time it provides several optimizers such as :code:`Adam` and :code:`SLSQP`.
This can be achieved by employing the :class:`KernelOptimizer` class which automatically
enables the optimization of quantum kernels when used in the high-level methods.


The following examples assume that you have some data set available which you previously split into
training and test data and shows how to optimize kernels.
Expand All @@ -232,65 +234,34 @@ training and test data and shows how to optimize kernels.

.. code-block:: python

import numpy as np
from qiskit.primitives import Estimator
from squlearn.util import Executor
from squlearn.encoding_circuit import ChebyshevPQC
from squlearn.optimizers import Adam
from squlearn.kernel import ProjectedQuantumKernel
from squlearn.kernel.optimization import KernelOptimizer, TargetAlignment, NLL
enc_circ = ChebyshevPQC(num_qubits=4, num_features=1, num_layers=2)
pqk_instance = ProjectedQuantumKernel(
encoding_circuit=enc_circ,
executor=Executor(),
measurement='XYZ',
outer_kernel='gaussian',
parameter_seed=0
)
from squlearn.kernel import ProjectedQuantumKernel, KernelOptimizer, QKRR
from squlearn.kernel.loss import TargetAlignment

# set up the encoding circuit
encoding_circuit = ChebyshevPQC(num_qubits=4, num_features=1, num_layers=2)

# set up the quantum kernel
pqk_instance = ProjectedQuantumKernel(encoding_circuit,Executor())

# set up the optimizer
adam_opt = Adam(options={"maxiter":100, "lr": 0.1})
# define KTA loss function
kta_loss = TargetAlignment(quantum_kernel=pqk_instance)
kta_optimizer = KernelOptimizer(loss=kta_loss, optimizer=adam_opt)
opt_kta_result = kta_optimizer.run_optimization(X=x_train, y=y_train)
# retrieve optimized parameters
opt_kta_params = opt_kta_result.x
# assign optimal kta parameters to kernel
pqk_instance.assign_parameters(opt_kta_params)

*Note:* The same workflow has to be used for FQKs!
# define KTA loss function
kta_loss = TargetAlignment()

**Example - Negative-Log-Likelihood**
# set up the kernel optimizer
kernel_optimizer = KernelOptimizer(quantum_kernel=pqk_instance, loss=kta_loss, optimizer=adam_opt)

.. code-block:: python
# initialize the QKRR model with the kernel optimizer
qkrr = QKRR(kernel_optimizer)

import numpy as np
from qiskit.primitives import Estimator
from squlearn.util import Executor
from squlearn.encoding_circuit import ChebyshevPQC
from squlearn.optimizers import Adam
from squlearn.kernel import ProjectedQuantumKernel
from squlearn.kernel.optimization import KernelOptimizer, TargetAlignment, NLL
enc_circ = ChebyshevPQC(num_qubits=4, num_features=1, num_layers=2)
pqk_instance = ProjectedQuantumKernel(
encoding_circuit=enc_circ,
executor=Executor(),
measurement='XYZ',
outer_kernel='gaussian',
parameter_seed=0
)
# set up the optimizer
adam_opt = Adam(options={"maxiter":100, "lr": 0.1})
# define NLL loss function (note that noise_val needs to be set)
nll_loss = NLL(quantum_kernel=pqk_instance, sigma=noise_val)
nll_optimizer = KernelOptimizer(loss=nll_loss, optimizer=adam_opt)
opt_nll_result = nll_optimizer.run_optimization(X=x_train, y=y_train)
# retrieve optimized parameters
opt_nll_params = opt_nll_params.x
# assign optimal nll parameters to kernel
pqk_instance.assign_parameters(opt_nll_params)

*Note:* The same workflow has to be used for FQKs!
# Simple example
x_train = [[0.1], [0.2], [0.3], [0.4], [0.5]]
y_train = [0.1, 0.2, 0.3, 0.4, 0.5]
qkrr.fit(X=x_train, y=y_train)

.. rubric:: References

Expand Down
50 changes: 15 additions & 35 deletions examples/kernel/qgpr_optimization_workflow.ipynb

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions src/squlearn/kernel/__init__.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
from . import matrix, ml, optimization
from . import matrix, ml, loss

from .matrix import FidelityKernel, ProjectedQuantumKernel
from .matrix import FidelityKernel, ProjectedQuantumKernel, KernelOptimizer
from .ml import QKRR, QGPC, QGPR, QSVR, QSVC

__all__ = [
"matrix",
"ml",
"optimization",
"FidelityKernel",
"ProjectedQuantumKernel",
"KernelOptimizer",
"QKRR",
"QGPC",
"QGPR",
Expand Down
4 changes: 4 additions & 0 deletions src/squlearn/kernel/loss/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from .negative_log_likelihood import NLL
from .target_alignment import TargetAlignment

__all__ = ["NLL", "TargetAlignment"]
41 changes: 41 additions & 0 deletions src/squlearn/kernel/loss/kernel_loss_base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import numpy as np
from abc import ABC, abstractmethod

from ..matrix.kernel_matrix_base import KernelMatrixBase


class KernelLossBase(ABC):
    """Abstract base class for kernel loss functions.

    Subclasses implement :meth:`compute` to evaluate a scalar loss for a
    quantum kernel at given variational parameter values. The kernel itself
    is attached after construction via :meth:`set_quantum_kernel`, which
    allows a loss object to be created before the kernel exists (e.g. by a
    kernel optimizer that wires the two together).
    """

    def __init__(self) -> None:
        # Set lazily via set_quantum_kernel(); concrete losses must check
        # for None before evaluating.
        self._quantum_kernel: "KernelMatrixBase | None" = None

    def set_quantum_kernel(self, quantum_kernel: "KernelMatrixBase") -> None:
        """Set the quantum kernel matrix to be used in the loss.

        Args:
            quantum_kernel (KernelMatrixBase): The quantum kernel matrix to be used in the loss.
        """
        self._quantum_kernel = quantum_kernel

    @abstractmethod
    def compute(
        self,
        parameter_values: np.ndarray,
        data: np.ndarray,
        labels: np.ndarray,
    ) -> float:
        """Compute the loss value.

        Concrete implementations evaluate the loss on the kernel previously
        attached via :meth:`set_quantum_kernel`. Signature matches the
        subclass implementations (no explicit kernel argument).

        Args:
            parameter_values (np.ndarray): The parameter values for the variational quantum
                kernel parameters.
            data (np.ndarray): The training data to be used for the kernel matrix.
            labels (np.ndarray): The training labels.

        Returns:
            float: The loss value.
        """
        raise NotImplementedError
Original file line number Diff line number Diff line change
Expand Up @@ -9,24 +9,21 @@


class NLL(KernelLossBase):
"""
r"""
Negative log likelihood loss function.
This class can be used to compute the negative log likelihood loss function
for a given quantum kernel
:math:`K_{θ}` with variational parameters :math:`θ`.
:math:`K_{\theta}` with variational parameters :math:`\theta`.
The definition of the function is taken from Equation 5.8 Chapter 5.4 of Ref. [1].

The log-likelihood function is defined as:

.. math::

L(θ) =
-\\frac{1}{2} log(|K_{θ} + σI|)-\\frac{1}{2} y^{T}(K_{θ} + σI)^{-1}y-\\frac{n}{2} log()
L(\theta) = -\frac{1}{2} log(|K_{\theta} + \sigma I|)-\frac{1}{2} y^{T}(K_{\theta}
+ \sigma I)^{-1}y-\frac{n}{2} log(2\pi)

Args:
quantum_kernel (KernelMatrixBase): The quantum kernel to be used
(either a fidelity quantum kernel (FQK)
or projected quantum kernel (PQK) must be provided).
sigma: (float), default=0.0: Hyperparameter for the regularization strength.

References
Expand All @@ -39,26 +36,33 @@ class NLL(KernelLossBase):
--------
"""

def __init__(self, quantum_kernel: KernelMatrixBase, sigma=0.0):
super().__init__(quantum_kernel)
def __init__(self, sigma=0.0):
super().__init__()
self._sigma = sigma

# ProjectedQuantumKernel might cause errors since its not present in original KernelLoss
def compute(
self, parameter_values: Sequence[float], data: np.ndarray, labels: np.ndarray
self,
parameter_values: np.ndarray,
data: np.ndarray,
labels: np.ndarray,
) -> float:
"""Compute the negative log likelihood loss function.

Args:
parameter_values: (Sequence[float]):
The parameter values for the variational quantum kernel parameters.
data (np.ndarray): The training data to be used for the kernel matrix.
parameter_values (np.ndarray): The parameter values for the variational quantum
kernel parameters.
data (np.ndarray): The training data to be used for the kernel matrix.
labels (np.ndarray): The training labels.

Returns:
float: The negative log likelihood loss function.
float: The negative log likelihood loss value.
"""

if self._quantum_kernel is None:
raise ValueError(
"Quantum kernel is not set, please set the quantum kernel with set_quantum_kernel method"
)

# Bind training parameters
self._quantum_kernel.assign_parameters(parameter_values)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,29 +2,27 @@

import numpy as np

from typing import Sequence
from .kernel_loss_base import KernelLossBase
from ..matrix.kernel_matrix_base import KernelMatrixBase


class TargetAlignment(KernelLossBase):
"""
r"""
Target alignment loss function.
This class can be used to compute the target alignment for a given quantum kernel
:math:`K_{θ}` with variational parameters :math:`θ`.
:math:`K_{\theta}` with variational parameters :math:`\theta`.
The definition of the function is taken from Equation (27,28) of [1].
The log-likelihood function is defined as:
The target alignment loss is defined as:

.. math::

TA(K_{θ}) =
\\frac{\\sum_{i,j} K_{θ}(x_i, x_j) y_i y_j}
{\\sqrt{\\sum_{i,j} K_{θ}(x_i, x_j)^2 \\sum_{i,j} y_i^2 y_j^2}}
TA(K_{\theta}) =
\frac{\sum_{i,j} K_{\theta}(x_i, x_j) y_i y_j}
{\sqrt{\sum_{i,j} K_{\theta}(x_i, x_j)^2 \sum_{i,j} y_i^2 y_j^2}}

Args:
quantum_kernel (KernelMatrixBase): The quantum kernel to be used
(either a fidelity quantum kernel (FQK)
or projected quantum kernel (PQK) must be provided).
rescale_class_labels (bool): Whether to rescale the class labels to -1 and 1
(default=True).

References
-----------
Expand All @@ -36,36 +34,40 @@ class TargetAlignment(KernelLossBase):
--------
"""

def __init__(self, quantum_kernel: KernelMatrixBase):
super().__init__(quantum_kernel)
def __init__(self, rescale_class_labels=True) -> None:
""" """
super().__init__()
self._rescale_class_labels = rescale_class_labels

def compute(
self,
parameter_values: Sequence[float],
parameter_values: np.ndarray,
data: np.ndarray,
labels: np.ndarray,
rescale_class_labels=True,
) -> float:
"""Compute the target alignment.
"""Compute the target alignment loss.

Args:
parameter_values: (Sequence[float]):
The parameter values for the variational quantum kernel parameters.
parameter_values (np.ndarray): The parameter values for the variational quantum kernel
parameters.
data (np.ndarray): The training data to be used for the kernel matrix.
labels (np.ndarray): The training labels.
rescale_class_labels: (bool), default=True:
Whether to rescale the class labels to -1 and 1.
labels (np.ndarray): The labels of the training data.

Returns:
float: The negative target alignment.
"""

if self._quantum_kernel is None:
raise ValueError(
"Quantum kernel is not set, please set the quantum kernel with set_quantum_kernel method"
)

# Bind training parameters
self._quantum_kernel.assign_parameters(parameter_values)

# Get estimated kernel matrix
kmatrix = self._quantum_kernel.evaluate(data)
if rescale_class_labels:
if self._rescale_class_labels:
nplus = np.count_nonzero(np.array(labels) == 1)
nminus = len(labels) - nplus
_Y = np.array([y / nplus if y == 1 else y / nminus for y in labels])
Expand Down
3 changes: 2 additions & 1 deletion src/squlearn/kernel/matrix/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from .fidelity_kernel import FidelityKernel
from .projected_quantum_kernel import ProjectedQuantumKernel
from .kernel_optimizer import KernelOptimizer

__all__ = ["FidelityKernel", "ProjectedQuantumKernel"]
__all__ = ["FidelityKernel", "ProjectedQuantumKernel", "KernelOptimizer"]
Loading