Merge pull request #51 from ChitambarLab/behavior_fn-bug
Behavior fn bug and support for Adam
bdoolittle authored Oct 1, 2023
2 parents 72c37ee + f841cb8 commit f143950
Showing 4 changed files with 111 additions and 17 deletions.
14 changes: 13 additions & 1 deletion src/qnetvo/gradient_descent.py
@@ -12,6 +12,8 @@ def gradient_descent(
    grad_fn=None,
    verbose=True,
    interface="autograd",
+    optimizer=None,
+    optimizer_kwargs={},
):
    """Performs a numerical gradient descent optimization on the provided ``cost`` function.
    The optimization is seeded with (random) ``init_settings`` which are then varied to
@@ -42,6 +44,13 @@ def gradient_descent(
    :param interface: Specifies the optimizer software, either ``"autograd"`` or ``"tf"`` (TensorFlow).
    :type interface: string, default ``"autograd"``
+    :param optimizer: Specifies the PennyLane optimizer to use. Default ``qml.GradientDescentOptimizer``.
+        Set to ``"adam"`` to use the ``qml.AdamOptimizer``; note that ``interface="autograd"`` must be set.
+    :type optimizer: string
+    :param optimizer_kwargs: Keyword arguments to pass to the specified optimizer.
+    :type optimizer_kwargs: dictionary
    :return: Data regarding the gradient descent optimization.
    :rtype: dictionary, contains the following keys:
@@ -69,7 +78,10 @@
    """

    if interface == "autograd":
-        opt = qml.GradientDescentOptimizer(stepsize=step_size)
+        if optimizer == "adam":
+            opt = qml.AdamOptimizer(stepsize=step_size, **optimizer_kwargs)
+        else:
+            opt = qml.GradientDescentOptimizer(stepsize=step_size)
    elif interface == "tf":
        from .lazy_tensorflow_import import tensorflow as tf
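Note: below is a minimal usage sketch of the new ``optimizer`` option (not part of the diff). It assumes ``qnetvo`` is importable and uses a toy quadratic cost; the ``beta1``/``beta2`` keywords are standard ``qml.AdamOptimizer`` arguments forwarded via ``optimizer_kwargs``.

import qnetvo as qnet
from pennylane import numpy as np

# Toy quadratic cost whose minimum sits at settings = 0.
def cost(settings):
    return np.sum(settings ** 2)

init_settings = np.array([0.5, -0.3], requires_grad=True)

opt_dict = qnet.gradient_descent(
    cost,
    init_settings,
    step_size=0.1,
    num_steps=50,
    optimizer="adam",  # optimizer=None falls back to qml.GradientDescentOptimizer
    optimizer_kwargs={"beta1": 0.9, "beta2": 0.999},  # passed through to qml.AdamOptimizer
)
print(opt_dict["opt_score"], opt_dict["opt_settings"])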
31 changes: 19 additions & 12 deletions src/qnetvo/information.py
@@ -1,6 +1,6 @@
from pennylane import math
from .qnodes import joint_probs_qnode
-from .utilities import mixed_base_num
+from .utilities import mixed_base_num, ragged_reshape
from pennylane import numpy as np


@@ -43,11 +43,22 @@ def behavior_fn(network_ansatz, postmap=np.array([]), qnode_kwargs={}):
    behavior matrix for a given set of settings.
    :rtype: function
    """
-    num_in_prep_nodes = [node.num_in for node in network_ansatz.layers[0]]
-    num_in_meas_nodes = [node.num_in for node in network_ansatz.layers[-1]]
+    # num_in_prep_nodes = [node.num_in for node in network_ansatz.layers[0]]
+    # num_in_meas_nodes = [node.num_in for node in network_ansatz.layers[-1]]

-    base_digits = num_in_prep_nodes + num_in_meas_nodes
-    net_num_in = math.prod(base_digits)
+    # base_digits = num_in_prep_nodes + num_in_meas_nodes
+    # net_num_in = math.prod(base_digits)

+    # raw_net_num_out = 2 ** len(network_ansatz.layers_wires[-1])

+    probs_qnode = joint_probs_qnode(network_ansatz, **qnode_kwargs)

+    net_num_in = math.prod(network_ansatz.layers_total_num_in)
+    num_inputs_list = math.concatenate(network_ansatz.layers_node_num_in).tolist()
+    node_input_ids = [
+        ragged_reshape(mixed_base_num(i, num_inputs_list), network_ansatz.layers_num_nodes)
+        for i in range(net_num_in)
+    ]

    raw_net_num_out = 2 ** len(network_ansatz.layers_wires[-1])

@@ -56,18 +67,14 @@ def behavior_fn(network_ansatz, postmap=np.array([]), qnode_kwargs={}):
if postmap.shape[1] != raw_net_num_out:
raise ValueError("The `postmap` must have " + str(raw_net_num_out) + " columns.")

-    node_input_ids = [mixed_base_num(i, base_digits) for i in range(net_num_in)]
+    # node_input_ids = [mixed_base_num(i, base_digits) for i in range(net_num_in)]

-    probs_qnode = joint_probs_qnode(network_ansatz, **qnode_kwargs)
+    # probs_qnode = joint_probs_qnode(network_ansatz, **qnode_kwargs)

    def behavior(network_settings):
        raw_behavior = np.zeros((raw_net_num_out, net_num_in))
        for i, input_id_set in enumerate(node_input_ids):
-            settings = network_ansatz.qnode_settings(
-                network_settings,
-                [input_id_set[0 : len(num_in_prep_nodes)], input_id_set[len(num_in_prep_nodes) :]],
-            )
-
+            settings = network_ansatz.qnode_settings(network_settings, input_id_set)
            raw_behavior[:, i] += probs_qnode(settings)

        return postmap @ raw_behavior if has_postmap else raw_behavior
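Note: the fix above replaces the old two-layer assumption (preparation and measurement layers only) with an input decomposition that spans every network layer. Below is a sketch of that indexing using plain-Python stand-ins for the ``mixed_base_num`` and ``ragged_reshape`` utilities; their exact behavior (digit ordering in particular) is assumed from the diff, not taken from the library.

import numpy as np

def mixed_base_num(n, base_digits):
    # Decompose integer n into mixed-base digits,
    # most-significant digit first (assumed ordering).
    digits = []
    for base in reversed(base_digits):
        digits.insert(0, n % base)
        n //= base
    return digits

def ragged_reshape(flat_list, sizes):
    # Split flat_list into consecutive sublists, one per network layer.
    out, start = [], 0
    for size in sizes:
        out.append(flat_list[start : start + size])
        start += size
    return out

# Hypothetical three-layer network: two single-node preparation layers
# and one two-node measurement layer, each node taking a binary input.
num_inputs_list = [2, 2, 2, 2]  # one entry per node, all layers flattened
layers_num_nodes = [1, 1, 2]

net_num_in = int(np.prod(num_inputs_list))  # 16 joint classical inputs
node_input_ids = [
    ragged_reshape(mixed_base_num(i, num_inputs_list), layers_num_nodes)
    for i in range(net_num_in)
]
print(node_input_ids[5])  # [[0], [1], [0, 1]] -- one input id list per layer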
16 changes: 16 additions & 0 deletions test/gradient_descent_test.py
@@ -44,3 +44,19 @@ def test_quadratic_cost(self):
            verbose=False,
            interface="jax",
        )

+        # adam optimizer
+        ad_opt_dict = qnet.gradient_descent(
+            cost,
+            settings,
+            num_steps=50,
+            step_size=0.1,
+            verbose=True,
+            optimizer="adam",
+        )
+
+        assert np.isclose(ad_opt_dict["opt_score"], 0, atol=1e-6)
+        assert np.isclose(ad_opt_dict["opt_settings"], 0, atol=1e-4)
+        assert ad_opt_dict["samples"] == [0, 25, 50]
+        assert len(ad_opt_dict["scores"]) == 3
+        assert len(ad_opt_dict["settings_history"]) == 51
67 changes: 63 additions & 4 deletions test/information_test.py
@@ -8,12 +8,16 @@
class TestBehaviorFn:
    def test_simple_settings(self):
        prep_nodes = [
-            qnet.PrepareNode(2, [0], qnet.local_RY, 1),
-            qnet.PrepareNode(2, [1], qnet.local_RY, 1),
+            qnet.PrepareNode(num_in=2, wires=[0], ansatz_fn=qnet.local_RY, num_settings=1),
+            qnet.PrepareNode(num_in=2, wires=[1], ansatz_fn=qnet.local_RY, num_settings=1),
        ]
        meas_nodes = [
-            qnet.MeasureNode(2, 2, [0], qnet.local_RY, 1),
-            qnet.MeasureNode(2, 2, [1], qnet.local_RY, 1),
+            qnet.MeasureNode(
+                num_in=2, num_out=2, wires=[0], ansatz_fn=qnet.local_RY, num_settings=1
+            ),
+            qnet.MeasureNode(
+                num_in=2, num_out=2, wires=[1], ansatz_fn=qnet.local_RY, num_settings=1
+            ),
        ]
        ansatz = qnet.NetworkAnsatz(prep_nodes, meas_nodes)
        P_Net = qnet.behavior_fn(ansatz)
@@ -122,6 +126,61 @@ def test_rand_settings(self):
        assert P_Net.shape == (16, 288)
        assert np.allclose(np.ones(288), [np.sum(P_Net[:, i]) for i in range(288)])

+    def test_inputs_from_multiple_layers(self):
+        prep_nodes_a = [
+            qnet.PrepareNode(num_in=2, wires=[0], ansatz_fn=qnet.local_RY, num_settings=1),
+        ]
+        prep_nodes_b = [
+            qnet.PrepareNode(num_in=2, wires=[1], ansatz_fn=qnet.local_RY, num_settings=1),
+        ]
+
+        meas_nodes = [
+            qnet.MeasureNode(
+                num_in=2, num_out=2, wires=[0], ansatz_fn=qnet.local_RY, num_settings=1
+            ),
+            qnet.MeasureNode(
+                num_in=2, num_out=2, wires=[1], ansatz_fn=qnet.local_RY, num_settings=1
+            ),
+        ]
+        ansatz = qnet.NetworkAnsatz(prep_nodes_a, prep_nodes_b, meas_nodes)
+        P_Net = qnet.behavior_fn(ansatz)
+        zero_settings = ansatz.zero_network_settings()
+
+        assert np.all(
+            P_Net(zero_settings)
+            == [
+                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            ]
+        )
+
+        settings = zero_settings
+        settings[1] = np.pi
+
+        assert np.allclose(
+            P_Net(settings),
+            [
+                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            ],
+        )
+
+        settings[7] = np.pi / 2
+
+        assert np.allclose(
+            P_Net(settings),
+            [
+                [1, 0.5, 1, 0.5, 1, 0.5, 1, 0.5, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0.5, 0, 0.5, 0, 0.5, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 1, 0.5, 1, 0.5, 1, 0.5, 1, 0.5],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 0.5, 0, 0.5, 0, 0.5],
+            ],
+        )
+
class TestShannonEntropy:
    @pytest.mark.parametrize(
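Note: the expected matrices in ``test_inputs_from_multiple_layers`` above follow from single-qubit RY rotations. Assuming ``settings[1]`` is the angle the wire-0 preparation node applies when its input bit is 1, ``RY(pi)`` flips wire 0 to |1>, so the last eight columns (joint inputs whose most-significant digit is 1) move all probability to outcome row 2, the bitstring ``10``; ``RY(pi/2)`` splits probability 50/50, producing the alternating 0.5 columns once ``settings[7]`` is set. A quick numerical check of both angles, independent of qnetvo:

import numpy as np

def ry(theta):
    # Single-qubit RY rotation matrix.
    c, s = np.cos(theta / 2), np.sin(theta / 2)
    return np.array([[c, -s], [s, c]])

ket0 = np.array([1.0, 0.0])
print(np.abs(ry(np.pi) @ ket0) ** 2)      # [0., 1.]   -> the wire flips to |1>
print(np.abs(ry(np.pi / 2) @ ket0) ** 2)  # [0.5, 0.5] -> uniform outcomes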
