
Commit f143950

Merge pull request #51 from ChitambarLab/behavior_fn-bug
Behavior fn bug and support for Adam
2 parents 72c37ee + f841cb8 commit f143950

4 files changed

Lines changed: 111 additions & 17 deletions


src/qnetvo/gradient_descent.py

Lines changed: 13 additions & 1 deletion
@@ -12,6 +12,8 @@ def gradient_descent(
     grad_fn=None,
     verbose=True,
     interface="autograd",
+    optimizer=None,
+    optimizer_kwargs={},
 ):
     """Performs a numerical gradient descent optimization on the provided ``cost`` function.
     The optimization is seeded with (random) ``init_settings`` which are then varied to
@@ -42,6 +44,13 @@ def gradient_descent(
     :param interface: Specifies the optimizer software either ``"autograd"`` or ``"tf"`` (TensorFlow).
     :type interface: string, default ``"autograd"``

+    :param optimizer: Specifies the PennyLane optimizer to use. Default ``qml.GradientDescentOptimizer``.
+        Set to ``"adam"`` to use the ``qml.AdamOptimizer``; note that ``interface="autograd"`` must be set.
+    :type optimizer: string
+
+    :param optimizer_kwargs: Keyword arguments to pass to the specified optimizer.
+    :type optimizer_kwargs: dictionary
+
     :return: Data regarding the gradient descent optimization.
     :rtype: dictionary, contains the following keys:

@@ -69,7 +78,10 @@ def gradient_descent(
     """

     if interface == "autograd":
-        opt = qml.GradientDescentOptimizer(stepsize=step_size)
+        if optimizer == "adam":
+            opt = qml.AdamOptimizer(stepsize=step_size, **optimizer_kwargs)
+        else:
+            opt = qml.GradientDescentOptimizer(stepsize=step_size)
     elif interface == "tf":
         from .lazy_tensorflow_import import tensorflow as tf

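To illustrate the new options: a minimal usage sketch, assuming a toy quadratic cost in place of a real network cost function; the ``beta1``/``beta2`` values shown are simply forwarded to ``qml.AdamOptimizer`` through ``optimizer_kwargs``.

from pennylane import numpy as np
import qnetvo as qnet

# Toy quadratic cost standing in for a network cost function (illustrative only).
def cost(settings):
    return settings[0] ** 2 + settings[1] ** 2

init_settings = np.array([0.7, -0.4], requires_grad=True)

# Select the Adam optimizer through the new keyword arguments; any extra
# keywords in optimizer_kwargs are passed along to qml.AdamOptimizer.
opt_dict = qnet.gradient_descent(
    cost,
    init_settings,
    num_steps=50,
    step_size=0.1,
    verbose=False,
    optimizer="adam",
    optimizer_kwargs={"beta1": 0.9, "beta2": 0.999},
)
print(opt_dict["opt_settings"])
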
src/qnetvo/information.py

Lines changed: 19 additions & 12 deletions
@@ -1,6 +1,6 @@
 from pennylane import math
 from .qnodes import joint_probs_qnode
-from .utilities import mixed_base_num
+from .utilities import mixed_base_num, ragged_reshape
 from pennylane import numpy as np


@@ -43,11 +43,22 @@ def behavior_fn(network_ansatz, postmap=np.array([]), qnode_kwargs={}):
         behavior matrix for a given set of settings.
     :rtype: function
     """
-    num_in_prep_nodes = [node.num_in for node in network_ansatz.layers[0]]
-    num_in_meas_nodes = [node.num_in for node in network_ansatz.layers[-1]]
+    # num_in_prep_nodes = [node.num_in for node in network_ansatz.layers[0]]
+    # num_in_meas_nodes = [node.num_in for node in network_ansatz.layers[-1]]

-    base_digits = num_in_prep_nodes + num_in_meas_nodes
-    net_num_in = math.prod(base_digits)
+    # base_digits = num_in_prep_nodes + num_in_meas_nodes
+    # net_num_in = math.prod(base_digits)
+
+    # raw_net_num_out = 2 ** len(network_ansatz.layers_wires[-1])
+
+    probs_qnode = joint_probs_qnode(network_ansatz, **qnode_kwargs)
+
+    net_num_in = math.prod(network_ansatz.layers_total_num_in)
+    num_inputs_list = math.concatenate(network_ansatz.layers_node_num_in).tolist()
+    node_input_ids = [
+        ragged_reshape(mixed_base_num(i, num_inputs_list), network_ansatz.layers_num_nodes)
+        for i in range(net_num_in)
+    ]

     raw_net_num_out = 2 ** len(network_ansatz.layers_wires[-1])

@@ -56,18 +67,14 @@ def behavior_fn(network_ansatz, postmap=np.array([]), qnode_kwargs={}):
         if postmap.shape[1] != raw_net_num_out:
             raise ValueError("The `postmap` must have " + str(raw_net_num_out) + " columns.")

-    node_input_ids = [mixed_base_num(i, base_digits) for i in range(net_num_in)]
+    # node_input_ids = [mixed_base_num(i, base_digits) for i in range(net_num_in)]

-    probs_qnode = joint_probs_qnode(network_ansatz, **qnode_kwargs)
+    # probs_qnode = joint_probs_qnode(network_ansatz, **qnode_kwargs)

     def behavior(network_settings):
         raw_behavior = np.zeros((raw_net_num_out, net_num_in))
         for i, input_id_set in enumerate(node_input_ids):
-            settings = network_ansatz.qnode_settings(
-                network_settings,
-                [input_id_set[0 : len(num_in_prep_nodes)], input_id_set[len(num_in_prep_nodes) :]],
-            )
-
+            settings = network_ansatz.qnode_settings(network_settings, input_id_set)
             raw_behavior[:, i] += probs_qnode(settings)

         return postmap @ raw_behavior if has_postmap else raw_behavior
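
In short, the fix enumerates network inputs over every layer of the ansatz (so inputs supplied by multiple preparation layers are handled), rather than only the first and last layers. A rough standalone illustration of the new indexing, using stand-in helpers rather than the actual ``qnetvo.utilities.mixed_base_num`` and ``ragged_reshape`` implementations:

# Stand-ins for qnetvo's mixed_base_num and ragged_reshape (illustrative only).
def mixed_base_num(n, base_digits):
    """Decompose integer n into digits of the mixed base given by base_digits."""
    digits = []
    for base in reversed(base_digits):
        digits.append(n % base)
        n //= base
    return list(reversed(digits))

def ragged_reshape(flat_list, row_lengths):
    """Split flat_list into consecutive sublists with the given lengths."""
    out, start = [], 0
    for length in row_lengths:
        out.append(flat_list[start : start + length])
        start += length
    return out

# Network from the new test below: two prepare layers with one binary-input
# node each, followed by a measure layer with two binary-input nodes.
num_inputs_list = [2, 2, 2, 2]   # num_in of every node, all layers concatenated
layers_num_nodes = [1, 1, 2]     # nodes per layer

net_num_in = 2 * 2 * 2 * 2       # 16 classical input combinations in total
node_input_ids = [
    ragged_reshape(mixed_base_num(i, num_inputs_list), layers_num_nodes)
    for i in range(net_num_in)
]
print(node_input_ids[5])  # [[0], [1], [0, 1]] -> per-layer node inputs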

test/gradient_descent_test.py

Lines changed: 16 additions & 0 deletions
@@ -44,3 +44,19 @@ def test_quadratic_cost(self):
             verbose=False,
             interface="jax",
         )
+
+        # adam optimizer
+        ad_opt_dict = qnet.gradient_descent(
+            cost,
+            settings,
+            num_steps=50,
+            step_size=0.1,
+            verbose=True,
+            optimizer="adam",
+        )
+
+        assert np.isclose(opt_dict["opt_score"], 0, atol=1e-6)
+        assert np.isclose(opt_dict["opt_settings"], 0, atol=1e-4)
+        assert opt_dict["samples"] == [0, 25, 50]
+        assert len(opt_dict["scores"]) == 3
+        assert len(opt_dict["settings_history"]) == 51

test/information_test.py

Lines changed: 63 additions & 4 deletions
@@ -8,12 +8,16 @@
 class TestBehaviorFn:
     def test_simple_settings(self):
         prep_nodes = [
-            qnet.PrepareNode(2, [0], qnet.local_RY, 1),
-            qnet.PrepareNode(2, [1], qnet.local_RY, 1),
+            qnet.PrepareNode(num_in=2, wires=[0], ansatz_fn=qnet.local_RY, num_settings=1),
+            qnet.PrepareNode(num_in=2, wires=[1], ansatz_fn=qnet.local_RY, num_settings=1),
         ]
         meas_nodes = [
-            qnet.MeasureNode(2, 2, [0], qnet.local_RY, 1),
-            qnet.MeasureNode(2, 2, [1], qnet.local_RY, 1),
+            qnet.MeasureNode(
+                num_in=2, num_out=2, wires=[0], ansatz_fn=qnet.local_RY, num_settings=1
+            ),
+            qnet.MeasureNode(
+                num_in=2, num_out=2, wires=[1], ansatz_fn=qnet.local_RY, num_settings=1
+            ),
         ]
         ansatz = qnet.NetworkAnsatz(prep_nodes, meas_nodes)
         P_Net = qnet.behavior_fn(ansatz)
@@ -122,6 +126,61 @@ def test_rand_settings(self):
         assert P_Net.shape == (16, 288)
         assert np.allclose(np.ones(288), [np.sum(P_Net[:, i]) for i in range(288)])

+    def test_inputs_from_multiple_layers(self):
+        prep_nodes_a = [
+            qnet.PrepareNode(num_in=2, wires=[0], ansatz_fn=qnet.local_RY, num_settings=1),
+        ]
+        prep_nodes_b = [
+            qnet.PrepareNode(num_in=2, wires=[1], ansatz_fn=qnet.local_RY, num_settings=1),
+        ]
+
+        meas_nodes = [
+            qnet.MeasureNode(
+                num_in=2, num_out=2, wires=[0], ansatz_fn=qnet.local_RY, num_settings=1
+            ),
+            qnet.MeasureNode(
+                num_in=2, num_out=2, wires=[1], ansatz_fn=qnet.local_RY, num_settings=1
+            ),
+        ]
+        ansatz = qnet.NetworkAnsatz(prep_nodes_a, prep_nodes_b, meas_nodes)
+        P_Net = qnet.behavior_fn(ansatz)
+        zero_settings = ansatz.zero_network_settings()
+
+        assert np.all(
+            P_Net(zero_settings)
+            == [
+                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            ]
+        )
+
+        settings = zero_settings
+        settings[1] = np.pi
+
+        assert np.allclose(
+            P_Net(settings),
+            [
+                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+            ],
+        )
+
+        settings[7] = np.pi / 2
+
+        assert np.allclose(
+            P_Net(settings),
+            [
+                [1, 0.5, 1, 0.5, 1, 0.5, 1, 0.5, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0.5, 0, 0.5, 0, 0.5, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 0, 0, 0, 0, 1, 0.5, 1, 0.5, 1, 0.5, 1, 0.5],
+                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 0.5, 0, 0.5, 0, 0.5],
+            ],
+        )
+

 class TestShannonEntropy:
     @pytest.mark.parametrize(

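The expected matrices in ``test_inputs_from_multiple_layers`` follow from single-qubit RY rotations, assuming the usual convention RY(theta)|0> = cos(theta/2)|0> + sin(theta/2)|1>; a quick sanity check of the three angles the test exercises:

import numpy as np

# P(0) after applying RY(theta) to |0> and measuring in the computational basis.
def p0(theta):
    return np.cos(theta / 2) ** 2

for theta in [0.0, np.pi, np.pi / 2]:
    print(f"theta = {theta:.4f} -> P(0) = {p0(theta):.2f}, P(1) = {1 - p0(theta):.2f}")

# theta = 0    -> P(0) = 1.0, the all-ones first row for zero settings
# theta = pi   -> P(0) = 0.0, the prepared qubit flips when settings[1] = np.pi
# theta = pi/2 -> P(0) = 0.5, the 0.5 entries that appear after settings[7] = np.pi / 2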