Hello!
I created a subclass MyDenseLayer of the parent class KerasLayer and then tried to process the data using the newly defined methods.
One of the problems I encountered was an error that could be worked around by deleting the indexing on weights:
Cell In[49], line 11, in circuit(inputs, weights)
9 qml.AngleEmbedding(inputs[4:8], wires=range(0,4), rotation='Y')
10 qml.AngleEmbedding(inputs[8:12], wires=range(0,4), rotation='Z')
---> 11 qml.CRX(weights[0], wires=[0,1])
12 qml.CRX(weights[1], wires=[1,2])
13 qml.CRX(weights[2], wires=[2,3])
TypeError: Exception encountered when calling layer 'my_dense_layer_3' (type MyDenseLayer).
After doing that, a new problem arose, and I don’t know how to solve it:
383 return str([qml.math.round(qml.math.real(d) % (2 * np.pi), 10) for d in op.data])
385 if op.name in ("CRX", "CRY", "CRZ", "CRot"):
--> 386 return str([qml.math.round(qml.math.real(d) % (4 * np.pi), 10) for d in op.data])
388 return str(op.data)
TypeError: Exception encountered when calling layer 'my_dense_layer_4' (type MyDenseLayer).
unsupported operand type(s) for %: 'generator' and 'float'
Call arguments received by layer 'my_dense_layer_4' (type MyDenseLayer):
• inputs=tf.Tensor(shape=(1, 32, 32, 3), dtype=float32)
Full Code:
from tensorflow.keras.datasets import cifar10
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers.experimental import preprocessing
(x_train_full, y_train_full), (x_test, y_test) = cifar10.load_data()
split = 0.2
x_train, x_val, y_train, y_val = train_test_split(
x_train_full, y_train_full, test_size=0.2, random_state=42)
x_train = preprocessing.Rescaling(scale=1.0/255)(x_train)
x_val = preprocessing.Rescaling(scale=1.0/255)(x_val)
y_train = y_train.reshape(y_train.shape[0],)
y_val = y_val.reshape(y_val.shape[0],)
import tensorflow as tf
import pennylane as qml
from pennylane import numpy as np
import matplotlib.pyplot as plt
import torch
n_qubits = 5
# qiskit.aer, default.qubit
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def circuit(inputs, weights):
    qml.Hadamard(wires=0)
    qml.AngleEmbedding(inputs[0:4], wires=range(0,4), rotation='X')
    qml.AngleEmbedding(inputs[4:8], wires=range(0,4), rotation='Y')
    qml.AngleEmbedding(inputs[8:12], wires=range(0,4), rotation='Z')
    qml.CRX(weights, wires=[0,1])
    qml.CRX(weights, wires=[1,2])
    qml.CRX(weights, wires=[2,3])
    qml.CRX(weights, wires=[3,1])
    qml.CPhase(weights, wires=[0,4])
    return qml.expval(qml.PauliZ(wires=4))
class MyDenseLayer(qml.qnn.KerasLayer):
    def _quanv(self, image, weights):
        output_array = np.zeros((31, 31))
        r = image[:,:,0]
        g = image[:,:,1]
        b = image[:,:,2]
        for j in range(0, 31, 1):
            for k in range(0, 31, 1):
                result_qn = self.qnode([
                    r[j, k],
                    r[j, k + 1],
                    r[j + 1, k],
                    r[j + 1, k + 1],
                    g[j, k],
                    g[j, k + 1],
                    g[j + 1, k],
                    g[j + 1, k + 1],
                    b[j, k],
                    b[j, k + 1],
                    b[j + 1, k],
                    b[j + 1, k + 1]
                ], weights)
                output_array[j, k] = result_qn
        return output_array

    def _evaluate_qnode(self, x):
        batch_round = x.shape[0]
        res_list = []
        q1 = (1.0 * w for k, w in self.qnode_weights.items())
        for i in range(batch_round):
            image = x[i]
            res_list.append(self._quanv(image, q1))
        res = tf.constant(res_list)
        return res

    def call(self, inputs):
        results = self._evaluate_qnode(inputs)
        return results
weight_shapes = {"weights": (5)}
def MyModel():
    qlayer = MyDenseLayer(circuit, weight_shapes, output_dim=None)
    clayer_1 = tf.keras.layers.Flatten()
    clayer_2 = tf.keras.layers.Dense(100, activation="softmax")
    clayer_3 = tf.keras.layers.Dense(10, activation="softmax")
    model = tf.keras.models.Sequential([qlayer, clayer_1, clayer_2, clayer_3])
    model.compile(
        optimizer='adam',
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
q_model = MyModel()
q_history = q_model.fit(
    x_train,
    y_train,
    validation_data=(x_val, y_val),
    batch_size=1,
    epochs=30,
    verbose=2,
)
Thanks in advance.
Hey @Kwokho_Ng, welcome to the forum!
There were a couple things in your code worth highlighting:
q1 = (1.0 * w for k, w in self.qnode_weights.items())
was creating a generator (see Generators - Python Wiki). This wasn’t grabbing the weight values. The fix here is to just have q1 = self.qnode_weights['weights'].
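To see the difference, here’s a minimal sketch (the dictionary below is just a stand-in for the layer’s qnode_weights):

import tensorflow as tf

qnode_weights = {"weights": tf.Variable([0.1, 0.2, 0.3, 0.4, 0.5], name="weights")}

q1 = (1.0 * w for k, w in qnode_weights.items())  # a generator object, not a tensor
# q1 % 1.0  -> TypeError: unsupported operand type(s) for %: 'generator' and 'float'
# q1[0]     -> TypeError: 'generator' object is not subscriptable

q1 = qnode_weights["weights"]  # the actual tf.Variable: indexable and differentiable
print(q1[0])                   # tf.Tensor(0.1, shape=(), dtype=float32)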
The weights in circuit were being passed in full to each controlled rotation gate, and I assume you want to do this instead:
qml.CRX(weights[0], wires=[0,1])
qml.CRX(weights[1], wires=[1,2])
qml.CRX(weights[2], wires=[2,3])
qml.CRX(weights[3], wires=[3,1])
qml.CPhase(weights[4], wires=[0,4])
Your model is outputting something of size 10, but you’re comparing to something of size 1 which causes problems when your loss is being calculated:
clayer_3 = tf.keras.layers.Dense(1, activation="softmax")
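Alternatively, you could keep the 10-unit output and make the labels match it instead. A sketch of two standard options for the 10-class CIFAR-10 labels:

# Option A: one-hot encode the integer labels to shape (N, 10)
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_val = tf.keras.utils.to_categorical(y_val, num_classes=10)

# Option B: keep the integer labels and use the sparse variant of the loss
model.compile(
    optimizer="adam",
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)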
Here’s the full code that ended up working for me:
from tensorflow.keras.datasets import cifar10
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers.experimental import preprocessing
(x_train_full, y_train_full), (x_test, y_test) = cifar10.load_data()
split = 0.2
x_train, x_val, y_train, y_val = train_test_split(
x_train_full, y_train_full, test_size=0.2, random_state=42)
x_train = preprocessing.Rescaling(scale=1.0/255)(x_train)
x_val = preprocessing.Rescaling(scale=1.0/255)(x_val)
y_train = y_train.reshape(y_train.shape[0],)
y_val = y_val.reshape(y_val.shape[0],)
import tensorflow as tf
import pennylane as qml
from pennylane import numpy as np
import matplotlib.pyplot as plt
n_qubits = 5
# qiskit.aer, default.qubit
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def circuit(inputs, weights):
    qml.Hadamard(wires=0)
    qml.AngleEmbedding(inputs[0:4], wires=range(0,4), rotation='X')
    qml.AngleEmbedding(inputs[4:8], wires=range(0,4), rotation='Y')
    qml.AngleEmbedding(inputs[8:12], wires=range(0,4), rotation='Z')
    qml.CRX(weights[0], wires=[0,1])
    qml.CRX(weights[1], wires=[1,2])
    qml.CRX(weights[2], wires=[2,3])
    qml.CRX(weights[3], wires=[3,1])
    qml.CPhase(weights[4], wires=[0,4])
    return qml.expval(qml.PauliZ(wires=4))
class MyDenseLayer(qml.qnn.KerasLayer):
    def _quanv(self, image, weights):
        output_array = np.zeros((31, 31))
        r = image[:,:,0]
        g = image[:,:,1]
        b = image[:,:,2]
        for j in range(0, 31, 1):
            for k in range(0, 31, 1):
                _inputs = [
                    r[j, k],
                    r[j, k + 1],
                    r[j + 1, k],
                    r[j + 1, k + 1],
                    g[j, k],
                    g[j, k + 1],
                    g[j + 1, k],
                    g[j + 1, k + 1],
                    b[j, k],
                    b[j, k + 1],
                    b[j + 1, k],
                    b[j + 1, k + 1]
                ]
                result_qn = self.qnode(_inputs, weights)
                output_array[j, k] = result_qn
        return output_array

    def _evaluate_qnode(self, x):
        batch_round = x.shape[0]
        res_list = []
        q1 = self.qnode_weights['weights']
        for i in range(batch_round):
            image = x[i]
            res_list.append(self._quanv(image, q1))
        res = tf.constant(res_list)
        return res

    def call(self, inputs):
        results = self._evaluate_qnode(inputs)
        return results
weight_shapes = {"weights": (5)}
def MyModel():
    qlayer = MyDenseLayer(circuit, weight_shapes, output_dim=None)
    clayer_1 = tf.keras.layers.Flatten()
    clayer_2 = tf.keras.layers.Dense(100, activation="softmax")
    clayer_3 = tf.keras.layers.Dense(1, activation="softmax")
    model = tf.keras.models.Sequential([qlayer, clayer_1, clayer_2, clayer_3])
    model.compile(
        optimizer='adam',
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
q_model = MyModel()
q_history = q_model.fit(
    x_train,
    y_train,
    validation_data=(x_val, y_val),
    batch_size=1,
    epochs=5,
    verbose=2,
)
And here are my package versions for good measure:
tf: '2.12.0'
Name: PennyLane
Version: 0.33.1
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/PennyLaneAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: /Users/isaac/.virtualenvs/pennylane-tensorflow/lib/python3.9/site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml, typing-extensions
Required-by: PennyLane-Lightning, PennyLane-qiskit
Platform info: macOS-14.1.2-x86_64-i386-64bit
Python version: 3.9.14
Numpy version: 1.23.5
Scipy version: 1.11.3
Installed devices:
- default.gaussian (PennyLane-0.33.1)
- default.mixed (PennyLane-0.33.1)
- default.qubit (PennyLane-0.33.1)
- default.qubit.autograd (PennyLane-0.33.1)
- default.qubit.jax (PennyLane-0.33.1)
- default.qubit.legacy (PennyLane-0.33.1)
- default.qubit.tf (PennyLane-0.33.1)
- default.qubit.torch (PennyLane-0.33.1)
- default.qutrit (PennyLane-0.33.1)
- qiskit.ibmq.circuit_runner (PennyLane-qiskit-0.33.0)
- qiskit.ibmq.sampler (PennyLane-qiskit-0.33.0)
- qiskit.remote (PennyLane-qiskit-0.33.0)
- lightning.qubit (PennyLane-Lightning-0.33.1)
Let me know if this helps!
@isaacdevlugt Thank you for your clear explanation and reminder. I forgot to convert the label into a binary matrix.
Here I made some slight changes to the code, but I am currently stuck on the problem “Gradients do not exist for variables [‘weights:0’] when minimizing the loss.” I have tried the approach from Quantum convolution neural network using Keras, and it still couldn’t solve this problem.
WARNING:tensorflow:Gradients do not exist for variables ['weights:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument?
qnode_weights output:
<tf.Variable 'weights:0' shape=(5,) dtype=float64, numpy=array([ 0.72691341, -0.59184832, 0.73965137, 0.66283934, 0.39719912])>
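One way to check which variables actually receive gradients is a manual GradientTape pass; here’s a minimal sketch, assuming the model, x_train, and y_trains defined in the full code below:

import tensorflow as tf

# Any None entry here reproduces the "Gradients do not exist" warning
with tf.GradientTape() as tape:
    preds = model(x_train[:2])
    loss = tf.keras.losses.categorical_crossentropy(y_trains[:2], preds)
grads = tape.gradient(loss, model.trainable_variables)
for var, g in zip(model.trainable_variables, grads):
    print(var.name, "-> no gradient" if g is None else "-> ok")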
Traceback while using lightning.gpu:
File ~/anaconda3/envs/pennylane/lib/python3.9/site-packages/pennylane_lightning/lightning_gpu/lightning_gpu.py:347, in LightningGPU.reset(self)
345 super().reset()
346 # init the state vector to |00..0>
--> 347 self._gpu_state.resetGPU(False)
Traceback while using default.qubit:
File ~/anaconda3/envs/pennylane/lib/python3.9/site-packages/tensorflow/python/framework/ops.py:5979, in is_auto_dtype_conversion_enabled()
5976 def is_auto_dtype_conversion_enabled():
5977 return (
5978 _dtype_conversion_mode == PromoMode.ALL
-> 5979 or _dtype_conversion_mode == PromoMode.SAFE
5980 )
Full code:
import tensorflow as tf
# tf.keras.backend.set_floatx('float64')
from tensorflow.keras.datasets import cifar10
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers.experimental import preprocessing
(x_train_full, y_train_full), (x_test, y_test) = cifar10.load_data()
x_train_full=x_train_full[:10]
y_train_full=y_train_full[:10]
x_test=x_test[:10]
y_test=y_test[:10]
split = 0.2
x_train, x_val, y_train, y_val = train_test_split(
x_train_full, y_train_full, test_size=0.2, random_state=42)
x_train = preprocessing.Rescaling(scale=1.0/255)(x_train)
x_val = preprocessing.Rescaling(scale=1.0/255)(x_val)
y_train = y_train.reshape(y_train.shape[0],)
y_val = y_val.reshape(y_val.shape[0],)
import pennylane as qml
from pennylane import numpy as np
n_qubits = 5
# qiskit.aer, default.qubit, lightning.gpu
dev = qml.device("lightning.gpu", wires=n_qubits)
@qml.qnode(dev)
def circuit(inputs, weights):
    # weights = tf.split(weights, num_or_size_splits=n_qubits, axis=0)
    qml.Hadamard(wires=0)
    qml.AngleEmbedding(inputs[0:4], wires=range(0,4), rotation='X')
    qml.AngleEmbedding(inputs[4:8], wires=range(0,4), rotation='Y')
    qml.AngleEmbedding(inputs[8:12], wires=range(0,4), rotation='Z')
    qml.CRX(weights[0], wires=[0,1])
    qml.CRX(weights[1], wires=[1,2])
    qml.CRX(weights[2], wires=[2,3])
    qml.CRX(weights[3], wires=[3,1])
    qml.CPhase(weights[4], wires=[0,4])
    return qml.expval(qml.PauliX(wires=0))
class MyDenseLayer(qml.qnn.KerasLayer):
    def _quanv(self, image, weights):
        output_array = []
        r = image[:,:,0]
        g = image[:,:,1]
        b = image[:,:,2]
        for j in range(0, 31, 1):
            for k in range(0, 31, 1):
                _inputs = [
                    r[j, k],
                    r[j, k + 1],
                    r[j + 1, k],
                    r[j + 1, k + 1],
                    g[j, k],
                    g[j, k + 1],
                    g[j + 1, k],
                    g[j + 1, k + 1],
                    b[j, k],
                    b[j, k + 1],
                    b[j + 1, k],
                    b[j + 1, k + 1]
                ]
                result_qn = self.qnode(_inputs, weights)
                output_array.append(result_qn)
        output_array = np.array(output_array, requires_grad=False).reshape(1,31,31) # dtype='float32'
        # output_array = tf.dtypes.cast(output_array, tf.float32)
        output_array = tf.constant(output_array)
        # print(output_array)
        return output_array

    def _evaluate_qnode(self, x):
        batch_round = x.shape[0]
        weights_qn = self.qnode_weights['weights']
        # weights_qn = tf.split(weights_qn, num_or_size_splits=n_qubits, axis=0)
        res_list = self._quanv(x[0], weights_qn)
        # res_list = tf.constant(res_list)
        if batch_round > 1:
            for i in range(1, batch_round):
                temp = self._quanv(x[i], weights_qn)
                # temp = tf.constant(temp)
                res_list = tf.concat([res_list, temp], 0)
        print(res_list[0][0])
        return res_list

    def call(self, inputs):
        results = self._evaluate_qnode(inputs)
        return results
weight_shapes = {"weights": (5)}
class MyModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.qlayer = MyDenseLayer(circuit, weight_shapes, output_dim=None)
        self.flatten1 = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(100, activation=tf.nn.relu)
        self.dense2 = tf.keras.layers.Dense(10, activation=tf.nn.softmax)
        self.dropout = tf.keras.layers.Dropout(0.2)

    def call(self, inputs):
        # all EagerTensor
        x = self.qlayer(inputs)
        x = self.flatten1(x)
        x = self.dense1(x)
        x = self.dropout(x, training=True)
        print(x)
        return self.dense2(x)
y_trains = tf.keras.utils.to_categorical(y_train)
y_vals = tf.keras.utils.to_categorical(y_val)
y_trains = tf.convert_to_tensor(y_trains)
y_vals = tf.convert_to_tensor(y_vals)
model = MyModel()
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
    loss="categorical_crossentropy",
    metrics=["accuracy"],
)
history = model.fit(
    x_train,
    y_trains,
    validation_data=(x_val, y_vals),
    batch_size=2,
    epochs=5,
    verbose=2,
)
tensorflow - 2.15.0
qml.about()
Name: PennyLane
Version: 0.33.1
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/PennyLaneAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: /home/xx1/anaconda3/envs/pennylane/lib/python3.9/site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml, typing-extensions
Required-by: PennyLane-Lightning, PennyLane-Lightning-GPU, PennyLane-qiskit
Platform info: Linux-5.15.133.1-microsoft-standard-WSL2-x86_64-with-glibc2.35
Python version: 3.9.18
Numpy version: 1.26.2
Scipy version: 1.11.4
Installed devices:
- default.gaussian (PennyLane-0.33.1)
- default.mixed (PennyLane-0.33.1)
- default.qubit (PennyLane-0.33.1)
- default.qubit.autograd (PennyLane-0.33.1)
- default.qubit.jax (PennyLane-0.33.1)
- default.qubit.legacy (PennyLane-0.33.1)
- default.qubit.tf (PennyLane-0.33.1)
- default.qubit.torch (PennyLane-0.33.1)
- default.qutrit (PennyLane-0.33.1)
- null.qubit (PennyLane-0.33.1)
- lightning.qubit (PennyLane-Lightning-0.33.1)
- lightning.gpu (PennyLane-Lightning-GPU-0.33.1)
- qiskit.aer (PennyLane-qiskit-0.33.1)
- qiskit.basicaer (PennyLane-qiskit-0.33.1)
- qiskit.ibmq (PennyLane-qiskit-0.33.1)
- qiskit.ibmq.circuit_runner (PennyLane-qiskit-0.33.1)
- qiskit.ibmq.sampler (PennyLane-qiskit-0.33.1)
- qiskit.remote (PennyLane-qiskit-0.33.1)
The dtype format should be fine… Could you please advise me on how to resolve this warning?
Thanks!
After some attempts, I have confirmed that only TF tensor operations can be used inside the MyDenseLayer class (or, more generally, inside a TF model), and with that change the code runs correctly in the end.
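A minimal sketch of why (my illustration, not from the original code): anything routed through NumPy or tf.constant is detached from TensorFlow’s autodiff graph, which is exactly what triggers the “Gradients do not exist” warning, whereas pure TF ops such as tf.tensor_scatter_nd_update keep the graph connected.

import tensorflow as tf

w = tf.Variable(1.5)

# Detached: tf.constant(...numpy()) severs the path from the loss back to w
with tf.GradientTape() as tape:
    y = tf.constant((w * 2.0).numpy())
    loss = y ** 2
print(tape.gradient(loss, w))  # None -> "Gradients do not exist for variables..."

# Connected: building the output with TF ops keeps w in the graph
with tf.GradientTape() as tape:
    out = tf.zeros((1,))
    out = tf.tensor_scatter_nd_update(out, [[0]], [w * 2.0])
    loss = tf.reduce_sum(out ** 2)
print(tape.gradient(loss, w))  # tf.Tensor(12.0, ...), i.e. d/dw (2w)^2 = 8w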
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
import tensorflow as tf
# tf.keras.backend.set_floatx('float64')
class MyDenseLayer(qml.qnn.KerasLayer):
    def _quanv(self, image, weights):
        # output_array = []
        test_array = tf.zeros((1, 31, 31), dtype=tf.float32)
        r = image[:,:,0]
        g = image[:,:,1]
        b = image[:,:,2]
        for j in range(0, 31, 1):
            for k in range(0, 31, 1):
                _inputs = [
                    r[j, k],
                    r[j, k + 1],
                    r[j + 1, k],
                    r[j + 1, k + 1],
                    g[j, k],
                    g[j, k + 1],
                    g[j + 1, k],
                    g[j + 1, k + 1],
                    b[j, k],
                    b[j, k + 1],
                    b[j + 1, k],
                    b[j + 1, k + 1]
                ]
                result_qn = self.qnode(_inputs, weights)  # lightning.gpu returns float64
                # output_array.append(result_qn)
                result_qn = tf.cast(result_qn, dtype=tf.float32)
                test_array = tf.tensor_scatter_nd_update(test_array, [[0,j,k]], [result_qn])
        # output_array = np.array(output_array, requires_grad=False).reshape(1,31,31) # dtype='float32'
        # output_array = tf.dtypes.cast(output_array, tf.float32)
        # output_array = tf.constant(output_array)
        # print(output_array)
        return test_array

    def _evaluate_qnode(self, x):
        batch_round = x.shape[0]
        weights_qn = self.qnode_weights['weights']
        res_list = self._quanv(x[0], weights_qn)
        if batch_round > 1:
            for i in range(1, batch_round):
                temp = self._quanv(x[i], weights_qn)
                res_list = tf.concat([res_list, temp], 0)
        return res_list

    def call(self, inputs):
        results = self._evaluate_qnode(inputs)
        return results
# default.qubit, without CUDA, CPU only
Epoch 1/5
4/4 - 276s - loss: 2.8753 - accuracy: 0.0625 - val_loss: 3.0644 - val_accuracy: 0.5000 - 276s/epoch - 69s/step
Epoch 2/5
4/4 - 269s - loss: 2.8643 - accuracy: 0.2500 - val_loss: 3.4688 - val_accuracy: 0.0000e+00 - 269s/epoch - 67s/step
Epoch 3/5
4/4 - 273s - loss: 2.5653 - accuracy: 0.1875 - val_loss: 3.5177 - val_accuracy: 0.0000e+00 - 273s/epoch - 68s/step
Epoch 4/5
4/4 - 276s - loss: 2.3856 - accuracy: 0.0625 - val_loss: 2.4908 - val_accuracy: 0.2500 - 276s/epoch - 69s/step
Epoch 5/5
4/4 - 282s - loss: 2.6872 - accuracy: 0.1250 - val_loss: 2.5300 - val_accuracy: 0.2500 - 282s/epoch - 70s/step
cannot compute Sub as input #1(zero-based) was expected to be a double tensor but is a float tensor [Op:Sub] name:
This error can be fixed by adding the following cast when using the lightning plugins:
weights_qn = tf.cast(weights_qn, dtype=tf.float64) # float32 -> float64
# lightning.qubit
4/4 - 158s - loss: 3.1603 - accuracy: 0.0625 - val_loss: 2.3696 - val_accuracy: 0.2500 - 158s/epoch - 39s/step
Haha, actually there was still a slight problem with that code: the weights:0 variable never updated. But I rewrote the code using PyTorch and everything worked fine.
Now I’m trying to optimize the image-processing time for the QCNN; I found this and followed it.
In this simple example, it works well.
import pennylane as qml
from pennylane import numpy as np
dev = qml.device("lightning.qubit", wires=5)
def test_circuit(inputs):
    qml.AngleEmbedding(inputs[10:15], wires=range(0,4), rotation='X') # a
    qml.AngleEmbedding(inputs[5:10], wires=range(0,4), rotation='X') # a
    qml.AngleEmbedding(inputs[0:5], wires=range(0,4), rotation='X') # a
    qml.AngleEmbedding(inputs[0:5], wires=range(0,4), rotation='X') # a
@qml.qnode(dev)
def circuit(inputs):
    print(inputs.shape)
    test_circuit(inputs)
    return qml.expval(qml.PauliZ(0))
x = np.array(range(4*16)).reshape(16,4)
print(qml.draw(circuit)(x))
print(circuit(x))
Then I wanted to try replacing dask.compute with this to speed up the computation (dask always ran single-threaded, and if I use scheduler="processes", the qnode weight disappears in PyTorch).
Similarly, in class QonvLayer(nn.Module), in def _quanv(self, image), I created a (81, 4) array for a single channel, and RGB gives three of them. Combining them with torch.cat() finally gives a (243, 4) array, as sketched below.
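A shape-only sketch of that stacking (random data as a stand-in for the image patches):

import torch

# one (81, 4) block of 2x2 patches per colour channel
r_channel = torch.rand(81, 4)
g_channel = torch.rand(81, 4)
b_channel = torch.rand(81, 4)

img_list = torch.cat([r_channel, g_channel, b_channel], dim=0)
print(img_list.shape)  # torch.Size([243, 4]): rows 0-80 = R, 81-161 = G, 162-242 = B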
But I got an error after sending it to self.circuit(), like this:
# kernel_size = (2, 2), stride = 1, image_size = (10, 10), target_size = (9, 9)
# R[0, 81], G[81, 162], B[162, 243]
# Use the qnode Parameter broadcasting method
# /python3.9/site-packages/pennylane/tape/qscript.py:519, in QuantumScript._update_batch_size(self)
517 if candidate:
518 if op_batch_size != candidate:
--> 519 raise ValueError(
520 "The batch sizes of the quantum script operations do not match, they include "
521 f"{candidate} and {op_batch_size}."
522 )
523 else:
524 candidate = op_batch_size
ValueError: The batch sizes of the quantum script operations do not match, they include 81 and 1.
This confuses me: the simple code works fine, but an error occurs when the same thing runs inside PyTorch. I’ve been trying to solve this problem for a while now, but no luck…
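My guess at the cause (just an assumption, I haven’t confirmed it): TorchLayer creates weights1 with shape (1,), and PennyLane treats a length-1 parameter on qml.CPhase as a broadcast dimension of size 1, which clashes with the batch of 81 rows that each qml.AngleEmbedding receives. TorchLayer also seems to infer the batch size from the leading dimension of inputs (243 here), while the circuit only produces 81 broadcasted results, which would explain the reshape error further down. A sketch of the scalar-weight workaround:

# Hypothetical sketch: pass a 0-d weight so CPhase carries no batch dimension
def test_cir(inputs, weights1, wires):
    qml.AngleEmbedding(inputs[0:81], wires=range(1, 5), rotation='X')    # r
    qml.AngleEmbedding(inputs[81:162], wires=range(1, 5), rotation='X')  # g
    qml.AngleEmbedding(inputs[162:243], wires=range(1, 5), rotation='X') # b
    qml.CPhase(weights1[0], wires=[0, 1])  # weights1[0] is a scalar, not shape (1,)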
What should I do to solve this problem? @isaacdevlugt Thank you a lot.
Yeah, when I use dask I usually get an error first. I deal with it by repeating “interrupt the kernel, run again” two or three times, and then it usually works fine.
There is a new error in this example, but it shouldn’t be the cause, since the same operation worked fine in the previous simple example.
404 # reshape to the correct number of batch dims
405 if has_batch_dim:
--> 406 results = torch.reshape(results, (*batch_dims, *results.shape[1:]))
408 return results
RuntimeError: shape '[243]' is invalid for input of size 81
Code:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
from tensorflow.keras.datasets import cifar10
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers.experimental import preprocessing
(x_train_full, y_train_full), (x_test, y_test) = cifar10.load_data()
x_train_full=x_train_full[:10]
y_train_full=y_train_full[:10]
x_test=x_test[:10]
y_test=y_test[:10]
split = 0.2
x_train, x_val, y_train, y_val = train_test_split(
x_train_full, y_train_full, test_size=split, random_state=42)
x_train = preprocessing.Rescaling(scale=1.0/255)(x_train)
x_val = preprocessing.Rescaling(scale=1.0/255)(x_val)
y_train = y_train.reshape(y_train.shape[0],)
y_val = y_val.reshape(y_val.shape[0],)
x_train = tf.image.resize(
    images=x_train,
    size=[10,10],
    method='bilinear',
    preserve_aspect_ratio=False,
    antialias=False,
    name=None,
)
x_test = tf.image.resize(
    images=x_test,
    size=[10,10],
    method='bilinear',
    preserve_aspect_ratio=False,
    antialias=False,
    name=None,
)
# pytorch
x_train = x_train.numpy()
x_test = x_test.numpy()
import torch
from torch import nn
import torchvision
import pennylane as qml
from pennylane import numpy as np
n_qubits = 5
# qiskit.aer, default.qubit
def test_cir(inputs, weights1, wires):
    qml.AngleEmbedding(inputs[0:81], wires=range(1,5), rotation='X') # r
    qml.AngleEmbedding(inputs[81:162], wires=range(1,5), rotation='X') # g
    qml.AngleEmbedding(inputs[162:243], wires=range(1,5), rotation='X') # b
    qml.CPhase(weights1, wires=[0,1]) # test param
class QonvLayer(nn.Module):
    def __init__(self, stride=1, device="default.qubit", wires=5, out_channels=1):
        super(QonvLayer, self).__init__()
        self.wires = wires
        self.dev = qml.device(device, wires=self.wires)
        self.out_channels = out_channels
        self.stride = stride

        @qml.qnode(device=self.dev)
        def circuit(inputs, weights1):
            test_cir(inputs, weights1, wires=wires)
            return qml.expval(qml.PauliZ(wires=0))

        weight_shapes = {"weights1": 1}
        self.circuit = qml.qnn.TorchLayer(circuit, weight_shapes=weight_shapes)

    def _quanv(self, image):
        # 10*10
        h, w, _ = image.size()
        kernel_size = 2
        h_out = (h-kernel_size) // self.stride + 1
        w_out = (w-kernel_size) // self.stride + 1
        r_channel = []
        g_channel = []
        b_channel = []
        s1 = 0
        for j in range(0, h_out, self.stride):
            for k in range(0, w_out, self.stride):
                r_channel.append(
                    [image[:,:,0][j, k], image[:,:,0][j, k + 1], image[:,:,0][j + 1, k], image[:,:,0][j + 1, k + 1]]
                )
                g_channel.append(
                    [image[:,:,1][j, k], image[:,:,1][j, k + 1], image[:,:,1][j + 1, k], image[:,:,1][j + 1, k + 1]]
                )
                b_channel.append(
                    [image[:,:,2][j, k], image[:,:,2][j, k + 1], image[:,:,2][j + 1, k], image[:,:,2][j + 1, k + 1]]
                )
                s1 += 1
        r_channel = torch.as_tensor(r_channel)
        g_channel = torch.as_tensor(g_channel)
        b_channel = torch.as_tensor(b_channel)
        print(f"r_channel shape={r_channel.shape}, g_channel shape={g_channel.shape}, b_channel shape={b_channel.shape}")
        temps = torch.cat([r_channel, g_channel])
        img_list = torch.cat([temps, b_channel])
        print(img_list.shape)
        print(type(img_list))
        expavals = self.circuit(img_list)
        # dask
        # test = [dask.delayed(self.circuit)(img_list[i]) for i in range(img_list.shape[0])]
        # expavals = dask.compute(*test)
        # expavals = dask.compute(*test, scheduler="processes")
        # print(expavals)
        test_array = torch.zeros((1, h_out, w_out, self.out_channels))
        exp_count = 0
        for j in range(0, 9, 1):
            for k in range(0, 9, 1):
                test_array[0,j,k,0] = expavals[exp_count]
                exp_count += 1
        return test_array

    def _evaluate_qnode(self, x):
        batch_round = x.shape[0]
        res_list = self._quanv(x[0])
        if batch_round > 1:
            for i in range(1, batch_round):
                temp = self._quanv(x[i])
                res_list = torch.cat([res_list, temp], 0)
        return res_list

    def forward(self, inputs):
        results = self._evaluate_qnode(inputs)
        return results
import dask
from sklearn.metrics import accuracy_score
def train(model, train_loader, epochs=20):
    model.train()
    optimizer = torch.optim.AdamW(params=model.parameters(), lr=0.01)
    criterion = torch.nn.CrossEntropyLoss()
    losses = np.array([])
    accs = np.array([])
    for epoch in range(epochs):
        for i, batch in enumerate(train_loader):
            x = batch[0]
            y = batch[1]
            optimizer.zero_grad(set_to_none=True)
            y_pred = model(x)
            loss = criterion(y_pred, y)
            loss.backward()
            optimizer.step()
            acc = accuracy_score(y, y_pred.argmax(-1).numpy())
            accs = np.append(accs, acc)
            losses = np.append(losses, loss.item())
            print("Epoch:", epoch,
                  "\tStep:", i,
                  "\tAcc:", round(acc, 3),
                  "\tLoss:", round(loss.item(), 3),
                  "\tMean Loss:", round(float(losses[-30:].mean()), 3),
                  "\tMean Acc:", round(float(accs[-30:].mean()), 3))
            print("weights1: ", model[0].circuit.weights1.grad.numpy().tolist())
    return model, losses, accs
train_set_split = [(x_train[i], y_train[i]) for i in range(x_train.shape[0])]
train_dataloaders = torch.utils.data.DataLoader(train_set_split, batch_size = 2, num_workers = 0)
model = torch.nn.Sequential(
    QonvLayer(),
    torch.nn.Flatten(),
    torch.nn.Linear(9*9, 12),
    torch.nn.ReLU(),
    torch.nn.Linear(12, 10),
)
model, losses, accs = train(model, train_dataloaders, epochs=2)
This is the functionality I am trying to implement in PyTorch. @isaacdevlugt
import pennylane as qml
from pennylane import numpy as np
dev = qml.device("qiskit.aer", wires=4)
def test_circuit1(inputs):
    qml.AngleEmbedding(inputs[0:81], wires=range(0,4), rotation='X')
    qml.AngleEmbedding(inputs[81:162], wires=range(0,4), rotation='X')
    qml.AngleEmbedding(inputs[162:243], wires=range(0,4), rotation='X')
@qml.qnode(dev)
def circuit(inputs):
    test_circuit1(inputs)
    return qml.expval(qml.PauliZ(0))
import torch
x = np.array(range(4*243)).reshape(243,4)
x = torch.as_tensor(x)
circuit(x)
Hey @Kwokho_Ng,
Sorry, I think I’m a little lost here. Are you now trying to get some code working with PyTorch, and no more TensorFlow? The code in your last response works just fine for me. One thing to note, though, is that your three instances of qml.AngleEmbedding in test_circuit1 aren’t necessary, since all of your rotations are X. The angles of rotation can just be added up:
R_x(\theta_0)R_x(\theta_1)R_x(\theta_2) = R_x(\theta_0 + \theta_1 + \theta_2)
Code:
angles = inputs[0:81] + inputs[81:162] + inputs[162:243]
qml.AngleEmbedding(
    angles,
    wires=range(0,4),
    rotation='X'
)
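Here’s a quick numerical check of that identity (a small self-contained sketch):

import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev)
def three_rx(a, b, c):
    qml.RX(a, wires=0)
    qml.RX(b, wires=0)
    qml.RX(c, wires=0)
    return qml.expval(qml.PauliZ(0))

@qml.qnode(dev)
def one_rx(a, b, c):
    qml.RX(a + b + c, wires=0)
    return qml.expval(qml.PauliZ(0))

angles = np.random.uniform(0, np.pi, size=3)
print(np.allclose(three_rx(*angles), one_rx(*angles)))  # True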
Yes, I am sorry for not stating this clearly. In my development environment, PyTorch works better than TF; at least the weights of the qlayer get updated. XD
Now I want to figure out how to implement the “parameter broadcasting” feature in code (PyTorch) to make it work like the simple example. Thank you for your patient explanation.
Ah okay! Thank you for clarifying.
You’ll have to use the older device API (default.qubit.legacy) to get broadcasting support with Torch for now:
import pennylane as qml
from pennylane import numpy as np
import torch
dev = qml.device("default.qubit.legacy", wires=4)
def test_circuit1(inputs):
    qml.AngleEmbedding(
        inputs,
        wires=range(0,4),
        rotation='X'
    )
@qml.qnode(dev)
def circuit(inputs):
    test_circuit1(inputs)
    return qml.expval(qml.PauliZ(0))
inputs = torch.rand((10, 4))
circuit(inputs)
print(dev.num_executions)
That’ll do it! Let me know if that helps 
My mistake @Kwokho_Ng! The new device API calls the number of times that a circuit has been executed on a device something different: it’s called simulations.
import pennylane as qml
from pennylane import numpy as np
import torch
#dev = qml.device("default.qubit.legacy", wires=4)
dev = qml.device("default.qubit", wires=4)
def test_circuit1(inputs):
    qml.AngleEmbedding(
        inputs,
        wires=range(0,4),
        rotation='X'
    )
@qml.qnode(dev)
def circuit(inputs):
    test_circuit1(inputs)
    return qml.expval(qml.PauliZ(0))
inputs = torch.rand((10, 4))
with qml.Tracker(dev) as t:
    circuit(inputs)
#print(dev.num_executions)
print(t.latest['simulations'])