Hi,
I’m taking my first steps with Concrete and stumbled upon the following situation while implementing a small multi-layer perceptron. When the first and the second layer of the MLP have the same input shape, everything works fine. If I try to increase the input size of the first layer, I get a ValueError from an operand mismatch in the matmul operation of the second layer. To me, it seems like the forward member of the Layer class is only traced with the input dimensions of the first layer. Is there a way to enforce tracing of the forward function with both input shapes? Or is there another good solution to this problem?
Thank you for your help and this cool open-source library!
Best, Jonas
from concrete import fhe
import numpy as np
class MLP:
    def __init__(self):
        self.layers = []

    def add(self, layer):
        self.layers.append(layer)

    def forward(self, inputs: np.ndarray) -> np.ndarray:
        # pass the inputs through all layers in order
        for layer in self.layers:
            inputs = layer.forward(inputs)
        return inputs

    def compile(self):
        f = lambda inputs: self.forward(inputs)
        compiler = fhe.Compiler(f, {"inputs": "encrypted"})
        # example inputs used to trace and compile the circuit
        inputset = [np.random.randint(-128, 127, (1, self.layers[-1].dim[1])) for _ in range(100)]
        self.circuit = compiler.compile(inputset)
        return self.circuit
class Layer:
    def __init__(self, n_inputs: int, n_neurons: int):
        self.dim = (n_neurons, n_inputs)
        # all-ones weight matrix of shape (n_neurons, n_inputs)
        ones = [[1] * n_inputs] * n_neurons
        self.weights = np.array(ones)

    def forward(self, inputs: np.ndarray) -> np.ndarray:
        out = np.matmul(inputs, self.weights.T)
        return out
input_size = 4
model = MLP()
model.add(Layer(input_size, 2))  # first layer: 4 inputs -> 2 neurons
model.add(Layer(2, 2))           # second layer: 2 inputs -> 2 neurons
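# For comparison, the configuration where both layers have the same input
# shape, which compiles fine for me, would be (with input_size = 2 so the
# cleartext input below matches):
#   model.add(Layer(2, 2))
#   model.add(Layer(2, 2))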
print("CLEARTEXT")
inputs = np.array([[1] * input_size])
print("input:")
print(inputs)
output = model.forward(inputs)
print("output:")
print(output)
print("FHE")
circuit = model.compile()
circuit.keygen()
en_inputs = circuit.encrypt(inputs)
en_output = circuit.run(en_inputs)
output = circuit.decrypt(en_output)
print(f"output:")
print(output)
print(f"circuit:")
print(circuit)
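For reference, this is how I understand the shapes in the example above; it is just a plain NumPy check of the cleartext path (no FHE involved), with the weights hard-coded to match the two layers I construct:

import numpy as np

x = np.ones((1, 4), dtype=np.int64)   # model input, matches the first layer (4 inputs)
w1 = np.ones((2, 4), dtype=np.int64)  # first layer weights: (n_neurons=2, n_inputs=4)
w2 = np.ones((2, 2), dtype=np.int64)  # second layer weights: (n_neurons=2, n_inputs=2)

h = np.matmul(x, w1.T)   # (1, 4) x (4, 2) -> (1, 2)
y = np.matmul(h, w2.T)   # (1, 2) x (2, 2) -> (1, 2)
print(h.shape, y.shape)  # (1, 2) (1, 2)

# The inputset built in MLP.compile uses self.layers[-1].dim[1] == 2,
# so the traced examples have shape (1, 2) rather than (1, 4).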