Thanks for the fast answer!
The _clear_forward method seems to work; I only have dimension issues left.
My input set is defined like this:
import numpy
import torch

num_inputs = 1
input_shape = (3, 32, 32)
input_set = numpy.random.uniform(-100, 100, size=(10, *input_shape))
x_test = tuple(numpy.random.uniform(-100, 100, size=(1, *input_shape)) for _ in range(num_inputs))
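To make the dimension issue concrete, these are the shapes involved (just a NumPy sanity check, nothing FHE-specific; the comments are the shapes I expect from the definitions above):

print(input_set.shape)     # (10, 3, 32, 32) -- 10 calibration samples
print(input_set[0].shape)  # (3, 32, 32)     -- one sample, no batch axis
print(x_test[0].shape)     # (1, 3, 32, 32)  -- one test input, with batch axis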
Then I try to access only the quantized module, but with the right dimensions:
from concrete.ml.quantization import PostTrainingAffineQuantization
from concrete.ml.torch.numpy_module import NumpyModule

dummy_input_for_tracing = torch.from_numpy(input_set[[0], ::]).float()
print(input_set[[0], ::].shape)  # (1, 3, 32, 32)
numpy_module = NumpyModule(quantized, dummy_input_for_tracing)
post_training_quant = PostTrainingAffineQuantization(n_bits=2, numpy_model=numpy_module)
quantized_module = post_training_quant.quantize_module(input_set)
# quantized_module = compile_onnx_model(quantized, input_set, n_bits=2)
quantized_module.forward(*x_test)  # , fhe="simulate")
print(quantized_module)
print("quantized_module ok")
print()
import numpy as np
from concrete import fhe

@fhe.compiler({"x": "encrypted"})
def f(x):
    print(x.shape)
    x = np.expand_dims(x, axis=0)
    print(x.shape)
    return quantized_module._clear_forward(x)

cfg = fhe.Configuration(show_graph=True, enable_unsafe_features=True)
circuit = f.compile(input_set, configuration=cfg)
simulation = circuit.simulate(*x_test)
But this gives me the following error:
RuntimeError: A subgraph within the function you are trying to compile cannot be fused because of a node, which is marked explicitly as non-fusable
with the following traceback:
%0 = x # EncryptedTensor<float64, shape=(3, 32, 32)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within this subgraph
VGG.py:176
%1 = expand_dims(%0, axis=0) # EncryptedTensor<float64, shape=(1, 3, 32, 32)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within this subgraph
VGG.py:170
%2 = ones() # EncryptedTensor<uint1, shape=(1, 3, 34, 34)>
%3 = 0 # ClearScalar<uint1>
%4 = multiply(%2, %3) # EncryptedTensor<uint1, shape=(1, 3, 34, 34)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ with this input node
/home/usr/Documents/project/venv_fhe/lib/python3.8/site-packages/concrete/ml/onnx/onnx_impl_utils.py:51
%5 = (%4[:, :, 1:33, 1:33] = %1) # EncryptedTensor<uint1, shape=(1, 3, 34, 34)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this node is not fusable
/home/usr/Documents/project/venv_fhe/lib/python3.8/site-packages/concrete/ml/onnx/onnx_impl_utils.py:61
%6 = [[[[-1 0 ... 0 0]]]] # ClearTensor<int2, shape=(64, 3, 3, 3)> @ /0/Conv.conv
%7 = conv2d(%5, %6, [0 0 0 0 0 ... 0 0 0 0 0], pads=[0, 0, 0, 0], strides=(1, 1), dilations=(1, 1), group=1) # EncryptedTensor<uint1, shape=(1, 64, 32, 32)> @ /0/Conv.conv
%8 = subgraph(%7) # EncryptedTensor<uint1, shape=(1, 64, 32, 32)>
%9 = ones() # EncryptedTensor<uint1, shape=(1, 64, 34, 34)>
%10 = 0 # ClearScalar<uint1>
%11 = multiply(%9, %10) # EncryptedTensor<uint1, shape=(1, 64, 34, 34)>
%12 = (%11[:, :, 1:33, 1:33] = %8) # EncryptedTensor<uint1, shape=(1, 64, 34, 34)>
%13 = [[[[0 0 0] ... [0 0 0]]]] # ClearTensor<int2, shape=(64, 64, 3, 3)> @ /2/Conv.conv
%14 = conv2d(%12, %13, [0 0 0 0 0 ... 0 0 0 0 0], pads=[0, 0, 0, 0], strides=(1, 1), dilations=(1, 1), group=1) # EncryptedTensor<uint1, shape=(1, 64, 32, 32)> @ /2/Conv.conv
%15 = subgraph(%14) # EncryptedTensor<uint1, shape=(1, 64, 32, 32)>
%16 = maxpool2d(%15, kernel_shape=(3, 3), strides=(3, 3), pads=(0, 0, 0, 0), dilations=(1, 1), ceil_mode=False) # EncryptedTensor<uint1, shape=(1, 64, 10, 10)>
%17 = subgraph(%16) # EncryptedTensor<uint1, shape=(1, 64, 10, 10)>
return %17
I added the np.expand_dims call to get x of shape (1, 3, 32, 32) instead of (3, 32, 32); without it, I previously got this error:
AssertionError: Expected number of channels in weight to be 32.0 (C / group). Got 3.
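The only workaround I can think of is to move the batch axis out of the compiled function and into the calibration data itself, so that each traced sample already has shape (1, 3, 32, 32). A rough sketch of what I mean (assuming compile iterates over the input set one sample at a time; f_batched is just an illustrative name, and I have not verified that this avoids the fusing error):

# Sketch only: give every calibration sample the batch axis up front,
# so x already arrives as (1, 3, 32, 32) inside the compiled function
# and no expand_dims is needed there.
inputset_batched = [numpy.expand_dims(sample, axis=0) for sample in input_set]

@fhe.compiler({"x": "encrypted"})
def f_batched(x):
    # x is expected to be (1, 3, 32, 32) here
    return quantized_module._clear_forward(x)

circuit = f_batched.compile(inputset_batched, configuration=cfg)
simulation = circuit.simulate(*x_test)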
Is there a method that I am not aware of to expand the dims of an encrypted array directly?