Required constant "version_number" was not found in the model file.

I trained a model with PyTorch and converted it to .onnx, but when I import the .onnx into Unity, it shows:

Required constant "version_number" was not found in the model file.

(screenshot of the Unity import error attached)
Then I used WinML to see what I'm missing. The constant is listed on the model that works in Unity, but my model is missing it.
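
For comparison outside of WinML, I assume the onnx Python package can list what each model exposes. This is just a sketch; "WorkingModel.onnx" is a placeholder name for the model that imports into Unity without errors:

import onnx

# Compare the outputs and stored constants of the two models.
for label, path in [("working", "WorkingModel.onnx"), ("mine", "ImageClassifier.onnx")]:
    graph = onnx.load(path).graph
    print(label, "outputs:     ", [o.name for o in graph.output])
    print(label, "initializers:", [i.name for i in graph.initializer])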

How can I add those outputs to my model?
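
Is something like the following the right direction? This is only a sketch using the onnx helper API; the scalar int64 type and the value 1 are guesses, since I don't know what Unity's importer actually expects for version_number.

import onnx
from onnx import helper, TensorProto

model_proto = onnx.load("ImageClassifier.onnx")
graph = model_proto.graph

# Constant node producing a scalar int64 tensor called "version_number".
# The value 1 is a guess; I don't know what value the importer expects.
const_node = helper.make_node(
    "Constant",
    inputs=[],
    outputs=["version_number"],
    value=helper.make_tensor("version_number_value", TensorProto.INT64, dims=[], vals=[1]),
)
graph.node.append(const_node)

# Expose the new tensor as an extra graph output so it shows up like in the working model.
graph.output.append(helper.make_tensor_value_info("version_number", TensorProto.INT64, []))

onnx.checker.check_model(model_proto)
onnx.save(model_proto, "ImageClassifier_patched.onnx")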

Here's my code on the PyTorch side.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam


class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(12)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=5, stride=1, padding=1)
        self.bn4 = nn.BatchNorm2d(24)
        self.conv5 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=5, stride=1, padding=1)
        self.bn5 = nn.BatchNorm2d(24)
        # For 32x32 inputs: 32 -> 30 -> 28 -> pool to 14 -> 12 -> 10, so 24*10*10 features
        self.fc1 = nn.Linear(24 * 10 * 10, 10)

    def forward(self, input):
        output = F.relu(self.bn1(self.conv1(input)))
        output = F.relu(self.bn2(self.conv2(output)))
        output = self.pool(output)
        output = F.relu(self.bn4(self.conv4(output)))
        output = F.relu(self.bn5(self.conv5(output)))
        output = output.view(-1, 24 * 10 * 10)
        output = self.fc1(output)

        return output

# Instantiate a neural network model
model = Network()

print(model)

# Define the loss function with Classification Cross-Entropy loss and an optimizer with Adam optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.0001)

# Function to convert the model to ONNX
def Convert_ONNX():

    # set the model to inference mode
    model.eval()

    # Let's create a dummy input tensor 
    dummy_input = torch.randn(2, 3, 32, 32, requires_grad=True) 

    inputNames = [ "actual_input_1" ]
    outputNames = [ "output1" ]

    # Export the model  
    torch.onnx.export(
        model,                      # model being run
        dummy_input,                # model input (or a tuple for multiple inputs)
        "ImageClassifier.onnx",     # where to save the model
        export_params=True,         # store the trained parameter weights inside the model file
        opset_version=10,           # the ONNX opset version to export to
        do_constant_folding=True,   # whether to execute constant folding for optimization
        input_names=inputNames,     # the model's input names
        output_names=outputNames,   # the model's output names
    )
    print(" ")
    print('Model has been converted to ONNX')
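
For what it's worth, a quick sanity check of the exported file before importing it into Unity could look something like this (a sketch, assuming the onnx and onnxruntime packages are installed):

import numpy as np
import onnx
import onnxruntime as ort

# Validate the exported file and run it once outside of Unity.
onnx_model = onnx.load("ImageClassifier.onnx")
onnx.checker.check_model(onnx_model)

session = ort.InferenceSession("ImageClassifier.onnx", providers=["CPUExecutionProvider"])
dummy = np.random.randn(2, 3, 32, 32).astype(np.float32)
outputs = session.run(["output1"], {"actual_input_1": dummy})
print("output shape:", outputs[0].shape)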

I have the same question. Please tell me if someone knows the answer. Thanks!