ONNX(Open Neural Network Exchange),跨框架的模型中间表达框架。是一种针对机器学习所设计的开放式的文件格式,用于存储训练好的模型,使不同深度学习框架可以采用相同的格式存储模型并进行交互。目前支持PyTorch、Caffe2、MXNet、MNN、TensorRT,TensorFlow也有非官方的ONNX支持。

  • PyTorch: 便于快速实验的深度学习框架
  • Caffe2: 便于算法和模型大规模部署在移动设备端。
  1. #### Convert
  2. ```python
  3. ## torch.onnx模块包含将PyTorch模型导出为ONNX IR文件格式的功能。
  4. import torch
  5. import torch.onnx
  6. input = torch.rand(1, 1, 32, 32)
  7. model = LeNet5(1, 15, cfg.SOFTMAX, cfg.DROPOUT) ## Model
  8. ## dist model
  9. """
  10. model = nn.DataParallel(model)
  11. model.load_state_dict(torch.load(cfg.TEST_CKPT))
  12. output = model(input)
  13. """
  14. ## single model
  15. model.load_state_dict({k.replace('module.', ''): v for k,v in torch.load(cfg.TEST_CKPT).items()})
  16. output = model(input)
  17. ## set input_names and output_names
  18. """
  19. input_names = ["actual_input_1"] + ["learned_%d" % i for i in range(25)]
  20. output_names = ["output1"]
  21. """
  22. ## save '.onnx' or '.onnx.pd' or '.proto' format onnx file
  23. torch.onnx.export(model, input, "LeNet5.onnx", verbose=True)
  24. # input_names=input_names, output_names=output_names)

Load and Test

  1. ## PyTorch-1.1.0+onnx-1.5.0(其他版本)
  2. import onnx
  3. onnx_model = onnx.load('xxx.onnx')
  4. # Check that the ONNX IR is well formed
  5. onnx.checker.check_model(onnx_model)
  6. # Print a human readable representation of the graph
  7. onnx.helper.printable_graph(onnx_model.graph)

ONNX-to-Caffe2 算子重命名修复(backend.py)

  1. ## ONNX to Caffe2 Error: 'ValueError: Don't know how to translate op Unsqueeze'
  2. # 修改 xxx/onnx_caffe2/backend.py 文件
  3. _renamed_operators = {
  4. 'Caffe2ConvTranspose': 'ConvTranspose',
  5. 'GlobalMaxPool': 'MaxPool',
  6. 'GlobalAveragePool': 'AveragePool',
  7. 'Pad': 'PadImage',
  8. 'Neg': 'Negative',
  9. 'BatchNormalization': 'SpatialBN',
  10. 'InstanceNormalization': 'InstanceNorm',
  11. 'MatMul': 'BatchMatMul',
  12. 'Upsample': 'ResizeNearest',
  13. 'Equal': 'EQ',
  14. 'Unsqueeze': 'ExpandDims', # add this line
  15. }
  16. _global_renamed_attrs = {'kernel_shape': 'kernels'}
  17. _per_op_renamed_attrs = {
  18. 'Squeeze': {'axes': 'dims'},
  19. 'Transpose': {'perm': 'axes'},
  20. 'Upsample': {'mode': ''},
  21. 'Unsqueeze': {'axes': 'dims'}, # add this line
  22. }

ONNX-to-Caffe2

  1. # Install caffe2 and onnx_caffe2 packages
  2. pip install caffe2
  3. pip install onnx-caffe2
  4. # 1. cmd Convert
  5. convert-onnx-to-caffe2 $xxx.onnx --output pred_net.pd --init-net-output init_net.pd
  6. # 2. API Convert
  7. import onnx_caffe2.backend as backend
  8. init_net, pred_net = backend.Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model.graph, device='CPU')
  9. with open('init_net.pd', 'wb') as fd1:
  10. fd1.write(init_net.SerializeToString())
  11. with open('pred_net.pd', 'wb') as fd2:
  12. fd2.write(pred_net.SerializeToString())
  13. # Using python code test caffe2 model
  14. rep = backend.prepare(onnx_model, device='CPU')
  15. result = rep.run(np.random.randn(1, 1, 32, 32).astype(np.float32))
  16. print(result[0])

ONNX-to-MNN

  1. # Install MNN tools package
  2. pip install -U MNN
  3. # mnnconvert cmd (https://convertmodel.com/?tdsourcetag=s_pcqq_aiomsg)
  4. mnnconvert -f ONNX --modelFile $script_path/../LeNet5.onnx --MNNModel LeNet5.mnn --bizCode MNN
  5. # Using python code test mnn model
  6. git clone https://github.com/alibaba/MNN.git
  7. vim ./MNN/pymnn/examples/MNNEngineDemo/mobilenet_demo.py
  8. # inference
  9. interpreter = MNN.Interpreter('/home/wyf/codes/traffic-sign-classification/LeNet5.mnn')
  10. session = interpreter.createSession()
  11. input_tensor = interpreter.getSessionInput(session)
  12. tmp_img = MNN.Tensor((1, 1, 32, 32), MNN.Halide_Type_Float, img, MNN.Tensor_DimensionType_Caffe)
  13. # construct tensor from np.ndarray
  14. input_tensor.copyFrom(tmp_img)
  15. interpreter.runSession(session)
  16. output_tensor = interpreter.getSessionOutput(session)
  17. pred = np.argmax(output_tensor.getData())
  18. print(pred)

```