"""
`Introduction to ONNX <intro_onnx.html>`_ ||
**Exporting a PyTorch model to ONNX** ||
`Extending the ONNX exporter operator support <onnx_registry_tutorial.html>`_ ||
`Export a model with control flow to ONNX <export_control_flow_model_to_onnx_tutorial.html>`_

Export a PyTorch model to ONNX
==============================

**Author**: `Ti-Tai Wang <https://github.com/titaiwangms>`_, `Justin Chu <[email protected]>`_, `Thiago Crepaldi <https://github.com/thiagocrepaldi>`_.

.. note::
    As of PyTorch 2.5, there are two versions of ONNX Exporter.

    * ``torch.onnx.export(..., dynamo=True)`` is the newest (still in beta) exporter using ``torch.export`` and Torch FX to capture the graph. It was released with PyTorch 2.5
    * ``torch.onnx.export`` uses TorchScript and has been available since PyTorch 1.2.0

"""
1920
###############################################################################
# In the `60 Minute Blitz <https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html>`_,
# we had the opportunity to learn about PyTorch at a high level and train a small neural network to classify images.
# In this tutorial, we are going to expand this to describe how to convert a model defined in PyTorch into the
# ONNX format using the ``torch.onnx.export(..., dynamo=True)`` ONNX exporter.
#
# While PyTorch is great for iterating on the development of models, the model can be deployed to production
# using different formats, including `ONNX <https://onnx.ai/>`_ (Open Neural Network Exchange)!
#
# .. code-block:: bash
#
#    pip install --upgrade onnx onnxscript
#
# 2. Author a simple image classifier model
# -----------------------------------------
6262import torch .nn .functional as F
6363
6464
class ImageClassifierModel(nn.Module):
    """LeNet-style CNN that classifies 1x32x32 images into 10 classes.

    Input:  tensor of shape ``(batch, 1, 32, 32)``.
    Output: raw (unnormalized) class scores of shape ``(batch, 10)``.
    """

    def __init__(self):
        super().__init__()
        # Two convolutional feature extractors followed by a 3-layer MLP head.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # After two conv+pool stages a 32x32 input becomes 16 maps of 5x5,
        # hence the 16 * 5 * 5 input features of the first linear layer.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x: torch.Tensor):
        # conv -> ReLU -> 2x2 max pool, twice, to downsample the image.
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # Flatten all dims except batch so the MLP head can consume it.
        x = torch.flatten(x, 1)
        # NOTE(review): the fc1/fc2 activations fall in a hunk the diff view
        # skipped; ReLU is reconstructed from the standard tutorial — confirm.
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
8382

######################################################################
# 3. Export the model to ONNX format
# ----------------------------------
#
# Now that we have our model defined, we need to instantiate it and create a random 32x32 input.
# Next, we can export the model to ONNX format.

torch_model = ImageClassifierModel()
# Create example inputs for exporting the model. The inputs should be a tuple of tensors.
example_inputs = (torch.randn(1, 1, 32, 32),)
# dynamo=True selects the torch.export/FX-based exporter (PyTorch 2.5+).
onnx_program = torch.onnx.export(torch_model, example_inputs, dynamo=True)

######################################################################
# 3.5. (Optional) Optimize the ONNX model
# ---------------------------------------
#
# The ONNX model can be optimized with constant folding, and elimination of redundant nodes.
# The optimization is done in-place, so the original ONNX model is modified.

onnx_program.optimize()
94104
######################################################################
# As we can see, we didn't need any code change to the model.
# Although having the exported model loaded in memory is useful in many applications,
# we can save it to disk with the following code:

onnx_program.save("image_classifier_model.onnx")

######################################################################
# You can load the ONNX file back into memory and check if it is well formed with the following code:

import onnx

onnx_model = onnx.load("image_classifier_model.onnx")
# Raises if the protobuf does not conform to the ONNX specification.
onnx.checker.check_model(onnx_model)
113124
######################################################################
#    :align: center
#
#
# Once Netron is open, we can drag and drop our ``image_classifier_model.onnx`` file into the browser or select it after
# clicking the **Open model** button.
#
# .. image:: ../../_static/img/onnx/image_classifier_onnx_model_on_netron_web_ui.png

import onnxruntime

# ONNX Runtime consumes NumPy arrays; force=True also handles tensors that
# live on an accelerator or require grad by copying to CPU first.
onnx_inputs = [tensor.numpy(force=True) for tensor in example_inputs]
print(f"Input length: {len(onnx_inputs)}")
print(f"Sample input: {onnx_inputs}")

ort_session = onnxruntime.InferenceSession(
    "./image_classifier_model.onnx", providers=["CPUExecutionProvider"]
)

# Map each graph input name to the corresponding example array, in order.
onnxruntime_input = {
    input_arg.name: input_value
    for input_arg, input_value in zip(ort_session.get_inputs(), onnx_inputs)
}

# ONNX Runtime returns a list of outputs
onnxruntime_outputs = ort_session.run(None, onnxruntime_input)[0]

####################################################################
# For that, we need to execute the PyTorch model with the same input and compare the results with ONNX Runtime's.
# Before comparing the results, we need to convert the PyTorch's output to match ONNX's format.

# Re-run the eager PyTorch model on the exact inputs used for export.
torch_outputs = torch_model(*example_inputs)

assert len(torch_outputs) == len(onnxruntime_outputs)
185195for torch_output , onnxruntime_output in zip (torch_outputs , onnxruntime_outputs ):
#
# .. toctree::
#    :hidden:
#