pytorch  1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.

sample.py
import numpy as np

from torch import nn
from torch.autograd import Variable, Function
import torch.onnx

import onnx
import caffe2.python.onnx.backend


class MyFunction(Function):
    @staticmethod
    def forward(ctx, x, y):
        return x*x + y

    @staticmethod
    def symbolic(graph, x, y):
        x2 = graph.at("mul", x, x)
        r = graph.at("add", x2, y)
        # x, y, x2, and r are 'Node' objects
        # print(r) or print(graph) will print out a textual representation for debugging.
        # this representation will be converted to ONNX protobufs on export.
        return r


class MyModule(nn.Module):
    def forward(self, x, y):
        # you can combine your ATen ops with standard onnx ones
        x = nn.ReLU()(x)
        return MyFunction.apply(x, y)


torch.onnx.export(MyModule(),
                  (Variable(torch.ones(3, 4)), Variable(torch.ones(3, 4))),
                  "output.onnx",
                  verbose=True)

# prints the graph for debugging:
# graph(%1 : Float(3, 4)
#       %2 : Float(3, 4)) {
#   %3 : Float(3, 4) = Relu(%1), uses = [%4.i0, %4.i1];
#   %4 : UNKNOWN_TYPE = ATen[operator=mul](%3, %3), uses = [%5.i0];
#   %5 : Float(3, 4) = ATen[operator=add](%4, %2), uses = [%0.i0];
#   return (%5);
# }

graph = onnx.load("output.onnx")

a = np.random.randn(3, 4).astype(np.float32)
b = np.random.randn(3, 4).astype(np.float32)

# run the exported model through the Caffe2 ONNX backend
prepared_backend = caffe2.python.onnx.backend.prepare(graph)
W = {graph.graph.input[0].name: a, graph.graph.input[1].name: b}
c2_out = prepared_backend.run(W)[0]

# compare against a NumPy reference of the same computation: relu(a)**2 + b
x = np.maximum(a, 0)
r = x*x + b
np.testing.assert_array_almost_equal(r, c2_out)
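The symbolic method above uses graph.at, which records the generic ATen fallback nodes visible in the printed graph (ATen[operator=mul], ATen[operator=add]). Where standard ONNX operators exist, a symbolic can instead emit them directly through the graph context's op method; a minimal sketch of the same symbolic in that style, not part of sample.py:

@staticmethod
def symbolic(g, x, y):
    # emit standard ONNX Mul/Add nodes instead of ATen fallbacks
    x2 = g.op("Mul", x, x)
    return g.op("Add", x2, y)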
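Independently of the Caffe2 backend, the exported file can also be pretty-printed with the onnx helper API for quick inspection; a minimal sketch, reusing the "output.onnx" path from the sample:

import onnx
from onnx import helper

# load the exported model and print a human-readable form of its graph
model = onnx.load("output.onnx")
print(helper.printable_graph(model.graph))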
Cross-references resolved by the generated documentation:

torch.onnx.export(model, args, f, export_params=True, verbose=False, training=TrainingMode.EVAL, input_names=None, output_names=None, aten=False, export_raw_ir=False, operator_export_type=None, opset_version=None, _retain_param_name=True, do_constant_folding=True, example_outputs=None, strip_doc_string=True, dynamic_axes=None, keep_initializers_as_inputs=None, custom_opsets=None, enable_onnx_checker=True, use_external_data_format=False)
    Definition: __init__.py:37

torch.autograd.Function
    To use custom autograd operations, implement a Function subclass with static forward and backward functions.
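The sample defines only forward and symbolic because MyModule is consumed by the ONNX exporter; to run such an op under autograd itself, a static backward is required as well. A minimal sketch of the same x*x + y computation with a hand-written backward (the class name MySquareAdd is illustrative, not from sample.py):

import torch
from torch.autograd import Function

class MySquareAdd(Function):
    @staticmethod
    def forward(ctx, x, y):
        ctx.save_for_backward(x)
        return x * x + y

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        # d/dx (x*x + y) = 2*x; d/dy (x*x + y) = 1
        return 2 * x * grad_output, grad_output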