pytorch  1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.

torch::jit Namespace Reference

Namespaces

namespace  _async
 
namespace  _builtins
 
namespace  _freeze
 
namespace  _fuser
 
namespace  _logging
 
namespace  _pickle
 
namespace  _recursive
 
namespace  _script
 
namespace  _serialization
 
namespace  _state
 
namespace  _trace
 
namespace  annotations
 
namespace  aten
 
namespace  attr
 
namespace  cuda
 
namespace  detail
 
namespace  frontend
 
namespace  fuser
 Main PyTorch JIT Fuser namespace.
 
namespace  graph_rewrite_helper
 
namespace  logging
 
namespace  mobile
 
namespace  onnx
 
namespace  prim
 
namespace  python
 
namespace  quantized
 
namespace  script
 
namespace  SubgraphUtils
 
namespace  supported_ops
 
namespace  tensorexpr
 
namespace  tracer
 
namespace  unsupported_tensor_ops
 

Classes

class  AliasDb
 Alias analysis pass.
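A pass typically consults the database before reordering or rewriting nodes. A minimal sketch, assuming `graph` is an existing `std::shared_ptr<Graph>` and `a`, `b` are two of its `Value*`s:

```cpp
#include <torch/csrc/jit/ir/alias_analysis.h>

using namespace torch::jit;

// Returns true when the two values are provably distinct, so a rewrite of
// one cannot be observed through the other.
bool safeToRewrite(std::shared_ptr<Graph>& graph, Value* a, Value* b) {
  AliasDb aliasDb(graph);  // builds the points-to graph for `graph`
  return !aliasDb.mayAlias(a, b);
}
```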
 
struct  Apply
 
struct  ArgumentInfo
 
struct  ArgumentSpec
 
struct  ArgumentSpecCreator
 
struct  Assert
 
struct  Assign
 
struct  Attribute
 
struct  AttributeValue
 
struct  AugAssign
 
struct  AugAssignKind
 
struct  AutogradZeroSpecializer
 
class  backend
 
struct  BailoutBlock
 
struct  BailOutGraphBuilderForNode
 
struct  BailOutInserter
 
struct  BinOp
 
struct  Block
 
struct  BooleanDispatchValue
 
struct  Break
 
struct  BuiltinFunction
 
struct  BuiltinFunctionRegistry
 
struct  BuiltinModule
 
struct  BuiltinOpFunction
 
struct  Call
 
struct  CanEmitInline
 
struct  CastValue
 
struct  ChunkOutput
 
struct  ClassDef
 
struct  ClassNamespaceValue
 
struct  ClassValue
 
struct  ClosureValue
 
struct  Code
 
struct  CodeImpl
 
struct  CodeTemplate
 
struct  CompilationUnit
 
struct  CompleteArgumentInfo
 
struct  CompleteArgumentInfoPOD
 
struct  CompleteArgumentSpec
 
struct  Compound
 
class  ConcreteModuleType
 
class  ConcreteModuleTypeBuilder
 
struct  ConcretePythonOp
 
class  ConcreteSourceRangeUnpickler
 
struct  CondValue
 
struct  Const
 
struct  const_value_list_with_types
 
struct  constant_not_supported_error
 
struct  ConstantParameterList
 
struct  ConstantTableValue
 
struct  Continue
 
struct  ControlFlowLoadStores
 
struct  ConvBNParameters
 
class  CUDAEvent
 
struct  CUDAPythonModuleValue
 
class  CUDAStream
 
class  DeadCodeEliminator
 
struct  Decl
 
struct  DeepCopyMemoTable
 
struct  Def
 
struct  DefContext
 
struct  Delete
 
class  DepthFirstGraphNodeIterator
 
struct  DictComp
 
struct  DictLiteral
 
struct  Dots
 
struct  Element
 
struct  EnableProfilingGuard
 
struct  EnumClassHash
 
struct  Environment
 
struct  EqualNode
 
struct  EqualType
 
struct  EraseLoadStores
 
struct  ErrorReport
 
struct  ExceptionMessage
 
struct  ExceptionMessageValue
 
struct  ExceptionValue
 
struct  ExecutionPlan
 
struct  ExitPair
 
struct  ExitTransformer
 This pass currently transforms the Graph so that all exit nodes targeting a block location are removed from the graph and unified.
 
struct  Expr
 
struct  ExprStmt
 
struct  For
 
struct  FuncArg
 
struct  Function
 
struct  FunctionResolver
 
struct  FunctionValue
 
struct  generic_graph_node_list
 
struct  generic_graph_node_list_iterator
 
struct  Global
 
struct  Gradient
 
struct  GradientPair
 
struct  Graph
 
struct  GraphAttr
 
struct  GraphExecutor
 
struct  GraphExecutorImpl
 
struct  GraphExecutorImplBase
 
struct  GraphExecutorState
 
struct  GraphFunction
 
struct  GraphOptimizerEnabledGuard
 
struct  GraphsAttr
 
struct  GuardElimination
 
struct  GuardInserter
 
struct  HashNode
 
struct  HashType
 
struct  Ident
 
struct  If
 
struct  IfView
 
struct  InferenceModule
 Static Runtime supports two execution modes.
 
struct  InferenceModuleOptions
 
struct  InlinedCallStack
 
struct  Instruction
 
struct  InterpreterContinuation
 
struct  InterpreterState
 
struct  InterpreterStateImpl
 
struct  IRAttributeError
 
class  IRParser
 
struct  IterableTree
 
struct  JITException
 
struct  Lexer
 
struct  List
 
struct  ListComp
 
struct  ListIterator
 
struct  ListLiteral
 
struct  LivenessAnalyzer
 
struct  LoopContinuations
 
struct  LoopsPeeler
 
struct  LoopView
 
struct  MagicMethod
 
struct  Match
 A structure describing a match of a pattern in a graph.
 
struct  MatchedSchema
 
struct  Maybe
 
class  MemoryDAG
 
class  MemoryDAGBuilder
 Helper to build up the points-to graph.
 
class  MemoryPlanner
 There are three types of ops in a processed graph in Static Runtime.
 
struct  Method
 
struct  MethodValue
 
struct  MiniEnvironment
 
struct  Module
 
struct  ModuleDictMethod
 
struct  ModuleInstanceInfo
 ModuleInstanceInfo is a structure that records the module type and instance name.
 
struct  ModuleSelf
 
struct  ModuleValue
 
struct  MutationRemover
 
struct  Named
 
struct  NamedTupleConstructor
 
struct  NamedValue
 A value with optional extra name and location information.
 
class  NameMangler
 
struct  NativeResolver
 
struct  Node
 
struct  NoneValue
 
struct  Object
 
class  ObjectAttributeError
 
struct  Operator
 
struct  OperatorSet
 
struct  OpsValue
 
struct  Param
 
struct  ParsedLiteral
 
struct  Parser
 
struct  ParserImpl
 
struct  Pass
 
struct  PassManager
 
struct  PeepholeOptimizeAliasSensitiveImpl
 
struct  PeepholeOptimizeImpl
 
struct  PeepholeOptimizeListIdiomsImpl
 
class  Pickler
 
struct  PreprocessGraph
 
struct  pretty_tree
 
struct  PrintDepsTable
 
struct  PrintValue
 
class  ProcessedNode
 
struct  ProfileIValueOp
 
struct  ProfileOp
 
struct  ProfilingGraphExecutorImpl
 
struct  ProfilingRecord
 
struct  propagation_error
 
struct  Property
 
struct  PythonClassValue
 
struct  PythonExceptionValue
 
struct  PythonFunctionGuard
 
struct  PythonFutureWrapper
 
struct  PythonModuleValue
 
struct  PythonOp
 
struct  PythonPrint
 
struct  PythonPrintImpl
 
struct  PythonSliceClass
 
struct  PythonValue
 
class  PyTorchBackendInterface
 
struct  QuantFusionInfo
 
struct  Raise
 
struct  RangeValue
 
struct  RecursiveMethodCallError
 
struct  Refinement
 
struct  RefinementSet
 
struct  RegisterCudaFuseGraph
 
struct  RegisterOperators
 Registration class for new operators.
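A hedged sketch of registering a custom operator through this class; the schema `my::relu_add` is made up for illustration, and the `Operator` constructor taking a schema string, an `Operation` (`void(Stack*)`, see the Typedefs section), and an alias-analysis kind is assumed from the 1.8 headers:

```cpp
#include <torch/csrc/jit/runtime/custom_operator.h>

using namespace torch::jit;

// Hypothetical op: pops two tensors, pushes relu(a + b).
static RegisterOperators reg({Operator(
    "my::relu_add(Tensor a, Tensor b) -> Tensor",
    [](Stack* stack) {
      at::Tensor a, b;
      pop(stack, a, b);             // variadic pop off the interpreter stack
      push(stack, at::relu(a + b));
    },
    c10::AliasAnalysisKind::FROM_SCHEMA)});
```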
 
struct  RegisterPostPass
 
struct  Resolver
 
class  ResourceGuard
 
struct  Return
 
struct  ReverseDetails
 
struct  RewritePatternDescr
 Rewrite pattern descriptor.
 
struct  ScalarAttributeValue
 
struct  schema_match_error
 
struct  SchemaTypeParser
 
struct  Scope
 
struct  ScriptClass
 
class  ScriptModuleSerializer
 
class  ScriptTypeParser
 
struct  Select
 
struct  Self
 
struct  SetPartitioningHelper
 
struct  ShapeSymbolTable
 
struct  SharedParserData
 
struct  SimpleSelf
 
struct  SimpleValue
 
struct  SliceExpr
 
struct  SliceValue
 
struct  Slot
 
struct  slot_dict_impl
 
struct  slot_iterator_impl
 
struct  slot_list_impl
 
struct  Source
 
struct  SourceImporter
 
struct  SourceImporterImpl
 
struct  SourceRange
 
class  SourceRangeDeserializer
 
struct  SourceRangeFactory
 
class  SourceRangePickler
 
class  SourceRangeSerializer
 
class  SourceRangeUnpickler
 
struct  SpecialFormValue
 
struct  SROperatorFunctor
 
struct  StackEntry
 
struct  Starred
 
class  StaticRuntime
 
struct  StaticRuntimeOptions
 
struct  Stmt
 
struct  String
 
struct  StringLiteral
 
struct  StrongFunctionPtr
 
class  SubgraphRewriter
 A class implementing an API for pattern-based subgraph rewrites.
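A short sketch of the rewrite API; the pattern/replacement IR strings and the `my::mul_mul` op are illustrative placeholders:

```cpp
#include <torch/csrc/jit/passes/subgraph_rewrite.h>

void fuseDoubleMul(std::shared_ptr<torch::jit::Graph>& graph) {
  const std::string pattern = R"IR(
graph(%x, %y):
  %t = aten::mul(%x, %y)
  %r = aten::mul(%t, %y)
  return (%r))IR";
  const std::string replacement = R"IR(
graph(%x, %y):
  %r = my::mul_mul(%x, %y)
  return (%r))IR";

  torch::jit::SubgraphRewriter rewriter;
  rewriter.RegisterRewritePattern(pattern, replacement);
  rewriter.runOnGraph(graph);  // rewrites every match in place
}
```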
 
struct  Subscript
 
struct  SugaredDict
 
struct  SugaredEnumClass
 
struct  SugaredTupleValue
 
struct  SugaredValue
 
struct  Suspend
 
struct  SymbolRange
 
struct  TaggedRange
 
struct  TemplateEnv
 
struct  TensorCastValue
 
class  TensorExprFuser
 
struct  TernaryIf
 
struct  TLSCurrentInterpreterGuard
 
struct  to_ir
 
struct  Token
 
struct  TokenTrie
 
struct  Tree
 
struct  TreeToken
 
struct  TreeView
 
struct  tuple_slice
 
struct  TupleLiteral
 
struct  TuplePacker
 
struct  TuplePacker< 0, Args... >
 
struct  TypedIValue
 
class  TypeNameUniquer
 
struct  UnaryOp
 
class  Unpickler
 
class  unwrapping_shared_ptr
 
struct  Use
 
struct  Value
 
struct  Var
 
struct  variable_tensor_list
 
struct  VarWithType
 
struct  VectorAttributeValue
 
class  VectorReader
 
struct  While
 
struct  With
 
struct  WithCurrentNode
 
struct  WithCurrentScope
 A utility class for setting temporary scopes.
 
struct  WithInsertPoint
 A utility class for setting temporary insertion points.
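Both guards follow the usual RAII pattern; a minimal sketch, assuming `node` already belongs to `graph`:

```cpp
#include <torch/csrc/jit/ir/ir.h>

void insertOneBefore(torch::jit::Graph& graph, torch::jit::Node* node) {
  torch::jit::WithInsertPoint guard(node);  // new nodes now land before `node`
  graph.insertConstant(1);
}  // previous insertion point restored when `guard` is destroyed
```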
 
struct  WithItem
 
struct  WithLoopStatus
 
struct  Wrap
 
struct  WriteableTensorData
 

Typedefs

using Stack = std::vector< at::IValue >
 
using Kwargs = std::unordered_map< std::string, at::IValue >
 
using TaskLauncher = std::function< void(std::function< void()>)>
 
using Operation = std::function< void(Stack *)>
 
using ResolverPtr = std::shared_ptr< Resolver >
 
using ObjectPtr = c10::intrusive_ptr< c10::ivalue::Object >
 
using ExtraFilesMap = std::unordered_map< std::string, std::string >
 
using ModulePtr = c10::intrusive_ptr< c10::ivalue::Object >
 
using NameModule = Named< Module >
 
using NameValue = Named< IValue >
 
using NameTensor = Named< at::Tensor >
 
using module_list = slot_list_impl< detail::ModulePolicy >
 
using named_module_list = slot_list_impl< detail::NamedPolicy< detail::ModulePolicy > >
 
using parameter_list = slot_list_impl< detail::ParameterPolicy >
 
using named_parameter_list = slot_list_impl< detail::NamedPolicy< detail::ParameterPolicy > >
 
using attribute_list = slot_list_impl< detail::AttributePolicy >
 
using named_attribute_list = slot_list_impl< detail::NamedPolicy< detail::AttributePolicy > >
 
using buffer_list = slot_list_impl< detail::BufferPolicy >
 
using named_buffer_list = slot_list_impl< detail::NamedPolicy< detail::BufferPolicy > >
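These slot-list aliases are the return types of `Module`'s accessors; a small sketch of iterating one, assuming the `Named<at::Tensor>` field names (`name`, `value`) of the 1.8 API:

```cpp
#include <torch/script.h>
#include <iostream>

void dumpParameters(const torch::jit::Module& module) {
  // named_parameters() recursively yields Named<at::Tensor> entries.
  for (const auto& p : module.named_parameters()) {
    std::cout << p.name << ": " << p.value.sizes() << "\n";
  }
}
```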
 
using ModuleLookup = std::function< Module(const std::vector< std::string > &)>
 
typedef Value JitValue
 
typedef Node JitOp
 
using ValueEnvironment = MiniEnvironment< Value * >
 
using TypeEnvironment = MiniEnvironment< TypePtr >
 
using FunctionTable = std::unordered_map< std::string, Function & >
 
using ValueTable = std::unordered_map< std::string, SugaredValuePtr >
 
using TypeTable = std::unordered_map< std::string, TypePtr >
 
using AttributeMap = std::unordered_map< std::string, Const >
 
using ListAttributeMap = std::unordered_map< std::string, std::vector< Const > >
 
using TokenTrieRef = std::unique_ptr< TokenTrie >
 
using TypePtr = c10::TypePtr
 
using SourceRangeRecords = std::vector< TaggedRange >
 
using SugaredValuePtr = std::shared_ptr< SugaredValue >
 
using TreeRef = c10::intrusive_ptr< Tree >
 
using TreeList = at::SmallVector< TreeRef, 4 >
 
using FloatAttr = ScalarAttributeValue< double, AttributeKind::f >
 
using FloatsAttr = VectorAttributeValue< double, AttributeKind::fs >
 
using IntAttr = ScalarAttributeValue< int64_t, AttributeKind::i >
 
using IntsAttr = VectorAttributeValue< int64_t, AttributeKind::is >
 
using StringAttr = ScalarAttributeValue< std::string, AttributeKind::s >
 
using StringsAttr = VectorAttributeValue< std::string, AttributeKind::ss >
 
using TensorAttr = ScalarAttributeValue< at::Tensor, AttributeKind::t >
 
using TensorsAttr = VectorAttributeValue< at::Tensor, AttributeKind::ts >
 
using TypeAttr = ScalarAttributeValue< c10::TypePtr, AttributeKind::ty >
 
using TypesAttr = VectorAttributeValue< c10::TypePtr, AttributeKind::tys >
 
using IValueAttr = ScalarAttributeValue< at::IValue, AttributeKind::ival >
 
using graph_node_list = generic_graph_node_list< Node >
 
using const_graph_node_list = generic_graph_node_list< const Node >
 
using graph_node_list_iterator = generic_graph_node_list_iterator< Node >
 
using const_graph_node_list_iterator = generic_graph_node_list_iterator< const Node >
 
using node_set = std::set< const Node * >
 
using node_list = std::vector< Node * >
 
using value_list = std::vector< Value * >
 
using use_list = std::vector< Use >
 
template<typename T >
using ArrayRef = at::ArrayRef< T >
 
using NodeKind = Symbol
 
using topo_position_t = int64_t
 
using ValueSet = std::unordered_set< const Value * >
 
using ScopePtr = c10::intrusive_ptr< Scope >
 
using InlinedCallStackPtr = c10::intrusive_ptr< InlinedCallStack >
 InlinedCallStack is an element in a list representing the callstack of functions that have been inlined.
 
using InlinedCallStackEntry = std::tuple< Function *, SourceRange, c10::optional< ModuleInstanceInfo > >
 
using Tensor = at::Tensor
 
using SparseBitVector = ::c10::SparseBitVector< 256 >
 
using ValueToParamPairMap = std::map< Value *, std::pair< std::string, IValue > >
 
using ParamMap = std::map< std::string, IValue >
 
using GraphPass = std::function< void(std::shared_ptr< Graph > &)>
 
using GraphPassNameType = unsigned int
 
using GraphPassEntry = std::pair< GraphPass, GraphPassNameType >
 
using RegisterPass = RegisterPostPass
 
using PrePackingOpsFilterFn = std::function< bool(Node *)>
 
using AtenFuncArgs = std::vector< FuncArg >
 
using CallFuncArgs = std::vector< FuncArg >
 
using ModuleMethodVector = std::vector< std::pair< Module, std::string > >
 
using QParamVector = std::vector< std::pair< std::string, IValue > >
 
using ModuleQConfigMap = std::unordered_map< ModulePtr, c10::optional< QConfig > >
 
using QConfig = std::tuple< Module, Module >
 
using QConfigDict = std::unordered_map< std::string, c10::optional< QConfig > >
 
using MatchFilter = std::function< bool(const Match &, const std::unordered_map< std::string, Value * > &)>
 
using tensor_type_converter_t = c10::function_ref< TensorTypePtr(const TensorTypePtr &t)>
 
using InferredType = c10::InferredType
 
using ResolutionCallback = std::function< py::object(std::string)>
 
using FunctionDefaults = std::unordered_map< std::string, py::object >
 
using ClassMethodDefaults = std::unordered_map< std::string, FunctionDefaults >
 
using value_map = std::unordered_map< Value *, Value * >
 
using value_set = std::unordered_set< Value * >
 
using OperationCreator = Operation(*)(const Node *)
 
using AliasAnalysisKind = c10::AliasAnalysisKind
 
using PrintHandler = void(*)(const std::string &)
 
using Dimension = int64_t
 
using SROperator = std::function< void(ProcessedNode *)>
 
using SROpFunctor = SROperator(*)(Node *n)
 
using RawDataExportMap = std::unordered_map< std::string, at::Tensor >
 
using SymbolDimMap = std::map< c10::ShapeSymbol, std::string >
 
using ExportModuleExtraFilesHook = std::function< ExtraFilesMap(const Module &)>
 
using ExportModuleMobileInfoConverter = std::function< c10::Dict< std::string, std::string >(const Module &, const std::unordered_map< std::string, std::string > &)>
 
using SourceLoader = std::function< std::shared_ptr< Source >(const std::string &)>
 
using TypeResolver = std::function< c10::StrongTypePtr(const c10::QualifiedName &)>
 
using ObjLoader = std::function< c10::intrusive_ptr< c10::ivalue::Object >(at::StrongTypePtr, IValue)>
 

Enumerations

enum class  IterableModuleKind { NONE , LIST , DICT }
 
enum class  ExitStatus { WILL , MIGHT , WONT , THROWS }
 
enum class  Transform { Returns , LoopContinuations }
 
enum  NoneStatus { ALWAYS , MAYBE , NEVER }
 
enum class  LoopStatus { NOT_IN_LOOP , IN_LOOP , IN_UNROLLED_LOOP }
 
enum  TokenKind {
  TK_DUMMY_START = 256 , TK_EOF , TK_WHITESPACE , TK_WHITESPACE_EOF ,
  TK_NUMBER , TK_NEWLINE , TK_INDENT , TK_DEDENT ,
  TK_DEF , TK_EQUIVALENT , TK_IDENT , TK_STRING ,
  TK_STRINGLITERAL , TK_CONST , TK_LIST , TK_DICT ,
  TK_OPTION , TK_APPLY , TK_COMPREHENSION , TK_RANGE_CONSTRAINT ,
  TK_PARAM , TK_INFERRED , TK_ACCESS , TK_ASSIGN ,
  TK_AUG_ASSIGN , TK_ATTRIBUTE , TK_IF , TK_ELSE ,
  TK_ELIF , TK_WHILE , TK_EXPR_STMT , TK_RETURN ,
  TK_IS , TK_ISNOT , TK_NE , TK_EQ ,
  TK_LE , TK_GE , TK_FLOOR_DIV , TK_IF_EXPR ,
  TK_TRUE , TK_FALSE , TK_NONE , TK_AND ,
  TK_OR , TK_NOT , TK_LSHIFT , TK_RSHIFT ,
  TK_CAST , TK_PLUS_EQ , TK_MINUS_EQ , TK_TIMES_EQ ,
  TK_DIV_EQ , TK_MOD_EQ , TK_BIT_OR_EQ , TK_BIT_AND_EQ ,
  TK_BIT_XOR_EQ , TK_LSHIFT_EQ , TK_RSHIFT_EQ , TK_POW_EQ ,
  TK_GLOBAL , TK_BUILT_IN , TK_SUBSCRIPT , TK_VAR ,
  TK_NOTHING , TK_DICT_LITERAL , TK_LIST_LITERAL , TK_TUPLE_LITERAL ,
  TK_FOR , TK_IN , TK_NOTIN , TK_STARRED ,
  TK_UNARY_MINUS , TK_POW , TK_ARROW , TK_DECL ,
  TK_SLICE_EXPR , TK_TYPE_COMMENT , TK_RAISE , TK_ASSERT ,
  TK_DOTS , TK_LIST_COMP , TK_DICT_COMP , TK_BREAK ,
  TK_CONTINUE , TK_DELETE , TK_PASS , TK_CLASS_DEF ,
  TK_IMPORT , TK_WITH , TK_WITH_ITEM , TK_AS ,
  TK_PROP , TK_ELLIPSIS
}
 
enum class  AttributeKind {
  f , fs , i , is ,
  s , ss , t , ts ,
  g , gs , ty , tys ,
  ival
}
 
enum class  JitLoggingLevels { GRAPH_DUMP = 0 , GRAPH_UPDATE , GRAPH_DEBUG }
 
enum class  Side { LHS , RHS }
 
enum class  DCESideEffectPolicy : uint8_t { DONT_DELETE_NODES_WITH_SIDE_EFFECTS , ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS }
 
enum class  QuantizedParamsType { CONV , LINEAR }
 
enum  QuantType : uint8_t { DYNAMIC = 0 , STATIC }
 
enum class  MobileOptimizerType : int8_t {
  CONV_BN_FUSION , INSERT_FOLD_PREPACK_OPS , REMOVE_DROPOUT , FUSE_ADD_RELU ,
  HOIST_CONV_PACKED_PARAMS
}
 
enum  OpCode : uint8_t {
  OP , OPN , LOAD , MOVE ,
  STOREN , STORE , DROP , DROPR ,
  LOADC , JF , JMP , LOOP ,
  RET , WAIT , CALL , GUARD ,
  TYPECHECK , FAIL_GUARD , PROFILE_OP , TAIL_CALL ,
  INTERFACE_CALL , GET_ATTR , SET_ATTR , LIST_UNPACK ,
  TUPLE_CONSTRUCT , NAMED_TUPLE_CONSTRUCT , LIST_CONSTRUCT , DICT_CONSTRUCT ,
  CREATE_OBJECT , ISINSTANCE , TUPLE_SLICE , FORK ,
  WARN , ENTER , EXIT
}
 
enum class  PickleOpCode : char {
  MARK = '(' , STOP = '.' , POP = '0' , POP_MARK = '1' ,
  DUP = '2' , FLOAT = 'F' , INT = 'I' , BININT = 'J' ,
  BININT1 = 'K' , LONG = 'L' , BININT2 = 'M' , NONE = 'N' ,
  PERSID = 'P' , BINPERSID = 'Q' , REDUCE = 'R' , STRING = 'S' ,
  BINSTRING = 'T' , SHORT_BINSTRING = 'U' , UNICODE_ = 'V' , BINUNICODE = 'X' ,
  APPEND = 'a' , BUILD = 'b' , GLOBAL = 'c' , DICT = 'd' ,
  EMPTY_DICT = '}' , APPENDS = 'e' , GET = 'g' , BINGET = 'h' ,
  INST = 'i' , LONG_BINGET = 'j' , LIST = 'l' , EMPTY_LIST = ']' ,
  OBJ = 'o' , PUT = 'p' , BINPUT = 'q' , LONG_BINPUT = 'r' ,
  SETITEM = 's' , TUPLE = 't' , EMPTY_TUPLE = ')' , SETITEMS = 'u' ,
  BINFLOAT = 'G' , PROTO = '\x80' , NEWOBJ = '\x81' , EXT1 = '\x82' ,
  EXT2 = '\x83' , EXT4 = '\x84' , TUPLE1 = '\x85' , TUPLE2 = '\x86' ,
  TUPLE3 = '\x87' , NEWTRUE = '\x88' , NEWFALSE = '\x89' , LONG1 = '\x8a' ,
  LONG4 = '\x8b' , BINBYTES = 'B' , SHORT_BINBYTES = 'C' , SHORT_BINUNICODE = '\x8c' ,
  BINUNICODE8 = '\x8d' , BINBYTES8 = '\x8e' , EMPTY_SET = '\x8f' , ADDITEMS = '\x90' ,
  FROZENSET = '\x91' , NEWOBJ_EX = '\x92' , STACK_GLOBAL = '\x93' , MEMOIZE = '\x94' ,
  FRAME = '\x95'
}
 

Functions

TORCH_API void preoptimizeGraph (std::shared_ptr< Graph > &graph)
 
static IValue & peek (Stack &stack, size_t i, size_t N)
 
static IValue & peek (Stack *stack, size_t i, size_t N)
 
static const IValue & peek (const Stack &stack, size_t i, size_t N)
 
static const IValue & peek (const Stack *stack, size_t i, size_t N)
 
static at::ArrayRef< IValue > peekSlice (const Stack &stack, size_t i, size_t len, size_t N)
 
static at::ArrayRef< IValue > last (const Stack &stack, size_t N)
 
static at::ArrayRef< IValue > last (const Stack *stack, size_t N)
 
static void drop (Stack &stack, size_t n)
 
static void drop (Stack *stack, size_t n)
 
static IValue pop (Stack &stack)
 
static IValue pop (Stack *stack)
 
static std::vector< IValue > pop (Stack &stack, size_t n)
 
template<typename... Types>
static void pop (Stack &stack, Types &... args)
 
template<typename... Types>
static void pop (Stack *stack, Types &... args)
 
template<typename Type >
static void push_one (Stack &stack, Type &&arg)
 
static void push_one (Stack &stack, c10::TensorOptions options)
 
template<typename... Types>
static void push (Stack &stack, Types &&... args)
 
template<typename... Types>
static void push (Stack *stack, Types &&... args)
 
template<class T >
static void push_list_elements (Stack &stack, const c10::List< T > &elements)
 
template<typename T >
void pack (Stack &stack, T &&v)
 
template<typename T >
void pack (Stack *stack, T &&v)
 
template<typename... Args>
void pack (Stack &stack, std::tuple< Args... > &&t)
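These helpers are the calling convention for interpreter operations; a minimal sketch of their use with the signatures listed above:

```cpp
#include <ATen/core/stack.h>

void stackDemo() {
  torch::jit::Stack stack;
  torch::jit::push(stack, at::ones({2, 2}), 3.0);    // pushes two IValues
  double d = torch::jit::pop(stack).toDouble();      // pops the 3.0
  at::Tensor t = torch::jit::pop(stack).toTensor();  // pops the tensor
  torch::jit::drop(stack, stack.size());             // discard anything left
  (void)d;
  (void)t;
}
```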
 
void dump_opnames (const Module &m, std::unordered_set< std::string > &opnames)
 
TORCH_API std::shared_ptr< CompilationUnit > compile (const std::string &source)
 Compiles script code into an executable graph.
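A hedged sketch of driving `compile`; it assumes the `CompilationUnit::get_function` / `Function::operator()` calling convention from the 1.8 headers:

```cpp
#include <torch/csrc/jit/api/compilation_unit.h>

void runScript() {
  auto cu = torch::jit::compile(R"JIT(
def relu_add(a, b):
    return torch.relu(a + b)
)JIT");
  torch::jit::Function& fn = cu->get_function("relu_add");
  at::IValue out = fn({at::ones({2}), at::ones({2})});
}
```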
 
void placeholderCreator (GraphFunction &)
 
static ObjectPtr create_module_object (c10::QualifiedName class_name, std::shared_ptr< CompilationUnit > cu, bool shouldMangle=false)
 
bool & getInlineEverythingMode ()
 
void module_state_to (const autograd::Variable &variable, const c10::optional< at::Device > &device, const c10::optional< at::ScalarType > &dtype, bool non_blocking)
 
Module freeze (const Module &module, c10::optional< std::vector< std::string > > preserved_attrs, bool optimize_numerics)
 
std::unordered_set< TypePtr > getSharedModuleTypes (Module &mod)
 
void toBackendSelectiveImpl (Module &mod, const py::function &to_backend, const std::vector< std::string > &modules_to_lower, const std::unordered_set< TypePtr > &duplicate_types)
 
void initJitBackendBindings (PyObject *module)
 
std::shared_ptr< Resolver > loweredModuleResolver ()
 
int64_t registerFusion (const Node *fusion_group)
 
void runFusion (const int64_t key, Stack &stack)
 
bool canFuseOnCPU ()
 
bool canFuseOnGPU ()
 
void overrideCanFuseOnCPU (bool value)
 
void overrideCanFuseOnGPU (bool value)
 
std::vector< at::Tensor > debugLaunchGraph (Graph &graph, at::ArrayRef< at::Tensor > inputs)
 
std::string debugGetFusedKernelCode (Graph &graph, at::ArrayRef< at::Tensor > inputs)
 
size_t nCompiledKernels ()
 
TORCH_API void overrideMustUseLLVMOnCPU (bool value)
 
 TORCH_LIBRARY (cuda, m)
 
const std::vector< Function * > & getAllBuiltinFunctionsFor (Symbol name)
 
void canonicalizeModifiedLoop (Node *n)
 
void canonicalizeModifiedLoops (Block *block)
 
TORCH_API void CanonicalizeModifiedLoops (std::shared_ptr< Graph > &graph)
 
static std::string format (const std::string &fmt, TemplateEnv &env)
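`format` expands `${...}` placeholders from a `TemplateEnv` (see `CodeTemplate` in the Classes section); a tiny sketch, with the header location assumed for 1.8:

```cpp
#include <torch/csrc/jit/frontend/code_template.h>  // header path assumed

std::string emitTypedef() {
  torch::jit::TemplateEnv env;
  env.s("scalar", "float");  // bind the string substitution ${scalar}
  env.d("size", 4);          // bind the integer substitution ${size}
  return torch::jit::format("typedef ${scalar} vec_t[${size}];", env);
}
```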
 
bool operator== (const ConcreteModuleTypeBuilder::ModuleInfo &lhs, const ConcreteModuleTypeBuilder::ModuleInfo &rhs)
 
void ConvertToSSA (std::shared_ptr< Graph > &graph)
 
size_t ComputeEditDistance (const char *word1, const char *word2, size_t maxEditDistance)
 
std::string get_stacked_errors (const std::vector< Call > &error_stack)
 
template<typename T >
const ErrorReport & operator<< (const ErrorReport &e, const T &t)
 
bool inlineConsecutiveIfs (Node *node)
 
void inlineConsecutiveIfs (Block *block)
 
static void convertEnterExitNodesToWithBlocks (std::shared_ptr< Graph > &graph)
 
static void convertWithBlocksToEnterExitNodes (std::shared_ptr< Graph > &graph)
 
void TransformExits (std::shared_ptr< Graph > &graph)
 
C10_EXPORT either< OperatorName, FunctionSchema > parseSchemaOrName (const std::string &schemaOrName)
 
C10_EXPORT FunctionSchema parseSchema (const std::string &schema)
 
C10_EXPORT OperatorName parseName (const std::string &name)
 
void InlineBlockBeforeNode (Node *before_node, Block *block)
 
void inlineLoopCondition (Node *n)
 
void inlineLoopCondition (Block *block)
 
void InlineLoopCondition (std::shared_ptr< Graph > &graph)
 
NoneStatus canBeNone (Value *v)
 
static Value * asSimple (const SugaredValuePtr &value)
 
static std::shared_ptr< MagicMethod > makeMagic (const std::string &name, SugaredValuePtr base)
 
template<class T >
static Value * materializeConstant (T val, Graph &graph, const SourceRange &r, std::unordered_map< T, Value * > &map)
 
bool isSupportedListElementType (const TypePtr &type)
 
void runCleanupPasses (std::shared_ptr< Graph > &to_clean)
 
bool meaningfulName (const std::string &name)
 
C10_EXPORT int stringToKind (const std::string &str)
 
C10_EXPORT std::string kindToString (int kind)
 
C10_EXPORT SharedParserData & sharedParserData ()
 
bool isCharCount (char c, const std::string &str, size_t start, int len)
 
c10::optional< char > parseOctal (const std::string &str, size_t pos)
 
std::string parseStringLiteral (const SourceRange &range, const std::string &str)
 
Decl mergeTypesFromTypeComment (const Decl &decl, const Decl &type_annotation_decl, bool is_method)
 
std::shared_ptr< NativeResolver > nativeResolver ()
 
static TypePtr unwrapOptional (TypePtr opt_type)
 
static bool isIntOrFloatUsedAsList (const Value *value, const Argument &arg)
 
bool convertibleToList (const TypePtr &type, const TypePtr &list_type_)
 Returns true if type is a Tuple in which all the elements have the same type or if it's a subtype of list_type_.
 
Value * tryConvertToType (const SourceRange &loc, Graph &graph, const TypePtr &concrete_type, Value *value, bool allow_conversions)
 
static Value * tryMatchArgument (const Argument &arg, Graph &graph, const SourceRange &loc, const NamedValue &named_value, std::ostream *failure_messages, const std::function< std::ostream &()> &err, bool allow_conversions, TypeEnv &type_env)
 
c10::optional< size_t > findInputWithName (const std::string &name, at::ArrayRef< NamedValue > kwargs)
 
static Value * tryCreateList (const TypePtr &elem_type, Graph &graph, const SourceRange &loc, at::ArrayRef< NamedValue > varargs, std::ostream *failure_messages, const std::function< std::ostream &()> &err, bool convert_tensor_to_num, TypeEnv &type_env)
 Creates a list with the provided values if each value's type can be matched to an argument with type elem_type.
 
static bool varargsCanBeUsedAsList (const FunctionSchema &schema, size_t arg_index, const Argument &arg)
 
bool isBlockListedSchema (const FunctionSchema &schema)
 
static c10::optional< MatchedSchema > tryMatchSchema (const FunctionSchema &schema, const SourceRange &loc, Graph &graph, at::ArrayRef< NamedValue > args, at::ArrayRef< NamedValue > kwargs, c10::optional< NamedValue > self, std::ostream *failure_messages, bool allow_conversions)
 
MatchedSchema matchSchema (const ::c10::FunctionSchema &schema, const SourceRange &loc, Graph &graph, at::ArrayRef< NamedValue > args, at::ArrayRef< NamedValue > kwargs, const c10::optional< NamedValue > &self)
 
MatchedSchema matchSchema (const ::c10::FunctionSchema &schema, const SourceRange &loc, Graph &graph, at::ArrayRef< Value * > args, at::ArrayRef< NamedValue > kwargs)
 
static std::string prefixLine (const std::string &str, const std::string &prefix)
 
std::pair< size_t, MatchedSchema > matchSchemas (const std::vector< const FunctionSchema * > &schemas, const SourceRange &loc, Graph &graph, at::ArrayRef< NamedValue > args, at::ArrayRef< NamedValue > kwargs, const c10::optional< NamedValue > &self, bool render_errors)
 
static Value * packOutputs (Graph &g, at::ArrayRef< Value * > values, c10::OptNameList field_names)
 
static Value * emitBuiltinNode (const MatchedSchema &matched_schema, const SourceRange &loc, Graph &graph, Symbol name)
 
Value * emitBuiltinCall (const SourceRange &loc, Graph &graph, Symbol name, at::ArrayRef< NamedValue > args, at::ArrayRef< NamedValue > kwargs, const c10::optional< NamedValue > &self)
 
TORCH_API std::pair< size_t, MatchedSchema > matchSchemas (const std::vector< const ::c10::FunctionSchema * > &schemas, const SourceRange &loc, Graph &graph, at::ArrayRef< NamedValue > args, at::ArrayRef< NamedValue > kwargs, const c10::optional< NamedValue > &self=c10::nullopt, bool render_errors=false)
 
const std::unordered_map< std::string, TypePtr > & string_to_type_lut ()
 
C10_EXPORT void format_stack_trace (std::ostream &out, const std::vector< StackEntry > &entries)
 
std::ostream & operator<< (std::ostream &out, const SourceRange &range)
 
C10_EXPORT double strtod_c (const char *nptr, char **endptr)
 
C10_EXPORT float strtof_c (const char *nptr, char **endptr)
 
static const std::unordered_map< std::string, at::ScalarType > & builtin_cast_method_to_scalar_type ()
 
static bool isRecursive (const TypePtr &classType, const TypePtr &attrType)
 
static std::vector< Value * > toValues (Graph &g, at::ArrayRef< NamedValue > nvs)
 
static SourceRange mergeRanges (SourceRange c, const TreeList &others)
 
static std::ostream & operator<< (std::ostream &out, pretty_tree t_)
 
static std::ostream & operator<< (std::ostream &out, const TreeRef &t)
 
static std::unordered_map< Symbol, SymbolRange > symbol_range_map ({ {Symbol::fromQualString("aten::_test_serialization_subcmul"), {0, 2, Symbol::fromQualString("upgraders::_test_serialization_subcmul_0_2")}}, {Symbol::fromQualString("aten::div"), {0, 3, Symbol::fromQualString("upgraders::div_0_3")}}, {Symbol::fromQualString("aten::div_"), {0, 3, Symbol::fromQualString("upgraders::div__0_3")}}, {Symbol::fromQualString("aten::full"), {0, 4, Symbol::fromQualString("upgraders::full_0_4")}}, })
 
static std::unordered_map< NodeKind, uint64_t > kind_min_version_map ({ {aten::div, 4}, {aten::div_, 4}, {aten::full, 5}, })
 
Symbol get_symbol_for_version (const Symbol name, const uint64_t version)
 
uint64_t get_min_version_for_kind (const NodeKind &kind)
 
void Lint (const AliasDb *db)
 
static const char * toString (AttributeKind kind)
 
bool insertableTensor (const at::Tensor &ten)
 
bool insertableIValue (const IValue &ivalue)
 
Value * insertConstant (Graph &g, const IValue &val, c10::optional< SourceRange > loc, c10::optional< ScopePtr > scope)
 
c10::optional< Value * > tryInsertConstant (Graph &g, const IValue &val, c10::optional< SourceRange > loc, c10::optional< ScopePtr > scope)
 
c10::optional< IValue > toIValue (const Value *v)
 
template<typename T >
c10::optional< T > constant_as (const Value *v)
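These two are the standard way passes read compile-time constants off a `Value*`; a small sketch:

```cpp
#include <torch/csrc/jit/ir/constants.h>

// True when `v` is a compile-time integer constant equal to one.
bool isConstantOne(const torch::jit::Value* v) {
  if (auto i = torch::jit::constant_as<int64_t>(v)) {
    return *i == 1;
  }
  return false;  // not a constant, or not an int
}
```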
 
template<typename T >
static bool operator== (generic_graph_node_list_iterator< T > a, generic_graph_node_list_iterator< T > b)
 
template<typename T >
static bool operator!= (generic_graph_node_list_iterator< T > a, generic_graph_node_list_iterator< T > b)
 
static void printValueRef (std::ostream &out, const Value *n)
 
template<typename T >
std::ostream & operator<< (std::ostream &out, const std::vector< T > &nodes)
 
template<typename T >
static std::ostream & printValueRefs (std::ostream &out, const at::ArrayRef< T > nodes)
 
std::ostream & operator<< (std::ostream &out, const at::ArrayRef< const Value * > nodes)
 
std::ostream & operator<< (std::ostream &out, const at::ArrayRef< Value * > nodes)
 
std::ostream & operator<< (std::ostream &out, const const_value_list_with_types &l)
 
static void printAttribute (std::ostream &out, const at::Tensor &tensor)
 
static void printAttribute (std::ostream &out, const IValue &ival)
 
static void printTypeList (std::ostream &out, const std::vector< TypePtr > &items)
 
static std::ostream & indent (std::ostream &out, size_t level)
 
std::ostream & operator<< (std::ostream &out, const Node &n)
 
std::ostream & operator<< (std::ostream &out, const Graph &g)
 
static void checkSameDevice (const Node *node)
 
void LintGraph (const std::shared_ptr< Graph > &graph)
 
size_t findArgument (const FunctionSchema &the_schema, const std::string &unqualName)
 
size_t findArgument (const FunctionSchema &the_schema, Symbol name)
 
const SourceRange & fakeRange ()
 
at::ArrayRef< Value * > createTupleUnpack (Value *v)
 
std::vector< Value * > inlineCallTo (Node *to_replace, Function *callee, bool use_graph=true)
 Insert function CALLEE after node TO_REPLACE, remove the node, and replace all its uses with the corresponding outputs of the inserted function.
 
std::vector< Value * > unpackOutputs (const std::vector< Value * > &outputs)
 If there is only one value in OUTPUTS and its kind is Tuple, insert a tuple unpack node and return the resulting values.
 
std::vector< Value * > insertGraph (Graph &g, Graph &callee, ArrayRef< Value * > inputs, std::unordered_map< Value *, Value * > &value_map)
 
std::vector< Value * > insertGraph (Graph &g, Graph &callee, ArrayRef< Value * > inputs)
 Insert graph CALLEE into graph G using INPUTS as input values.
 
void parseIR (const std::string &str, torch::jit::Graph *graph, std::unordered_map< std::string, Value * > &vmap)
 Parse IR from STR, constructing the corresponding IR in GRAPH.
 
void parseIR (const std::string &str, torch::jit::Graph *graph)
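A minimal sketch in the style of the JIT's own C++ tests; the IR snippet is illustrative:

```cpp
#include <torch/csrc/jit/ir/irparser.h>

std::shared_ptr<torch::jit::Graph> buildGraph() {
  auto graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(R"IR(
graph(%a : Tensor, %b : Tensor):
  %c : Tensor = aten::mul(%a, %b)
  return (%c)
)IR", graph.get());
  return graph;
}
```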
 
std::vector< Match > findPatternMatches (const Graph &pattern, Graph &graph)
 
std::string getHeader (const Node *node)
 
static std::unordered_map< std::string, size_t > parseJITLogOption (const char *option)
 
bool is_enabled (const char *cfname, JitLoggingLevels level)
 
std::string log_function (const std::shared_ptr< torch::jit::Graph > &graph)
 
std::string jit_log_prefix (const std::string &prefix, const std::string &in_str)
 
std::string jit_log_prefix (JitLoggingLevels level, const char *fn, int l, const std::string &in_str)
 
std::ostream & operator<< (std::ostream &out, JitLoggingLevels level)
 
TORCH_API ::torch::jit::JitLoggingLevels jit_log_level ()
 
TORCH_API std::string jit_log_prefix (::torch::jit::JitLoggingLevels level, const char *fn, int l, const std::string &in_str)
 
TORCH_API bool is_enabled (const char *cfname, ::torch::jit::JitLoggingLevels level)
 
TORCH_API std::ostream & operator<< (std::ostream &out, ::torch::jit::JitLoggingLevels level)
 
std::unordered_map< std::string, int64_t > & passes_to_current_counter ()
 
static int parseOptLimit (const std::string &opt_limit)
 
static std::unordered_map< std::string, int64_t > parseJITOptLimitOption (const char *option)
 
bool opt_limit (const char *pass_name)
 
void _save_parameters (const std::map< std::string, at::Tensor > &map, std::ostream &out)
 
void _save_parameters (const std::map< std::string, at::Tensor > &map, const std::string &filename)
 
char const * toString (OpCode op)
 
OpCode parseOpCode (const char *str)
 
IValue expect_field (IValue tup, const std::string &expected_name, size_t entry)
 
std::string operator_str (const std::string &name, const std::string &overloadname)
 
mobile::Module _load_for_mobile (std::istream &in, c10::optional< at::Device > device)
 
mobile::Module _load_for_mobile (const std::string &filename, c10::optional< at::Device > device)
 
mobile::Module _load_for_mobile (std::unique_ptr< ReadAdapterInterface > rai, c10::optional< c10::Device > device)
 
mobile::Module _load_for_mobile (std::istream &in, c10::optional< at::Device > device, ExtraFilesMap &extra_files)
 
mobile::Module _load_for_mobile (const std::string &filename, c10::optional< at::Device > device, ExtraFilesMap &extra_files)
 
mobile::Module _load_for_mobile (std::unique_ptr< ReadAdapterInterface > rai, c10::optional< c10::Device > device, ExtraFilesMap &extra_files)
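A hedged sketch of the mobile loading path; `model.ptl` is a placeholder filename, and `mobile::Module::forward` taking a vector of `IValue`s is assumed from the 1.8 mobile API:

```cpp
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>
#include <ATen/ATen.h>

void runMobileModel() {
  torch::jit::mobile::Module m =
      torch::jit::_load_for_mobile("model.ptl", c10::nullopt);
  c10::IValue out = m.forward({at::ones({1, 3, 224, 224})});
}
```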
 
void _load_extra_only_for_mobile (const std::string &filename, c10::optional< at::Device > device, ExtraFilesMap &extra_files)
 Load only the contents of the "extra/" files whose names are passed in the map (extra_files).
 
std::map< std::string, at::Tensor > _load_parameters (std::istream &in, c10::optional< at::Device > device)
 
std::map< std::string, at::Tensor > _load_parameters (const std::string &filename, c10::optional< at::Device > device)
 
std::map< std::string, at::Tensor > _load_parameters (std::unique_ptr< ReadAdapterInterface > rai, c10::optional< c10::Device > device)
 
std::ostream & operator<< (std::ostream &out, Instruction inst)
 
void AnnotateWarns (Block *b)
 
void AnnotateWarns (const std::shared_ptr< Graph > &graph)
 
static bool shouldBeCapturedInByBailOut (Node *n)
 
void InsertBailOuts (std::shared_ptr< Graph > graph)
 
static Node * locateBailOutNodeInUnoptimizedGraph (Block *b, int64_t index)
 
static void removeBailouts (Block *b)
 
TORCH_API std::shared_ptr< Graph > BuildBailOutGraphFrom (int64_t bailout_index, const std::shared_ptr< Graph > &orig, const std::shared_ptr< Graph > &target)
 
bool have_same_shape (at::TensorList inputs)
 
bool should_be_transposed (at::TensorList inputs)
 
std::vector< at::Tensor > transpose_inputs (at::TensorList inputs)
 
bool shape_is_fast_for_reduce (const at::Tensor &lhs, const at::Tensor &rhs)
 
void BatchMMTreeReduce (Block *block)
 
bool shape_is_fast_for_side (const at::Tensor &other_side_input)
 
std::pair< std::vector< Node * >, std::vector< Node * > > gatherIndependentMMUses (Value *value, AliasDb &alias_db)
 
void BatchMMSide (Block *block, AliasDb &alias_db)
 
bool hasMutableOperators (Block *block)
 
void BatchMM (std::shared_ptr< Graph > &graph)
 
std::shared_ptr< Graph > Canonicalize (const std::shared_ptr< Graph > &graph, bool keep_unique_names)
 
size_t blockIndex (const Block *b)
 
bool isBefore (Node *n1, Node *n2)
 
bool isBefore (const Use &a, const Use &b)
 
bool isAfter (const Use &a, const Use &b)
 
bool isBeforeOrAfter (const Use &a, const Use &b, bool checking_before)
 
c10::optional< const Use > firstOrLastUse (Value *v, bool find_first)
 
std::vector< c10::optional< const Use > > gatherFirstUses (at::ArrayRef< Value * > values)
 
std::vector< size_t > sort_indexes (at::ArrayRef< Value * > values)
 
void CanonicalizeLoopOutputs (Node *n)
 
void CanonicalizeIfOutputs (Node *n)
 
void CanonicalizeOutputs (Block *block)
 
void CanonicalizeOutputs (std::shared_ptr< Graph > &graph)
 
static c10::optional< std::vector< ChunkOutput > > getChunkOutputs (Node *chunk)
 
static void CanonicalizeOps (Block *block)
 
void CanonicalizeOps (const std::shared_ptr< Graph > &graph)
 
static void unprofileGraphInputs (const std::shared_ptr< Graph > &graph)
 
static void unprofileBlock (Block *start_block)
 
void ClearProfilingInformation (const std::shared_ptr< Graph > &graph)
 
void clearUndefinedness (Value *o)
 
void clearUndefinedness (Block *block)
 
void ClearUndefinedness (const std::shared_ptr< Graph > &graph)
 
void EliminateCommonSubexpression (const std::shared_ptr< Graph > &graph)
 
void ConstantPooling (const std::shared_ptr< Graph > &graph)
 
c10::optional< std::vector< IValue > > runNodeIfInputsAreConstant (const Node *n, bool ignore_custom_classes)
 
void ConstantPropagation (std::shared_ptr< Graph > &graph, bool ignore_custom_classes)
 
void ConstantPropagationImmutableTypes (std::shared_ptr< Graph > &graph)
 
std::vector< Node * > CreateAutodiffSubgraphs (const std::shared_ptr< Graph > &graph, size_t threshold)
 
void CreateFunctionalGraphs (const std::shared_ptr< Graph > &graph)
 
void InlineFunctionalGraphs (const std::shared_ptr< Graph > &graph)
 
void EliminateDeadCode (const std::shared_ptr< Graph > &graph, DCESideEffectPolicy sideEffectPolicy)
 
void EliminateDeadCode (Block *block, bool recurse, DCESideEffectPolicy sideEffectPolicy)
 
void EliminateDeadCode (Block *block, std::function< void(const std::unordered_set< const Value * > &)> cb, DCESideEffectPolicy sideEffectPolicy)
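Passes like these compose by mutating the graph in place; a typical hedged cleanup pipeline (the trailing parameters shown in the listing are assumed to be defaulted in the headers):

```cpp
#include <torch/csrc/jit/passes/common_subexpression_elimination.h>
#include <torch/csrc/jit/passes/constant_propagation.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>

void cleanupGraph(std::shared_ptr<torch::jit::Graph>& graph) {
  torch::jit::EliminateCommonSubexpression(graph);
  torch::jit::ConstantPropagation(graph);
  // By default, nodes with side effects are kept alive.
  torch::jit::EliminateDeadCode(graph);
}
```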
 
c10::optional< bool > isDefined (Value *tensor)
 
bool isDecomposableNorm (Node *normalize_op)
 
bool DecomposeOps (Block *block, CompilationUnit &decompose_funcs)
 
void DecomposeOps (std::shared_ptr< Graph > &graph)
 
static void EraseNumberTypesOnBlock (Block *block)
 
void EraseNumberTypes (const std::shared_ptr< Graph > &graph)
 
void FixupTraceScopeBlocks (std::shared_ptr< Graph > &graph, Module *self)
 
std::tuple< at::Tensor, at::Tensor > computeUpdatedConvWeightAndBias (const ConvBNParameters &p)
 Given the current weight and bias tensors of a Conv module and the parameters of the BatchNorm module being folded into it, compute the updated values for the weight and bias.
 
Module FoldConvBatchNorm (const Module &module)
 Fold Conv2d-BatchNorm2d into Conv2d in the forward method of this module and all its submodules.
 
Module freeze_module (const Module &module, std::vector< std::string > preservedAttrs, bool freezeInterfaces, bool preserveParameters)
 
bool nonConstantParameters (Node *n)
 
bool supportedConvNode (Node *n)
 
void FoldFrozenConvBatchnorm (Block *b)
 
bool supportedAddOrSub (Node *n)
 
bool opDoesNotBroadCastWithConv (Tensor &op_tensor, Tensor &weight_tensor)
 
bool checkConvAndBroadcastingOpPreConditions (Node *conv, Node *op)
 
Tensor resizeConstantScalarOrTensorToShape (Value *v, const std::vector< int64_t > &shape, at::TensorOptions options)
 
void FoldFrozenConvAddOrSub (Block *b)
 
bool supportedMulOrDiv (Node *n)
 
void FoldFrozenConvMulOrDiv (Block *b)
 
void FoldFrozenConvBatchnorm (std::shared_ptr< Graph > &graph)
 
void FoldFrozenConvAddOrSub (std::shared_ptr< Graph > &graph)
 
void FoldFrozenConvMulOrDiv (std::shared_ptr< Graph > &graph)
 
void OptimizeFrozenGraph (std::shared_ptr< Graph > &graph, bool optimize_numerics)
 
void FuseLinear (std::shared_ptr< Graph > &graph)
 Match the addmm or matmul + add pattern that the JIT generates for at::linear and fuse it back into a single aten::linear call. This pass can be deleted once the JIT can emit aten::linear directly.
 
void FuseAddRelu (script::Module &module)
 
void FuseAddRelu (std::shared_ptr< Graph > &graph)
 
void FuseGraph (std::shared_ptr< Graph > &graph, bool strict_fuser_check)
 
void CustomFuseGraph (std::shared_ptr< Graph > &graph, const std::function< bool(Node *)> &fn, Symbol kind, size_t arg_limit)
 
void EliminateRedundantGuards (std::shared_ptr< Graph > graph)
 
void hoistConvPackedParams (Module &rootModule, Node *getConvPackedParamsNode, const std::string &prefix, int &nameUniqueCounter)
 
void HoistConvPackedParams (script::Module &m)
 
bool canRunWithAutograd (Node *node)
 
void InlineAutodiffSubgraphs (std::shared_ptr< Graph > &graph, size_t threshold)
 
void InlineForkWait (Block *b, std::unordered_map< Value *, Value * > &future_remap)
 
void InlineForkWait (const std::shared_ptr< Graph > &graph)
 
void inlineForkedClosure (Node *fork_closure)
 
void inlineForkedClosures (Block *block)
 
void inlineForkedClosures (std::shared_ptr< Graph > &to_clean)
 
void inlineCalls (Block *block)
 
void Inline (Graph &graph)
 
void CheckInplace (Block *block)
 
void CheckInplace (std::shared_ptr< Graph > &graph)
 
void removeProfilingNodes (Block *b)
 
void InsertGuards (std::shared_ptr< Graph > graph)
 
void RemoveProfilingNodes (const std::shared_ptr< Graph > &graph)
 
void liftClosure (Node *closure)
 
void liftClosures (Block *block)
 
void liftClosures (const std::shared_ptr< Graph > &to_clean)
 
std::unordered_map< Node *, std::vector< Value * > > BuildLivenessSets (std::shared_ptr< Graph > graph)
 
static void addCondAsOutput (Node *loop)
 
void PeelProfilingLoops (const std::shared_ptr< Graph > &graph)
 
Node * PeelLoop (Node *n, size_t times)
 
void UnrollLoops (std::shared_ptr< Graph > &graph)
 
void LowerGradOf (Graph &g)
 
std::pair< std::shared_ptr< Graph >, std::vector< Slot > > lower_graph (const ModulePtr &self, Graph &g_, size_t self_offset=0)
 
static std::vector< IValue > loadTensors (const std::vector< Slot > &slots)
 
std::pair< std::shared_ptr< Graph >, std::vector< IValue > > LowerGraph (Graph &graph, const ModulePtr &self)
 
static void LowerAllTuples (Block *block)
 
static void RemoveTupleConstants (Node *n)
 
static void VisitNode (Node *n, Node *insert_point)
 
static void EnsureNoTuples (ArrayRef< Value * > values)
 
static void EnsureNoTuples (Block *block)
 
void LowerAllTuples (const std::shared_ptr< Graph > &graph)
 
void LowerSimpleTuples (Block *block)
 
void LowerSimpleTuples (const std::shared_ptr< Graph > &graph)
 
void metalInsertPrePackedOps (std::shared_ptr< Graph > &graph)
 
void metalInsertPrePackedOps (script::Module &module)
 
void metalFoldPrePackingOps (script::Module &m)
 
void metalFusePrePackedConvWithClamp (script::Module &module)
 
void metalInsertCopyOps (script::Module &module)
 
void runCanonicalOptimizations (script::Module &module)
 
script::Module metalOptimizeForMobile (const script::Module &m, const std::vector< std::string > &preserved_methods)
 
const std::unordered_map< Symbol, Symbol > & getOperatorAliasMap ()
 
void NormalizeOps (const std::shared_ptr< Graph > &graph)
 
void removePrintOps (Block *block)
 
void RemovePrintOps (std::shared_ptr< Graph > &graph)
 
void checkONNXCompatibility (const c10::FunctionSchema &schema)
 
void preprocessCaffe2Ops (Block *block)
 
void PreprocessCaffe2Ops (std::shared_ptr< Graph > &graph)
 
std::shared_ptr< Graph > ToONNX (std::shared_ptr< Graph > &graph, ::torch::onnx::OperatorExportTypes operator_export_type)
 
void BlockToONNX (Block *old_block, Block *new_block, ::torch::onnx::OperatorExportTypes operator_export_type, std::unordered_map< Value *, Value * > env)
 
void CastAllConstantToFloating (Block *block)
 
void CastAllConstantToFloating (const std::shared_ptr< Graph > &graph)
 
void ConstantFoldONNX (Block *b, ParamMap &paramsDict, int opset_version)
 
void ConstantFoldONNX (Block *b, std::map< std::string, IValue > &paramDict, int opset_version)
 
void EliminateUnusedItemsONNX (Block *b, ParamMap &paramsDict)
 
void EliminateUnusedItemsONNX (Block *b, std::map< std::string, IValue > &paramDict)
 
std::vector< at::Tensor > getValues (Node *node, const ValueToParamPairMap &valsToParamsMap)
 
static void fuseConvBatchNorm (Block *b, ValueToParamPairMap &valsToParamsMap)
 
void EvalPeepholeONNX (Block *b, ParamMap &paramsDict)
 
void EvalPeepholeONNX (Block *b, std::map< std::string, IValue > &paramDict)
 
void FixupONNXLoopNodeInputs (Node *node)
 
std::vector< Value * > FixupONNXLoopNode (Node *node, int opset_version)
 
bool IsUninitializedNode (Node *n)
 
void InferShapeTypeForUninitializedOutput (Graph *graph, Block *block, Value *uninitialized_output, Value *other_output)
 
void ONNXFixupUninitializedOutput (Node *node)
 
std::vector< Value * > FixupONNXIfNode (Node *node, int opset_version)
 
std::vector< Value * > FixupONNXControlflowNode (Node *n, int opset_version)
 
static bool isStaticCondition (Node *node)
 
static c10::optional< int > findIndex (c10::ArrayRef< torch::jit::Value * > outputs, Value *input)
 
static bool constantFoldedConditionValue (Node *node)
 
static void foldIfNode (Block *b)
 
void FoldIfNodeONNX (Block *b)
 
bool ConditionValueONNX (Node *n)
 
bool IsStaticConditionONNX (Node *n)
 
void functionCallSubstitution (Block *block)
 
void ONNXFunctionCallSubstitution (Graph &graph)
 
ValueToParamPairMap buildValueToParamsMap (Block *b, const ParamMap &paramsDict)
 
void eraseUnusedBlockInputs (Block *b)
 
void eraseUnusedValuesFromMap (ValueToParamPairMap &valsToParamsMap)
 
void buildParamsMapFromValueToParamsMap (const ValueToParamPairMap &valsToParamsMap, ParamMap &paramsDict)
 
c10::optional< at::ScalarType > ONNXTypeToATenType (int32_t onnx_type)
 
Node * addNodeToBlock (Block *block, Symbol kind, ArrayRef< Value * > inputs)
 
Value * addInputToBlock (Block *block)
 
Node * createONNXUnsqueeze (Graph *graph, Node *n_to_insert_before, Value *input, int axis, int opset_version)
 
std::deque< std::string > findSubModuleAttr (Value *input, std::string &name, Module &attrModule, std::shared_ptr< Graph > &graph)
 
Value * addParamAsArgument (Function *function, std::string &name, IValue &attr)
 
std::vector< IValue > getParamAttributes (Block *block, std::shared_ptr< Graph > &graph, const Module &module_, Function *function_, std::unordered_map< std::string, Value * > &attrValues)
 
void insertMainModuleAsConstant (const std::shared_ptr< Graph > &graph)
 
std::pair< Module, std::vector< IValue > > list_module_parameters (const Module &module)
 
bool isRNN (const Node *node)
 
bool isNopTranspose (const std::vector< int64_t > &perm)
 
std::vector< int64_t > composeTransposes (const std::vector< int64_t > &t1, const std::vector< int64_t > &t2)
 
std::vector< size_t > getBroadcastPositions (Node *node)
 
c10::optional< size_t > fusibleExpandTo (at::IntArrayRef from, at::IntArrayRef to)
 
void fuseBroadcast (Block *b)
 
void fuseConsecutiveTransposes (Block *b)
 
void eliminateNopTranspose (Block *b)
 
void fuseTransposeIntoGemm (Block *b)
 
void pushPackingPastRnn (Block *b)
 
void removeNopPacking (Block *graph)
 
void hackFixupPadPackedShapes (Block *graph)
 
void fixDefaultRNNState (Graph *graph, Node *n, int input_index, int opset_version)
 
void fixDefaultRnnHiddenState (Block *b, int opset_version)
 
void fixDefaultLstmCellState (Block *b, int opset_version)
 
static bool isSafeToSpeculate (Node *n)
 
static void speculateOps (Block *block)
 
static void replaceInputWithList (Node *node, size_t i, ArrayRef< Value * > to)
 
static void eraseListConstruct (Block *block, int opset_version)
 
static void eraseListConstruct (Node *n, int opset_version)
 
static void fuseListConstructListUnpack (Block *b)
 
void removeMaxPoolUnusedOutput (Block *b)
 
static void fuseLogSoftmaxNllLoss (Block *b)
 
static void removeSequenceSplitConcat (Block *b)
 
void PeepholeOptimizeONNX (std::shared_ptr< Graph > &graph, int opset_version, bool fixed_batch_size)
 
static void PrepareDivisionForONNXOnBlock (Block *block)
 
void PrepareDivisionForONNX (const std::shared_ptr< Graph > &graph)
 
void PreprocessForONNX (std::shared_ptr< Graph > &graph)
 
void PrepareInplaceOpsForONNX (const std::shared_ptr< Graph > &graph)
 
void RemoveInplaceOpsForONNX (const std::shared_ptr< Graph > &graph, Module *model=nullptr)
 
void ScalarTypeAnalysisForONNX (const std::shared_ptr< Graph > &graph)
 
void ScalarTypeAnalysisNodeForONNX (Node *n)
 
TypePtr MergeInferredType (TypePtr existing_type, TypePtr inferred_type)
 
void ONNXShapeTypeInference (Node *n, const ParamMap &params_dict, int opset_version)
 
void ONNXSetDynamicInputShape (std::shared_ptr< Graph > &graph, const std::unordered_map< std::string, std::unordered_map< int64_t, std::string > > &dynamic_axes, const std::vector< std::string > &input_names)
 
bool HasSequenceTypeOutput (Node *node)
 
void ONNXUpdateTypeFromTensor (Value *graph_output, const at::Tensor &output, bool onnx_shape_inference)
 
size_t ONNXAssignOutputShape (std::shared_ptr< Graph > &graph, size_t outputs_index, PyObject *output_obj, bool onnx_shape_inference)
 
void ONNXAssignOutputShape (std::shared_ptr< Graph > &graph, at::ArrayRef< at::Tensor > outputs, const python::IODescriptor &desc, bool onnx_shape_inference)
 
void ONNXShapeTypeInference (std::shared_ptr< Graph > &graph, const ParamMap &params_dict, int opset_version)
 
double getScaleFromInput (Node *input_node)
 
Node * CreateQuantizedWeights (std::string data, std::shared_ptr< Graph > &graph, std::vector< int64_t > shapes, double scale, int64_t zero_point)
 
Node * CreateQuantizedBias (std::vector< int64_t > data, std::shared_ptr< Graph > &graph, std::vector< int64_t > shapes, double scale, int64_t zero_point)
 
Node * createIntTuple (const std::vector< int64_t > &is, std::shared_ptr< Graph > &graph)
 
Node * createInt (int64_t i, std::shared_ptr< Graph > &graph)
 
void unpackQuantizedWeightsHelper (std::shared_ptr< Graph > &graph, std::map< std::string, IValue > &paramsDict, const std::string &pattern, const std::string &unpack_fn, QuantizedParamsType params_type)
 
void UnpackQuantizedWeights (std::shared_ptr< Graph > &graph, std::map< std::string, IValue > &paramsDict)
 
void insertPermutesHelper (std::shared_ptr< Graph > &graph, std::map< std::string, IValue > &paramsDict, const std::string &pattern)
 
void insertPermutes (std::shared_ptr< Graph > &graph, std::map< std::string, IValue > &paramsDict)
 
std::vector< GraphPassEntry > & getCustomPostPasses ()
 
std::vector< GraphPassEntry > & getCustomPrePasses ()
 
GraphPassNameType registerPostPass (GraphPass p)
 
GraphPassNameType registerPass (GraphPass p)
 
GraphPassNameType registerPrePass (GraphPass p)
 
void clearPostPass (GraphPassNameType pid)
 
void clearPrePass (GraphPassNameType pid)
 
void clearAllPostPasses ()
 
void clearAllPrePasses ()
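Passes are normally installed at load time through a static registration object (`RegisterPass` is an alias for `RegisterPostPass`, per the Typedefs section); a sketch with a hypothetical no-op pass:

```cpp
#include <torch/csrc/jit/passes/pass_manager.h>

// Hypothetical pass: runs with the other registered post-passes.
static torch::jit::RegisterPass myPass(
    [](std::shared_ptr<torch::jit::Graph>& g) {
      // Inspect or mutate `g` here; this sketch leaves it unchanged.
    });
```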
 
template<typename T >
static bool mustBeEqual (const c10::optional< T > &a, const c10::optional< T > &b)
 
void FuseAddMM (Block *block)
 
void FuseAddMM (const std::shared_ptr< Graph > &graph)
 
void PeepholeOptimize (const std::shared_ptr< Graph > &graph, bool addmm_fusion_enabled)
 
TORCH_API void PeepholeOptimize (Block *block, bool disable_shape_peepholes=false)
 
void PeepholeOptimizeAliasSensitive (const std::shared_ptr< Graph > &graph)
 
c10::optional< size_t > normalizeIndex (int64_t index, size_t len)
 
void PeepholeOptimizeListIdioms (const std::shared_ptr< Graph > &graph)
 
void PrePackingOpsFolder (script::Module &m, const PrePackingOpsFilterFn &is_foldable_op, const std::string &attr_prefix)
 
void DedupModuleUses (Module &module)
 Recursively deduplicate multiple uses of the same module by creating an instance clone for each use: the clone keeps the same type and copies all attributes, and each use of the original module in the Graph is redirected to its clone.
 
void QuantFusion (std::shared_ptr< Graph > &graph, QuantType quant_type=QuantType::STATIC)
 Backend-specific pass to fuse dequantize - op - quantize call sequences into quantized_op calls.
 
void InsertPrepackUnpack (std::shared_ptr< Graph > &graph)
 Insert prepack and unpack functions in the graph. Pack/unpack functions are added for quantized weights so that the packed weight can later be folded into an attribute of the module, reducing the cost of packing the weight on the fly in quantized models.
 
void InsertPrepackUnpack (Module &module)
 Insert pack and unpack functions in all graphs of the module.
 
void FoldQuantizedPrepackingOps (Module &module)
 
Module Finalize (Module &module, QuantType quant_type, const std::vector< std::string > &preserved_attrs)
 
void FuseQuantizedAddRelu (std::shared_ptr< Graph > &graph)
 
bool matchAtenFuncToUse (const Use &use, const std::string &func_name, c10::optional< int > n)
 
bool matchCallFuncToUse (const Use &use, const std::string &func_name, c10::optional< int > n)
 
bool matchArgPattern (Value *v, const AtenFuncArgs &aten_func_args, const CallFuncArgs &call_func_args)
 
bool isWeight (Value *v)
 
bool isBiasOfConvOrLinear (Value *v)
 
bool isEmbeddingBagNonInput (Value *v)
 
c10::optional< Use > getClampScalarInputUse (Value *v)
 
std::vector< Value * > getPassThroughInputs (Value *v)
 
std::vector< NodeKind > toAtenSymbol (const std::vector< std::string > &func_names)
 
bool isAtenFunc (Node *n, const std::vector< NodeKind > &aten_funcs)
 
bool isAtenFunc (Node *n, const std::vector< std::string > &aten_funcs)
 
bool isFunctionNode (Node *n, const std::vector< std::string > &call_funcs, const std::vector< std::string > &aten_funcs)
 
bool isSingleInputGeneralShapeAtenFunction (Node *n)
 
bool isSingleInputGeneralValueAtenFunction (Node *n)
 
bool isSingleInputGeneralCallFunction (Node *n)
 
bool isSingleInputGeneralAtenFunction (Node *n)
 
bool isClamp (Node *n)
 
bool isTensorInfoNode (Node *n)
 
bool isPropagateQuantSingleInputOp (Node *n)
 
bool isPropagateQuantBinaryOp (Node *n)
 
bool isPropagateQuantOp (Node *n)
 
bool isBinaryOpWithScalarInput (Node *n)
 
c10::optional< std::tuple< c10::QScheme, QParamVector > > getFixedQParams (Node *n)
 
bool userDefinedCallFunction (Node *n)
 
bool isWeightOnlyStaticQuantOp (Node *n)
 
bool nodeQuantizable (Node *n, QuantType quant_type)
 
bool useQuantizable (const Use &use, QuantType quant_type)
 
std::shared_ptr< Graph > getCallFunctionGraph (Node *n)
 
bool alwaysRaisesException (Block *block)
 
bool isScalar (Value *v)
 
bool hitGraphInput (Value *value)
 
std::vector< std::string > getModuleAccessPath (Value *instance, Value *self)
 
Module findChildModule (const Module &module, const std::vector< std::string > &path)
 
Module getInvokedModule (Module &module, Node *n, Value *self)
 
c10::optional< Module > getInvokedModuleOpt (const Module &module, Node *n, Value *self)
 
bool is_int_constant (const Match &match, const std::unordered_map< std::string, Value * > &vmap, const std::string &vname, int value)
 
bool is_functional (const Match &match, const std::unordered_map< std::string, Value * > &vmap, const std::string &vname, const std::string &functional)
 
std::string removeTorchMangle (const std::string &orig_name)
 
c10::optional< std::string > getModuleName (Value *value)
 
bool is_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap, const std::string &vname, const std::string &module_qualified_name)
 
bool aten_add_alpha_is_one (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_functional_relu (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_relu_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_functional_linear (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_linear_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_conv1d_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_conv2d_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_conv3d_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_conv_transpose1d_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_conv_transpose2d_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_batchnorm2d_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
bool is_batchnorm3d_module (const Match &match, const std::unordered_map< std::string, Value * > &vmap)
 
TORCH_API bool hasScalarInput (Node *n)
 
Module InsertObservers (Module &module, const std::string &method_name, const QConfigDict &qconfig_dict, bool inplace, QuantType quant_type=QuantType::STATIC)
 Insert observer modules and observer function calls for the Tensors that need to be observed. More...
 
void SwapFunctionalLinear (Module &module)
 Swap all functional linear CallFunctions in the module. More...
 
void SwapFunctionalLinear (std::shared_ptr< Graph > &graph)
 Swap functional linear CallFunctions to aten::linear so that they can survive inlining, since quant fusion needs to recognize linear as a single op instead of a complicated if block. More...
 
void ReplicateQuant (std::shared_ptr< Graph > &graph)
 Replicate quantize node for prim::If blocks, so that we can match quantization patterns in prim::If blocks. More...
 
void ReplicateDeQuant (std::shared_ptr< Graph > &graph)
 Replicate dequantize node for each use, so that we can match quantization patterns. More...
 
Module InsertQuantDeQuant (Module &module, const std::string &method_name, bool inplace, bool debug, QuantType quant_type=QuantType::STATIC)
 Insert quantize-dequantize calls for the Tensors that are observed in the insert_observers pass. More...
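A minimal sketch of how these passes chain together, assuming a scripted Module named module, a method named "forward", and an already-populated qconfig_dict; observer construction and the calibration runs are elided:

    // Hedged sketch of graph-mode static quantization using the passes above.
    QConfigDict qconfig_dict;  // submodule name -> c10::optional<QConfig>
    Module observed = InsertObservers(
        module, "forward", qconfig_dict, /*inplace=*/false, QuantType::STATIC);
    // ... run representative inputs through `observed` to calibrate ...
    Module quantized = InsertQuantDeQuant(
        observed, "forward", /*inplace=*/false, /*debug=*/false, QuantType::STATIC);
    Module finalized = Finalize(quantized, QuantType::STATIC, {});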
 
std::vector< QuantFusionInfo > quant_fusion_pattern_and_replacements ()
 
std::vector< QuantFusionInfo > dynamic_quant_fusion_pattern_and_replacements ()
 
std::vector< QuantFusionInfo > linear_prepack_unpack_patterns ()
 
std::vector< QuantFusionInfo > conv_prepack_unpack_patterns ()
 
std::ostream & operator<< (std::ostream &os, QuantType t)
 
void removeDropout (script::Module &module)
 
static void RemoveExpands (Block *block)
 
void RemoveExpands (const std::shared_ptr< Graph > &graph)
 
void ImplicitCastForBinaryInplaceOps (Block *b)
 
void RemoveInplaceOps (const std::shared_ptr< Graph > &graph)
 
void RemoveListMutation (const std::shared_ptr< Graph > &graph)
 
void RemoveTensorMutation (const std::shared_ptr< Graph > &graph)
 
void RemoveRedundantProfiles (Block *block, AliasDb &db)
 
void RemoveRedundantProfiles (std::shared_ptr< Graph > &graph)
 
void PropagateRequiresGrad (std::shared_ptr< Graph > &graph)
 
void PropagateInputShapes (const std::shared_ptr< Graph > &graph)
 
void EraseShapeInformation (const std::shared_ptr< Graph > &graph)
 
bool hasGradSumToSizeUses (Value *v)
 
void insertProfileNodesForSpecializeAutogradZero (Block *block, ProfilingRecord *pr)
 
void InsertProfileNodesForSpecializeAutogradZero (ProfilingRecord *pr)
 
void specializeAutogradZero (std::shared_ptr< Graph > g)
 
Module PatternBasedRewrite (const Module &module)
 Run pattern-based subgraph rewrites on all methods in the module. More...
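Since PatternBasedRewrite takes a const Module& and returns the rewritten module, a usage sketch is a single call (module here is any scripted Module):

    Module rewritten = PatternBasedRewrite(module);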
 
bool isSupportedForBlock (Node *node)
 
bool usedOnlyInSize (Value *v)
 
Value * broadcastSizes (at::ArrayRef< Value * > sizes, AliasDb *db)
 
void setTensorExprFuserEnabled (bool val)
 
bool tensorExprFuserEnabled ()
 
bool setTexprReductionsEnabled (bool value)
 
bool texprReductionsEnabled ()
 
void removeProfileNodesAndSpecializeTypes (Block *b)
 
void RemoveProfileNodesAndSpecializeTypes (std::shared_ptr< Graph > &graph)
 
void removeTensorTypeSpecialization (Value *v)
 
void removeTensorTypeSpecializations (Block *block)
 
void RemoveTensorTypeSpecializations (std::shared_ptr< Graph > &graph)
 
void insertTypeGuard (Node *guarded_node, tensor_type_converter_t type_converter, Symbol kind)
 
void FuseTensorExprs (std::shared_ptr< Graph > &graph, size_t min_group_size, bool disable_shape_checks)
 
Operation createTensorExprOp (const Node *node)
 
void UpdateDifferentiableGraphRequiresGrad (Block *block, c10::optional< bool > new_requires_grad)
 
void UpdateDifferentiableGraphRequiresGrad (std::shared_ptr< Graph > &diff_forward_graph, c10::optional< bool > new_requires_grad)
 
void checkAliasAnnotation (const std::shared_ptr< Graph > &graph, std::vector< IValue > pythonInputs, const std::string &unqualifiedOpName)
 
void vulkanInsertPrePackedOps (std::shared_ptr< Graph > &graph)
 
void vulkanInsertPrePackedOps (script::Module &module)
 
void vulkanFusePrePackedConvWithClamp (script::Module &module)
 
void vulkanFoldPrePackingOps (script::Module &m)
 
script::Module vulkanOptimizeForMobile (const script::Module &module, const std::vector< std::string > &preserved_methods)
 
void transformConv1dToConv2d (std::shared_ptr< Graph > &graph)
 
void transformConv1dToConv2d (script::Module &module)
 
void insertPrePackedOps (std::shared_ptr< Graph > &graph)
 
void insertPrePackedOps (script::Module &module)
 
void fusePrePackedLinearConvWithClamp (script::Module &module)
 
void FoldPrePackingOps (script::Module &m)
 
script::Module optimizeForMobile (const script::Module &module, const std::set< MobileOptimizerType > &blocklist, const std::vector< std::string > &preserved_methods)
 
TORCH_API void runJITCPPTests ()
 
void initJITBindings (PyObject *module)
 
c10::optional< Module > as_module (const py::object &obj)
 
static py::tuple tuple_tail (const py::tuple &tup)
 
void clear_registered_instances (void *ptr)
 
IValue toIValue (py::handle obj, const TypePtr &type, c10::optional< int32_t > N)
 
py::object toPyObject (IValue ivalue)
 
std::shared_ptr< CompilationUnit > get_python_cu ()
 
TypedIValue toDictKeyIValue (py::handle key)
 
c10::optional< TypePtr > unifyOrInitializeType (const TypePtr &accum, const TypePtr &unify)
 
InferredType tryToInferContainerType (py::handle input)
 
InferredType tryToInferType (py::handle input)
 
bool isTraceableType (const TypePtr &type)
 
IValue toTypeInferredIValue (py::handle input)
 
Stack toTraceableStack (const py::tuple &inputs)
 
IValue createGenericList (py::handle obj, const TypePtr &elem_type)
 
IValue createGenericDict (const py::dict &obj, const TypePtr &key_type, const TypePtr &value_type)
 
template<class T >
void guardAgainstNamedTensor (const T &var)
 
std::string friendlyTypeName (py::handle obj)
 
IValue argumentToIValue (const FunctionSchema &schema, size_t argumentPosition, py::handle object)
 
IValue returnToIValue (const TypePtr &type, py::handle object)
 
py::object getScriptedClassOrError (const std::string &name)
 
Stack createStackForSchema (const FunctionSchema &schema, const tuple_slice &args, const py::kwargs &kwargs, c10::optional< IValue > self)
 
py::object createPyObjectForStack (Stack &&stack)
 
Stack evilDeprecatedBadCreateStackDoNotUse (const py::tuple &tuple, at::ArrayRef< Value * > inputs, size_t reserve_extra_space=0)
 
py::object runAndInsertCall (Function &callee, const tuple_slice &args, const py::kwargs &kwargs, c10::optional< IValue > self, const std::function< Value *(Graph &, const MatchedSchema &match)> &callInserter)
 
py::object invokeScriptFunctionFromPython (Function &callee, const tuple_slice &args, const py::kwargs &kwargs)
 
py::object invokeScriptMethodFromPython (Method &callee, const tuple_slice &args, const py::kwargs &kwargs)
 
std::pair< std::shared_ptr< Operator >, Stack > getOpWithStack (const std::vector< std::shared_ptr< Operator > > &operations, py::args args, const py::kwargs &kwargs)
 
py::object invokeOperatorFromPython (const std::vector< std::shared_ptr< Operator > > &operations, py::args args, const py::kwargs &kwargs)
 
void initPythonCustomClassBindings (PyObject *module)
 
std::string getPythonName (const PyObject *obj_)
 
std::ostream & printPyObject (std::ostream &out, const THPObjectPtr &obj)
 
std::vector< Node * > findAllNodes (c10::ArrayRef< torch::jit::Block * > blocks, Symbol kind, bool recurse=true)
 
std::vector< Node * > findAllNodes (Block *block, Symbol kind, bool recurse=true)
 
Node * findNode (c10::ArrayRef< torch::jit::Block * > blocks, Symbol kind, bool recurse=true)
 
Node * findNode (Block *block, Symbol kind, bool recurse=true)
 
void initPythonIRBindings (PyObject *module_)
 
std::string typeString (py::handle h)
 
c10::optional< StrongFunctionPtr > as_function (const py::object &obj)
 
void checkInterface (const SourceRange &loc, Function &m, const std::shared_ptr< ModuleValue > &self, const std::string &field)
 
void recurseThroughNestedModules (const SourceRange &loc, Function &m, std::vector< SugaredValuePtr > &keys, std::vector< SugaredValuePtr > &values, std::shared_ptr< ModuleValue > &self, const std::string &prefix, const std::string &field)
 
std::shared_ptr< SugaredEnumClass > createSugaredEnumClassFromObj (const py::object &obj, Function &m, const SourceRange &loc)
 
std::shared_ptr< SugaredValue > toSugaredValue (const IValue &v, Function &m, const SourceRange &loc)
 
bool isNamedTupleClass (const py::object &obj)
 
TypePtr registerNamedTuple (const py::object &obj, const SourceRange &loc)
 
bool isEnumClass (py::object obj)
 
std::shared_ptr< SugaredValue > createSimpleEnumValue (const py::object &obj, Function &m, const SourceRange &loc)
 
std::shared_ptr< SugaredValue > toSugaredValue (py::object obj, Function &m, const SourceRange &loc, bool is_constant)
 
std::shared_ptr< SugaredValue > toSimple (Value *v)
 
c10::optional< std::string > maybeConvertToString (const py::object &obj)
 
template<typename T >
List< T > wrap_list (const SourceRange &fallback_pos, std::vector< T > &&vec)
 
template<typename T >
Maybe< T > wrap_maybe (const SourceRange &fallback_pos, T *val)
 
void initTreeViewBindings (PyObject *module)
 
bool checkMutableFunctionDefault (const py::object &def_arg)
 
void checkMutableFunctionDefault (const SourceRange &range, const Argument &arg, const py::object &def_arg)
 
FunctionSchema getSchemaWithNameAndDefaults (const SourceRange &range, const FunctionSchema &schema, const at::optional< std::string > &new_name, const FunctionDefaults &default_args)
 
static Decl mergeDefaultsAndExtraParametersToOverloadDecl (const Decl &overload_decl, const Decl &impl_decl, const FunctionDefaults &defaults)
 
static StrongFunctionPtr script_compile_overloaded_function (const c10::QualifiedName &name, const Decl &overload_decl, const Def &implementation_def, const ResolutionCallback &rcb, const FunctionDefaults &implementation_defaults, const py::object &signature)
 
static StrongFunctionPtr script_compile_function (const c10::QualifiedName &name, const Def &def, const FunctionDefaults &defaults, const ResolutionCallback &rcb)
 
static TypePtr getTensorType (const at::Tensor &t, bool complete)
 
static TupleTypePtr getTupleTensorType (const Stack::const_iterator &s_iter, const Stack::const_iterator &s_iter_end, const TypePtr &tupleType, bool complete)
 
static void setInputTensorTypes (Graph &g, const Stack &stack, bool complete)
 
static std::shared_ptr< Graph > _propagate_shapes (Graph &graph, std::vector< at::Tensor > inputs, bool with_grad=false)
 
static std::shared_ptr< Graph > _propagate_and_assign_input_shapes (Graph &graph, const std::vector< at::Tensor > &inputs, bool with_grad=false, bool propagate=true)
 
void addFunctionToModule (Module &module, const StrongFunctionPtr &func)
 
bool ivalue_tags_match (const Module &lhs, const Module &rhs)
 
template<typename T >
py::list debugMakeList (const T &list)
 
template<typename T >
py::list debugMakeNamedList (const T &list)
 
template<typename T >
py::set debugMakeSet (const T &list)
 
static py::dict _jit_debug_module_iterators (Module &module)
 
IValue pyIValueDeepcopy (const IValue &ivalue, const py::dict &memo)
 
ExtraFilesMap extra_files_from_python (const py::dict &pydict)
 
void extra_files_to_python (const ExtraFilesMap &m, const py::dict &pydict)
 
void pyCompilationUnitDefine (CompilationUnit &cu, const std::string &src, const ResolutionCallback *rcb, const uint32_t _frames_up)
 
void initJitScriptBindings (PyObject *module)
 
void setGraphExecutorOptimize (bool o)
 
bool getGraphExecutorOptimize ()
 
static void scanWrittenSlots (Block *block, ArgumentSpecCreator::WrittenSlots &written_slots)
 
static at::Device ConvertIntToCPUOrCUDA (int device)
 
std::ostream & operator<< (std::ostream &out, const ArgumentInfo &info)
 
std::ostream & operator<< (std::ostream &out, const ArgumentSpec &spec)
 
std::ostream & operator<< (std::ostream &out, const CompleteArgumentInfo &info)
 
std::ostream & operator<< (std::ostream &out, const CompleteArgumentSpec &spec)
 
c10::optional< int8_t > convertOptional (c10::optional< c10::ScalarType > const &from)
 
void wrapDim (int64_t &dim, const std::vector< int64_t > &sizes)
 
bool needTrimGrad (Node *n)
 
bool isDifferentiable (const Node *n)
 
bool isDifferentiable (Graph &g)
 
static c10::optional< std::vector< Value * > > build_script_grad (Node *node, const ArrayRef< Value * > &grads)
 
static std::vector< Value * > linearGradientForNode (Node *node, ArrayRef< Value * > grad_values)
 
static Value * createAutogradAdd (Value *a, Value *b)
 
static ReverseDetails addReverseInline (Gradient &grad_desc)
 
static value_list getReverseCaptures (Gradient &grad_desc)
 
static void liftConstants (Block *block, Block *move_to_this_block)
 
static bool inBlock (Node *node, Block *container)
 
static void liftConstants (Node *node, Block *move_to_this_block)
 
static void foldSizeIfNotEqual (Block *node)
 
static void foldSizeIfNotEqual (Node *node)
 
static void deduplicateSizeCaptures (Gradient &grad_desc, ReverseDetails &rev_info)
 
static void eliminateDeadCode (ReverseDetails &rev_info)
 
static void Optimize (Gradient &grad_desc, ReverseDetails &rev_info)
 
static void lambdaLiftReverse (Gradient &grad_desc, ReverseDetails &rev_info)
 
void packReturnValuesIntoTuple (const std::shared_ptr< Graph > &graph)
 
Gradient differentiate (std::shared_ptr< Graph > &graph)
 
TORCH_API bool isZero (Value *v)
 
std::ostream & operator<< (std::ostream &out, const ExceptionMessage &msg)
 
void debugSetAutodiffSubgraphInlining (bool state)
 
bool getAutodiffSubgraphInlining ()
 
static std::atomic< bool > fusion_group_inlining (true)
 
void debugSetFusionGroupInlining (bool state)
 
bool getFusionGroupInlining ()
 
std::shared_ptr< Graph > lastExecutedOptimizedGraph ()
 
TORCH_API bool IsNewExecutorEnabled ()
 
void runRequiredPasses (const std::shared_ptr< Graph > &g)
 
void packGradient (const Gradient &gradient, Node *dnode)
 
static bool mayIntroduceGradient (const Block *b)
 
bool needsGradient (const std::shared_ptr< const Graph > &graph)
 
void runNondiffOptimization (std::shared_ptr< Graph > &graph, bool strict_fuser_check)
 
void runOptimization (std::shared_ptr< Graph > &graph, bool unroll, bool const_prop_user_classes)
 
Node * replaceBlockWithFallbackGraph (Block *b, ArrayRef< Value * > inputs)
 
TORCH_API std::atomic< bool > & getProfilingMode ()
 
TORCH_API std::atomic< bool > & getExecutorMode ()
 
TORCH_API std::atomic< size_t > & getNumProfiledRuns ()
 
TORCH_API std::atomic< size_t > & getBailoutDepth ()
 
std::ostream & operator<< (std::ostream &out, OpCode op)
 
const char * OpInfo (OpCode op)
 
bool isOpSupportedInMobile (OpCode op)
 
TensorTypePtr tensorTypeInCurrentExecutionContext (const at::Tensor &t)
 
template<class Ttarget , class Tsource >
Ttarget safe_narrow_cast (Tsource v)
 
std::vector< StackEntry > currentCallstack ()
 
std::ostream & operator<< (std::ostream &out, const Code &code)
 
bool aliasAnalysisHasSpecialCaseFor (Symbol symbol)
 
void registerOperator (Operator &&op)
 
void deregisterOperator (const FunctionSchema &schema)
 
const std::vector< std::shared_ptr< Operator > > getAllOperators ()
 
const std::vector< std::shared_ptr< Operator > > & getAllOperatorsFor (Symbol name)
 
std::shared_ptr< Operator > findOperatorFor (const c10::OperatorName &full_name)
 
std::vector< Symbol > findSimilarOperators (Symbol input_op)
 
std::shared_ptr< Operator > getOperatorForLiteral (const char *signature)
 
std::string canonicalSchemaString (const FunctionSchema &schema)
 
TORCH_API void ensure_c10_registerer_defined ()
 
template<typename Func >
c10::optional< Operator > OperatorGenerator (torch::detail::SelectiveStr< true > schema_str, Func &&op, AliasAnalysisKind alias_analysis)
 
template<typename Func >
c10::optional< Operator > OperatorGenerator (torch::detail::SelectiveStr< false > schema_str, Func &&op, AliasAnalysisKind alias_analysis)
 
std::atomic< PrintHandler > print_handler ([](const std::string &str) { std::cout << str; })
 
PrintHandler getPrintHandler ()
 
void setPrintHandler (PrintHandler ph)
 
static bool needsGradientInProfilingMode (Block *b)
 
bool guardDifferentiableGraph (Node *dnode)
 
void runNooptPassPipeline (std::shared_ptr< Graph > &graph)
 
void runPreAutodiffPassPipeline (std::shared_ptr< Graph > &graph)
 
void runDiffGraphPasses (std::shared_ptr< Graph > &graph)
 
void runNoGradOptimizations (std::shared_ptr< Graph > &graph)
 
Node * insertFallbackFunctionCall (Graph *graph, Function *func, ArrayRef< Value * > inputs)
 
Function * createFallbackPathFunction (Block *b, const std::string &function_name)
 
void RegisterProfilingNode (const std::function< bool(const Node *)> &func)
 
static void unprofileGraphInputs (const std::shared_ptr< Graph > &graph)
 
static void unprofileBlock (Block *start_block)
 
bool needsProfiledInputs (Node *n)
 
bool needsProfiledOutput (Node *n)
 
template<>
c10::impl::GenericList make_result_list< IValue > (const TypePtr &elemType)
 
template<>
void listIndex< at::Tensor > (Stack *stack)
 
template<>
void listCount< at::Tensor > (Stack *stack)
 
template<>
void listEq< at::Tensor > (Stack *stack)
 
template<>
void listNe< at::Tensor > (Stack *stack)
 
template<>
void listSort< at::Tensor > (Stack *stack)
 
template<>
void listCopyAndSort< at::Tensor > (Stack *stack)
 
template<>
void listRemove< at::Tensor > (Stack *stack)
 
void checkImplicitTensorToNum (const at::Tensor &t, bool toInt)
 
IValue tensorToListRecursive (char *data, int64_t cur_dim, int64_t num_tensor_dims, TypePtr ty, at::ScalarType scalar_ty, at::IntArrayRef sizes, at::IntArrayRef strides, size_t element_size)
 
void checkDoubleInRange (double a)
 
int64_t partProduct (int n, int m)
 
void loop (int n, int64_t &p, int64_t &r)
 
int nminussumofbits (int v)
 
int64_t factorial (int n)
 
double degrees (double x)
 
double radians (double x)
 
int64_t normalizeIndex (int64_t idx, int64_t list_size)
 
void listAppend (Stack *stack)
 
void listReverse (Stack *stack)
 
void listPopImpl (Stack *stack, const char *empty_message)
 
void listPop (Stack *stack)
 
void listClear (Stack *stack)
 
void listDelete (Stack *stack)
 
void listInsert (Stack *stack)
 
void listExtend (Stack *stack)
 
void listCopy (Stack *stack)
 
void listSelect (Stack *stack)
 
void listLen (Stack *stack)
 
void listList (Stack *stack)
 
void listAdd (Stack *stack)
 
void listInplaceAdd (Stack *stack)
 
void listMulIntLeftInPlace (Stack *stack)
 
void listMulIntLeft (Stack *stack)
 
void listMulIntRight (Stack *stack)
 
void listSlice (Stack *stack)
 
void listSetItem (Stack *stack)
 
c10::AliasAnalysisKind aliasAnalysisFromSchema ()
 
c10::AliasAnalysisKind aliasAnalysisConservative ()
 
c10::AliasAnalysisKind aliasAnalysisSpecialCase ()
 
template<class T >
c10::List< T > make_result_list (const TypePtr &elemType)
 
void noop (Stack *n)
 
double round_to_even (double a)
 
static int64_t floordiv (int64_t a, int64_t b)
 
static int64_t floor (double a)
 
static int64_t ceil (double a)
 
static int64_t gcd (int64_t a, int64_t b)
 
static at::Tensor to_dispatch (at::Tensor self, c10::optional< at::Device > device, c10::optional< at::ScalarType > scalarType, bool non_blocking, bool copy)
 
template<typename T >
T getItem (const c10::List< T > &list, int64_t idx)
 
template<typename T >
void setItem (const c10::List< T > &list, int64_t idx, T &&value)
 
template<typename T >
void minList (Stack *stack)
 
template<typename T >
void maxList (Stack *stack)
 
template<typename T >
void listRemove (Stack *stack)
 
template<typename T >
void listMin (Stack *stack)
 
template<typename T >
void listMax (Stack *stack)
 
template<typename T >
void listIndex (Stack *stack)
 
template<typename T >
void listCount (Stack *stack)
 
template<typename T >
void listEq (Stack *stack)
 
template<typename T >
void listNe (Stack *stack)
 
bool tensor_list_equal (const c10::List< at::Tensor > &a, const c10::List< at::Tensor > &b)
 
template<typename T >
void listContains (Stack *stack)
 
template<typename T >
void listSort (Stack *stack)
 
template<typename T >
void listCopyAndSort (Stack *stack)
 
int64_t slice_indices_adjust (int64_t length, int64_t *start, int64_t *stop, int64_t step)
 
void createFusionGroups (Block *block, AliasDb *aliasDb)
 
void fuseStaticSubgraphs (std::shared_ptr< Graph > graph)
 
Operation createStaticSubgraphRuntime (const Node *node)
 
bool canHandle (Node *node)
 
bool canMerge (Node *consumer, Node *producer, AliasDb *aliasDb)
 
Node * getOrCreateStaticSubgraph (Node *n, AliasDb *aliasDb)
 
value_list sortReverseTopological (ArrayRef< Value * > inputs, Block *b)
 
static void debugDumpFusionGroup (const std::string &msg, Node *n)
 
c10::optional< Node * > tryMerge (Node *fusion_group, Node *to_merge, AliasDb *aliasDb)
 
std::pair< graph_node_list::iterator, bool > createFusionGroup (Node *fusion_node, AliasDb *aliasDb)
 
std::pair< graph_node_list::iterator, bool > scanNode (Node *n, AliasDb *aliasDb)
 
void PrepareGraphForStaticRuntime (std::shared_ptr< torch::jit::Graph > graph)
 
TORCH_API std::shared_ptr< InferenceModule > PrepareForStaticRuntime (const torch::jit::Module &m, InferenceModuleOptions opts=InferenceModuleOptions())
 
TORCH_API std::shared_ptr< InferenceModule > PrepareForStaticRuntime (std::shared_ptr< torch::jit::Graph > g, InferenceModuleOptions opts=InferenceModuleOptions())
 
void initStaticRuntimeBindings (PyObject *module)
 
 C10_DEFINE_REGISTRY (SROperatorRegistry, SROperatorFunctor)
 
 C10_DEFINE_REGISTRY (SRViewOperatorRegistry, SROperatorFunctor)
 
bool canRunOutOfPlace (Node *n)
 
bool canReuseInputsOutputs (Node *n)
 
bool isViewOp (Node *n)
 
bool canReuseInputs (Node *n)
 
bool canReuseOutputs (Node *n)
 
bool canRunNatively (Node *n)
 
 REGISTER_OPERATOR_FUNCTOR (aten::add, aten_add, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();auto &in1_t=p_node->Input(1).toTensor();auto in2_s=p_node->Input(2).toScalar();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::cpu::add_out(out_t, in0_t, in1_t, in2_s);};})
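For readability, here is the aten::add registration above reflowed; the same out-variant pattern (allocate the output once, then reuse and resize its storage) is shared by the other functors in this registry:

    REGISTER_OPERATOR_FUNCTOR(aten::add, aten_add, [](Node* n) -> SROperator {
      return [](ProcessedNode* p_node) {
        auto& in0_t = p_node->Input(0).toTensor();
        auto& in1_t = p_node->Input(1).toTensor();
        auto in2_s = p_node->Input(2).toScalar();
        // First run: allocate the output; later runs reuse its storage.
        if (p_node->Output(0).isNone()) {
          p_node->Output(0) = create_empty_from(in0_t);
        }
        auto& out_t = p_node->Output(0).toTensor();
        fastResizeToZero(out_t);  // cheap resize so add_out can size the result
        at::cpu::add_out(out_t, in0_t, in1_t, in2_s);
      };
    });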
 
 REGISTER_OPERATOR_FUNCTOR (aten::mul, aten_mul, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();auto &in1_t=p_node->Input(1).toTensor();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::mul_out(out_t, in0_t, in1_t);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::addmm, aten_addmm, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();auto &in1_t=p_node->Input(1).toTensor();auto &in2_t=p_node->Input(2).toTensor();auto in3_s=p_node->Input(3).toScalar();auto in4_s=p_node->Input(4).toScalar();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::addmm_cpu_out(out_t, in0_t, in1_t, in2_t, in3_s, in4_s);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::clamp, aten_clamp, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();auto in1_s=p_node->Input(1).toScalar();auto in2_s=p_node->Input(2).toScalar();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::clamp_out(out_t, in0_t, in1_s, in2_s);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::bmm, aten_bmm, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();auto &in1_t=p_node->Input(1).toTensor();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::bmm_out_cpu(out_t, in0_t, in1_t);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::nan_to_num, aten_nan_to_num, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto input_size=p_node->inputs().size();auto &in0_t=p_node->Input(0).toTensor();double in1_d=input_size > 1 ? p_node->Input(1).toDouble() :0;double in2_d=input_size > 2 ? p_node->Input(2).toDouble() :std::numeric_limits< double >::infinity();double in3_d=input_size > 3 ? p_node->Input(3).toDouble() :-std::numeric_limits< double >::infinity();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::nan_to_num_out(out_t, in0_t, in1_d, in2_d, in3_d);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::cat, aten_cat, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto in0_tl=p_node->Input(0).toTensorVector();auto in1_i=p_node->Input(1).toInt();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_tl[0]);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::_cat_out_cpu(out_t, in0_tl, in1_i);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::tanh, aten_tanh, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::tanh_out(out_t, in0_t);};})
 
SROperator aten_stack (Node *n)
 
 REGISTER_OPERATOR_FUNCTOR (aten::stack, aten_stack, aten_stack)
 
 REGISTER_OPERATOR_FUNCTOR (aten::sigmoid, aten_sigmoid, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::sigmoid_out(out_t, in0_t);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::leaky_relu, aten_leaky_relu, [](Node *n) -> SROperator { auto in1=toIValue(n->inputs()[1]);if(in1) { auto in1_s=in1->toScalar();return[=](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();at::native::leaky_relu_out(out_t, in0_t, in1_s);};} else { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();auto in1_s=p_node->Input(1).toScalar();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();at::native::leaky_relu_out(out_t, in0_t, in1_s);};} })
 
 REGISTER_OPERATOR_FUNCTOR (aten::relu, aten_relu, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::threshold_out(out_t, in0_t, 0, 0);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::logit, aten_logit, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();double in1_d=p_node->inputs().size() > 1 ? p_node->Input(1).toDouble() :-1.0;if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);at::native::logit_out(out_t, in0_t, in1_d);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::clone, aten_clone, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &in0_t=p_node->Input(0).toTensor();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(in0_t);} auto &out_t=p_node->Output(0).toTensor();at::native::resize_as_(out_t, in0_t, c10::nullopt);at::native::copy_(out_t, in0_t, false);};})
 
 REGISTER_OPERATOR_FUNCTOR_OPT (quantized::embedding_bag_byte_rowwise_offsets, quantized_embedding_bag_byte_rowwise_offsets, false, true, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &weight=p_node->Input(0).toTensor();auto &indices=p_node->Input(1).toTensor();auto offsets=p_node->Input(2).toOptional< at::Tensor >();auto pruned_weights=p_node->Input(5).toBool();auto per_sample_weights=p_node->Input(6).toOptional< at::Tensor >();auto compressed_indices_mapping=p_node->Input(7).toOptional< at::Tensor >();auto include_last_offset=p_node->Input(8).toBool();if(p_node->Output(0).isNone()) { p_node->Output(0)=at::empty({0}, weight.options().dtype(at::kFloat));} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);return at::native::embedding_bag_byte_rowwise_offsets_out(out_t, weight, indices, offsets, false, 0, pruned_weights, per_sample_weights, compressed_indices_mapping, include_last_offset);};})
 
 REGISTER_OPERATOR_FUNCTOR_OPT (quantized::embedding_bag_4bit_rowwise_offsets, embedding_bag_4bit_rowwise_offsets, false, true, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &weight=p_node->Input(0).toTensor();auto &indices=p_node->Input(1).toTensor();auto offsets=p_node->Input(2).toOptional< at::Tensor >();auto pruned_weights=p_node->Input(5).toBool();auto per_sample_weights=p_node->Input(6).toOptional< at::Tensor >();auto compressed_indices_mapping=p_node->Input(7).toOptional< at::Tensor >();auto include_last_offset=p_node->Input(8).toBool();if(p_node->Output(0).isNone()) { p_node->Output(0)=at::empty({0}, weight.options().dtype(at::kFloat));} auto &out_t=p_node->Output(0).toTensor();fastResizeToZero(out_t);return at::native::embedding_bag_byte_rowwise_offsets_out(out_t, weight, indices, offsets, false, 0, pruned_weights, per_sample_weights, compressed_indices_mapping, include_last_offset);};})
 
 REGISTER_OPERATOR_FUNCTOR (aten::narrow, aten_narrow, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &self=p_node->Input(0).toTensor();auto dim=p_node->Input(1).toInt();int64_t start=0;if(p_node->Input(2).isScalar()) { start=p_node->Input(2).toInt();} else { auto &t=p_node->Input(2).toTensor();start=t.item< int64_t >();} auto length=p_node->Input(3).toInt();if(p_node->Output(0).isNone()) { p_node->Output(0)=create_empty_from(self);} auto &output=p_node->Output(0).toTensor();fastResizeToZero(output);at::native::narrow_copy_dense_cpu_out(self, dim, start, length, output);};})
 
 REGISTER_VIEW_OPERATOR_FUNCTOR (aten::reshape, aten_reshape, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { auto &self=p_node->Input(0).toTensor();auto proposed_shape=p_node->Input(1).toIntVector();if(p_node->Output(0).isNone()) { p_node->Output(0)=at::Tensor();} auto &out=p_node->Output(0).toTensor();at::native::reshape_out(out, self, proposed_shape, true);};})
 
 REGISTER_VIEW_OPERATOR_FUNCTOR (aten::flatten, aten_flatten, [](Node *n) -> SROperator { return[](ProcessedNode *p_node) { DCHECK(p_node->inputs().size()==3);auto &self=p_node->Input(0).toTensor();auto start_dim=p_node->Input(1).toInt();auto end_dim=p_node->Input(2).toInt();if(p_node->Output(0).isNone()) { p_node->Output(0)=at::Tensor();} auto &out=p_node->Output(0).toTensor();at::native::flatten_out(out, self, start_dim, end_dim);};})
 
std::function< void(ProcessedNode *)> getOutOfPlaceOperation (Node *n)
 
std::function< void(ProcessedNode *)> getNativeOperation (Node *n)
 
 C10_DECLARE_REGISTRY (SROperatorRegistry, SROperatorFunctor)
 
 C10_DECLARE_REGISTRY (SRViewOperatorRegistry, SROperatorFunctor)
 
at::Tensor create_empty_from (const at::Tensor &t)
 
bool checkResizedDataPtr (at::Tensor &t)
 
void fastResizeToZero (at::Tensor &t)
 
void ConcatAddMulReplaceNaNClip (std::shared_ptr< torch::jit::Graph > &graph)
 
void CastedBatchOneHotLengths (std::shared_ptr< torch::jit::Graph > &graph)
 
void ConcatBatchMatMulBatchGather (std::shared_ptr< torch::jit::Graph > &graph)
 
void ClipRangesGatherRangesLengthsToOffsets (std::shared_ptr< torch::jit::Graph > &graph)
 
void ClipRangesGatherSigridHash (std::shared_ptr< torch::jit::Graph > &graph)
 
void ClipRangesGatherRangesSigridHash (std::shared_ptr< torch::jit::Graph > &graph)
 
void FuseInferenceOpsForSparseNN (std::shared_ptr< torch::jit::Graph > &graph)
 
std::pair< std::shared_ptr< Graph >, Value * > extractClosure (Value *closure)
 
Argument originalReturnType (const TupleTypePtr &tup)
 
std::string overloadedSchemaString (const FunctionSchema &schema)
 
bool isHelperFunction (const std::string &method_name)
 
void loadModule (const CompilationUnit &module)
 
void loadFunctions ()
 
c10::optional< GradientPair > gradientInfoForSchema (const FunctionSchema &schema)
 
bool hasGradientInfoForSchema (const FunctionSchema &schema)
 
void tupleUnpack (Stack &stack)
 
void format (Stack &stack, size_t num_inputs)
 
void percentFormat (Stack &stack, size_t num_inputs)
 
void listUnpack (Stack &stack, size_t num_outputs)
 
void tupleConstruct (Stack &stack, size_t num_inputs)
 
void namedTupleConstruct (Stack &stack, at::TupleTypePtr type, size_t num_inputs)
 
void listConstruct (Stack &stack, const at::ListType &type, size_t num_inputs)
 
void dictConstruct (Stack &stack, const at::DictTypePtr &type, size_t num_inputs)
 
void createObject (Stack &stack, const at::ClassTypePtr &type)
 
void isinstance (Stack &stack, at::ArrayRef< at::TypePtr > types)
 
void tupleSlice (Stack &stack, size_t begin, size_t end)
 
void dequantize (Stack &stack)
 
void writeArchiveAndTensors (const std::string &archive_name, const char *data, size_t size, const std::vector< at::Tensor > &tensors, caffe2::serialize::PyTorchStreamWriter &out)
 
std::string pretty_print_onnx (const std::shared_ptr< Graph > &graph, const std::map< std::string, at::Tensor > &initializers, int64_t onnx_opset_version, bool defer_weight_export, ::torch::onnx::OperatorExportTypes operator_export_type, bool google_printer, bool keep_initializers_as_inputs, const std::map< std::string, int > &custom_opsets, bool add_node_names)
 
std::tuple< std::shared_ptr<::ONNX_NAMESPACE::ModelProto >, RawDataExportMap, SymbolDimMap > export_onnx (const std::shared_ptr< Graph > &graph, const std::map< std::string, at::Tensor > &initializers, int64_t onnx_opset_version, const std::unordered_map< std::string, std::unordered_map< std::int64_t, std::string > > &dynamic_axes, bool defer_weight_export, ::torch::onnx::OperatorExportTypes operator_export_type, bool strip_doc_string, bool keep_initializers_as_inputs, const std::map< std::string, int > &custom_opsets, bool add_node_names)
 
std::string serialize_model_proto_to_string (const std::shared_ptr<::ONNX_NAMESPACE::ModelProto > &model_proto)
 
void check_onnx_proto (const std::string &proto_string)
 
TORCH_API std::tuple< std::shared_ptr<::ONNX_NAMESPACE::ModelProto >, RawDataExportMap, SymbolDimMap > export_onnx (const std::shared_ptr< Graph > &graph, const std::map< std::string, at::Tensor > &initializers, int64_t onnx_opset_version, const std::unordered_map< std::string, std::unordered_map< int64_t, std::string > > &dynamic_axes, bool defer_weight_export=false, ::torch::onnx::OperatorExportTypes operator_export_type=::torch::onnx::OperatorExportTypes::ONNX, bool strip_doc_string=true, bool keep_initializers_as_inputs=true, const std::map< std::string, int > &custom_opsets={}, bool add_node_names=true, bool use_external_data_format=false, const std::string &onnx_file_path=std::string())
 
TORCH_API void ExportModule (const Module &module, std::ostream &out, const ExtraFilesMap &metadata=ExtraFilesMap(), bool bytecode_format=false, bool save_mobile_debug_info=false)
 
TORCH_API void ExportModule (const Module &module, const std::string &filename, const ExtraFilesMap &metadata=ExtraFilesMap(), bool bytecode_format=false, bool save_mobile_debug_info=false)
 
TORCH_API void ExportModule (const Module &module, const std::function< size_t(const void *, size_t)> &writer_func, const ExtraFilesMap &metadata=ExtraFilesMap(), bool bytecode_format=false, bool save_mobile_debug_info=false)
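A hedged one-line usage sketch of the filename overload above; "model.pt" is a placeholder path and the remaining parameters take their defaults:

    torch::jit::ExportModule(module, "model.pt");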
 
TORCH_API void SetExportModuleExtraFilesHook (ExportModuleExtraFilesHook hook)
 
TORCH_API void SetExportModuleMobileInfoConverter (ExportModuleMobileInfoConverter converter)
 
TORCH_API std::vector< std::string > export_opnames (const Module &m)
 
void moduleMethodsTuple (const Module &module, std::vector< c10::IValue > &elements, c10::optional< std::vector< c10::IValue > > &debug_info_elements, bool save_mobile_debug_info)
 
void postSetStateValidate (const IValue &v)
 
IValue readArchiveAndTensors (const std::string &archive_name, c10::optional< TypeResolver > type_resolver, c10::optional< ObjLoader > obj_loader, c10::optional< at::Device > device, PyTorchStreamReader &stream_reader)
 
Module import_ir_module (std::shared_ptr< CompilationUnit > cu, std::istream &in, c10::optional< at::Device > device)
 
Module import_ir_module (std::shared_ptr< CompilationUnit > cu, std::istream &in, c10::optional< at::Device > device, ExtraFilesMap &extra_files)
 
Module import_ir_module (std::shared_ptr< CompilationUnit > cu, const std::string &filename, c10::optional< at::Device > device)
 
Module import_ir_module (std::shared_ptr< CompilationUnit > cu, const std::string &filename, c10::optional< at::Device > device, ExtraFilesMap &extra_files)
 
Module import_ir_module (std::shared_ptr< CompilationUnit > cu, std::unique_ptr< ReadAdapterInterface > rai, c10::optional< at::Device > device)
 
Module import_ir_module (std::shared_ptr< CompilationUnit > cu, std::unique_ptr< ReadAdapterInterface > rai, c10::optional< at::Device > device, ExtraFilesMap &extra_files)
 
Module load (std::istream &in, c10::optional< c10::Device > device=c10::nullopt)
 Loads a serialized Module from the given istream. More...
 
Module load (std::istream &in, c10::optional< at::Device > device, ExtraFilesMap &extra_files)
 
Module load (const std::string &filename, c10::optional< c10::Device > device=c10::nullopt)
 Loads a serialized Module from the given filename. More...
 
Module load (const std::string &filename, c10::optional< at::Device > device, ExtraFilesMap &extra_files)
 
Module load (std::shared_ptr< caffe2::serialize::ReadAdapterInterface > rai, c10::optional< c10::Device > device=c10::nullopt)
 Loads a serialized Module from the given shared_ptr rai. More...
 
Module load (std::shared_ptr< ReadAdapterInterface > rai, c10::optional< c10::Device > device, ExtraFilesMap &extra_files)
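A hedged usage sketch of the filename-based load overload above; "model.pt" and the input shape are placeholders:

    #include <torch/script.h>

    torch::jit::Module module = torch::jit::load("model.pt");
    std::vector<c10::IValue> inputs;
    inputs.push_back(torch::ones({1, 3, 224, 224}));
    at::Tensor output = module.forward(inputs).toTensor();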
 
void moduleMethodsTuple (const Module &module, std::vector< c10::IValue > &elements)
 
std::string qualifierToArchivePath (const std::string &qualifier, const std::string &export_prefix)
 
std::shared_ptr< Source > findSourceInArchiveFromQualifier (caffe2::serialize::PyTorchStreamReader &reader, const std::string &export_prefix, const std::string &qualifier)
 
Module LEGACY_deserialize (std::shared_ptr< CompilationUnit > cu, std::shared_ptr< caffe2::serialize::PyTorchStreamReader > reader, const c10::optional< c10::Device > &device)
 
std::string prettyPrint (const ::ONNX_NAMESPACE::ModelProto &model)
 
void pickle (std::function< void(const char *data_start, size_t data_len)> writer, const IValue &ivalue, std::vector< at::Tensor > *tensor_table=nullptr)
 Pickle an IValue by calling a function to handle writing the data. More...
 
std::vector< char > pickle (const IValue &ivalue, std::vector< at::Tensor > *tensor_table=nullptr)
 Save a torch::IValue in a format compatible with Python's pickle module. More...
 
std::vector< char > pickle_save (const IValue &ivalue)
 Save a torch::IValue in a format that can be loaded by both torch::pickle_load in C++ and torch.load in Python. More...
 
IValue pickle_load (const std::vector< char > &data)
 Deserialize a torch::IValue from bytes produced by either torch::pickle_save in C++ or torch.save in Python. More...
 
IValue unpickle (std::function< size_t(char *, size_t)> reader, TypeResolver type_resolver, const std::vector< at::Tensor > *tensor_table)
 reader is a function that, given a size, reads that many bytes from some pickled binary. More...
 
IValue unpickle (const char *data, size_t size, TypeResolver type_resolver=nullptr, const std::vector< at::Tensor > *tensor_table=nullptr)
 Decode a chunk of memory containing pickled data into its torch::IValues. More...
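A hedged round-trip sketch using pickle_save and pickle_load from the listing above:

    c10::IValue value(int64_t(42));
    std::vector<char> bytes = torch::jit::pickle_save(value);
    c10::IValue restored = torch::jit::pickle_load(bytes);
    // restored.toInt() == 42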
 
static double swapDouble (double value)
 
WriteableTensorData getWriteableTensorData (const at::Tensor &tensor, bool to_cpu)
 
bool checkHasValidSetGetState (const std::shared_ptr< c10::ClassType > &cls)
 
void setTypeTags (bool state)
 
bool getTypeTags ()
 
uint64_t getStorageKey (const at::Tensor &tensor)
 
static bool isValidIdentifierChar (char c, size_t pos)
 
static bool isValidIdentifier (const std::string &name)
 
TORCH_API bool printerHasSpecialCaseFor (c10::Symbol sym)
 
static void restoreAccurateTypeTagsIfPossible (const IValue &root)
 
void restoreAccurateTypeTags (const IValue &root, const TypePtr &type_tag)
 
void restoreContainerTypeTags (IValue &ivalue, const TypePtr &type)
 
template<typename T >
void append (std::vector< T > &a, T &&e)
 
template<>
void append< bool > (std::vector< bool > &a, bool &&e)
 
static std::vector< int64_t > tupleToIntList (const IValue &v)
 
template<typename T >
static std::vector< T > convertList (const IValue &v)
 
bool is_valid_python_id_char (char c)
 
void initTensorExprBindings (PyObject *module)
 
def export_opnames (m)
 
def annotate (the_type, the_value)
 
def script_if_tracing (fn)
 
def isinstance (obj, target_type)
 

Variables

const char * schema_declarations
 
thread_local bool inline_everything = false
 
constexpr int kCPUDevice = -1
 
auto scalar_operators_source
 
auto _ntuple_ops
 
auto floordiv
 
auto tensor_properties
 
auto aten_ops
 
const auto aten_ops_additional
 
auto _test_serialization_subcmul
 
auto div_tensor
 
auto div_tensor_scalar
 
auto div_scalar_scalar
 
auto div_tensor_out
 
auto div__tensor
 
auto div__scalar
 
auto full
 
auto full_out
 
thread_local std::vector< Call > calls
 
static const std::unordered_map< int, int > binary_prec
 
static const std::unordered_map< int, int > unary_prec
 
static const char * valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~"
 
static const TreeList empty_trees = {}
 
constexpr int max_tensor_display_size = 10
 
static constexpr int kNextDirection = 0
 
static constexpr int kPrevDirection = 1
 
static constexpr topo_position_t kLowerBound = INT64_MIN
 
static constexpr topo_position_t kUpperBound = INT64_MAX
 
static constexpr topo_position_t kMidPoint = 0
 
static constexpr topo_position_t kAppendInterval = 1099511627776ULL
 
static constexpr size_t min_fusion_size = 4
 
RegisterOperators mm_tree_reduction_reg ({Operator("prim::MMTreeReduce(...) -> Tensor", [](Stack *stack) { auto num_inputs=pop(stack).toInt();std::vector< at::Tensor > inputs;inputs.reserve(num_inputs);for(auto it=stack->end() - num_inputs;it !=stack->end();++it) { inputs.push_back(std::move(*it).toTensor());} drop(stack, num_inputs);AT_ASSERT(inputs.size() > 0);AT_ASSERT(inputs.size() % 2==0);size_t side_num_elems=inputs.size()/2;auto lhs_inputs=at::TensorList(inputs).slice(0, side_num_elems);auto rhs_inputs=at::TensorList(inputs).slice(side_num_elems);if(have_same_shape(lhs_inputs) &&have_same_shape(rhs_inputs) &&shape_is_fast_for_reduce(lhs_inputs[0], rhs_inputs[0])) { bool lhs_input_transposed=should_be_transposed(lhs_inputs);bool rhs_input_transposed=should_be_transposed(rhs_inputs);at::Tensor lhs, rhs;if(lhs_input_transposed) { std::vector< at::Tensor > lhs_contig_inputs=transpose_inputs(lhs_inputs);lhs=at::cat(lhs_contig_inputs, 0);lhs=lhs.t();} else { lhs=at::cat(lhs_inputs, 1);} if(rhs_input_transposed) { std::vector< at::Tensor > rhs_contig_inputs=transpose_inputs(rhs_inputs);rhs=at::cat(rhs_contig_inputs, 1);rhs=rhs.t();} else { rhs=at::cat(rhs_inputs, 0);} push(stack, at::mm(lhs, rhs));} else { auto acc=at::mm(inputs[0], inputs[side_num_elems]);for(size_t i=1;i< side_num_elems;++i) { acc.add_(at::mm(inputs[i], inputs[side_num_elems+i]));} push(stack, std::move(acc));} }, aliasAnalysisIsSpecialCase())})
 
RegisterOperators mm_batch_side_reg ({Operator(prim::MMBatchSide, [](const Node *node) -> Operation { size_t num_other_side_inputs=node->inputs().size() - 1;Side single_side=static_cast< Side >(node->i(Symbol::attr("side")));return[num_other_side_inputs, single_side](Stack *stack) { at::Tensor side_input;std::vector< at::Tensor > other_side_inputs;other_side_inputs.reserve(num_other_side_inputs);for(auto it=stack->end() - num_other_side_inputs;it !=stack->end();++it) { other_side_inputs.push_back(std::move(*it).toTensor());} drop(stack, num_other_side_inputs);pop(stack, side_input);auto any_other_input=other_side_inputs[0];if(have_same_shape(other_side_inputs) &&shape_is_fast_for_side(other_side_inputs[0])) { auto other_side_input=at::cat(other_side_inputs, single_side==Side::LHS ? 1 :0);auto mm_out=single_side==Side::LHS ? side_input.mm(other_side_input) :other_side_input.mm(side_input);auto outputs=at::chunk(mm_out, num_other_side_inputs, single_side==Side::LHS ? 1 :0);stack->insert(stack->end(), std::make_move_iterator(outputs.begin()), std::make_move_iterator(outputs.end()));} else { if(single_side==Side::LHS) { for(at::Tensor &other :other_side_inputs) { stack->emplace_back(side_input.mm(other));} } else { for(at::Tensor &other :other_side_inputs) { stack->emplace_back(other.mm(side_input));} } } };}, aliasAnalysisIsSpecialCase())})
 
RegisterOperators reg_ops ({Operator("aten::_ncf_unsqueeze(Tensor(a) self, int ndim) -> Tensor(a)", [](Stack *stack) { const int64_t ndim=pop(stack).toInt();auto self=pop(stack).toTensor();c10::SmallVector< int64_t, 8 > sizes(ndim, 1);AT_ASSERT(self.dim()==1);sizes.at(1)=self.size(0);push(stack, self.reshape(sizes));}, aliasAnalysisFromSchema()), Operator("aten::_ncf_view(Tensor(a) self, int[] input_shape, int normalized_ndim) -> Tensor(a)", [](Stack *stack) { const int64_t normalized_ndim=pop(stack).toInt();auto input_shape=pop(stack).toIntList();auto self=pop(stack).toTensor();const int64_t input_ndim=input_shape.size();c10::SmallVector< int64_t, 8 > sizes(input_ndim, 1);for(int i=0;i< input_ndim - normalized_ndim;++i) { sizes.at(i)=input_shape.get(i);} push(stack, self.reshape(sizes));}, aliasAnalysisFromSchema())})
 
const int ONNX_OPSET_9 = 9
 
const int ONNX_OPSET_10 = 10
 
const int ONNX_OPSET_11 = 11
 
const int ONNX_OPSET_12 = 12
 
const int ONNX_OPSET_13 = 13
 
static const int OPSET_VERSION_1 = 1
 
static const int OPSET_VERSION_9 = 9
 
static const int OPSET_VERSION_10 = 10
 
static const int OPSET_VERSION_11 = 11
 
static const int OPSET_VERSION_12 = 12
 
static const int OPSET_VERSION_13 = 13
 
static GraphPassNameType graphPassID = 1
 
std::vector< std::string > _static_quantizable_call_funcs
 
std::vector< std::string > _static_quantizable_aten_funcs
 
std::vector< std::string > _dynamic_quantizable_call_funcs
 
std::vector< std::string > _dynamic_quantizable_aten_funcs
 
std::vector< std::string > _static_weight_only_quant_aten_funcs
 
std::vector< std::string > _static_weight_only_quant_call_funcs
 
std::vector< std::string > _single_input_general_shape_call_funcs
 
std::vector< std::string > _single_input_general_shape_aten_funcs
 
std::vector< std::string > _single_input_general_value_call_funcs
 
std::vector< std::string > _single_input_general_value_aten_funcs
 
std::vector< std::string > _clamp_funcs
 
const float _asym_scale = 1.0f / 256.0f
 
const int _asym_zero_point = 0
 
const float _sym_scale = 2.0f / 256.0f
 
const int _sym_zero_point = 128
 
std::tuple< c10::QScheme, QParamVector > _per_tensor_asym_qparam
 
std::tuple< c10::QScheme, QParamVector > _per_tensor_sym_qparam
 
std::unordered_map< NodeKind, std::tuple< c10::QScheme, QParamVector > > _fixed_qparams_map
 
AtenFuncArgs _observe_inputs_aten_func = {}
 
CallFuncArgs _observe_inputs_call_func = {{"batch_norm", 1}}
 
std::vector< std::string > _tensor_info_funcs = {"size", "len", "dim", "numel"}
 
std::vector< std::string > _propagate_quant_single_input_ops = {"cat"}
 
std::vector< std::string > _propagate_quant_binary_ops
 
static const auto countsAttribute = Symbol::attr("none_counts")
 
static bool texpr_reductions_enabled = false
 
static bool texpr_fuser_enabled_ = true
 
RegisterOperators TensorExprOps ({ torch::jit::Operator(prim::TensorExprGroup, createTensorExprOp, AliasAnalysisKind::INTERNAL_SPECIAL_CASE), })
 
static constexpr std::array< const char *, 47 > magic_method_names
 
thread_local bool kOptimize = true
 
thread_local bool autodiff_subgraph_inlining = true
 
thread_local std::weak_ptr< Graph > last_executed_optimized_graph
 
RegisterOperators reg_graph_executor_ops ({Operator(prim::DifferentiableGraph, [](const Node *n) -> Operation { return DifferentiableGraphOp(getGradient(n));}, aliasAnalysisInternalSpecialCase())})
 
const size_t autodiffSubgraphNodeThreshold = 2
 
const size_t autodiffSubgraphInlineThreshold = 5
 
static constexpr size_t instruction_size = 8
 
static constexpr char * strOpCode []
 
thread_local InterpreterStateImpl * tls_int_state_ptr_ = nullptr
 
static std::atomic< bool > executor_mode {true}
 
static std::atomic< bool > profiling_mode {true}
 
static std::atomic< size_t > num_profiled_runs {kDefaultNumProfiledRuns}
 
static std::atomic< size_t > bailout_depth {kDefaultBailoutDepth}
 
static const double degToRad = std::acos(-1.0) / 180.0
 
static const double radToDeg = 180.0 / std::acos(-1.0)
 
RegisterOperators StaticSubgraphOps ({torch::jit::Operator(prim::StaticSubgraph, createStaticSubgraphRuntime, AliasAnalysisKind::INTERNAL_SPECIAL_CASE)})
 
constexpr size_t BYTECODE_INDEX_INSTRUCTION = 0
 
constexpr size_t BYTECODE_INDEX_OPERATOR = 1
 
constexpr size_t BYTECODE_INDEX_CONSTANT = 2
 
constexpr size_t BYTECODE_INDEX_TYPE = 3
 
constexpr size_t BYTECODE_INDEX_REGISTER_SIZE = 4
 
constexpr size_t BYTECODE_INDEX_SCHEMA_ARGUMENTS = 0
 
constexpr size_t BYTECODE_INDEX_SCHEMA_RETURNS = 1
 
constexpr size_t BYTECODE_INDEX_ARGUMENT_NAME = 0
 
constexpr size_t BYTECODE_INDEX_ARGUMENT_TYPE = 1
 
constexpr size_t BYTECODE_INDEX_ARGUMENT_DEFAULT_VALUE = 2
 
constexpr size_t BYTECODE_INDEX_MODULE_DEBUG_INFO = 0
 
static const std::string kExportSuffix = "py"
 
constexpr static uint8_t PROTOCOL_VERSION = 2
 
static const std::unordered_set< std::string > reserved_names
 
 _fork = fork
 
 _wait = wait
 
 Error = torch._C.JITException
 
 __name__
 
 __qualname__
 

Typedef Documentation

◆ AliasAnalysisKind

Definition at line 8 of file operator_options.h.

◆ ArrayRef

template<typename T >
using torch::jit::ArrayRef = typedef at::ArrayRef<T>

Definition at line 146 of file ir.h.

◆ AtenFuncArgs

using torch::jit::AtenFuncArgs = typedef std::vector<FuncArg>

Definition at line 15 of file helper.cpp.

◆ attribute_list

Definition at line 78 of file module.h.

◆ AttributeMap

using torch::jit::AttributeMap = typedef std::unordered_map<std::string, Const>

Definition at line 43 of file ir_emitter.cpp.

◆ buffer_list

Definition at line 82 of file module.h.

◆ CallFuncArgs

using torch::jit::CallFuncArgs = typedef std::vector<FuncArg>

Definition at line 16 of file helper.cpp.

◆ ClassMethodDefaults

using torch::jit::ClassMethodDefaults = typedef std::unordered_map<std::string, FunctionDefaults>

Definition at line 56 of file script_init.cpp.

◆ const_graph_node_list

Definition at line 52 of file graph_node_list.h.

◆ const_graph_node_list_iterator

◆ Dimension

using torch::jit::Dimension = typedef int64_t

Definition at line 83 of file profiling_record.h.

◆ ExportModuleExtraFilesHook

using torch::jit::ExportModuleExtraFilesHook = typedef std::function<ExtraFilesMap(const Module&)>

Definition at line 101 of file export.h.

◆ ExportModuleMobileInfoConverter

using torch::jit::ExportModuleMobileInfoConverter = typedef std::function<c10::Dict<std::string, std::string>( const Module&, const std::unordered_map<std::string, std::string>&)>

Definition at line 104 of file export.h.

◆ ExtraFilesMap

typedef std::unordered_map< std::string, std::string > torch::jit::ExtraFilesMap

Definition at line 42 of file module.h.

◆ FloatAttr

Definition at line 92 of file attributes.h.

◆ FloatsAttr

Definition at line 93 of file attributes.h.

◆ FunctionDefaults

using torch::jit::FunctionDefaults = typedef std::unordered_map<std::string, py::object>

Definition at line 55 of file script_init.cpp.

◆ FunctionTable

using torch::jit::FunctionTable = typedef std::unordered_map<std::string, Function&>

Definition at line 40 of file ir_emitter.cpp.

◆ graph_node_list

Definition at line 51 of file graph_node_list.h.

◆ graph_node_list_iterator

◆ GraphPass

using torch::jit::GraphPass = typedef std::function<void(std::shared_ptr<Graph>&)>

Definition at line 24 of file pass_manager.h.
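A minimal sketch of a pass with this signature, registered via the RegisterPass helper listed below (assuming its constructor takes a GraphPass):

    void myCustomPass(std::shared_ptr<Graph>& graph) {
      // inspect or mutate the graph here
    }
    static RegisterPass reg(myCustomPass);  // runs as part of the pass pipeline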

◆ GraphPassEntry

Definition at line 33 of file pass_manager.h.

◆ GraphPassNameType

using torch::jit::GraphPassNameType = typedef unsigned int

Definition at line 28 of file pass_manager.h.

◆ InferredType

Definition at line 272 of file pybind_utils.h.

◆ InlinedCallStackEntry

Definition at line 110 of file scope.h.

◆ InlinedCallStackPtr

InlinedCallStack is an element in a list representing callstack of functions that have been inlined.

Each such element holds info about the current callsite (Function and SourceRange) and a pointer to the next element in the list. The last element in the list represents the innermost function that was inlined.

For instance, if a node has a callstack [foo, source_range1] -> [bar, source_range2] it means that this node was originally from function 'bar' that was called at 'source_range2' in function 'foo' that was called in the current function at 'source_range1'.

If a node did not come from any inlined function, its callstack will be empty.

The callstack lists only grow; we never remove elements from them, which allows us to reuse the same elements in different lists. For instance, if we inline function 'bar' into 'foo' and then inline 'foo' into two functions 'ham' and 'baz', the callstacks would look like:

[baz, source_range3] --\
                        --> [foo, source_range1] -> [bar, source_range2]
[ham, source_range4] --/

Definition at line 109 of file scope.h.
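The shared-tail property can be pictured with a hypothetical linked node; this is an illustration, not the actual InlinedCallStack API:

    struct Frame {  // hypothetical stand-in for an InlinedCallStack element
      std::string fn, source_range;
      std::shared_ptr<Frame> next;  // next (more deeply inlined) callsite
    };
    auto bar = std::make_shared<Frame>(Frame{"bar", "source_range2", nullptr});
    auto foo = std::make_shared<Frame>(Frame{"foo", "source_range1", bar});
    // Both callers reuse the existing [foo] -> [bar] tail:
    auto baz = std::make_shared<Frame>(Frame{"baz", "source_range3", foo});
    auto ham = std::make_shared<Frame>(Frame{"ham", "source_range4", foo});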

◆ IntAttr

Definition at line 94 of file attributes.h.

◆ IntsAttr

Definition at line 95 of file attributes.h.

◆ IValueAttr

◆ JitOp

Definition at line 18 of file parser.cpp.

◆ JitValue

Definition at line 17 of file parser.cpp.

◆ Kwargs

using torch::jit::Kwargs = typedef std::unordered_map<std::string, at::IValue>

Definition at line 22 of file function.h.

◆ ListAttributeMap

using torch::jit::ListAttributeMap = typedef std::unordered_map<std::string, std::vector<Const> >

Definition at line 44 of file ir_emitter.cpp.

◆ MatchFilter

using torch::jit::MatchFilter = typedef std::function< bool(const Match&, const std::unordered_map<std::string, Value*>&)>

Definition at line 26 of file subgraph_rewrite.h.
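The is_*_module and related helpers listed earlier share this signature; a trivial filter, as a sketch, looks like:

    // Accepts every candidate match.
    MatchFilter accept_all =
        [](const Match&, const std::unordered_map<std::string, Value*>&) {
          return true;
        };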

◆ module_list

Definition at line 70 of file module.h.

◆ ModuleLookup

using torch::jit::ModuleLookup = typedef std::function<Module(const std::vector<std::string>&)>

Definition at line 86 of file module.h.

◆ ModuleMethodVector

using torch::jit::ModuleMethodVector = typedef std::vector<std::pair<Module, std::string> >

Definition at line 17 of file helper.h.

◆ ModulePtr

◆ ModuleQConfigMap

using torch::jit::ModuleQConfigMap = typedef std::unordered_map<ModulePtr, c10::optional<QConfig> >

Definition at line 20 of file insert_observers.cpp.

◆ named_attribute_list

◆ named_buffer_list

◆ named_module_list

◆ named_parameter_list

◆ NameModule

Definition at line 57 of file module.h.

◆ NameTensor

Definition at line 59 of file module.h.

◆ NameValue

Definition at line 58 of file module.h.

◆ node_list

using torch::jit::node_list = typedef std::vector<Node*>

Definition at line 142 of file ir.h.

◆ node_set

using torch::jit::node_set = typedef std::set<const Node*>

Definition at line 372 of file ir.cpp.

◆ NodeKind

using torch::jit::NodeKind = typedef Symbol

Definition at line 147 of file ir.h.

◆ ObjectPtr

◆ ObjLoader

Definition at line 14 of file unpickler.h.

◆ Operation

using torch::jit::Operation = typedef std::function<void(Stack*)>

Definition at line 12 of file stack.h.
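A minimal sketch of an Operation; it communicates through the IValue stack using the same pop/push helpers the registered operators above use:

    Operation add_one = [](Stack* stack) {
      int64_t x = pop(stack).toInt();  // consume one IValue
      push(stack, x + 1);              // push the result back
    };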

◆ OperationCreator

using torch::jit::OperationCreator = typedef Operation (*)(const Node*)

Definition at line 34 of file operator.h.

◆ parameter_list

Definition at line 74 of file module.h.

◆ ParamMap

using torch::jit::ParamMap = typedef std::map<std::string, IValue>

Definition at line 20 of file helper.h.

◆ PrePackingOpsFilterFn

using torch::jit::PrePackingOpsFilterFn = typedef std::function<bool(Node*)>

Definition at line 9 of file prepack_folding.h.

◆ PrintHandler

using torch::jit::PrintHandler = typedef void (*)(const std::string&)

Definition at line 12 of file print_handler.h.
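
A sketch of installing a custom handler, assuming the setPrintHandler function declared in the same header; it reroutes TorchScript print() output to stderr.

#include <iostream>
#include <string>
#include <torch/csrc/jit/runtime/print_handler.h>

void print_to_stderr(const std::string& s) {
  std::cerr << s;
}

void install_handler() {
  torch::jit::setPrintHandler(&print_to_stderr); // matches PrintHandler
}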

◆ QConfig

using torch::jit::QConfig = typedef std::tuple<Module, Module>

Definition at line 20 of file insert_observers.h.

◆ QConfigDict

using torch::jit::QConfigDict = typedef std::unordered_map<std::string, c10::optional<QConfig> >

Definition at line 21 of file insert_observers.h.

◆ QParamVector

using torch::jit::QParamVector = typedef std::vector<std::pair<std::string, IValue> >

Definition at line 21 of file helper.h.

◆ RawDataExportMap

using torch::jit::RawDataExportMap = typedef std::unordered_map<std::string, at::Tensor>

Definition at line 26 of file export.h.

◆ RegisterPass

Definition at line 57 of file pass_manager.h.

◆ ResolutionCallback

using torch::jit::ResolutionCallback = typedef std::function<py::object(std::string)>

Definition at line 54 of file script_init.cpp.

◆ ResolverPtr

typedef std::shared_ptr< Resolver > torch::jit::ResolverPtr

Definition at line 35 of file compilation_unit.h.

◆ ScopePtr

Definition at line 21 of file scope.h.

◆ SourceLoader

using torch::jit::SourceLoader = typedef std::function<std::shared_ptr<Source>(const std::string&)>

Definition at line 19 of file import_source.h.

◆ SourceRangeRecords

using torch::jit::SourceRangeRecords = typedef std::vector<TaggedRange>

Definition at line 203 of file source_range.h.

◆ SparseBitVector

Definition at line 16 of file liveness.h.

◆ SROperator

using torch::jit::SROperator = typedef std::function<void(ProcessedNode*)>

Definition at line 9 of file ops.h.

◆ SROpFunctor

using torch::jit::SROpFunctor = typedef SROperator (*)(Node* n)

Definition at line 10 of file ops.h.

◆ Stack

typedef std::vector< c10::IValue > torch::jit::Stack

Definition at line 21 of file function.h.

◆ StringAttr

Definition at line 96 of file attributes.h.

◆ StringsAttr

Definition at line 97 of file attributes.h.

◆ SugaredValuePtr

using torch::jit::SugaredValuePtr = typedef std::shared_ptr<SugaredValue>

Definition at line 17 of file sugared_value.h.

◆ SymbolDimMap

using torch::jit::SymbolDimMap = typedef std::map<c10::ShapeSymbol, std::string>

Definition at line 28 of file export.h.

◆ TaskLauncher

typedef std::function< void(std::function< void()>)> torch::jit::TaskLauncher

Definition at line 24 of file function.h.
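
Any callable of this shape can serve as a launcher for asynchronous work. A minimal, purely illustrative sketch (it runs each task on a detached thread rather than a pool; the typedef's header is assumed to be in scope):

#include <functional>
#include <thread>

torch::jit::TaskLauncher thread_launcher =
    [](std::function<void()> task) {
      std::thread(std::move(task)).detach();
    };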

◆ Tensor

using torch::jit::Tensor = typedef at::Tensor

Definition at line 16 of file frozen_conv_folding.cpp.

◆ tensor_type_converter_t

◆ TensorAttr

◆ TensorsAttr

◆ TokenTrieRef

using torch::jit::TokenTrieRef = typedef std::unique_ptr<TokenTrie>

Definition at line 138 of file lexer.h.

◆ topo_position_t

using torch::jit::topo_position_t = typedef int64_t

Definition at line 148 of file ir.h.

◆ TreeList

Definition at line 30 of file tree.h.

◆ TreeRef

Definition at line 29 of file tree.h.

◆ TypeAttr

◆ TypeEnvironment

Definition at line 27 of file convert_to_ssa.cpp.

◆ TypePtr

Definition at line 11 of file schema_type_parser.h.

◆ TypeResolver

using torch::jit::TypeResolver = typedef std::function<c10::StrongTypePtr(const c10::QualifiedName&)>

Definition at line 11 of file unpickler.h.

◆ TypesAttr

◆ TypeTable

using torch::jit::TypeTable = typedef std::unordered_map<std::string, TypePtr>

Definition at line 42 of file ir_emitter.cpp.

◆ use_list

using torch::jit::use_list = typedef std::vector<Use>

Definition at line 144 of file ir.h.

◆ value_list

typedef std::vector< Value * > torch::jit::value_list

Definition at line 143 of file ir.h.

◆ value_map

using torch::jit::value_map = typedef std::unordered_map<Value*, Value*>

Definition at line 21 of file autodiff.cpp.

◆ value_set

using torch::jit::value_set = typedef std::unordered_set<Value*>

Definition at line 22 of file autodiff.cpp.

◆ ValueEnvironment

Definition at line 26 of file convert_to_ssa.cpp.

◆ ValueSet

using torch::jit::ValueSet = typedef std::unordered_set<const Value*>

Definition at line 149 of file ir.h.

◆ ValueTable

using torch::jit::ValueTable = typedef std::unordered_map<std::string, SugaredValuePtr>

Definition at line 41 of file ir_emitter.cpp.

◆ ValueToParamPairMap

using torch::jit::ValueToParamPairMap = typedef std::map<Value*, std::pair<std::string, IValue> >

Definition at line 18 of file helper.h.

Enumeration Type Documentation

◆ AttributeKind

enum class torch::jit::AttributeKind
strong
Enumerator
f
fs
i
is
s
ss
t
ts
g
gs
ty
tys
ival

Definition at line 22 of file attributes.h.

◆ DCESideEffectPolicy

enum class torch::jit::DCESideEffectPolicy : uint8_t
strong
Enumerator
DONT_DELETE_NODES_WITH_SIDE_EFFECTS 
ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS 

Definition at line 14 of file dead_code_elimination.h.
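
A usage sketch, assuming the EliminateDeadCode overload in dead_code_elimination.h that takes this policy as its second argument:

#include <memory>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>

// Allow DCE to delete even nodes that have side effects.
void run_aggressive_dce(const std::shared_ptr<torch::jit::Graph>& graph) {
  torch::jit::EliminateDeadCode(
      graph,
      torch::jit::DCESideEffectPolicy::ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS);
}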

◆ ExitStatus

enum class torch::jit::ExitStatus
strong
Enumerator
WILL 
MIGHT 
WONT 
THROWS 

Definition at line 18 of file exit_transforms.cpp.

◆ IterableModuleKind

enum class torch::jit::IterableModuleKind
strong
Enumerator
NONE 
LIST 
DICT 

Definition at line 13 of file concrete_module_type.h.

◆ JitLoggingLevels

enum class torch::jit::JitLoggingLevels
strong
Enumerator
GRAPH_DUMP 
GRAPH_UPDATE 
GRAPH_DEBUG 

Definition at line 45 of file jit_log.h.

◆ LoopStatus

enum class torch::jit::LoopStatus
strong
Enumerator
NOT_IN_LOOP 
IN_LOOP 
IN_UNROLLED_LOOP 

Definition at line 600 of file ir_emitter.cpp.

◆ MobileOptimizerType

enum class torch::jit::MobileOptimizerType : int8_t
strong
Enumerator
CONV_BN_FUSION 
INSERT_FOLD_PREPACK_OPS 
REMOVE_DROPOUT 
FUSE_ADD_RELU 
HOIST_CONV_PACKED_PARAMS 

Definition at line 9 of file xnnpack_rewrite.h.
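
A usage sketch, assuming the optimizeForMobile entry point declared in xnnpack_rewrite.h, which accepts a blocklist of these optimization types:

#include <set>
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/xnnpack_rewrite.h>

// Run the mobile optimizations, but keep dropout nodes intact.
torch::jit::Module optimize_keeping_dropout(const torch::jit::Module& m) {
  std::set<torch::jit::MobileOptimizerType> blocklist{
      torch::jit::MobileOptimizerType::REMOVE_DROPOUT};
  return torch::jit::optimizeForMobile(m, blocklist);
}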

◆ NoneStatus

Enumerator
ALWAYS 
MAYBE 
NEVER 

Definition at line 181 of file ir_emitter.cpp.

◆ OpCode

enum torch::jit::OpCode : uint8_t
Enumerator
OP 
OPN 
LOAD 
MOVE 
STOREN 
STORE 
DROP 
DROPR 
LOADC 
JF 
JMP 
LOOP 
RET 
WAIT 
CALL 
GUARD 
TYPECHECK 
FAIL_GUARD 
PROFILE_OP 
TAIL_CALL 
INTERFACE_CALL 
GET_ATTR 
SET_ATTR 
LIST_UNPACK 
TUPLE_CONSTRUCT 
NAMED_TUPLE_CONSTRUCT 
LIST_CONSTRUCT 
DICT_CONSTRUCT 
CREATE_OBJECT 
ISINSTANCE 
TUPLE_SLICE 
FORK 
WARN 
ENTER 
EXIT 

Definition at line 59 of file instruction.h.

◆ PickleOpCode

enum class torch::jit::PickleOpCode : char
strong
Enumerator
MARK 
STOP 
POP 
POP_MARK 
DUP 
FLOAT 
INT 
BININT 
BININT1 
LONG 
BININT2 
NONE 
PERSID 
BINPERSID 
REDUCE 
STRING 
BINSTRING 
SHORT_BINSTRING 
UNICODE_ 
BINUNICODE 
APPEND 
BUILD 
GLOBAL 
DICT 
EMPTY_DICT 
APPENDS 
GET 
BINGET 
INST 
LONG_BINGET 
LIST 
EMPTY_LIST 
OBJ 
PUT 
BINPUT 
LONG_BINPUT 
SETITEM 
TUPLE 
EMPTY_TUPLE 
SETITEMS 
BINFLOAT 
PROTO 
NEWOBJ 
EXT1 
EXT2 
EXT4 
TUPLE1 
TUPLE2 
TUPLE3 
NEWTRUE 
NEWFALSE 
LONG1 
LONG4 
BINBYTES 
SHORT_BINBYTES 
SHORT_BINUNICODE 
BINUNICODE8 
BINBYTES8 
EMPTY_SET 
ADDITEMS 
FROZENSET 
NEWOBJ_EX 
STACK_GLOBAL 
MEMOIZE 
FRAME 

Definition at line 18 of file pickler.h.

◆ QuantizedParamsType

Enumerator
CONV 
LINEAR 

Definition at line 139 of file unpack_quantized_weights.cpp.

◆ QuantType

enum torch::jit::QuantType : uint8_t
Enumerator
DYNAMIC 
STATIC 

Definition at line 9 of file quantization_type.h.

◆ Side

enum class torch::jit::Side
strong
Enumerator
LHS 
RHS 

Definition at line 249 of file batch_mm.cpp.

◆ TokenKind

Enumerator
TK_DUMMY_START 
TK_EOF 
TK_WHITESPACE 
TK_WHITESPACE_EOF 
TK_NUMBER 
TK_NEWLINE 
TK_INDENT 
TK_DEDENT 
TK_DEF 
TK_EQUIVALENT 
TK_IDENT 
TK_STRING 
TK_STRINGLITERAL 
TK_CONST 
TK_LIST 
TK_DICT 
TK_OPTION 
TK_APPLY 
TK_COMPREHENSION 
TK_RANGE_CONSTRAINT 
TK_PARAM 
TK_INFERRED 
TK_ACCESS 
TK_ASSIGN 
TK_AUG_ASSIGN 
TK_ATTRIBUTE 
TK_IF 
TK_ELSE 
TK_ELIF 
TK_WHILE 
TK_EXPR_STMT 
TK_RETURN 
TK_IS 
TK_ISNOT 
TK_NE 
TK_EQ 
TK_LE 
TK_GE 
TK_FLOOR_DIV 
TK_IF_EXPR 
TK_TRUE 
TK_FALSE 
TK_NONE 
TK_AND 
TK_OR 
TK_NOT 
TK_LSHIFT 
TK_RSHIFT 
TK_CAST 
TK_PLUS_EQ 
TK_MINUS_EQ 
TK_TIMES_EQ 
TK_DIV_EQ 
TK_MOD_EQ 
TK_BIT_OR_EQ 
TK_BIT_AND_EQ 
TK_BIT_XOR_EQ 
TK_LSHIFT_EQ 
TK_RSHIFT_EQ 
TK_POW_EQ 
TK_GLOBAL 
TK_BUILT_IN 
TK_SUBSCRIPT 
TK_VAR 
TK_NOTHING 
TK_DICT_LITERAL 
TK_LIST_LITERAL 
TK_TUPLE_LITERAL 
TK_FOR 
TK_IN 
TK_NOTIN 
TK_STARRED 
TK_UNARY_MINUS 
TK_POW 
TK_ARROW 
TK_DECL 
TK_SLICE_EXPR 
TK_TYPE_COMMENT 
TK_RAISE 
TK_ASSERT 
TK_DOTS 
TK_LIST_COMP 
TK_DICT_COMP 
TK_BREAK 
TK_CONTINUE 
TK_DELETE 
TK_PASS 
TK_CLASS_DEF 
TK_IMPORT 
TK_WITH 
TK_WITH_ITEM 
TK_AS 
TK_PROP 
TK_ELLIPSIS 

Definition at line 124 of file lexer.h.

◆ Transform

enum class torch::jit::Transform
strong
Enumerator
Returns 
LoopContinuations 

Definition at line 20 of file exit_transforms.cpp.

Function Documentation

◆ _jit_debug_module_iterators()

static py::dict torch::jit::_jit_debug_module_iterators ( Module &  module)
static

Definition at line 651 of file script_init.cpp.

References debugMakeList(), debugMakeNamedList(), and module.

Referenced by initJitScriptBindings().

◆ _load_extra_only_for_mobile()

void torch::jit::_load_extra_only_for_mobile ( const std::string &  filename,
c10::optional< at::Device >  device,
ExtraFilesMap &  extra_files 
)

Load only the contents of the "extra/" files whose names are passed in the map (extra_files).

Populate the corresponding values with the contents of those files. Do not attempt to load the entire model, and stop once the extra files have been extracted.

This API makes it possible to load GPU models on Linux CPU machines and extract only the extra files, so that the metadata added to the .ptl archive at generation time can be inspected.

Definition at line 606 of file import.cpp.

References device, generate-wrapper::filename, torch::MobileObserverConfig::getModuleObserver(), torch::observerConfig(), and at::native::rand().
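
A usage sketch; the archive path and the file name "metadata.json" are illustrative, and the mobile/import.h path is an assumption:

#include <string>
#include <torch/csrc/jit/mobile/import.h>

// Ask only for extra/metadata.json; the map's values start empty and
// are populated by the call.
torch::jit::ExtraFilesMap read_metadata(const std::string& path) {
  torch::jit::ExtraFilesMap extra_files{{"metadata.json", ""}};
  torch::jit::_load_extra_only_for_mobile(path, c10::nullopt, extra_files);
  return extra_files;
}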

◆ _load_for_mobile() [1/6]

TORCH_API mobile::Module torch::jit::_load_for_mobile ( const std::string &  filename,
c10::optional< at::Device >  device 
)

Definition at line 520 of file import.cpp.

References _load_for_mobile(), device, and generate-wrapper::filename.
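
A usage sketch (file name and input shape are illustrative); passing c10::nullopt leaves tensors on their serialized device:

#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/torch.h>

int main() {
  torch::jit::mobile::Module m =
      torch::jit::_load_for_mobile("model.ptl", c10::nullopt);
  c10::IValue out = m.forward({torch::ones({1, 3, 224, 224})});
  return out.isTensor() ? 0 : 1;
}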

◆ _load_for_mobile() [2/6]

TORCH_API mobile::Module torch::jit::_load_for_mobile ( const std::string &  filename,
c10::optional< at::Device >  device,
ExtraFilesMap &  extra_files 
)

Definition at line 543 of file import.cpp.

References _load_for_mobile(), device, generate-wrapper::filename, and module.

◆ _load_for_mobile() [3/6]

TORCH_API mobile::Module torch::jit::_load_for_mobile ( std::istream &  in,
c10::optional< at::Device >  device 
)

Definition at line 513 of file import.cpp.

References _load_for_mobile(), device, and in.

Referenced by initJitScriptBindings(), main(), and pytorch_jni::PytorchJni::PytorchJni().

◆ _load_for_mobile() [4/6]

TORCH_API mobile::Module torch::jit::_load_for_mobile ( std::istream &  in,
c10::optional< at::Device >  device,
ExtraFilesMap &  extra_files 
)

Definition at line 534 of file import.cpp.

References _load_for_mobile(), device, in, and module.

◆ _load_for_mobile() [5/6]

TORCH_API mobile::Module torch::jit::_load_for_mobile ( std::unique_ptr< ReadAdapterInterface >  rai,
c10::optional< c10::Device >  device 
)

Definition at line 527 of file import.cpp.

References _load_for_mobile(), and device.

◆ _load_for_mobile() [6/6]

◆ _load_parameters() [1/3]

TORCH_API std::map< std::string, at::Tensor > torch::jit::_load_parameters ( const std::string &  filename,
c10::optional< at::Device >  device 
)

Definition at line 228 of file import_data.cpp.

References _load_parameters(), device, and generate-wrapper::filename.

◆ _load_parameters() [2/3]

TORCH_API std::map< std::string, at::Tensor > torch::jit::_load_parameters ( std::istream &  in,
c10::optional< at::Device >  device 
)

Definition at line 221 of file import_data.cpp.

References _load_parameters(), device, and in.

◆ _load_parameters() [3/3]

TORCH_API std::map< std::string, at::Tensor > torch::jit::_load_parameters ( std::unique_ptr< ReadAdapterInterface >  rai,
c10::optional< c10::Device >  device 
)

Definition at line 235 of file import_data.cpp.

References device, caffe2::e, caffe2.contrib.aten.gen_op::key, at::vec256::map(), and value.

Referenced by _load_parameters().

◆ _propagate_and_assign_input_shapes()

static std::shared_ptr<Graph> torch::jit::_propagate_and_assign_input_shapes ( Graph &  graph,
const std::vector< at::Tensor > &  inputs,
bool  with_grad = false,
bool  propagate = true 
)
static

◆ _propagate_shapes()

static std::shared_ptr<Graph> torch::jit::_propagate_shapes ( Graph &  graph,
std::vector< at::Tensor >  inputs,
bool  with_grad = false 
)
static

◆ _save_parameters() [1/2]

TORCH_API void torch::jit::_save_parameters ( const std::map< std::string, at::Tensor > &  map,
const std::string &  filename 
)

◆ _save_parameters() [2/2]

TORCH_API void torch::jit::_save_parameters ( const std::map< std::string, at::Tensor > &  map,
std::ostream &  out 
)

Definition at line 85 of file export_data.cpp.

References caffe2::e, c10::Dict< Key, Value >::insert(), at::vec256::map(), and out.
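
A round-trip sketch pairing this with _load_parameters; the declarations (export_data / import_data headers) are assumed to be in scope, and the file name is illustrative:

#include <map>
#include <string>
#include <torch/torch.h>

void roundtrip_parameters() {
  std::map<std::string, at::Tensor> params{
      {"weight", torch::randn({2, 2})}};
  torch::jit::_save_parameters(params, "params.ptl");  // overload [1/2]
  auto loaded = torch::jit::_load_parameters("params.ptl", c10::nullopt);
  TORCH_CHECK(loaded.count("weight") == 1);
}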

◆ addCondAsOutput()

◆ addFunctionToModule()

void torch::jit::addFunctionToModule ( Module &  module,
const StrongFunctionPtr &  func 
)

◆ addInputToBlock()

Value * torch::jit::addInputToBlock ( Block *  block)

Definition at line 96 of file helper.cpp.

References torch::jit::Block::addInput(), and block.

Referenced by initPythonIRBindings().

◆ addNodeToBlock()

Node * torch::jit::addNodeToBlock ( Block *  block,
Symbol  kind,
ArrayRef< Value * >  inputs 
)
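
A sketch combining addInputToBlock() and addNodeToBlock(), assuming both declarations are in scope (they are defined in helper.cpp); the aten::relu kind is illustrative:

#include <memory>
#include <torch/csrc/jit/ir/ir.h>

void build_tiny_graph() {
  auto graph = std::make_shared<torch::jit::Graph>();
  // Add a graph input, then append a node that consumes it.
  torch::jit::Value* in = torch::jit::addInputToBlock(graph->block());
  torch::jit::Node* n = torch::jit::addNodeToBlock(
      graph->block(), c10::Symbol::fromQualString("aten::relu"), {in});
  (void)n;
}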

◆ addParamAsArgument()

Value* torch::jit::addParamAsArgument ( Function *  function,
std::string &  name,
IValue &  attr 
)

◆ addReverseInline()

◆ aliasAnalysisConservative()

c10::AliasAnalysisKind torch::jit::aliasAnalysisConservative ( )
inline

Definition at line 42 of file register_ops_utils.h.

References c10::CONSERVATIVE.

◆ aliasAnalysisFromSchema()

c10::AliasAnalysisKind torch::jit::aliasAnalysisFromSchema ( )
inline

Definition at line 38 of file register_ops_utils.h.

References c10::FROM_SCHEMA.
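
These helpers supply the AliasAnalysisKind argument when constructing an Operator. A sketch, assuming the Operator(schema, Operation, AliasAnalysisKind) constructor and RegisterOperators from the custom-operator API, with aliasAnalysisFromSchema() in scope; the schema and namespace are illustrative:

#include <ATen/core/stack.h>
#include <torch/csrc/jit/runtime/custom_operator.h>

static torch::jit::RegisterOperators reg({
    torch::jit::Operator(
        "myns::add_ints(int a, int b) -> int",
        [](torch::jit::Stack* stack) {
          int64_t a = 0, b = 0;
          torch::jit::pop(*stack, a, b);
          torch::jit::push(*stack, a + b);
        },
        torch::jit::aliasAnalysisFromSchema()),
});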

◆ aliasAnalysisHasSpecialCaseFor()

◆ aliasAnalysisSpecialCase()

c10::AliasAnalysisKind torch::jit::aliasAnalysisSpecialCase ( )
inline

Definition at line 46 of file register_ops_utils.h.

References c10::INTERNAL_SPECIAL_CASE.

◆ alwaysRaisesException()

TORCH_API bool torch::jit::alwaysRaisesException ( Block *  block)

◆ annotate()

◆ AnnotateWarns() [1/2]

void torch::jit::AnnotateWarns ( Block *  b)

◆ AnnotateWarns() [2/2]

TORCH_API void torch::jit::AnnotateWarns ( const std::shared_ptr< Graph > &  graph)

Definition at line 24 of file annotate_warns.cpp.

References AnnotateWarns(), and caffe2.contrib.aten.docs.sample::graph.

◆ append()

template<typename T >
void torch::jit::append ( std::vector< T > &  a,
T &&  e 
)
inline

◆ append< bool >()

template<>
void torch::jit::append< bool > ( std::vector< bool > &  a,
bool &&  e 
)
inline

Definition at line 231 of file unpickler.cpp.

References caffe2.contrib.aten.docs.sample::a, and caffe2::e.

◆ argumentToIValue()

IValue torch::jit::argumentToIValue ( const FunctionSchema &  schema,
size_t  argumentPosition,
py::handle  object 
)
inline

Definition at line 586 of file pybind_utils.h.

References caffe2::argument, friendlyTypeName(), c10::str(), and toIValue().

Referenced by createStackForSchema().

◆ as_function()

c10::optional< StrongFunctionPtr > torch::jit::as_function ( const py::object &  obj)

Definition at line 25 of file