pytorch  1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.

torch::autograd Namespace Reference

Namespaces

namespace  _functions
 
namespace  anomaly_mode
 
namespace  detail
 
namespace  forward_ad
 
namespace  function
 
namespace  functional
 
namespace  generated
 
namespace  grad_mode
 
namespace  gradcheck
 
namespace  impl
 
namespace  profiler
 
namespace  python
 
namespace  utils
 
namespace  variable
 
namespace  VariableType
 

Classes

struct  AccumulateGrad
 
struct  AnomalyMetadata
 
struct  AnomalyMode
 
struct  AutogradContext
 Context to save information during forward that can be accessed in backward in custom autograd operations (see torch::autograd::Function for details). More...
 
struct  ComputeRequiresGrad
 
struct  CopyBackwards
 
struct  CopySlices
 
struct  CppFunctionPreHook
 
struct  CppNode
 
struct  DefaultFunctionType
 
struct  DelayedError
 
class  DetectAnomalyGuard
 A RAII guard that enables Anomaly Detection Mode. More...
 
struct  Edge
 Represents a particular input of a function. More...
 
struct  Engine
 
struct  Error
 
struct  ExtractVariables
 
struct  Flatten
 
struct  ForwardADLevel
 
struct  ForwardGrad
 
struct  Function
 To use custom autograd operations, implement a Function subclass with static forward and backward functions: More...
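A minimal sketch of such a subclass (an invented example, not taken from this page; ScaleBy2 and its scaling factor are illustrative only):

    struct ScaleBy2 : public torch::autograd::Function<ScaleBy2> {
      // forward() receives an AutogradContext that can stash state for backward().
      static torch::Tensor forward(torch::autograd::AutogradContext *ctx,
                                   const torch::Tensor &input) {
        return input * 2;
      }
      // backward() receives gradients w.r.t. forward's outputs, one per output.
      static torch::autograd::variable_list backward(
          torch::autograd::AutogradContext *ctx,
          torch::autograd::variable_list grad_output) {
        return {grad_output[0] * 2};  // d(2x)/dx = 2
      }
    };
    // Usage: auto y = ScaleBy2::apply(x);  // records the op in the autograd graph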
 
struct  FunctionPostHook
 
struct  FunctionPreHook
 
struct  Gather
 
struct  GraphRoot
 
struct  GraphTask
 
class  GraphTaskGuard
 
struct  Identity
 
struct  InputBuffer
 
struct  InputMetadata
 Records type, shape, and device of tensor and, where applicable, the stream the corresponding operation took place on. More...
 
struct  Node
 
class  NodeGuard
 
struct  NodeTask
 
struct  NotImplemented
 
struct  PyAnomalyMetadata
 
struct  PyFunctionPostHook
 
struct  PyFunctionPreHook
 
struct  PyNode
 
struct  ReadyQueue
 
class  SavedVariable
 A snapshot of a variable at a certain version. More...
 
struct  Scatter
 
struct  symbolic_unconvertible
 
struct  SymbolicContext
 
struct  THPCppFunction
 
struct  TraceableFunction
 See Node::is_traceable() for definition. More...
 
struct  UndefinedGrad
 
struct  UndefinedGradBackward
 
struct  VariableHooks
 
struct  VariableInfo
 

Typedefs

using Variable = at::Tensor
 Variable is exactly the same as Tensor (i.e. More...
 
using ConstQuantizerPtr = const c10::intrusive_ptr< Quantizer > &
 
using hooks_list = std::vector< std::function< Variable(const Variable &)> >
 
template<typename X , typename... Args>
using forward_t = decltype(X::forward(nullptr, std::declval< Args >()...))
 
using EngineStub = Engine &(*)()
 
using tensor_list = std::vector< at::Tensor >
 
using variable_list = std::vector< Variable >
 
using edge_list = std::vector< Edge >
 
using saved_variable_list = std::vector< SavedVariable >
 
using IndexRange = std::pair< size_t, size_t >
 
using function_constructor = std::function< std::shared_ptr< Node >(edge_list &&)>
 
using GradMode = at::GradMode
 
using AutoGradMode = at::AutoGradMode
 

Functions

void initFFTFunctions (PyObject *module)
 
void initLinalgFunctions (PyObject *module)
 
static PyObject * THPVariable__parse_to (PyObject *module, PyObject *args, PyObject *kwargs)
 
void initNNFunctions (PyObject *module)
 
Tensor dispatch_arange (Scalar end, Tensor result)
 
Tensor dispatch_arange (Scalar end, const TensorOptions &options)
 
Tensor dispatch_arange (Scalar start, Scalar end, Scalar step, Tensor result)
 
Tensor dispatch_arange (Scalar start, Scalar end, Scalar step, const TensorOptions &options)
 
static PyObject * THPVariable_arange (PyObject *self, PyObject *args, PyObject *kwargs)
 
Tensor dispatch_range (Scalar start, Scalar end, Scalar step, Tensor result)
 
Tensor dispatch_range (Scalar start, Scalar end, Scalar step, const TensorOptions &options)
 
static PyObject * THPVariable_range (PyObject *self, PyObject *args, PyObject *kwargs)
 
Tensor dispatch_full (IntArrayRef size, Scalar fill_val, const TensorOptions &options)
 
Tensor dispatch_full (IntArrayRef size, Scalar fill_val, c10::optional< DimnameList > names, const TensorOptions &options)
 
Tensor dispatch_full (IntArrayRef size, Scalar fill_val, Tensor result)
 
static PyObject * THPVariable_full (PyObject *self, PyObject *args, PyObject *kwargs)
 
Tensor dispatch_randint (int64_t high, IntArrayRef size, c10::optional< Generator > generator, Tensor result)
 
Tensor dispatch_randint (int64_t high, IntArrayRef size, c10::optional< Generator > generator, const TensorOptions &options)
 
Tensor dispatch_randint (int64_t high, IntArrayRef size, Tensor result)
 
Tensor dispatch_randint (int64_t high, IntArrayRef size, const TensorOptions &options)
 
Tensor dispatch_randint (int64_t low, int64_t high, IntArrayRef size, c10::optional< Generator > generator, Tensor result)
 
Tensor dispatch_randint (int64_t low, int64_t high, IntArrayRef size, c10::optional< Generator > generator, const TensorOptions &options)
 
Tensor dispatch_randint (int64_t low, int64_t high, IntArrayRef size, Tensor result)
 
Tensor dispatch_randint (int64_t low, int64_t high, IntArrayRef size, const TensorOptions &options)
 
static PyObject * THPVariable_randint (PyObject *self_, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_as_tensor (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_from_numpy (PyObject *module, PyObject *arg)
 
static Tensor dispatch_nonzero (const Tensor &self)
 
static Tensor dispatch_nonzero (const Tensor &self, Tensor out)
 
static std::vector< Tensor > dispatch_nonzero_numpy (const Tensor &self)
 
static PyObject * THPVariable_nonzero (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_sparse_coo_tensor (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable__sparse_coo_tensor_unsafe (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_tensor (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_get_device (PyObject *self_, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_numel (PyObject *self_, PyObject *args, PyObject *kwargs)
 
void initTorchFunctions (PyObject *module)
 
static PyObject * THPVariable__is_view (PyObject *self, PyObject *args)
 
static PyObject * THPVariable_apply_ (PyObject *self, PyObject *arg)
 
static PyObject * THPVariable_size (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_stride (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_get_device (PyObject *self_, PyObject *args)
 
static PyObject * THPVariable_has_names (PyObject *self_, PyObject *args)
 
static PyObject * THPVariable_data_ptr (PyObject *self_, PyObject *args)
 
static PyObject * THPVariable_storage_offset (PyObject *self_, PyObject *args)
 
static PyObject * THPVariable_dim (PyObject *self, PyObject *args)
 
static PyObject * THPVariable_numel (PyObject *self, PyObject *args)
 
static Tensor dispatch_contiguous (const Tensor &self, at::MemoryFormat memory_format)
 
static PyObject * THPVariable_contiguous (PyObject *self, PyObject *args, PyObject *kwargs)
 
static Tensor dispatch_copy_ (Tensor &self, const Tensor &other, bool non_blocking)
 
static PyObject * THPVariable_copy_ (PyObject *self, PyObject *args, PyObject *kwargs)
 
static double dispatch_to_CDouble (const Tensor &self)
 
static c10::complex< double > dispatch_to_CComplexDouble (const Tensor &self)
 
static int64_t dispatch_to_CLong (const Tensor &self)
 
static bool dispatch_to_Bool (const Tensor &self)
 
static PyObject * THPVariable_float_scalar (PyObject *self, PyObject *args)
 
static PyObject * THPVariable_complex_scalar (PyObject *self, PyObject *args)
 
static PyObject * THPVariable_integral_scalar (PyObject *self, PyObject *args)
 
static PyObject * THPVariable_index_scalar (PyObject *self, PyObject *args)
 
static Tensor dispatch_invert (const Tensor &self)
 
static PyObject * THPVariable_invert (PyObject *self, PyObject *args)
 
static Tensor dispatch_to (const Tensor &self, Device device, bool non_blocking, bool copy, c10::optional< c10::MemoryFormat > optional_memory_format)
 
static Tensor dispatch_to (const Tensor &self, bool non_blocking, bool copy, c10::optional< c10::MemoryFormat > optional_memory_format)
 
static Tensor dispatch_to (const Tensor &self, ScalarType dtype, bool non_blocking, bool copy, c10::optional< c10::MemoryFormat > optional_memory_format)
 
static Tensor dispatch_to (const Tensor &self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional< c10::MemoryFormat > optional_memory_format)
 
static PyObject * THPVariable_cpu (PyObject *self, PyObject *args, PyObject *kwargs)
 
static Tensor dispatch_nonzero (const Tensor &self)
 
static std::vector< Tensor > dispatch_nonzero_numpy (const Tensor &self)
 
static PyObject * THPVariable_nonzero (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_cuda (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_xpu (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_to_type (PyObject *self, ScalarType scalarType, c10::optional< c10::MemoryFormat > optional_memory_format)
 
static PyObject * THPVariable_byte (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_char (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_double (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_float (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_half (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_int (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_long (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_short (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_bool (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_bfloat16 (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_element_size (PyObject *self, PyObject *args)
 
static PyObject * THPVariable_numpy (PyObject *self, PyObject *arg)
 
static PyObject * THPVariable_requires_grad_ (PyObject *self, PyObject *args, PyObject *kwargs)
 
bool dispatch_is_contiguous (Tensor &self, MemoryFormat memory_format)
 
static PyObject * THPVariable_is_contiguous (PyObject *self_, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_item (PyObject *self, PyObject *args)
 
static PyObject * THPVariable_map_ (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_map2_ (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_new (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_new_ones (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_new_tensor (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_storage (PyObject *self, PyObject *arg)
 
static PyObject * THPVariable_storage_type (PyObject *self, PyObject *arg)
 
static PyObject * THPVariable_to (PyObject *self, PyObject *args, PyObject *kwargs)
 
static PyObject * THPVariable_tolist (PyObject *self, PyObject *args)
 
static PyObject * THPVariable_type (PyObject *self, PyObject *args, PyObject *kwargs)
 
Tuple[_OptionalTensor,...] _make_grads (Sequence[torch.Tensor] outputs, Sequence[_OptionalTensor] grads)
 
Tuple[_OptionalTensor,...] _tensor_or_tensors_to_tuple (Optional[_TensorOrTensors] tensors, int length)
 
None backward (_TensorOrTensors tensors, Optional[_TensorOrTensors] grad_tensors=None, Optional[bool] retain_graph=None, bool create_graph=False, Optional[_TensorOrTensors] grad_variables=None, Optional[Sequence[torch.Tensor]] inputs=None)
 
Tuple[torch.Tensor,...] grad (_TensorOrTensors outputs, _TensorOrTensors inputs, Optional[_TensorOrTensors] grad_outputs=None, Optional[bool] retain_graph=None, bool create_graph=False, bool only_inputs=True, bool allow_unused=False)
 
def _is_checkpoint_valid ()
 
def variable (*args, **kwargs)
 
variable_list _make_grads (const variable_list &outputs, const variable_list &grad_outputs)
 
variable_list run_backward (const variable_list &outputs, const variable_list &grad_outputs, bool keep_graph, bool create_graph, const variable_list &inputs, bool allow_unused, bool accumulate_grad)
 
void backward (const variable_list &tensors, const variable_list &grad_tensors={}, c10::optional< bool > retain_graph=c10::nullopt, bool create_graph=false, const variable_list &inputs={})
 Computes the sum of gradients of given tensors with respect to graph leaves. More...
 
variable_list grad (const variable_list &outputs, const variable_list &inputs, const variable_list &grad_outputs={}, c10::optional< bool > retain_graph=c10::nullopt, bool create_graph=false, bool allow_unused=false)
 Computes and returns the sum of gradients of outputs with respect to the inputs. More...
 
variable_list _wrap_outputs (const variable_list &input_vars, const std::unordered_set< at::TensorImpl * > &non_differentiable, const std::unordered_set< at::TensorImpl * > &dirty_inputs, const at::ArrayRef< Variable > raw_outputs, const std::shared_ptr< Node > &cdata)
 
void check_variable_result (const Variable &original, const Variable &result, std::string hook_name)
 
template<typename... Args>
void extract_vars (std::vector< bool > &is_var, variable_list &list, Args &&... args)
 
template<typename T >
std::enable_if< std::is_same< T, variable_list >::value, T & >::type to_output_type (variable_list &output_list)
 
template<typename T >
std::enable_if< std::is_same< T, Variable >::value, T >::type to_output_type (variable_list &output_list)
 
static variable_list call_pre_hooks (Node &fn, variable_list inputs)
 
static variable_list call_post_hooks (Node &fn, variable_list outputs, const variable_list &inputs)
 
static bool is_compatible_type (const at::TensorOptions &expected, const at::TensorOptions &actual)
 
void set_device (int device)
 
void validate_outputs (const edge_list &edges, variable_list &grads, const std::function< std::string(const std::string &)> &format_error)
 
static variable_list call_function (std::shared_ptr< GraphTask > &graph_task, Node *func, InputBuffer &inputBuffer)
 
std::atomic< EngineStub > engine_stub (Engine::get_base_engine)
 
void set_default_engine_stub (EngineStub stub)
 
bool isForwardADEnabled ()
 
void setForwardADEnabled (bool value)
 
static void gatherFunctions (Node *func, std::vector< std::shared_ptr< Node > > &stack)
 
void deleteNode (Node *function)
 
void create_gradient_edge (Variable &variable, std::shared_ptr< Node > function)
 Create an Edge between the given variable and the function, which is assumed to be the gradient function of this variable (i.e. More...
 
bool any_variable_requires_grad (const variable_list &variables)
 Return true if any of the variables in the list require a gradient. More...
 
template<typename... Variables>
edge_list collect_next_edges (Variables &&... variables)
 Return the next edges of all the given variables, or tuples of variables. More...
 
variable_list wrap_outputs (const variable_list &inputs, tensor_list &&outputs, const function_constructor &ctr)
 Wraps the tensor outputs in variables and creates the grad_fn and sets the grad_fn if necessary. More...
 
void check_input_variables (const char *name, const variable_list &inputs, int args, int required_args=-1, bool allow_undefined=false)
 Checks that inputs contains exactly args items and that the first required_args items are not nullptr. More...
 
template<typename... Args>
bool compute_requires_grad (Args &&... args)
 
void set_history (at::Tensor &variable, const std::shared_ptr< Node > &grad_fn)
 
void set_history (std::vector< Variable > &&variables, const std::shared_ptr< Node > &grad_fn)
 
void set_history (std::vector< Variable > &variables, const std::shared_ptr< Node > &grad_fn)
 
static PyObject * set_autocast_enabled (PyObject *_unused, PyObject *arg)
 
static PyObject * is_autocast_enabled (PyObject *_unused, PyObject *arg)
 
static PyObject * clear_autocast_cache (PyObject *_unused, PyObject *arg)
 
static PyObject * autocast_increment_nesting (PyObject *_unused, PyObject *arg)
 
static PyObject * autocast_decrement_nesting (PyObject *_unused, PyObject *arg)
 
static PyObject * set_forward_AD_enabled (PyObject *_unused, PyObject *arg)
 
static PyObject * is_forward_AD_enabled (PyObject *_unused, PyObject *arg)
 
static PyObject * set_grad_enabled (PyObject *_unused, PyObject *arg)
 
static PyObject * is_grad_enabled (PyObject *_unused, PyObject *arg)
 
static PyObject * set_anomaly_mode_enabled (PyObject *_unused, PyObject *arg)
 
static PyObject * is_anomaly_mode_enabled (PyObject *_unused, PyObject *arg)
 
static PyObject * python_enter_dual_level (PyObject *_unused, PyObject *arg)
 
static PyObject * python_exit_dual_level (PyObject *_unused, PyObject *args, PyObject *kwargs)
 
PyMethodDef * python_functions ()
 
static void accumulate (std::vector< Variable > &buffer, const size_t pos, Variable &&var)
 
void _print_stack (PyObject *stack, const std::string &current_node_name, bool is_parent)
 
PyObject * THPCppFunction_next_functions (THPCppFunction *self, PyObject *hook)
 
PyObject * THPCppFunction_metadata (THPCppFunction *self, void *_unused)
 
PyObject * THPCppFunction_requires_grad (THPCppFunction *self, void *unused)
 
PyObject * THPCppFunction_register_hook_dict (PyObject *self, PyObject *_var)
 
PyObject * THPCppFunction_register_hook (PyObject *self, PyObject *hook)
 
PyObject * THPCppFunction_name (PyObject *self, PyObject *noargs)
 
PyTypeObject * _initFunctionPyTypeObject (PyTypeObject &type, const char *name, PyGetSetDef *function_properties, PyMethodDef *function_methods)
 
PyObject * functionToPyObject (const std::shared_ptr< Node > &cdata)
 
void registerCppFunction (const std::type_info &type, PyTypeObject *pytype)
 
PyObject * registerFunctionHook (Node &fn, PyObject *hook)
 
template<typename Ctor >
PyObject * CppFunction_pynew (PyTypeObject *type, PyObject *args, PyObject *kwds)
 
template<typename Ctor >
PyTypeObject * createForwardFunctionPyTypeObject (PyTypeObject &type, const char *name, PyGetSetDef *function_properties=nullptr, PyMethodDef *function_methods=nullptr)
 
bool ensure_tuple (THPObjectPtr &obj)
 Cast an object into a tuple, if it is not a tuple already. More...
 
static PyObject * THPVariable_pynew (PyTypeObject *type, PyObject *args, PyObject *kwds)
 
void init_legacy_variable (PyObject *module)
 
void initTensorImplConversion (PyObject *module)
 
Py_ssize_t THPVariable_length (PyObject *self)
 
static int64_t count_specified_dimensions (PyObject *index)
 
static void invalid_index (PyObject *obj)
 
static Variable sequenceToVariable (c10::DispatchKey dispatch_key, PyObject *seq)
 
static Variable valueToTensor (c10::TensorOptions options, PyObject *value, const at::Device &device)
 
static void checkUnpackSlice (PyObject *index, Py_ssize_t *start_ptr, Py_ssize_t *stop_ptr, Py_ssize_t *step_ptr)
 
static void recordSliceTrace (PyObject *obj)
 
static void recordSelectTrace (const Tensor &index_tensor)
 
static Variable applySlicing (const Variable &self, PyObject *index, variable_list &outIndices, bool is_tracing, const at::Device &self_device, const IntArrayRef &self_sizes, int64_t specified_dims)
 
static bool treatSequenceAsTuple (PyObject *index)
 
static THPObjectPtr wrapTuple (PyObject *index)
 
PyObject * THPVariable_getitem (PyObject *self, PyObject *index)
 
int THPVariable_setitem (PyObject *self, PyObject *index, PyObject *py_value)
 
void handle_view_on_rebase (DifferentiableViewMeta *diff_view_meta, bool indirect)
 
void check_inplace (const Tensor &tensor, bool requires_grad)
 
void check_inplace (const TensorList tensors, bool requires_grad)
 
void throw_error_out_requires_grad (const char *name)
 
void throw_error_for_complex_autograd (const Tensor &tensor, const char *name)
 
void throw_error_for_complex_autograd (const TensorList &tensorlist, const char *name)
 
void rebase_history (Variable &var, std::shared_ptr< Node > grad_fn)
 
void rebase_history (std::vector< Variable > &&vars, std::shared_ptr< Node > grad_fn)
 
void increment_version (Tensor &t)
 
template<typename... Args>
variable_list flatten_tensor_args (Args &&... args)
 
Tensor as_view (const Tensor &base, const Tensor &tensor, bool is_bw_differentiable, bool is_fw_differentiable, std::function< Tensor(const Tensor &)> view_func=nullptr, CreationMeta creation_meta=CreationMeta::DEFAULT, bool allow_tensor_metadata_change=true)
 
std::vector< Tensor > as_view (const Tensor &base, std::vector< Tensor > &tensors, bool is_bw_differentiable, bool is_fw_differentiable, CreationMeta creation_meta=CreationMeta::DEFAULT)
 
void check_no_requires_grad (const Tensor &tensor, const char *name)
 
void check_no_requires_grad (const c10::optional< Tensor > &tensor, const char *name)
 
void check_no_requires_grad (TensorList tensors, const char *name)
 
void check_no_requires_grad (const c10::List< c10::optional< Tensor > > &tensors, const char *name)
 
std::vector< SavedVariable > make_saved_variable_list (TensorList tensors)
 
std::vector< SavedVariable > make_saved_variable_list (const c10::List< c10::optional< at::Tensor > > &tensors)
 
std::vector< std::vector< int64_t > > to_args_sizes (TensorList tensors)
 
std::vector< ScalarType > to_args_scalartypes (TensorList tensors)
 

Variables

static PyObject * THPFFTVariableFunctionsModule = NULL
 
static PyObject * THPLinalgVariableFunctionsModule = NULL
 
static PyObject * THPNNVariableFunctionsModule = NULL
 
static PyObject * THPVariableFunctionsModule = NULL
 
static PyTypeObject THPVariableFunctions
 
list __all__ = ['Variable', 'Function', 'backward', 'grad_mode']
 
 _OptionalTensor = Optional[torch.Tensor]
 
static thread_local int worker_device = NO_DEVICE
 
static thread_local bool checkpoint_valid = true
 
static thread_local int current_depth = 0
 
static thread_local int total_depth = 0
 
static thread_local std::shared_ptr< GraphTask > current_graph_task = nullptr
 
static thread_local std::shared_ptr< ReadyQueue > local_ready_queue = nullptr
 
static constexpr int NO_DEVICE = -2
 
static constexpr int CPU_DEVICE = -1
 
static constexpr int MAX_DEPTH = 60
 
static thread_local std::shared_ptr< Node > current_evaluating_node = nullptr
 
static PyMethodDef methods []
 
static struct PyMethodDef default_methods []
 
static struct PyGetSetDef default_properties []
 
static std::unordered_map< std::type_index, THPObjectPtr > cpp_function_types
 
PyTypeObject THPLegacyVariableType
 
PyMethodDef variable_methods []
 
const char * ERR_BACKWARD_TWICE
 
VariableHooks variableHooks
 

Detailed Description

``torch.autograd`` provides classes and functions implementing automatic
differentiation of arbitrary scalar valued functions. It requires minimal
changes to the existing code - you only need to declare :class:`Tensor` s
for which gradients should be computed with the ``requires_grad=True`` keyword.
As of now, we only support autograd for floating point :class:`Tensor` types (
half, float, double and bfloat16) and complex :class:`Tensor` types (cfloat, cdouble).
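
For example (a minimal sketch using the C++ frontend; the snippet is illustrative and not part of the generated reference):

    torch::Tensor x = torch::ones({2, 2}, torch::requires_grad());
    torch::Tensor y = (x * x).sum();  // scalar-valued function of x
    y.backward();                     // differentiate through the taped graph
    // x.grad() now holds dy/dx = 2 * x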

Typedef Documentation

◆ AutoGradMode

Definition at line 9 of file grad_mode.h.

◆ ConstQuantizerPtr

Definition at line 42 of file VariableType.h.

◆ edge_list

using torch::autograd::edge_list = typedef std::vector<Edge>

Definition at line 33 of file function.h.

◆ EngineStub

using torch::autograd::EngineStub = typedef Engine& (*)()

Definition at line 390 of file engine.h.

◆ forward_t

template<typename X , typename... Args>
using torch::autograd::forward_t = typedef decltype(X::forward(nullptr, std::declval<Args>()...))

Definition at line 23 of file custom_function.h.

◆ function_constructor

using torch::autograd::function_constructor = typedef std::function<std::shared_ptr<Node>(edge_list&&)>

Definition at line 17 of file utils.h.

◆ GradMode

Definition at line 8 of file grad_mode.h.

◆ hooks_list

using torch::autograd::hooks_list = typedef std::vector<std::function<Variable(const Variable&)> >

Definition at line 8 of file cpp_hook.h.

◆ IndexRange

using torch::autograd::IndexRange = typedef std::pair<size_t, size_t>

Definition at line 35 of file function.h.

◆ saved_variable_list

Definition at line 34 of file function.h.

◆ tensor_list

using torch::autograd::tensor_list = typedef std::vector<at::Tensor>

Definition at line 31 of file function.h.

◆ Variable

Variable is exactly the same as Tensor (i.e.

we have using Variable = at::Tensor). This means that all the usual mathematical and other operations you can perform on Tensors are also available on Variables.

The only reason we are keeping the Variable class is backward compatibility with external users' legacy C++ frontend code. Our intention is to eliminate the Variable class in the near future.

Definition at line 23 of file VariableType.h.
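
In practice the alias means the two types are fully interchangeable, e.g. (a minimal illustrative sketch):

    torch::autograd::Variable v = torch::randn({3});
    at::Tensor t = v;   // same type, no conversion involved
    v = v + t;          // every Tensor operation also works on a Variable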

◆ variable_list

typedef std::vector< Variable > torch::autograd::variable_list

Definition at line 32 of file function.h.

Function Documentation

◆ _initFunctionPyTypeObject()

PyTypeObject * torch::autograd::_initFunctionPyTypeObject ( PyTypeObject &  type,
const char *  name,
PyGetSetDef *  function_properties,
PyMethodDef *  function_methods 
)

◆ _is_checkpoint_valid()

def torch.autograd._is_checkpoint_valid ( )
private

◆ _make_grads() [1/2]

variable_list torch::autograd::_make_grads ( const variable_list &  outputs,
const variable_list &  grad_outputs 
)

◆ _make_grads() [2/2]

Tuple[_OptionalTensor, ...] torch.autograd._make_grads ( Sequence[torch.Tensor]  outputs,
Sequence[_OptionalTensor]  grads 
)
private

◆ _print_stack()

void torch::autograd::_print_stack ( PyObject *  stack,
const std::string &  current_node_name,
bool  is_parent 
)

◆ _tensor_or_tensors_to_tuple()

Tuple[_OptionalTensor, ...] torch.autograd._tensor_or_tensors_to_tuple ( Optional[_TensorOrTensors]  tensors,
int  length 
)
private

Definition at line 60 of file __init__.py.

References _tensor_or_tensors_to_tuple(), and torch::jit.isinstance().

Referenced by _tensor_or_tensors_to_tuple(), backward(), and grad().

◆ _wrap_outputs()

◆ accumulate()

static void torch::autograd::accumulate ( std::vector< Variable > &  buffer,
const size_t  pos,
Variable &&  var 
)
static

Definition at line 14 of file input_buffer.cpp.

References TORCH_INTERNAL_ASSERT, and caffe2::var.

Referenced by at::native::_index_put_impl_(), at::native::legacy::cuda::_th_put_(), at::native::legacy::cpu::_th_put_(), torch::autograd::InputBuffer::add(), caffe2::DivFunctor< Context >::Backward(), caffe2::MulFunctor< Context >::Backward(), caffe2::MeanReducer< Context >::Backward(), caffe2::CanonicalDims(), at::native::cat_sparse(), torch::autograd::generated::details::cat_tensors_backward(), c10d::checkSplitSizes(), caffe2::MomentsGradientOp< T, Context >::Compute(), caffe2::math::CopyMatrix< std::uint16_t, CPUContext >(), caffe2::BatchMatMulFP16FakeOp< Context, Engine, USE_ACC_FP16, USE_TMP_ACCUMULATOR, USE_CUSTOM_ACC32 >::DoRunWithType(), caffe2::SpatialBNFakeLoweredFp16Op::DoRunWithType(), caffe2::SpatialBNFakeFp16Op::DoRunWithType(), caffe2::BatchMatMulOp< Context, Engine >::DoRunWithType(), caffe2::RemovePaddingOp< Context >::DoRunWithType(), caffe2::SpatialBNOp< Context >::DoRunWithType(), caffe2::SpatialBNGradientOp< Context >::DoRunWithType(), caffe2::UnpackSegmentsOp< Context >::DoRunWithType2(), c10::TensorImpl::Extend(), at::native::flatten_out(), caffe2::CoshGradientFunctor< Context >::Forward(), caffe2::SinhGradientFunctor< Context >::Forward(), caffe2::CubeGradientFunctor< Context >::Forward(), caffe2::GeluGradientFunctor< Context >::Forward(), caffe2::CbrtGradientFunctor< Context >::Forward(), caffe2::RsqrtGradientFunctor< Context >::Forward(), caffe2::AbsGradientFunctor< Context >::Forward(), caffe2::AcosGradientFunctor< Context >::Forward(), caffe2::AsinGradientFunctor< Context >::Forward(), caffe2::AtanGradientFunctor< Context >::Forward(), caffe2::CosGradientFunctor< Context >::Forward(), caffe2::ErfGradientFunctor< Context >::Forward(), caffe2::SinGradientFunctor< Context >::Forward(), caffe2::SoftsignGradientFunctor< Context >::Forward(), caffe2::TanGradientFunctor< Context >::Forward(), caffe2::TanhGradientFunctor< Context >::Forward(), caffe2::EluGradientFunctor< Context >::Forward(), caffe2::HardSigmoidGradientFunctor< Context >::Forward(), caffe2::ReciprocalGradientFunctor< Context >::Forward(), caffe2::ReluNGradientFunctor< Context >::Forward(), caffe2::ReluGradientFunctor< Context >::Forward(), caffe2::SigmoidGradientFunctor< Context >::Forward(), caffe2::emulator::StdOutputFormatter::get_mean(), caffe2::ConvPoolOpBase< Context >::GetDimsSize(), caffe2::HSoftmaxOpBase< T, Context >::getIntermediateOutputSize(), caffe2::DiagonalFillOp< Context >::GetStepSize(), caffe2::math::Im2ColNdNCHW(), at::native::index_put(), at::native::index_put_(), caffe2::math::utils::IsBothEndsBroadcastBinaryOp(), caffe2::math::utils::IsColwiseBroadcastBinaryOp(), caffe2::ConvDNNLowPOp< T, ReluFused >::IsConvGEMM_(), caffe2::math::utils::IsRowwiseBroadcastBinaryOp(), at::native::linalg_tensorinv(), at::native::linalg_tensorsolve(), caffe2::TensorRTOp::MaybeAdjustOutputShape(), c10::multiply_integers(), caffe2::ConvDNNLowPOp< T, ReluFused >::NoIm2ColNHWC_(), at::prod_intlist(), c10::TensorImpl::ReserveSpace(), caffe2::QTensor< Context >::Resize(), caffe2::BatchBucketOneHotOp< Context >::RunOnDevice(), caffe2::PercentileOp< Context >::RunOnDevice(), caffe2::SplitOp< Context >::RunOnDevice(), caffe2::SplitByLengthsOp< Context >::RunOnDevice(), caffe2::LengthsRangeFillOp< Context >::RunOnDevice(), caffe2::TTSparseLengthsSumGradientOp< T, Context >::RunOnDevice(), caffe2::TopKOp< T, Context >::RunOnDevice(), caffe2::TopKGradientOp< T, Context >::RunOnDevice(), caffe2::LengthsToSegmentIdsOp< Context >::RunOnDevice(), caffe2::utils::ConstTensorView< T >::size(), caffe2::SliceImpl(), 
c10::sum_integers(), at::sum_intlist(), caffe2::ConvDNNLowPPackWeightOp::TakeDepthWise3x3x3FastPath_(), caffe2::TensorInferenceFunction(), caffe2::SimpleNet::TEST_Benchmark(), and caffe2::WeightedSampleDequeueBlobsOp< Context >::WeightedSampleDequeueBlobsOp().

◆ any_variable_requires_grad()

bool torch::autograd::any_variable_requires_grad ( const variable_list &  variables)
inline

Return true if any of the variables in the list require a gradient.

Definition at line 497 of file function.h.

References variable().

Referenced by torch::autograd::Function< T >::apply(), unpack_input(), and wrap_outputs().

◆ applySlicing()

◆ as_view() [1/2]

Tensor torch::autograd::as_view ( const Tensor &  base,
const Tensor &  tensor,
bool  is_bw_differentiable,
bool  is_fw_differentiable,
std::function< Tensor(const Tensor &)>  view_func = nullptr,
CreationMeta  creation_meta = CreationMeta::DEFAULT,
bool  allow_tensor_metadata_change = true 
)
inline

◆ as_view() [2/2]

std::vector<Tensor> torch::autograd::as_view ( const Tensor &  base,
std::vector< Tensor > &  tensors,
bool  is_bw_differentiable,
bool  is_fw_differentiable,
CreationMeta  creation_meta = CreationMeta::DEFAULT 
)
inline

◆ autocast_decrement_nesting()

static PyObject* torch::autograd::autocast_decrement_nesting ( PyObject *  _unused,
PyObject *  arg 
)
static

◆ autocast_increment_nesting()

static PyObject* torch::autograd::autocast_increment_nesting ( PyObject *  _unused,
PyObject *  arg 
)
static

◆ backward() [1/2]

None torch.autograd.backward ( _TensorOrTensors  tensors,
Optional[_TensorOrTensors]   grad_tensors = None,
Optional[bool]   retain_graph = None,
bool   create_graph = False,
Optional[_TensorOrTensors]   grad_variables = None,
Optional[Sequence[torch.Tensor]]   inputs = None 
)
Computes the sum of gradients of given tensors w.r.t. graph leaves.

The graph is differentiated using the chain rule. If any of ``tensors``
are non-scalar (i.e. their data has more than one element) and require
gradient, then the Jacobian-vector product is computed; in this
case the function additionally requires specifying ``grad_tensors``.
It should be a sequence of matching length that contains the "vector"
in the Jacobian-vector product, usually the gradient of the differentiated
function w.r.t. the corresponding tensors (``None`` is an acceptable value for
all tensors that don't need gradient tensors).

This function accumulates gradients in the leaves - you might need to zero
``.grad`` attributes or set them to ``None`` before calling it.
See :ref:`Default gradient layouts<default-grad-layouts>`
for details on the memory layout of accumulated gradients.

.. note::
    Using this method with ``create_graph=True`` will create a reference cycle
    between the parameter and its gradient which can cause a memory leak.
    We recommend using ``autograd.grad`` when creating the graph to avoid this.
    If you have to use this function, make sure to reset the ``.grad`` fields of your
    parameters to ``None`` after use to break the cycle and avoid the leak.

.. note::

    If you run any forward ops, create ``grad_tensors``, and/or call ``backward``
    in a user-specified CUDA stream context, see
    :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.

Args:
    tensors (sequence of Tensor): Tensors of which the derivative will be
        computed.
    grad_tensors (sequence of (Tensor or None)): The "vector" in the Jacobian-vector
        product, usually gradients w.r.t. each element of corresponding tensors.
        None values can be specified for scalar Tensors or ones that don't require
        grad. If a None value would be acceptable for all grad_tensors, then this
        argument is optional.
    retain_graph (bool, optional): If ``False``, the graph used to compute the grad
        will be freed. Note that in nearly all cases setting this option to ``True``
        is not needed and often can be worked around in a much more efficient
        way. Defaults to the value of ``create_graph``.
    create_graph (bool, optional): If ``True``, graph of the derivative will
        be constructed, allowing to compute higher order derivative products.
        Defaults to ``False``.
    inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be
        accumulated into ``.grad``. All other Tensors will be ignored. If not
        provided, the gradient is accumulated into all the leaf Tensors that were
        used to compute the :attr:`tensors`. All the provided inputs must be leaf
        Tensors.

Definition at line 68 of file __init__.py.

References _make_grads(), _tensor_or_tensors_to_tuple(), backward(), torch::jit.isinstance(), and c10::aten.len().

Referenced by torch.autograd.function.BackwardCFunction.apply(), backward(), torch.distributed.pipeline.sync.checkpoint.Checkpoint.backward(), torch.utils.checkpoint.CheckpointFunction.backward(), torch::distributed::rpc::PyRRef.backward(), torch.tensor.Tensor.backward(), pt_engine.TorchTensorEngine.backward(), torch.sparse.mm(), and torch.distributed.pipeline.sync._balance.profile.profile_times().

◆ backward() [2/2]

TORCH_API void torch::autograd::backward ( const variable_list &  tensors,
const variable_list &  grad_tensors = {},
c10::optional< bool >  retain_graph = c10::nullopt,
bool  create_graph = false,
const variable_list &  inputs = {} 
)

Computes the sum of gradients of given tensors with respect to graph leaves.

The graph is differentiated using the chain rule. If any of tensors are non-scalar (i.e. their data has more than one element) and require gradient, then the Jacobian-vector product is computed; in this case the function additionally requires specifying grad_tensors. It should be a sequence of matching length that contains the "vector" in the Jacobian-vector product, usually the gradient of the differentiated function w.r.t. the corresponding tensors (torch::Tensor() is an acceptable value for all tensors that don't need gradient tensors).

This function accumulates gradients in the leaves - you might need to zero them before calling it.

Parameters
tensors: Tensors of which the derivative will be computed.
grad_tensors: The "vector" in the Jacobian-vector product, usually gradients w.r.t. each element of corresponding tensors. torch::Tensor() values can be specified for scalar Tensors or ones that don't require grad. If a torch::Tensor() value would be acceptable for all grad_tensors, then this argument is optional.
retain_graph: If false, the graph used to compute the grad will be freed. Note that in nearly all cases setting this option to true is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.
create_graph: If true, graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to false.
inputs: Inputs w.r.t. which the gradient will be accumulated into at::Tensor::grad. All other Tensors will be ignored. If not provided, the gradient is accumulated into all the leaf Tensors that were used to compute the given tensors. All the provided inputs must be leaf Tensors.

Definition at line 130 of file autograd.cpp.

References _make_grads(), inputs, run_backward(), caffe2::tensors, and c10::optional< T >::value().
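
For example (a hedged sketch against the C++ frontend; values are illustrative):

    auto x = torch::randn({2, 2}, torch::requires_grad());
    auto y = x * x;
    // y is non-scalar, so a matching "vector" of gradients must be supplied.
    torch::autograd::backward({y}, /*grad_tensors=*/{torch::ones_like(y)});
    // The result is accumulated into x.grad() (here: 2 * x).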

◆ call_function()

static variable_list torch::autograd::call_function ( std::shared_ptr< GraphTask > &  graph_task,
Node *  func,
InputBuffer &  inputBuffer 
)
static

◆ call_post_hooks()

static variable_list torch::autograd::call_post_hooks ( Node &  fn,
variable_list  outputs,
const variable_list &  inputs 
)
static

Definition at line 539 of file engine.cpp.

References caffe2.perfkernels.hp_emblookup_codegen::fn, inputs, and outputs.

Referenced by call_function().

◆ call_pre_hooks()

static variable_list torch::autograd::call_pre_hooks ( Node &  fn,
variable_list  inputs 
)
static

Definition at line 532 of file engine.cpp.

References caffe2.perfkernels.hp_emblookup_codegen::fn, and inputs.

Referenced by call_function().

◆ check_inplace() [1/2]

◆ check_inplace() [2/2]

void torch::autograd::check_inplace ( const TensorList  tensors,
bool  requires_grad 
)
inline

◆ check_input_variables()

TORCH_API void torch::autograd::check_input_variables ( const char *  name,
const variable_list &  inputs,
int  args,
int  required_args = -1,
bool  allow_undefined = false 
)

Checks that inputs contains exactly args items and that the first required_args items are not nullptr.

If not specified, required_args defaults to args.

Definition at line 40 of file utils.cpp.

References compare-fastrnn-results::args, inputs, and name.

Referenced by torch::autograd::AccumulateGrad::apply(), torch::autograd::CopyBackwards::apply(), and torch::autograd::CopySlices::apply().

◆ check_no_requires_grad() [1/4]

void torch::autograd::check_no_requires_grad ( const c10::List< c10::optional< Tensor > > &  tensors,
const char *  name 
)
inline

Definition at line 279 of file VariableTypeUtils.h.

References check_no_requires_grad(), name, torch::tensor(), and caffe2::tensors.

◆ check_no_requires_grad() [2/4]

void torch::autograd::check_no_requires_grad ( const c10::optional< Tensor > &  tensor,
const char *  name 
)
inline

Definition at line 267 of file VariableTypeUtils.h.

References check_no_requires_grad(), name, and torch::tensor().

◆ check_no_requires_grad() [3/4]

void torch::autograd::check_no_requires_grad ( const Tensor &  tensor,
const char *  name 
)
inline

Definition at line 257 of file VariableTypeUtils.h.

References name, torch::tensor(), and caffe2::var.

Referenced by check_no_requires_grad().

◆ check_no_requires_grad() [4/4]

void torch::autograd::check_no_requires_grad ( TensorList  tensors,
const char *  name 
)
inline

Definition at line 273 of file VariableTypeUtils.h.

References check_no_requires_grad(), name, torch::tensor(), and caffe2::tensors.

◆ check_variable_result()

TORCH_API void torch::autograd::check_variable_result ( const Variable &  original,
const Variable &  result,
std::string  hook_name 
)

Definition at line 160 of file custom_function.cpp.

References hook_name().

Referenced by check_single_result().

◆ checkUnpackSlice()

static void torch::autograd::checkUnpackSlice ( PyObject *  index,
Py_ssize_t *  start_ptr,
Py_ssize_t *  stop_ptr,
Py_ssize_t *  step_ptr 
)
inlinestatic

◆ clear_autocast_cache()

static PyObject* torch::autograd::clear_autocast_cache ( PyObject *  _unused,
PyObject *  arg 
)
static

Definition at line 230 of file init.cpp.

References at::autocast::clear_cache(), END_HANDLE_TH_ERRORS, and HANDLE_TH_ERRORS.

◆ collect_next_edges()

template<typename... Variables>
edge_list torch::autograd::collect_next_edges ( Variables &&...  variables)

◆ compute_requires_grad()

◆ count_specified_dimensions()

static int64_t torch::autograd::count_specified_dimensions ( PyObject *  index)
inlinestatic

◆ CppFunction_pynew()

template<typename Ctor >
PyObject* torch::autograd::CppFunction_pynew ( PyTypeObject *  type,
PyObject *  args,
PyObject *  kwds 
)

◆ create_gradient_edge()

void torch::autograd::create_gradient_edge ( Variable &  variable,
std::shared_ptr< Node >  function 
)
inline

Create an Edge between the given variable and the function, which is assumed to be the gradient function of this variable (i.e.

the function through which this variable is backpropagated during the backward pass). This sets the grad_fn property of the variable. This function assumes that the Variable is a new input to the gradient function and its input_nr thus equal to function->num_inputs(). Additionally, it increments the Node's number of inputs by one. Approximately equivalent to variable.set_gradient_edge(function, function->add_input_metadata(variable.dispatch_type(), variable.sizes())). If you don't want the Node's num_inputs to be incremented, use set_gradient_edge directly.

Definition at line 488 of file function.h.

References torch::autograd::impl::set_gradient_edge(), and variable().

Referenced by wrap_outputs().
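
A hedged sketch of the pattern this enables (MulByTwoBackward, input, and out are invented names; the real callers are the generated operator implementations):

    struct MulByTwoBackward : public torch::autograd::Node {
      torch::autograd::variable_list apply(torch::autograd::variable_list &&grads) override {
        return {grads[0] * 2};  // gradient of out = input * 2
      }
    };

    // Given: Variable input (requires grad) and Variable out = input * 2.
    auto node = std::make_shared<MulByTwoBackward>();
    node->set_next_edges(torch::autograd::collect_next_edges(input));
    torch::autograd::create_gradient_edge(out, node);  // sets out.grad_fn()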

◆ createForwardFunctionPyTypeObject()

template<typename Ctor >
PyTypeObject* torch::autograd::createForwardFunctionPyTypeObject ( PyTypeObject &  type,
const char *  name,
PyGetSetDef *  function_properties = nullptr,
PyMethodDef *  function_methods = nullptr 
)

Definition at line 56 of file python_cpp_function.h.

References _initFunctionPyTypeObject(), name, and type.

◆ deleteNode()

TORCH_API void torch::autograd::deleteNode ( Node *  function)

◆ dispatch_arange() [1/4]

Tensor torch::autograd::dispatch_arange ( Scalar  end,
const TensorOptions &  options 
)
inline

◆ dispatch_arange() [2/4]

Tensor torch::autograd::dispatch_arange ( Scalar  end,
Tensor  result 
)
inline

Definition at line 64 of file python_torch_functions.cpp.

References at::native::arange_out(), and matmul_dlmc_bench::end.

Referenced by THPVariable_arange().

◆ dispatch_arange() [3/4]

Tensor torch::autograd::dispatch_arange ( Scalar  start,
Scalar  end,
Scalar  step,
const TensorOptions &  options 
)
inline

◆ dispatch_arange() [4/4]

Tensor torch::autograd::dispatch_arange ( Scalar  start,
Scalar  end,
Scalar  step,
Tensor  result 
)
inline

◆ dispatch_contiguous()

static Tensor torch::autograd::dispatch_contiguous ( const Tensor &  self,
at::MemoryFormat  memory_format 
)
static

Definition at line 229 of file python_variable_methods.cpp.

References at::device_of(), and c10::memory_format().

Referenced by THPVariable_contiguous().

◆ dispatch_copy_()

static Tensor torch::autograd::dispatch_copy_ ( Tensor &  self,
const Tensor &  other,
bool  non_blocking 
)
static

Definition at line 272 of file python_variable_methods.cpp.

References at::device_of(), and at::meta::other.

Referenced by THPVariable_copy_().

◆ dispatch_full() [1/3]

Tensor torch::autograd::dispatch_full ( IntArrayRef  size,
Scalar  fill_val,
c10::optional< DimnameList >  names,
const TensorOptions &  options 
)
inline

◆ dispatch_full() [2/3]

Tensor torch::autograd::dispatch_full ( IntArrayRef  size,
Scalar  fill_val,
const TensorOptions &  options 
)
inline

◆ dispatch_full() [3/3]

Tensor torch::autograd::dispatch_full ( IntArrayRef  size,
Scalar  fill_val,
Tensor  result 
)
inline

Definition at line 211 of file python_torch_functions.cpp.

References torch::jit::full_out, and size.

◆ dispatch_invert()

static Tensor torch::autograd::dispatch_invert ( const Tensor &  self)
static

Definition at line 389 of file python_variable_methods.cpp.

References at::device_of().

Referenced by THPVariable_invert().

◆ dispatch_is_contiguous()

bool torch::autograd::dispatch_is_contiguous ( Tensor &  self,
MemoryFormat  memory_format 
)
inline

Definition at line 764 of file python_variable_methods.cpp.

References c10::memory_format().

Referenced by THPVariable_is_contiguous().

◆ dispatch_nonzero() [1/3]

static Tensor torch::autograd::dispatch_nonzero ( const Tensor &  self)
static

Definition at line 389 of file python_torch_functions.cpp.

References at::device_of().

Referenced by THPVariable_nonzero().

◆ dispatch_nonzero() [2/3]

static Tensor torch::autograd::dispatch_nonzero ( const Tensor &  self)
static

Definition at line 454 of file python_variable_methods.cpp.

References at::device_of().

◆ dispatch_nonzero() [3/3]

static Tensor torch::autograd::dispatch_nonzero ( const Tensor &  self,
Tensor  out 
)
static

Definition at line 395 of file python_torch_functions.cpp.

References at::device_of(), and out.

◆ dispatch_nonzero_numpy() [1/2]

static std::vector<Tensor> torch::autograd::dispatch_nonzero_numpy ( const Tensor &  self)
static

Definition at line 401 of file python_torch_functions.cpp.

References at::device_of().

Referenced by THPVariable_nonzero().

◆ dispatch_nonzero_numpy() [2/2]

static std::vector<Tensor> torch::autograd::dispatch_nonzero_numpy ( const Tensor &  self)
static

Definition at line 460 of file python_variable_methods.cpp.

References at::device_of().

◆ dispatch_randint() [1/8]

Tensor torch::autograd::dispatch_randint ( int64_t  high,
IntArrayRef  size,
c10::optional< Generator >  generator,
const TensorOptions &  options 
)
inline

◆ dispatch_randint() [2/8]

Tensor torch::autograd::dispatch_randint ( int64_t  high,
IntArrayRef  size,
c10::optional< Generator >  generator,
Tensor  result 
)
inline

Definition at line 273 of file python_torch_functions.cpp.

References at::native::randint_out(), and size.

Referenced by THPVariable_randint().

◆ dispatch_randint() [3/8]

Tensor torch::autograd::dispatch_randint ( int64_t  high,
IntArrayRef  size,
const TensorOptions &  options 
)
inline

◆ dispatch_randint() [4/8]

Tensor torch::autograd::dispatch_randint ( int64_t  high,
IntArrayRef  size,
Tensor  result 
)
inline

Definition at line 282 of file python_torch_functions.cpp.

References at::native::randint_out(), and size.

◆ dispatch_randint() [5/8]

Tensor torch::autograd::dispatch_randint ( int64_t  low,
int64_t  high,
IntArrayRef  size,
c10::optional< Generator >  generator,
const TensorOptions &  options 
)
inline

◆ dispatch_randint() [6/8]

Tensor torch::autograd::dispatch_randint ( int64_t  low,
int64_t  high,
IntArrayRef  size,
c10::optional< Generator >  generator,
Tensor  result 
)
inline

Definition at line 291 of file python_torch_functions.cpp.

References at::native::randint_out(), and size.

◆ dispatch_randint() [7/8]

Tensor torch::autograd::dispatch_randint ( int64_t  low,
int64_t  high,
IntArrayRef  size,
const TensorOptions &  options 
)
inline

◆ dispatch_randint() [8/8]

Tensor torch::autograd::dispatch_randint ( int64_t  low,
int64_t  high,
IntArrayRef  size,
Tensor  result 
)
inline

Definition at line 300 of file python_torch_functions.cpp.

References at::native::randint_out(), and size.

◆ dispatch_range() [1/2]

Tensor torch::autograd::dispatch_range ( Scalar  start,
Scalar  end,
Scalar  step,
const TensorOptions &  options 
)
inline

◆ dispatch_range() [2/2]

Tensor torch::autograd::dispatch_range ( Scalar  start,
Scalar  end,
Scalar  step,
Tensor  result 
)
inline

◆ dispatch_to() [1/4]

static Tensor torch::autograd::dispatch_to ( const Tensor &  self,
bool  non_blocking,
bool  copy,
c10::optional< c10::MemoryFormat optional_memory_format 
)
static

◆ dispatch_to() [2/4]

static Tensor torch::autograd::dispatch_to ( const Tensor &  self,
Device  device,
bool  non_blocking,
bool  copy,
c10::optional< c10::MemoryFormat optional_memory_format 
)
static

◆ dispatch_to() [3/4]

static Tensor torch::autograd::dispatch_to ( const Tensor &  self,
Device  device,
ScalarType  dtype,
bool  non_blocking,
bool  copy,
c10::optional< c10::MemoryFormat optional_memory_format 
)
static

Definition at line 429 of file python_variable_methods.cpp.

References c10::aten::copy(), device, and caffe2::dtype.

◆ dispatch_to() [4/4]

static Tensor torch::autograd::dispatch_to ( const Tensor &  self,
ScalarType  dtype,
bool  non_blocking,
bool  copy,
c10::optional< c10::MemoryFormat optional_memory_format 
)
static

Definition at line 423 of file python_variable_methods.cpp.

References c10::aten::copy(), and caffe2::dtype.

◆ dispatch_to_Bool()

static bool torch::autograd::dispatch_to_Bool ( const Tensor &  self)
static

Definition at line 324 of file python_variable_methods.cpp.

References at::device_of(), and at::native::numel().

Referenced by THPVariable_item().

◆ dispatch_to_CComplexDouble()

static c10::complex<double> torch::autograd::dispatch_to_CComplexDouble ( const Tensor &  self)
static

◆ dispatch_to_CDouble()

static double torch::autograd::dispatch_to_CDouble ( const Tensor &  self)
static

◆ dispatch_to_CLong()

static int64_t torch::autograd::dispatch_to_CLong ( const Tensor &  self)
static

◆ engine_stub()

std::atomic<EngineStub> torch::autograd::engine_stub ( Engine::get_base_engine  )

◆ ensure_tuple()

bool torch::autograd::ensure_tuple ( THPObjectPtr &  obj)
inline

Cast an object into a tuple, if it is not a tuple already.

Returns true if the original object was not a tuple.

Definition at line 61 of file python_function.h.

References THPPointer< T >::get(), and THPPointer< T >::release().

Referenced by torch::autograd::PyNode::apply(), process_outputs(), and THPFunction_do_backward().

◆ extract_vars()

template<typename... Args>
void torch::autograd::extract_vars ( std::vector< bool > &  is_var,
variable_list &  list,
Args &&...  args 
)
inline

◆ flatten_tensor_args()

template<typename... Args>
variable_list torch::autograd::flatten_tensor_args ( Args &&...  args)
inline

◆ functionToPyObject()

◆ gatherFunctions()

static void torch::autograd::gatherFunctions ( Node *  func,
std::vector< std::shared_ptr< Node > > &  stack 
)
static

◆ grad() [1/2]

Tuple[torch.Tensor, ...] torch.autograd.grad ( _TensorOrTensors  outputs,
_TensorOrTensors  inputs,
Optional[_TensorOrTensors]   grad_outputs = None,
Optional[bool]   retain_graph = None,
bool   create_graph = False,
bool   only_inputs = True,
bool   allow_unused = False 
)
Computes and returns the sum of gradients of outputs w.r.t. the inputs.

``grad_outputs`` should be a sequence of length matching ``outputs``
containing the "vector" in the Jacobian-vector product, usually the pre-computed
gradients w.r.t. each of the outputs. If an output doesn't require_grad,
then the gradient can be ``None``.

If ``only_inputs`` is ``True``, the function will only return a list of gradients
w.r.t the specified inputs. If it's ``False``, then gradient w.r.t. all remaining
leaves will still be computed, and will be accumulated into their ``.grad``
attribute.

.. note::

    If you run any forward ops, create ``grad_outputs``, and/or call ``grad``
    in a user-specified CUDA stream context, see
    :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.

Args:
    outputs (sequence of Tensor): outputs of the differentiated function.
    inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be
        returned (and not accumulated into ``.grad``).
    grad_outputs (sequence of Tensor): The "vector" in the Jacobian-vector product.
        Usually gradients w.r.t. each output. None values can be specified for scalar
        Tensors or ones that don't require grad. If a None value would be acceptable
        for all grad_tensors, then this argument is optional. Default: None.
    retain_graph (bool, optional): If ``False``, the graph used to compute the grad
        will be freed. Note that in nearly all cases setting this option to ``True``
        is not needed and often can be worked around in a much more efficient
        way. Defaults to the value of ``create_graph``.
    create_graph (bool, optional): If ``True``, graph of the derivative will
        be constructed, allowing to compute higher order derivative products.
        Default: ``False``.
    allow_unused (bool, optional): If ``False``, specifying inputs that were not
        used when computing outputs (and therefore their grad is always zero)
        is an error. Defaults to ``False``.

Definition at line 150 of file __init__.py.

References _make_grads(), _tensor_or_tensors_to_tuple(), grad(), torch.handle_torch_function(), torch.overrides.has_torch_function, torch::jit.isinstance(), and c10::aten.len().

Referenced by torch.autograd.functional._autograd_grad(), torch::autograd::generated::details._euclidean_dist_backward(), torch::autograd::generated::details._fused_dropout_backward(), torch.distributions.kl._kl_expfamily_expfamily(), torch::autograd::generated::details._sparse_addmm_sparse_backward(), torch::autograd::generated::details.angle_backward(), torch::autograd::AccumulateGrad.apply(), torch::autograd::CopyBackwards.apply(), torch::autograd::UndefinedGradBackward.apply(), torch::autograd::CopySlices.apply(), torch::autograd::generated::details.as_strided_backward(), torch::autograd::generated::details.atan2_backward(), torch::autograd::generated::details.binary_cross_entropy_double_backward(), torch::autograd::generated::details.binary_cross_entropy_double_backward_grad_output(), torch::autograd::generated::details.cat_tensors_backward(), torch::autograd::generated::details.cholesky_backward(), torch::autograd::generated::details.cholesky_inverse_backward(), torch::autograd::generated::details.clamp_backward(), torch::autograd::generated::details.constant_pad_nd_backward(), torch.nn.grad.conv1d_input(), torch.nn.grad.conv1d_weight(), torch.nn.grad.conv2d_input(), torch.nn.grad.conv2d_weight(), torch.nn.grad.conv3d_input(), torch.nn.grad.conv3d_weight(), torch::autograd::generated::details.copysign_tensor_self_backward(), torch::autograd::generated::details.deg2rad_backward(), torch::autograd::generated::details.det_backward(), torch::autograd::generated::details.div_tensor_other_backward(), torch::autograd::generated::details.div_tensor_self_backward(), torch::autograd::generated::details.elu_double_backward(), torch::autograd::generated::details.embedding_dense_double_backward(), torch.distributions.exp_family.ExponentialFamily.entropy(), torch::autograd::ForwardADLevel.erase(), torch::autograd::generated::details.evenly_distribute_backward(), torch::autograd::generated::details.fft_c2r_backward(), torch::autograd::generated::details.fft_r2c_backward(), torch.autograd.gradcheck.get_analytical_jacobian(), torch::autograd::generated::details.glu_double_backward(), torch::autograd::generated::details.glu_double_backward_grad_output(), grad(), torch.autograd.gradcheck.gradcheck(), torch.autograd.gradcheck.gradgradcheck(), torch::autograd::generated::details.index_backward(), torch::autograd::generated::details.infinitely_differentiable_logit_backward(), torch::autograd::ForwardADLevel.insert(), torch::autograd::generated::details.kl_div_double_backward_grad_output(), torch::autograd::generated::details.l1_loss_double_backward(), torch::autograd::generated::details.l1_loss_double_backward_grad_output(), torch::autograd::generated::details.log1p_backward(), torch::autograd::generated::details.log_sigmoid_double_backward(), torch::autograd::generated::details.log_softmax_double_backward(), torch::autograd::generated::details.logcumsumexp_backward(), torch::autograd::generated::details.logdet_backward(), torch::autograd::generated::details.logsumexp_backward(), torch::autograd::generated::details.masked_scatter_backward(), torch::autograd::generated::details.max_pool_double_backward(), torch::autograd::generated::details.mean_backward(), torch::autograd::generated::details.mm_mat1_backward(), torch::autograd::generated::details.mm_mat2_backward(), torch::autograd::generated::details.mse_loss_double_backward(), torch::autograd::generated::details.mse_loss_double_backward_grad_output(), torch::autograd::generated::details.mul_tensor_backward(), 
torch::autograd::generated::details.mvlgamma_backward(), torch::autograd::generated::details.nansum_backward(), torch::autograd::generated::details.norm_backward(), torch::autograd::utils.obeys_layout_contract(), torch::autograd::generated::details.permute_backwards(), torch::autograd::generated::details.pow_backward(), torch::autograd::generated::details.pow_backward_exponent(), torch::autograd::generated::details.pow_backward_self(), torch::autograd::generated::details.prod_backward(), torch::autograd::generated::details.prod_safe_zeros_backward(), torch::autograd::generated::details.rad2deg_backward(), torch::autograd::generated::details.renorm_backward(), torch::autograd::generated::details.repeat_backward(), torch::autograd::generated::details.scale_grad_by_count(), torch::autograd::generated::details.sgn_backward(), torch::autograd::generated::details.slice_backward_wrapper(), torch::autograd::generated::details.smooth_l1_loss_double_backward(), torch::autograd::generated::details.smooth_l1_loss_double_backward_grad_output(), torch::autograd::generated::details.soft_margin_loss_double_backward(), torch::autograd::generated::details.soft_margin_loss_double_backward_grad_output(), torch::autograd::generated::details.softmax_double_backward(), torch::autograd::generated::details.softplus_double_backward(), torch::autograd::generated::details.solve_backward_A(), torch::autograd::generated::details.solve_backward_self(), torch::autograd::generated::details.sparse_sparse_matmul_backward(), torch::autograd::generated::details.std_backward(), torch::autograd::generated::details.sum_backward(), torch.autograd.gradcheck.test_batched_grad(), validate_outputs(), torch::autograd::generated::details.var_backward(), torch::autograd::generated::details.var_std_mean_backward(), torch.jit._trace.verify(), and torch._vmap_internals.vmap().

◆ grad() [2/2]

TORCH_API variable_list torch::autograd::grad ( const variable_list &  outputs,
const variable_list &  inputs,
const variable_list &  grad_outputs = {},
c10::optional< bool >  retain_graph = c10::nullopt,
bool  create_graph = false,
bool  allow_unused = false 
)

Computes and returns the sum of gradients of outputs with respect to the inputs.

grad_outputs should be a sequence of length matching outputs, containing the "vector" in the Jacobian-vector product, usually the pre-computed gradients w.r.t. each of the outputs. If an output doesn't require_grad, then the gradient can be torch::Tensor().

Parameters
outputs  Outputs of the differentiated function.
inputs  Inputs w.r.t. which the gradient will be returned (and not accumulated into at::Tensor::grad).
grad_outputs  The "vector" in the Jacobian-vector product, usually gradients w.r.t. each output. torch::Tensor() values can be specified for scalar Tensors or ones that don't require grad. If a torch::Tensor() value would be acceptable for all grad_outputs, then this argument is optional. Default: {}.
retain_graph  If false, the graph used to compute the grad will be freed. Note that in nearly all cases setting this option to true is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.
create_graph  If true, the graph of the derivative will be constructed, allowing one to compute higher-order derivative products. Default: false.
allow_unused  If false, specifying inputs that were not used when computing outputs (and whose grad is therefore always zero) is an error. Defaults to false.

Definition at line 143 of file autograd.cpp.

References _make_grads(), inputs, outputs, run_backward(), and c10::optional< T >::value().
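
A minimal usage sketch of the C++ frontend call (illustrative only; shapes and values are assumptions, not taken from this file): computing d(y)/dx for a scalar y, where grad_outputs may stay empty.

#include <torch/torch.h>
#include <iostream>

int main() {
  // x participates in the graph, so it may appear in `inputs`.
  auto x = torch::randn({2, 2}, torch::requires_grad());
  auto y = (x * x).sum();  // scalar output

  // Sum of gradients of y w.r.t. x; empty grad_outputs is fine for a scalar output.
  auto grads = torch::autograd::grad(
      /*outputs=*/{y},
      /*inputs=*/{x},
      /*grad_outputs=*/{},
      /*retain_graph=*/c10::nullopt,
      /*create_graph=*/false,
      /*allow_unused=*/false);

  std::cout << grads[0] << '\n';  // equals 2 * x
}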

◆ handle_view_on_rebase()

void torch::autograd::handle_view_on_rebase ( DifferentiableViewMeta *  diff_view_meta,
bool  indirect 
)

See NOTE [ View + Inplace detection ] for justification of the logic below.

Definition at line 493 of file variable.cpp.

References c10::str(), TORCH_CHECK, TORCH_INTERNAL_ASSERT, and TORCH_WARN.

Referenced by check_inplace(), and torch::autograd::VariableHooks::grad_fn().
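
For context, a short illustrative snippet of the view-plus-inplace pattern this function handles (an assumed typical usage, not code from this file): an in-place op on a differentiable view forces autograd to rebase the view's history, and handle_view_on_rebase implements the checks described in the NOTE.

#include <torch/torch.h>

int main() {
  auto a = torch::randn({4}, torch::requires_grad());
  auto base = a * 1;                 // non-leaf tensor in the graph
  auto view = base.narrow(0, 0, 2);  // differentiable view of `base`
  view.mul_(2);                      // in-place op on a view: its history is rebased
  // Later graph accesses go through the view-on-rebase handling
  // (see NOTE [ View + Inplace detection ]).
  base.sum().backward();             // gradients flow back to `a`
}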

◆ increment_version()

void torch::autograd::increment_version ( Tensor &  t)
inline

Definition at line 116 of file VariableTypeUtils.h.

References torch::autograd::impl::bump_version().

◆ init_legacy_variable()

void torch::autograd::init_legacy_variable ( PyObject *  module)

Definition at line 132 of file python_legacy_variable.cpp.

References module, and THPLegacyVariableType.

Referenced by initModule().

◆ initFFTFunctions()

void torch::autograd::initFFTFunctions ( PyObject *  module)

Definition at line 50 of file python_fft_functions.cpp.

References torch::fft::fft, module, and THPFFTVariableFunctionsModule.

Referenced by initModule().

◆ initLinalgFunctions()

void torch::autograd::initLinalgFunctions ( PyObject *  module)

Definition at line 36 of file python_linalg_functions.cpp.

References module, and THPLinalgVariableFunctionsModule.

Referenced by initModule().

◆ initNNFunctions()

void torch::autograd::initNNFunctions ( PyObject *  module)

Definition at line 81 of file python_nn_functions.cpp.

References module, and THPNNVariableFunctionsModule.

Referenced by initModule().

◆ initTensorImplConversion()

◆ initTorchFunctions()

void torch::autograd::initTorchFunctions ( PyObject *  module)

◆ invalid_index()

static void torch::autograd::invalid_index ( PyObject *  obj)
inline static

Definition at line 80 of file python_variable_indexing.cpp.

Referenced by applySlicing().

◆ is_anomaly_mode_enabled()

static PyObject* torch::autograd::is_anomaly_mode_enabled ( PyObject *  _unused,
PyObject *  arg 
)
static

◆ is_autocast_enabled()

static PyObject* torch::autograd::is_autocast_enabled ( PyObject *  _unused,
PyObject *  arg 
)
static

Definition at line 220 of file init.cpp.

References END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and at::autocast::is_enabled().

◆ is_compatible_type()

static bool torch::autograd::is_compatible_type ( const at::TensorOptions &  expected,
const at::TensorOptions &  actual 
)
static

◆ is_forward_AD_enabled()

static PyObject* torch::autograd::is_forward_AD_enabled ( PyObject *  _unused,
PyObject *  arg 
)
static

Definition at line 259 of file init.cpp.

References END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and isForwardADEnabled().

◆ is_grad_enabled()

static PyObject* torch::autograd::is_grad_enabled ( PyObject *  _unused,
PyObject *  arg 
)
static

Definition at line 279 of file init.cpp.

References END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and at::GradMode::is_enabled().

◆ isForwardADEnabled()

bool TORCH_API torch::autograd::isForwardADEnabled ( )

Definition at line 82 of file forward_grad.cpp.

Referenced by as_view(), and is_forward_AD_enabled().

◆ make_saved_variable_list() [1/2]

std::vector<SavedVariable> torch::autograd::make_saved_variable_list ( const c10::List< c10::optional< at::Tensor > > &  tensors)
inline

Definition at line 294 of file VariableTypeUtils.h.

References c10::fmap(), torch::tensor(), and caffe2::tensors.

◆ make_saved_variable_list() [2/2]

std::vector<SavedVariable> torch::autograd::make_saved_variable_list ( TensorList  tensors)
inline

Definition at line 288 of file VariableTypeUtils.h.

References c10::fmap(), torch::tensor(), and caffe2::tensors.

◆ python_enter_dual_level()

static PyObject* torch::autograd::python_enter_dual_level ( PyObject *  _unused,
PyObject *  arg 
)
static

◆ python_exit_dual_level()

static PyObject* torch::autograd::python_exit_dual_level ( PyObject *  _unused,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ python_functions()

PyMethodDef * torch::autograd::python_functions ( )

Definition at line 349 of file init.cpp.

References methods.

Referenced by initModule().

◆ rebase_history() [1/2]

void torch::autograd::rebase_history ( std::vector< Variable > &&  vars,
std::shared_ptr< Node >  grad_fn 
)
inline

◆ rebase_history() [2/2]

void torch::autograd::rebase_history ( Variable &  var,
std::shared_ptr< Node >  grad_fn 
)
inline

Definition at line 95 of file VariableTypeUtils.h.

References torch::autograd::impl::rebase_history(), and caffe2::var.

◆ recordSelectTrace()

static void torch::autograd::recordSelectTrace ( const Tensor &  index_tensor)
inline static

◆ recordSliceTrace()

static void torch::autograd::recordSliceTrace ( PyObject *  obj)
inline static

◆ registerCppFunction()

void torch::autograd::registerCppFunction ( const std::type_info &  type,
PyTypeObject *  pytype 
)

Definition at line 240 of file python_cpp_function.cpp.

References cpp_function_types, and type.

Referenced by addClass(), and torch::autograd::generated::addClass().

◆ registerFunctionHook()

PyObject * torch::autograd::registerFunctionHook ( Node &  fn,
PyObject *  hook 
)

◆ run_backward()

variable_list torch::autograd::run_backward ( const variable_list &  outputs,
const variable_list &  grad_outputs,
bool  keep_graph,
bool  create_graph,
const variable_list &  inputs,
bool  allow_unused,
bool  accumulate_grad 
)

◆ sequenceToVariable()

static Variable torch::autograd::sequenceToVariable ( c10::DispatchKey  dispatch_key,
PyObject *  seq 
)
inline static

◆ set_anomaly_mode_enabled()

static PyObject* torch::autograd::set_anomaly_mode_enabled ( PyObject *  _unused,
PyObject *  arg 
)
static

◆ set_autocast_enabled()

static PyObject* torch::autograd::set_autocast_enabled ( PyObject *  _unused,
PyObject *  arg 
)
static

◆ set_default_engine_stub()

TORCH_API void torch::autograd::set_default_engine_stub ( EngineStub  stub)

Definition at line 994 of file engine.cpp.

References engine_stub().

Referenced by THPEngine_initModule().

◆ set_device()

◆ set_forward_AD_enabled()

static PyObject* torch::autograd::set_forward_AD_enabled ( PyObject *  _unused,
PyObject *  arg 
)
static

Definition at line 249 of file init.cpp.

References setup::arg, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and setForwardADEnabled().

◆ set_grad_enabled()

static PyObject* torch::autograd::set_grad_enabled ( PyObject *  _unused,
PyObject *  arg 
)
static

◆ set_history() [1/3]

void torch::autograd::set_history ( at::Tensor &  variable,
const std::shared_ptr< Node > &  grad_fn 
)
inline

◆ set_history() [2/3]

void torch::autograd::set_history ( std::vector< Variable > &&  variables,
const std::shared_ptr< Node > &  grad_fn 
)
inline

Definition at line 73 of file utils.h.

References set_history(), and variable().

◆ set_history() [3/3]

void torch::autograd::set_history ( std::vector< Variable > &  variables,
const std::shared_ptr< Node > &  grad_fn 
)
inline

Definition at line 81 of file utils.h.

References set_history(), and variable().

◆ setForwardADEnabled()

void TORCH_API torch::autograd::setForwardADEnabled ( bool  value)

Definition at line 86 of file forward_grad.cpp.

References value.

Referenced by set_forward_AD_enabled().

◆ THPCppFunction_metadata()

PyObject * torch::autograd::THPCppFunction_metadata ( THPCppFunction *  self,
void *  _unused 
)

Definition at line 126 of file python_cpp_function.cpp.

References matmul_dlmc_bench::metadata.

◆ THPCppFunction_name()

PyObject * torch::autograd::THPCppFunction_name ( PyObject *  self,
PyObject *  noargs 
)

◆ THPCppFunction_next_functions()

PyObject * torch::autograd::THPCppFunction_next_functions ( THPCppFunction *  self,
PyObject *  hook 
)

◆ THPCppFunction_register_hook()

PyObject * torch::autograd::THPCppFunction_register_hook ( PyObject *  self,
PyObject *  hook 
)

◆ THPCppFunction_register_hook_dict()

PyObject * torch::autograd::THPCppFunction_register_hook_dict ( PyObject *  self,
PyObject *  _var 
)

◆ THPCppFunction_requires_grad()

PyObject * torch::autograd::THPCppFunction_requires_grad ( THPCppFunction *  self,
void *  unused 
)

Definition at line 134 of file python_cpp_function.cpp.

◆ THPVariable__is_view()

static PyObject* torch::autograd::THPVariable__is_view ( PyObject *  self,
PyObject *  args 
)
static

◆ THPVariable__parse_to()

◆ THPVariable__sparse_coo_tensor_unsafe()

◆ THPVariable_apply_()

◆ THPVariable_arange()

◆ THPVariable_as_tensor()

◆ THPVariable_bfloat16()

static PyObject* torch::autograd::THPVariable_bfloat16 ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_bool()

static PyObject* torch::autograd::THPVariable_bool ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_byte()

static PyObject* torch::autograd::THPVariable_byte ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_char()

static PyObject* torch::autograd::THPVariable_char ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_complex_scalar()

◆ THPVariable_contiguous()

◆ THPVariable_copy_()

static PyObject* torch::autograd::THPVariable_copy_ ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_cpu()

static PyObject* torch::autograd::THPVariable_cpu ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_cuda()

◆ THPVariable_data_ptr()

◆ THPVariable_dim()

◆ THPVariable_double()

static PyObject* torch::autograd::THPVariable_double ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_element_size()

static PyObject* torch::autograd::THPVariable_element_size ( PyObject *  self,
PyObject *  args 
)
static

◆ THPVariable_float()

static PyObject* torch::autograd::THPVariable_float ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_float_scalar()

◆ THPVariable_from_numpy()

static PyObject* torch::autograd::THPVariable_from_numpy ( PyObject *  module,
PyObject *  arg 
)
static

◆ THPVariable_full()

◆ THPVariable_get_device() [1/2]

◆ THPVariable_get_device() [2/2]

static PyObject* torch::autograd::THPVariable_get_device ( PyObject *  self_,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_getitem()

◆ THPVariable_half()

static PyObject* torch::autograd::THPVariable_half ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_has_names()

◆ THPVariable_index_scalar()

◆ THPVariable_int()

static PyObject* torch::autograd::THPVariable_int ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_integral_scalar()

◆ THPVariable_invert()

◆ THPVariable_is_contiguous()

◆ THPVariable_item()

◆ THPVariable_length()

◆ THPVariable_long()

static PyObject* torch::autograd::THPVariable_long ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_map2_()

◆ THPVariable_map_()

◆ THPVariable_new()

◆ THPVariable_new_ones()

◆ THPVariable_new_tensor()

◆ THPVariable_nonzero() [1/2]

static PyObject* torch::autograd::THPVariable_nonzero ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_nonzero() [2/2]

◆ THPVariable_numel() [1/2]

static PyObject* torch::autograd::THPVariable_numel ( PyObject *  self,
PyObject *  args 
)
static

◆ THPVariable_numel() [2/2]

static PyObject* torch::autograd::THPVariable_numel ( PyObject *  self_,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_numpy()

◆ THPVariable_pynew()

◆ THPVariable_randint()

◆ THPVariable_range()

◆ THPVariable_requires_grad_()

◆ THPVariable_setitem()

◆ THPVariable_short()

static PyObject* torch::autograd::THPVariable_short ( PyObject *  self,
PyObject *  args,
PyObject *  kwargs 
)
static

◆ THPVariable_size()

◆ THPVariable_sparse_coo_tensor()

◆ THPVariable_storage()

static PyObject* torch::autograd::THPVariable_storage ( PyObject *  self,
PyObject *  arg 
)
static

◆ THPVariable_storage_offset()

static PyObject* torch::autograd::THPVariable_storage_offset ( PyObject *  self_,
PyObject *  args 
)
static

◆ THPVariable_storage_type()

static PyObject* torch::autograd::THPVariable_storage_type ( PyObject *  self,
PyObject *  arg 
)
static

◆ THPVariable_stride()

◆ THPVariable_tensor()

◆ THPVariable_to()

◆ THPVariable_to_type()

◆ THPVariable_tolist()

◆ THPVariable_type()

◆ THPVariable_xpu()

◆ throw_error_for_complex_autograd() [1/2]

void torch::autograd::throw_error_for_complex_autograd ( const Tensor &  tensor,
const char *  name 
)
inline

◆ throw_error_for_complex_autograd() [2/2]

void torch::autograd::throw_error_for_complex_autograd ( const TensorList &  tensorlist,
const char *  name 
)
inline

Definition at line 87 of file VariableTypeUtils.h.

References name, torch::tensor(), and throw_error_for_complex_autograd().

◆ throw_error_out_requires_grad()

void torch::autograd::throw_error_out_requires_grad ( const char *  name)
inline

Definition at line 74 of file VariableTypeUtils.h.

References AT_ERROR, and name.

◆ to_args_scalartypes()

std::vector<ScalarType> torch::autograd::to_args_scalartypes ( TensorList  tensors)
inline

Definition at line 312 of file VariableTypeUtils.h.

References caffe2::tensors.

◆ to_args_sizes()

std::vector<std::vector<int64_t> > torch::autograd::to_args_sizes ( TensorList  tensors)
inline

Definition at line 304 of file VariableTypeUtils.h.

References caffe2::tensors.

◆ to_output_type() [1/2]

template<typename T >
std::enable_if<std::is_same<T,variable_list>::value,T&>::type torch::autograd::to_output_type ( variable_list &  output_list)

Definition at line 197 of file custom_function.h.

◆ to_output_type() [2/2]

template<typename T >
std::enable_if<std::is_same<T,Variable>::value,T>::type torch::autograd::to_output_type ( variable_list &  output_list)

Definition at line 200 of file custom_function.h.

◆ treatSequenceAsTuple()

static bool torch::autograd::treatSequenceAsTuple ( PyObject *  index)
inline static

Definition at line 211 of file python_variable_indexing.cpp.

References index, and THPVariable_Check().

Referenced by wrapTuple().

◆ validate_outputs()

◆ valueToTensor()

◆ variable()

◆ wrap_outputs()

TORCH_API variable_list torch::autograd::wrap_outputs ( const variable_list &  inputs,
tensor_list &&  outputs,
const function_constructor &  ctr 
)

◆ wrapTuple()

static THPObjectPtr torch::autograd::wrapTuple ( PyObject *  index)
inline static

Definition at line 251 of file python_variable_indexing.cpp.

References index, torch::res, and treatSequenceAsTuple().

Referenced by THPVariable_getitem(), and THPVariable_setitem().

Variable Documentation

◆ $

torch::autograd::$
Initial value:
{py_forwards}
static PyMethodDef fft_functions[] = {
${py_method_defs}
{NULL}
}
(This odd entry comes from a code-generation template: {py_forwards} and ${py_method_defs} are placeholders substituted at build time, which Doxygen has indexed verbatim.)

Definition at line 41 of file python_fft_functions.cpp.

Referenced by torch::autograd::generated::initialize_autogenerated_functions().

◆ __all__

list torch.autograd.__all__ = ['Variable', 'Function', 'backward', 'grad_mode']
private

Definition at line 24 of file __init__.py.

◆ _OptionalTensor

torch.autograd._OptionalTensor = Optional[torch.Tensor]
private

Definition at line 26 of file __init__.py.

◆ checkpoint_valid

thread_local bool torch::autograd::checkpoint_valid = true
static

Definition at line 70 of file engine.cpp.

Referenced by call_function(), and torch::autograd::Engine::is_checkpoint_valid().

◆ cpp_function_types

std::unordered_map<std::type_index, THPObjectPtr> torch::autograd::cpp_function_types
static

Definition at line 191 of file python_cpp_function.cpp.

Referenced by functionToPyObject(), and registerCppFunction().

◆ CPU_DEVICE

◆ current_depth

thread_local int torch::autograd::current_depth = 0
static

Definition at line 73 of file engine.cpp.

Referenced by torch::autograd::Engine::execute_with_graph_task().

◆ current_evaluating_node

thread_local std::shared_ptr<Node> torch::autograd::current_evaluating_node = nullptr
static

◆ current_graph_task

thread_local std::shared_ptr<GraphTask> torch::autograd::current_graph_task = nullptr
static

◆ default_methods

struct PyMethodDef torch::autograd::default_methods[]
static
Initial value:
= {
THP_FUNCTION_DEFAULT_METHODS,
{nullptr}
}

Definition at line 162 of file python_cpp_function.cpp.

Referenced by _initFunctionPyTypeObject().

◆ default_properties

struct PyGetSetDef torch::autograd::default_properties[]
static
Initial value:
= {
THP_FUNCTION_DEFAULT_PROPERTIES,
{nullptr}
}

Definition at line 167 of file python_cpp_function.cpp.

Referenced by _initFunctionPyTypeObject().

◆ ERR_BACKWARD_TWICE

TORCH_API const char * torch::autograd::ERR_BACKWARD_TWICE
Initial value:
=
"Trying to backward through the graph a second time, but the saved intermediate "
"results have already been freed. Specify retain_graph=True when calling "
".backward() or autograd.grad() the first time."

Definition at line 122 of file saved_variable.cpp.

Referenced by torch::autograd::CopySlices::apply(), torch::autograd::AutogradContext::get_saved_variables(), torch::autograd::SavedVariable::unpack(), and unpack_saved_variables().
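
A small illustrative reproduction (assumed typical usage, not code from this file): backpropagating through the same graph twice trips this message unless the first call retains the graph.

#include <torch/torch.h>

int main() {
  auto x = torch::randn({3}, torch::requires_grad());
  auto y = (x * x).sum();

  y.backward({}, /*retain_graph=*/true);  // keep the saved intermediates alive
  y.backward();                           // fine: the graph was retained above
  // Without retain_graph=true on the first call, the second backward()
  // would throw with the ERR_BACKWARD_TWICE message.
}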

◆ local_ready_queue

thread_local std::shared_ptr<ReadyQueue> torch::autograd::local_ready_queue = nullptr
static

◆ MAX_DEPTH

constexpr int torch::autograd::MAX_DEPTH = 60
static constexpr

Definition at line 40 of file engine.h.

◆ methods

PyMethodDef torch::autograd::methods[]
static
Initial value:
= {
{"_set_grad_enabled", set_grad_enabled, METH_O, nullptr},
{"is_grad_enabled", is_grad_enabled, METH_NOARGS, nullptr},
{"_set_forward_AD_enabled", set_forward_AD_enabled, METH_O, nullptr},
{"_is_forward_AD_enabled", is_forward_AD_enabled, METH_NOARGS, nullptr},
{"set_autocast_enabled", set_autocast_enabled, METH_O, nullptr},
{"is_autocast_enabled", is_autocast_enabled, METH_NOARGS, nullptr},
{"clear_autocast_cache", clear_autocast_cache, METH_NOARGS, nullptr},
{"autocast_increment_nesting", autocast_increment_nesting, METH_NOARGS, nullptr},
{"autocast_decrement_nesting", autocast_decrement_nesting, METH_NOARGS, nullptr},
{"set_anomaly_enabled", set_anomaly_mode_enabled, METH_O, nullptr},
{"is_anomaly_enabled", is_anomaly_mode_enabled, METH_NOARGS, nullptr},
{"_enter_dual_level", python_enter_dual_level, METH_NOARGS, nullptr},
{"_exit_dual_level", castPyCFunctionWithKeywords(python_exit_dual_level), METH_VARARGS | METH_KEYWORDS, nullptr},
{nullptr, nullptr, 0, nullptr}
}

Definition at line 332 of file init.cpp.

Referenced by python_functions().
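
As a hedged sketch (the exact wiring inside initModule may differ), a PyMethodDef table like this is typically attached to a module via the standard CPython PyModule_AddFunctions call:

#include <Python.h>

// Assumed forward declaration; the real one lives in a torch header.
namespace torch { namespace autograd {
PyMethodDef* python_functions();
}}

// Illustrative helper (not the actual initModule code): expose the
// autograd method table on an existing module object.
static int attach_autograd_methods(PyObject* module) {
  if (PyModule_AddFunctions(module, torch::autograd::python_functions()) < 0) {
    return -1;  // CPython has already set the error indicator
  }
  return 0;
}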

◆ NO_DEVICE

constexpr int torch::autograd::NO_DEVICE = -2
static constexpr

◆ THPFFTVariableFunctionsModule

PyObject* torch::autograd::THPFFTVariableFunctionsModule = NULL
static

Definition at line 48 of file python_fft_functions.cpp.

Referenced by initFFTFunctions().

◆ THPLegacyVariableType

PyTypeObject torch::autograd::THPLegacyVariableType

Definition at line 91 of file python_legacy_variable.cpp.

Referenced by init_legacy_variable().

◆ THPLinalgVariableFunctionsModule

PyObject* torch::autograd::THPLinalgVariableFunctionsModule = NULL
static

Definition at line 34 of file python_linalg_functions.cpp.

Referenced by initLinalgFunctions().

◆ THPNNVariableFunctionsModule

PyObject* torch::autograd::THPNNVariableFunctionsModule = NULL
static

Definition at line 25 of file python_nn_functions.cpp.

Referenced by initNNFunctions(), and THPVariable__parse_to().

◆ THPVariableFunctions

PyTypeObject torch::autograd::THPVariableFunctions
static

Definition at line 498 of file python_torch_functions.cpp.

Referenced by initTorchFunctions().

◆ THPVariableFunctionsModule

PyObject* torch::autograd::THPVariableFunctionsModule = NULL
static

◆ total_depth

thread_local int torch::autograd::total_depth = 0
static

◆ variable_methods

PyMethodDef torch::autograd::variable_methods[]
extern

Referenced by THPVariable_initModule().

◆ variableHooks

at::impl::VariableHooksRegisterer registerVariableHooks(&torch::autograd::variableHooks)

Definition at line 340 of file variable.cpp.

◆ worker_device

thread_local int torch::autograd::worker_device = NO_DEVICE
static