pytorch
1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.
Namespaces | |
namespace | _functions |
namespace | anomaly_mode |
namespace | detail |
namespace | forward_ad |
namespace | function |
namespace | functional |
namespace | generated |
namespace | grad_mode |
namespace | gradcheck |
namespace | impl |
namespace | profiler |
namespace | python |
namespace | utils |
namespace | variable |
namespace | VariableType |
Classes | |
struct | AccumulateGrad |
struct | AnomalyMetadata |
struct | AnomalyMode |
struct | AutogradContext |
Context to save information during forward that can be accessed in backward in custom autograd operations (see torch::autograd::Function for details). More... | |
struct | ComputeRequiresGrad |
struct | CopyBackwards |
struct | CopySlices |
struct | CppFunctionPreHook |
struct | CppNode |
struct | DefaultFunctionType |
struct | DelayedError |
class | DetectAnomalyGuard |
A RAII guard that enables Anomaly Detection Mode. More... | |
struct | Edge |
Represents a particular input of a function. More... | |
struct | Engine |
struct | Error |
struct | ExtractVariables |
struct | Flatten |
struct | ForwardADLevel |
struct | ForwardGrad |
struct | Function |
To use custom autograd operations, implement a Function subclass with static forward and backward functions; a sketch follows this table. More... | |
struct | FunctionPostHook |
struct | FunctionPreHook |
struct | Gather |
struct | GraphRoot |
struct | GraphTask |
class | GraphTaskGuard |
struct | Identity |
struct | InputBuffer |
struct | InputMetadata |
Records type, shape, and device of tensor and, where applicable, the stream the corresponding operation took place on. More... | |
struct | Node |
class | NodeGuard |
struct | NodeTask |
struct | NotImplemented |
struct | PyAnomalyMetadata |
struct | PyFunctionPostHook |
struct | PyFunctionPreHook |
struct | PyNode |
struct | ReadyQueue |
class | SavedVariable |
A snapshot of a variable at a certain version. More... | |
struct | Scatter |
struct | symbolic_unconvertible |
struct | SymbolicContext |
struct | THPCppFunction |
struct | TraceableFunction |
See Node::is_traceable() for definition. More... | |
struct | UndefinedGrad |
struct | UndefinedGradBackward |
struct | VariableHooks |
struct | VariableInfo |
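As a concrete illustration of the Function and AutogradContext entries above, here is a minimal custom autograd operation sketched against the C++ frontend. The operation itself (y = x * x) and all names are purely illustrative, not part of the library:

```cpp
#include <torch/torch.h>

using torch::autograd::AutogradContext;
using torch::autograd::Function;
using torch::autograd::variable_list;

// Hypothetical custom op computing y = x * x with a hand-written backward.
struct Square : public Function<Square> {
  static torch::Tensor forward(AutogradContext* ctx, torch::Tensor x) {
    ctx->save_for_backward({x});  // stash tensors needed for the backward pass
    return x * x;
  }

  static variable_list backward(AutogradContext* ctx, variable_list grad_outputs) {
    auto saved = ctx->get_saved_variables();
    auto x = saved[0];
    // dL/dx = dL/dy * 2x; one gradient per forward input (excluding ctx)
    return {grad_outputs[0] * 2 * x};
  }
};

void use_square() {
  auto x = torch::randn({4}, torch::requires_grad());
  auto y = Square::apply(x);   // runs forward and records the graph
  y.sum().backward();          // x.grad() now holds 2 * x
}
```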
Typedefs | |
using | Variable = at::Tensor |
Variable is exactly the same as Tensor (i.e. More... | |
using | ConstQuantizerPtr = const c10::intrusive_ptr< Quantizer > & |
using | hooks_list = std::vector< std::function< Variable(const Variable &)> > |
template<typename X , typename... Args> | |
using | forward_t = decltype(X::forward(nullptr, std::declval< Args >()...)) |
using | EngineStub = Engine &(*)() |
using | tensor_list = std::vector< at::Tensor > |
using | variable_list = std::vector< Variable > |
using | edge_list = std::vector< Edge > |
using | saved_variable_list = std::vector< SavedVariable > |
using | IndexRange = std::pair< size_t, size_t > |
using | function_constructor = std::function< std::shared_ptr< Node >(edge_list &&)> |
using | GradMode = at::GradMode |
using | AutoGradMode = at::AutoGradMode |
Functions | |
void | initFFTFunctions (PyObject *module) |
void | initLinalgFunctions (PyObject *module) |
static PyObject * | THPVariable__parse_to (PyObject *module, PyObject *args, PyObject *kwargs) |
void | initNNFunctions (PyObject *module) |
Tensor | dispatch_arange (Scalar end, Tensor result) |
Tensor | dispatch_arange (Scalar end, const TensorOptions &options) |
Tensor | dispatch_arange (Scalar start, Scalar end, Scalar step, Tensor result) |
Tensor | dispatch_arange (Scalar start, Scalar end, Scalar step, const TensorOptions &options) |
static PyObject * | THPVariable_arange (PyObject *self, PyObject *args, PyObject *kwargs) |
Tensor | dispatch_range (Scalar start, Scalar end, Scalar step, Tensor result) |
Tensor | dispatch_range (Scalar start, Scalar end, Scalar step, const TensorOptions &options) |
static PyObject * | THPVariable_range (PyObject *self, PyObject *args, PyObject *kwargs) |
Tensor | dispatch_full (IntArrayRef size, Scalar fill_val, const TensorOptions &options) |
Tensor | dispatch_full (IntArrayRef size, Scalar fill_val, c10::optional< DimnameList > names, const TensorOptions &options) |
Tensor | dispatch_full (IntArrayRef size, Scalar fill_val, Tensor result) |
static PyObject * | THPVariable_full (PyObject *self, PyObject *args, PyObject *kwargs) |
Tensor | dispatch_randint (int64_t high, IntArrayRef size, c10::optional< Generator > generator, Tensor result) |
Tensor | dispatch_randint (int64_t high, IntArrayRef size, c10::optional< Generator > generator, const TensorOptions &options) |
Tensor | dispatch_randint (int64_t high, IntArrayRef size, Tensor result) |
Tensor | dispatch_randint (int64_t high, IntArrayRef size, const TensorOptions &options) |
Tensor | dispatch_randint (int64_t low, int64_t high, IntArrayRef size, c10::optional< Generator > generator, Tensor result) |
Tensor | dispatch_randint (int64_t low, int64_t high, IntArrayRef size, c10::optional< Generator > generator, const TensorOptions &options) |
Tensor | dispatch_randint (int64_t low, int64_t high, IntArrayRef size, Tensor result) |
Tensor | dispatch_randint (int64_t low, int64_t high, IntArrayRef size, const TensorOptions &options) |
static PyObject * | THPVariable_randint (PyObject *self_, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_as_tensor (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_from_numpy (PyObject *module, PyObject *arg) |
static Tensor | dispatch_nonzero (const Tensor &self) |
static Tensor | dispatch_nonzero (const Tensor &self, Tensor out) |
static std::vector< Tensor > | dispatch_nonzero_numpy (const Tensor &self) |
static PyObject * | THPVariable_nonzero (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_sparse_coo_tensor (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable__sparse_coo_tensor_unsafe (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_tensor (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_get_device (PyObject *self_, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_numel (PyObject *self_, PyObject *args, PyObject *kwargs) |
void | initTorchFunctions (PyObject *module) |
static PyObject * | THPVariable__is_view (PyObject *self, PyObject *args) |
static PyObject * | THPVariable_apply_ (PyObject *self, PyObject *arg) |
static PyObject * | THPVariable_size (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_stride (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_get_device (PyObject *self_, PyObject *args) |
static PyObject * | THPVariable_has_names (PyObject *self_, PyObject *args) |
static PyObject * | THPVariable_data_ptr (PyObject *self_, PyObject *args) |
static PyObject * | THPVariable_storage_offset (PyObject *self_, PyObject *args) |
static PyObject * | THPVariable_dim (PyObject *self, PyObject *args) |
static PyObject * | THPVariable_numel (PyObject *self, PyObject *args) |
static Tensor | dispatch_contiguous (const Tensor &self, at::MemoryFormat memory_format) |
static PyObject * | THPVariable_contiguous (PyObject *self, PyObject *args, PyObject *kwargs) |
static Tensor | dispatch_copy_ (Tensor &self, const Tensor &other, bool non_blocking) |
static PyObject * | THPVariable_copy_ (PyObject *self, PyObject *args, PyObject *kwargs) |
static double | dispatch_to_CDouble (const Tensor &self) |
static c10::complex< double > | dispatch_to_CComplexDouble (const Tensor &self) |
static int64_t | dispatch_to_CLong (const Tensor &self) |
static bool | dispatch_to_Bool (const Tensor &self) |
static PyObject * | THPVariable_float_scalar (PyObject *self, PyObject *args) |
static PyObject * | THPVariable_complex_scalar (PyObject *self, PyObject *args) |
static PyObject * | THPVariable_integral_scalar (PyObject *self, PyObject *args) |
static PyObject * | THPVariable_index_scalar (PyObject *self, PyObject *args) |
static Tensor | dispatch_invert (const Tensor &self) |
static PyObject * | THPVariable_invert (PyObject *self, PyObject *args) |
static Tensor | dispatch_to (const Tensor &self, Device device, bool non_blocking, bool copy, c10::optional< c10::MemoryFormat > optional_memory_format) |
static Tensor | dispatch_to (const Tensor &self, bool non_blocking, bool copy, c10::optional< c10::MemoryFormat > optional_memory_format) |
static Tensor | dispatch_to (const Tensor &self, ScalarType dtype, bool non_blocking, bool copy, c10::optional< c10::MemoryFormat > optional_memory_format) |
static Tensor | dispatch_to (const Tensor &self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional< c10::MemoryFormat > optional_memory_format) |
static PyObject * | THPVariable_cpu (PyObject *self, PyObject *args, PyObject *kwargs) |
static Tensor | dispatch_nonzero (const Tensor &self) |
static std::vector< Tensor > | dispatch_nonzero_numpy (const Tensor &self) |
static PyObject * | THPVariable_nonzero (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_cuda (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_xpu (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_to_type (PyObject *self, ScalarType scalarType, c10::optional< c10::MemoryFormat > optional_memory_format) |
static PyObject * | THPVariable_byte (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_char (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_double (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_float (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_half (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_int (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_long (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_short (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_bool (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_bfloat16 (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_element_size (PyObject *self, PyObject *args) |
static PyObject * | THPVariable_numpy (PyObject *self, PyObject *arg) |
static PyObject * | THPVariable_requires_grad_ (PyObject *self, PyObject *args, PyObject *kwargs) |
bool | dispatch_is_contiguous (Tensor &self, MemoryFormat memory_format) |
static PyObject * | THPVariable_is_contiguous (PyObject *self_, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_item (PyObject *self, PyObject *args) |
static PyObject * | THPVariable_map_ (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_map2_ (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_new (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_new_ones (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_new_tensor (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_storage (PyObject *self, PyObject *arg) |
static PyObject * | THPVariable_storage_type (PyObject *self, PyObject *arg) |
static PyObject * | THPVariable_to (PyObject *self, PyObject *args, PyObject *kwargs) |
static PyObject * | THPVariable_tolist (PyObject *self, PyObject *args) |
static PyObject * | THPVariable_type (PyObject *self, PyObject *args, PyObject *kwargs) |
Tuple[_OptionalTensor,...] | _make_grads (Sequence[torch.Tensor] outputs, Sequence[_OptionalTensor] grads) |
Tuple[_OptionalTensor,...] | _tensor_or_tensors_to_tuple (Optional[_TensorOrTensors] tensors, int length) |
None | backward (_TensorOrTensors tensors, Optional[_TensorOrTensors] grad_tensors=None, Optional[bool] retain_graph=None, bool create_graph=False, Optional[_TensorOrTensors] grad_variables=None, Optional[Sequence[torch.Tensor]] inputs=None) |
Tuple[torch.Tensor,...] | grad (_TensorOrTensors outputs, _TensorOrTensors inputs, Optional[_TensorOrTensors] grad_outputs=None, Optional[bool] retain_graph=None, bool create_graph=False, bool only_inputs=True, bool allow_unused=False) |
def | _is_checkpoint_valid () |
def | variable (*args, **kwargs) |
variable_list | _make_grads (const variable_list &outputs, const variable_list &grad_outputs) |
variable_list | run_backward (const variable_list &outputs, const variable_list &grad_outputs, bool keep_graph, bool create_graph, const variable_list &inputs, bool allow_unused, bool accumulate_grad) |
void | backward (const variable_list &tensors, const variable_list &grad_tensors={}, c10::optional< bool > retain_graph=c10::nullopt, bool create_graph=false, const variable_list &inputs={}) |
Computes the sum of gradients of given tensors with respect to graph leaves. More... | |
variable_list | grad (const variable_list &outputs, const variable_list &inputs, const variable_list &grad_outputs={}, c10::optional< bool > retain_graph=c10::nullopt, bool create_graph=false, bool allow_unused=false) |
Computes and returns the sum of gradients of outputs with respect to the inputs. More... | |
variable_list | _wrap_outputs (const variable_list &input_vars, const std::unordered_set< at::TensorImpl * > &non_differentiable, const std::unordered_set< at::TensorImpl * > &dirty_inputs, const at::ArrayRef< Variable > raw_outputs, const std::shared_ptr< Node > &cdata) |
void | check_variable_result (const Variable &original, const Variable &result, std::string hook_name) |
template<typename... Args> | |
void | extract_vars (std::vector< bool > &is_var, variable_list &list, Args &&... args) |
template<typename T > | |
std::enable_if< std::is_same< T, variable_list >::value, T & >::type | to_output_type (variable_list &output_list) |
template<typename T > | |
std::enable_if< std::is_same< T, Variable >::value, T >::type | to_output_type (variable_list &output_list) |
static variable_list | call_pre_hooks (Node &fn, variable_list inputs) |
static variable_list | call_post_hooks (Node &fn, variable_list outputs, const variable_list &inputs) |
static bool | is_compatible_type (const at::TensorOptions &expected, const at::TensorOptions &actual) |
void | set_device (int device) |
void | validate_outputs (const edge_list &edges, variable_list &grads, const std::function< std::string(const std::string &)> &format_error) |
static variable_list | call_function (std::shared_ptr< GraphTask > &graph_task, Node *func, InputBuffer &inputBuffer) |
std::atomic< EngineStub > | engine_stub (Engine::get_base_engine) |
void | set_default_engine_stub (EngineStub stub) |
bool | isForwardADEnabled () |
void | setForwardADEnabled (bool value) |
static void | gatherFunctions (Node *func, std::vector< std::shared_ptr< Node > > &stack) |
void | deleteNode (Node *function) |
void | create_gradient_edge (Variable &variable, std::shared_ptr< Node > function) |
Create an Edge between the given variable and the function, which is assumed to be the gradient function of this variable (i.e. More... | |
bool | any_variable_requires_grad (const variable_list &variables) |
Return true if any of the variables in the list require a gradient. More... | |
template<typename... Variables> | |
edge_list | collect_next_edges (Variables &&... variables) |
Return the next edges of all the given variables, or tuples of variables. More... | |
variable_list | wrap_outputs (const variable_list &inputs, tensor_list &&outputs, const function_constructor &ctr) |
Wraps the tensor outputs in variables and creates the grad_fn and sets the grad_fn if necessary. More... | |
void | check_input_variables (const char *name, const variable_list &inputs, int args, int required_args=-1, bool allow_undefined=false) |
Checks that inputs contains exactly args items and that the first required_args items are not nullptr. More... | |
template<typename... Args> | |
bool | compute_requires_grad (Args &&... args) |
void | set_history (at::Tensor &variable, const std::shared_ptr< Node > &grad_fn) |
void | set_history (std::vector< Variable > &&variables, const std::shared_ptr< Node > &grad_fn) |
void | set_history (std::vector< Variable > &variables, const std::shared_ptr< Node > &grad_fn) |
static PyObject * | set_autocast_enabled (PyObject *_unused, PyObject *arg) |
static PyObject * | is_autocast_enabled (PyObject *_unused, PyObject *arg) |
static PyObject * | clear_autocast_cache (PyObject *_unused, PyObject *arg) |
static PyObject * | autocast_increment_nesting (PyObject *_unused, PyObject *arg) |
static PyObject * | autocast_decrement_nesting (PyObject *_unused, PyObject *arg) |
static PyObject * | set_forward_AD_enabled (PyObject *_unused, PyObject *arg) |
static PyObject * | is_forward_AD_enabled (PyObject *_unused, PyObject *arg) |
static PyObject * | set_grad_enabled (PyObject *_unused, PyObject *arg) |
static PyObject * | is_grad_enabled (PyObject *_unused, PyObject *arg) |
static PyObject * | set_anomaly_mode_enabled (PyObject *_unused, PyObject *arg) |
static PyObject * | is_anomaly_mode_enabled (PyObject *_unused, PyObject *arg) |
static PyObject * | python_enter_dual_level (PyObject *_unused, PyObject *arg) |
static PyObject * | python_exit_dual_level (PyObject *_unused, PyObject *args, PyObject *kwargs) |
PyMethodDef * | python_functions () |
static void | accumulate (std::vector< Variable > &buffer, const size_t pos, Variable &&var) |
void | _print_stack (PyObject *stack, const std::string &current_node_name, bool is_parent) |
PyObject * | THPCppFunction_next_functions (THPCppFunction *self, PyObject *hook) |
PyObject * | THPCppFunction_metadata (THPCppFunction *self, void *_unused) |
PyObject * | THPCppFunction_requires_grad (THPCppFunction *self, void *unused) |
PyObject * | THPCppFunction_register_hook_dict (PyObject *self, PyObject *_var) |
PyObject * | THPCppFunction_register_hook (PyObject *self, PyObject *hook) |
PyObject * | THPCppFunction_name (PyObject *self, PyObject *noargs) |
PyTypeObject * | _initFunctionPyTypeObject (PyTypeObject &type, const char *name, PyGetSetDef *function_properties, PyMethodDef *function_methods) |
PyObject * | functionToPyObject (const std::shared_ptr< Node > &cdata) |
void | registerCppFunction (const std::type_info &type, PyTypeObject *pytype) |
PyObject * | registerFunctionHook (Node &fn, PyObject *hook) |
template<typename Ctor > | |
PyObject * | CppFunction_pynew (PyTypeObject *type, PyObject *args, PyObject *kwds) |
template<typename Ctor > | |
PyTypeObject * | createForwardFunctionPyTypeObject (PyTypeObject &type, const char *name, PyGetSetDef *function_properties=nullptr, PyMethodDef *function_methods=nullptr) |
bool | ensure_tuple (THPObjectPtr &obj) |
Cast an object into a tuple, if it is not a tuple already. More... | |
static PyObject * | THPVariable_pynew (PyTypeObject *type, PyObject *args, PyObject *kwds) |
void | init_legacy_variable (PyObject *module) |
void | initTensorImplConversion (PyObject *module) |
Py_ssize_t | THPVariable_length (PyObject *self) |
static int64_t | count_specified_dimensions (PyObject *index) |
static void | invalid_index (PyObject *obj) |
static Variable | sequenceToVariable (c10::DispatchKey dispatch_key, PyObject *seq) |
static Variable | valueToTensor (c10::TensorOptions options, PyObject *value, const at::Device &device) |
static void | checkUnpackSlice (PyObject *index, Py_ssize_t *start_ptr, Py_ssize_t *stop_ptr, Py_ssize_t *step_ptr) |
static void | recordSliceTrace (PyObject *obj) |
static void | recordSelectTrace (const Tensor &index_tensor) |
static Variable | applySlicing (const Variable &self, PyObject *index, variable_list &outIndices, bool is_tracing, const at::Device &self_device, const IntArrayRef &self_sizes, int64_t specified_dims) |
static bool | treatSequenceAsTuple (PyObject *index) |
static THPObjectPtr | wrapTuple (PyObject *index) |
PyObject * | THPVariable_getitem (PyObject *self, PyObject *index) |
int | THPVariable_setitem (PyObject *self, PyObject *index, PyObject *py_value) |
void | handle_view_on_rebase (DifferentiableViewMeta *diff_view_meta, bool indirect) |
void | check_inplace (const Tensor &tensor, bool requires_grad) |
void | check_inplace (const TensorList tensors, bool requires_grad) |
void | throw_error_out_requires_grad (const char *name) |
void | throw_error_for_complex_autograd (const Tensor &tensor, const char *name) |
void | throw_error_for_complex_autograd (const TensorList &tensorlist, const char *name) |
void | rebase_history (Variable &var, std::shared_ptr< Node > grad_fn) |
void | rebase_history (std::vector< Variable > &&vars, std::shared_ptr< Node > grad_fn) |
void | increment_version (Tensor &t) |
template<typename... Args> | |
variable_list | flatten_tensor_args (Args &&... args) |
Tensor | as_view (const Tensor &base, const Tensor &tensor, bool is_bw_differentiable, bool is_fw_differentiable, std::function< Tensor(const Tensor &)> view_func=nullptr, CreationMeta creation_meta=CreationMeta::DEFAULT, bool allow_tensor_metadata_change=true) |
std::vector< Tensor > | as_view (const Tensor &base, std::vector< Tensor > &tensors, bool is_bw_differentiable, bool is_fw_differentiable, CreationMeta creation_meta=CreationMeta::DEFAULT) |
void | check_no_requires_grad (const Tensor &tensor, const char *name) |
void | check_no_requires_grad (const c10::optional< Tensor > &tensor, const char *name) |
void | check_no_requires_grad (TensorList tensors, const char *name) |
void | check_no_requires_grad (const c10::List< c10::optional< Tensor > > &tensors, const char *name) |
std::vector< SavedVariable > | make_saved_variable_list (TensorList tensors) |
std::vector< SavedVariable > | make_saved_variable_list (const c10::List< c10::optional< at::Tensor > > &tensors) |
std::vector< std::vector< int64_t > > | to_args_sizes (TensorList tensors) |
std::vector< ScalarType > | to_args_scalartypes (TensorList tensors) |
Variables | |
static PyObject * | THPFFTVariableFunctionsModule = NULL |
static PyObject * | THPLinalgVariableFunctionsModule = NULL |
static PyObject * | THPNNVariableFunctionsModule = NULL |
static PyObject * | THPVariableFunctionsModule = NULL |
static PyTypeObject | THPVariableFunctions |
list | __all__ = ['Variable', 'Function', 'backward', 'grad_mode'] |
_OptionalTensor = Optional[torch.Tensor] | |
static thread_local int | worker_device = NO_DEVICE |
static thread_local bool | checkpoint_valid = true |
static thread_local int | current_depth = 0 |
static thread_local int | total_depth = 0 |
static thread_local std::shared_ptr< GraphTask > | current_graph_task = nullptr |
static thread_local std::shared_ptr< ReadyQueue > | local_ready_queue = nullptr |
static constexpr int | NO_DEVICE = -2 |
static constexpr int | CPU_DEVICE = -1 |
static constexpr int | MAX_DEPTH = 60 |
static thread_local std::shared_ptr< Node > | current_evaluating_node = nullptr |
static PyMethodDef | methods [] |
static struct PyMethodDef | default_methods [] |
static struct PyGetSetDef | default_properties [] |
static std::unordered_map< std::type_index, THPObjectPtr > | cpp_function_types |
PyTypeObject | THPLegacyVariableType |
PyMethodDef | variable_methods [] |
const char * | ERR_BACKWARD_TWICE |
VariableHooks | variableHooks |
``torch.autograd`` provides classes and functions implementing automatic differentiation of arbitrary scalar-valued functions. It requires minimal changes to existing code: you only need to declare :class:`Tensor` s for which gradients should be computed with the ``requires_grad=True`` keyword. As of now, we only support autograd for floating point :class:`Tensor` types (half, float, double and bfloat16) and complex :class:`Tensor` types (cfloat, cdouble).
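The same machinery is exposed through the C++ frontend documented on this page. A minimal sketch, assuming LibTorch 1.8 is installed and linked; the shapes and the expression being differentiated are illustrative:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Declare a leaf tensor whose gradients should be computed.
  torch::Tensor x = torch::ones({2, 2}, torch::requires_grad());
  // Build a small graph and reduce it to a scalar loss.
  torch::Tensor loss = (x * x + 3 * x).sum();
  // Reverse-mode differentiation; the result is accumulated into x.grad().
  loss.backward();
  std::cout << x.grad() << std::endl;  // d(loss)/dx = 2 * x + 3
  return 0;
}
```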
using torch::autograd::AutoGradMode = typedef at::AutoGradMode |
Definition at line 9 of file grad_mode.h.
using torch::autograd::ConstQuantizerPtr = typedef const c10::intrusive_ptr<Quantizer>& |
Definition at line 42 of file VariableType.h.
using torch::autograd::edge_list = typedef std::vector<Edge> |
Definition at line 33 of file function.h.
using torch::autograd::EngineStub = typedef Engine& (*)() |
using torch::autograd::forward_t = typedef decltype(X::forward(nullptr, std::declval<Args>()...)) |
Definition at line 23 of file custom_function.h.
using torch::autograd::function_constructor = typedef std::function<std::shared_ptr<Node>(edge_list&&)> |
using torch::autograd::GradMode = typedef at::GradMode |
Definition at line 8 of file grad_mode.h.
using torch::autograd::hooks_list = typedef std::vector<std::function<Variable(const Variable&)> > |
Definition at line 8 of file cpp_hook.h.
using torch::autograd::IndexRange = typedef std::pair<size_t, size_t> |
Definition at line 35 of file function.h.
using torch::autograd::saved_variable_list = typedef std::vector<SavedVariable> |
Definition at line 34 of file function.h.
using torch::autograd::tensor_list = typedef std::vector<at::Tensor> |
Definition at line 31 of file function.h.
typedef at::Tensor torch::autograd::Variable |
Variable is exactly the same as Tensor (i.e. we have using Variable = at::Tensor). This means you can perform all the usual mathematical and other operations you can perform on Tensors also on Variables.
The only reason we are keeping the Variable class is backward compatibility with external users' legacy C++ frontend code. Our intention is to eliminate the Variable class in the near future.
Definition at line 23 of file VariableType.h.
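Because the alias is exact, Variable and at::Tensor values can be mixed freely; a small illustrative sketch:

```cpp
#include <torch/torch.h>

void variable_is_tensor() {
  // Variable is just an alias for at::Tensor, kept for backward compatibility.
  torch::autograd::Variable v = torch::randn({3});
  at::Tensor t = v + 1;                 // ordinary tensor math works on a Variable
  torch::autograd::Variable w = t * 2;  // and a Tensor assigns back to a Variable
  (void)w;
}
```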
typedef std::vector< Variable > torch::autograd::variable_list |
Definition at line 32 of file function.h.
PyTypeObject * torch::autograd::_initFunctionPyTypeObject (PyTypeObject &type, const char *name, PyGetSetDef *function_properties, PyMethodDef *function_methods)
Definition at line 172 of file python_cpp_function.cpp.
References default_methods, default_properties, name, and type.
Referenced by torch::autograd::generated::addClass(), createForwardFunctionPyTypeObject(), and torch::autograd::DefaultFunctionType::DefaultFunctionType().
|
private |
Definition at line 242 of file __init__.py.
References _is_checkpoint_valid().
Referenced by _is_checkpoint_valid(), and torch.utils.checkpoint.CheckpointFunction.backward().
variable_list torch::autograd::_make_grads (const variable_list &outputs, const variable_list &grad_outputs)
Definition at line 17 of file autograd.cpp.
References LEGACY_CONTIGUOUS_MEMORY_FORMAT, at::native::ones_like(), at::native::metal::mpscnn::output, outputs, and TORCH_CHECK.
|
private |
Definition at line 28 of file __init__.py.
References _make_grads(), torch::jit.isinstance(), compare-fastrnn-results.str, type, and c10::prim.zip().
Referenced by _make_grads(), backward(), and grad().
void torch::autograd::_print_stack (PyObject *stack, const std::string &current_node_name, bool is_parent)
Definition at line 81 of file python_anomaly_mode.cpp.
References THPPointer< T >::get(), torch.distributions.constraints::stack, THPUtils_unpackString(), and TORCH_WARN.
Referenced by torch::autograd::PyAnomalyMetadata::print_stack().
|
private |
Definition at line 60 of file __init__.py.
References _tensor_or_tensors_to_tuple(), and torch::jit.isinstance().
Referenced by _tensor_or_tensors_to_tuple(), backward(), and grad().
TORCH_API variable_list torch::autograd::_wrap_outputs (const variable_list &input_vars, const std::unordered_set< at::TensorImpl * > &non_differentiable, const std::unordered_set< at::TensorImpl * > &dirty_inputs, const at::ArrayRef< Variable > raw_outputs, const std::shared_ptr< Node > &cdata)
Definition at line 20 of file custom_function.cpp.
References AT_ASSERT, torch::autograd::impl::clear_hooks(), torch::autograd::impl::get_autograd_meta(), inputs, num_outputs, at::native::output_nr(), outputs, torch::autograd::impl::rebase_history(), torch::autograd::impl::set_gradient_edge(), set_history(), c10::ArrayRef< T >::size(), TORCH_CHECK, TORCH_WARN, torch::autograd::impl::try_get_grad_accumulator(), caffe2::var, and torch::autograd::AccumulateGrad::variable.
Referenced by torch::autograd::Function< T >::apply().
|
static |
Definition at line 14 of file input_buffer.cpp.
References TORCH_INTERNAL_ASSERT, and caffe2::var.
Referenced by at::native::_index_put_impl_(), at::native::legacy::cuda::_th_put_(), at::native::legacy::cpu::_th_put_(), torch::autograd::InputBuffer::add(), caffe2::DivFunctor< Context >::Backward(), caffe2::MulFunctor< Context >::Backward(), caffe2::MeanReducer< Context >::Backward(), caffe2::CanonicalDims(), at::native::cat_sparse(), torch::autograd::generated::details::cat_tensors_backward(), c10d::checkSplitSizes(), caffe2::MomentsGradientOp< T, Context >::Compute(), caffe2::math::CopyMatrix< std::uint16_t, CPUContext >(), caffe2::BatchMatMulFP16FakeOp< Context, Engine, USE_ACC_FP16, USE_TMP_ACCUMULATOR, USE_CUSTOM_ACC32 >::DoRunWithType(), caffe2::SpatialBNFakeLoweredFp16Op::DoRunWithType(), caffe2::SpatialBNFakeFp16Op::DoRunWithType(), caffe2::BatchMatMulOp< Context, Engine >::DoRunWithType(), caffe2::RemovePaddingOp< Context >::DoRunWithType(), caffe2::SpatialBNOp< Context >::DoRunWithType(), caffe2::SpatialBNGradientOp< Context >::DoRunWithType(), caffe2::UnpackSegmentsOp< Context >::DoRunWithType2(), c10::TensorImpl::Extend(), at::native::flatten_out(), caffe2::CoshGradientFunctor< Context >::Forward(), caffe2::SinhGradientFunctor< Context >::Forward(), caffe2::CubeGradientFunctor< Context >::Forward(), caffe2::GeluGradientFunctor< Context >::Forward(), caffe2::CbrtGradientFunctor< Context >::Forward(), caffe2::RsqrtGradientFunctor< Context >::Forward(), caffe2::AbsGradientFunctor< Context >::Forward(), caffe2::AcosGradientFunctor< Context >::Forward(), caffe2::AsinGradientFunctor< Context >::Forward(), caffe2::AtanGradientFunctor< Context >::Forward(), caffe2::CosGradientFunctor< Context >::Forward(), caffe2::ErfGradientFunctor< Context >::Forward(), caffe2::SinGradientFunctor< Context >::Forward(), caffe2::SoftsignGradientFunctor< Context >::Forward(), caffe2::TanGradientFunctor< Context >::Forward(), caffe2::TanhGradientFunctor< Context >::Forward(), caffe2::EluGradientFunctor< Context >::Forward(), caffe2::HardSigmoidGradientFunctor< Context >::Forward(), caffe2::ReciprocalGradientFunctor< Context >::Forward(), caffe2::ReluNGradientFunctor< Context >::Forward(), caffe2::ReluGradientFunctor< Context >::Forward(), caffe2::SigmoidGradientFunctor< Context >::Forward(), caffe2::emulator::StdOutputFormatter::get_mean(), caffe2::ConvPoolOpBase< Context >::GetDimsSize(), caffe2::HSoftmaxOpBase< T, Context >::getIntermediateOutputSize(), caffe2::DiagonalFillOp< Context >::GetStepSize(), caffe2::math::Im2ColNdNCHW(), at::native::index_put(), at::native::index_put_(), caffe2::math::utils::IsBothEndsBroadcastBinaryOp(), caffe2::math::utils::IsColwiseBroadcastBinaryOp(), caffe2::ConvDNNLowPOp< T, ReluFused >::IsConvGEMM_(), caffe2::math::utils::IsRowwiseBroadcastBinaryOp(), at::native::linalg_tensorinv(), at::native::linalg_tensorsolve(), caffe2::TensorRTOp::MaybeAdjustOutputShape(), c10::multiply_integers(), caffe2::ConvDNNLowPOp< T, ReluFused >::NoIm2ColNHWC_(), at::prod_intlist(), c10::TensorImpl::ReserveSpace(), caffe2::QTensor< Context >::Resize(), caffe2::BatchBucketOneHotOp< Context >::RunOnDevice(), caffe2::PercentileOp< Context >::RunOnDevice(), caffe2::SplitOp< Context >::RunOnDevice(), caffe2::SplitByLengthsOp< Context >::RunOnDevice(), caffe2::LengthsRangeFillOp< Context >::RunOnDevice(), caffe2::TTSparseLengthsSumGradientOp< T, Context >::RunOnDevice(), caffe2::TopKOp< T, Context >::RunOnDevice(), caffe2::TopKGradientOp< T, Context >::RunOnDevice(), caffe2::LengthsToSegmentIdsOp< Context >::RunOnDevice(), caffe2::utils::ConstTensorView< T >::size(), caffe2::SliceImpl(), 
c10::sum_integers(), at::sum_intlist(), caffe2::ConvDNNLowPPackWeightOp::TakeDepthWise3x3x3FastPath_(), caffe2::TensorInferenceFunction(), caffe2::SimpleNet::TEST_Benchmark(), and caffe2::WeightedSampleDequeueBlobsOp< Context >::WeightedSampleDequeueBlobsOp().
|
inline |
Return true if any of the variables in the list require a gradient.
Definition at line 497 of file function.h.
References variable().
Referenced by torch::autograd::Function< T >::apply(), unpack_input(), and wrap_outputs().
|
inlinestatic |
Definition at line 134 of file python_variable_indexing.cpp.
References checkUnpackSlice(), dim, at::Tensor::dim(), at::indexing::Ellipsis, at::indexing::handleDimInMultiDimIndexing(), c10::attr::idx(), index, caffe2::int64_t, invalid_index(), torch.jit._trace::is_tracing(), c10::isIntegralType(), c10::kByte, at::legacyExtractDispatchKey(), at::indexing::None, recordSelectTrace(), recordSliceTrace(), at::Tensor::scalar_type(), detail::scalar_type(), sequenceToVariable(), size, c10::ArrayRef< T >::size(), torch.cuda.profiler::start(), step, torch.cuda.profiler::stop(), torch::tensor(), THPUtils_checkLong(), THPUtils_unpackLong(), THPVariable_Check(), and THPVariable_Unpack().
Referenced by THPVariable_getitem(), and THPVariable_setitem().
|
inline |
Definition at line 137 of file VariableTypeUtils.h.
References torch::autograd::impl::get_autograd_meta(), at::Tensor::is_view(), isForwardADEnabled(), c10::nullopt, torch::tensor(), and TORCH_CHECK.
|
inline |
Definition at line 204 of file VariableTypeUtils.h.
References torch::autograd::impl::get_autograd_meta(), at::Tensor::is_view(), isForwardADEnabled(), c10::nullopt, torch::tensor(), caffe2::tensors, TORCH_CHECK, and TORCH_INTERNAL_ASSERT.
|
static |
Definition at line 243 of file init.cpp.
References at::autocast::decrement_nesting(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and THPUtils_packInt64().
|
static |
Definition at line 237 of file init.cpp.
References END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, at::autocast::increment_nesting(), and THPUtils_packInt64().
None torch.autograd.backward (_TensorOrTensors tensors, Optional[_TensorOrTensors] grad_tensors=None, Optional[bool] retain_graph=None, bool create_graph=False, Optional[_TensorOrTensors] grad_variables=None, Optional[Sequence[torch.Tensor]] inputs=None)
Computes the sum of gradients of given tensors w.r.t. graph leaves.
The graph is differentiated using the chain rule. If any of ``tensors`` are non-scalar (i.e. their data has more than one element) and require gradient, then the Jacobian-vector product would be computed, in this case the function additionally requires specifying ``grad_tensors``. It should be a sequence of matching length, that contains the "vector" in the Jacobian-vector product, usually the gradient of the differentiated function w.r.t. corresponding tensors (``None`` is an acceptable value for all tensors that don't need gradient tensors).
This function accumulates gradients in the leaves - you might need to zero ``.grad`` attributes or set them to ``None`` before calling it. See :ref:`Default gradient layouts<default-grad-layouts>` for details on the memory layout of accumulated gradients.
.. note:: Using this method with ``create_graph=True`` will create a reference cycle between the parameter and its gradient which can cause a memory leak. We recommend using ``autograd.grad`` when creating the graph to avoid this. If you have to use this function, make sure to reset the ``.grad`` fields of your parameters to ``None`` after use to break the cycle and avoid the leak.
.. note:: If you run any forward ops, create ``grad_tensors``, and/or call ``backward`` in a user-specified CUDA stream context, see :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.
Args:
    tensors (sequence of Tensor): Tensors of which the derivative will be computed.
    grad_tensors (sequence of (Tensor or None)): The "vector" in the Jacobian-vector product, usually gradients w.r.t. each element of corresponding tensors. None values can be specified for scalar Tensors or ones that don't require grad. If a None value would be acceptable for all grad_tensors, then this argument is optional.
    retain_graph (bool, optional): If ``False``, the graph used to compute the grad will be freed. Note that in nearly all cases setting this option to ``True`` is not needed and often can be worked around in a much more efficient way. Defaults to the value of ``create_graph``.
    create_graph (bool, optional): If ``True``, graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to ``False``.
    inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be accumulated into ``.grad``. All other Tensors will be ignored. If not provided, the gradient is accumulated into all the leaf Tensors that were used to compute :attr:`tensors`. All the provided inputs must be leaf Tensors.
Definition at line 68 of file __init__.py.
References _make_grads(), _tensor_or_tensors_to_tuple(), backward(), torch::jit.isinstance(), and c10::aten.len().
Referenced by torch.autograd.function.BackwardCFunction.apply(), backward(), torch.distributed.pipeline.sync.checkpoint.Checkpoint.backward(), torch.utils.checkpoint.CheckpointFunction.backward(), torch::distributed::rpc::PyRRef.backward(), torch.tensor.Tensor.backward(), pt_engine.TorchTensorEngine.backward(), torch.sparse.mm(), and torch.distributed.pipeline.sync._balance.profile.profile_times().
TORCH_API void torch::autograd::backward (const variable_list &tensors, const variable_list &grad_tensors={}, c10::optional< bool > retain_graph=c10::nullopt, bool create_graph=false, const variable_list &inputs={})
Computes the sum of gradients of given tensors with respect to graph leaves.
The graph is differentiated using the chain rule. If any of tensors are non-scalar (i.e. their data has more than one element) and require gradient, then the Jacobian-vector product would be computed; in this case the function additionally requires specifying grad_tensors. It should be a sequence of matching length that contains the "vector" in the Jacobian-vector product, usually the gradient of the differentiated function w.r.t. corresponding tensors (torch::Tensor() is an acceptable value for all tensors that don't need gradient tensors).
This function accumulates gradients in the leaves - you might need to zero them before calling it.
tensors | Tensors of which the derivative will be computed. |
grad_tensors | The "vector" in the Jacobian-vector product, usually gradients w.r.t. each element of corresponding tensors. torch::Tensor() values can be specified for scalar Tensors or ones that don't require grad. If a torch::Tensor() value would be acceptable for all grad_tensors, then this argument is optional. |
retain_graph | If false , the graph used to compute the grad will be freed. Note that in nearly all cases setting this option to true is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph . |
create_graph | If true , graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to false . |
inputs | Inputs w.r.t. which the gradient will be accumulated into at::Tensor::grad . All other Tensors will be ignored. If not provided, the gradient is accumulated into all the leaf Tensors that were used to compute param tensors . All the provided inputs must be leaf Tensors. |
Definition at line 130 of file autograd.cpp.
References _make_grads(), inputs, run_backward(), caffe2::tensors, and c10::optional< T >::value().
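A short usage sketch of this overload (tensor shapes and the expressions being differentiated are illustrative):

```cpp
#include <torch/torch.h>

void backward_free_function() {
  auto w = torch::randn({3}, torch::requires_grad());
  auto loss = (w * w).sum();
  // Equivalent to loss.backward(): accumulate d(loss)/dw into w.grad().
  torch::autograd::backward({loss});
  // For a non-scalar output, a matching grad_tensors entry must be supplied.
  auto y = w * 2;
  torch::autograd::backward({y}, {torch::ones_like(y)});
}
```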
|
static |
Definition at line 635 of file engine.cpp.
References call_post_hooks(), call_pre_hooks(), checkpoint_valid, caffe2.perfkernels.hp_emblookup_codegen::fn, inputs, outputs, validate_outputs(), and torch::autograd::InputBuffer::variables().
Referenced by torch::autograd::Engine::evaluate_function().
|
static |
Definition at line 539 of file engine.cpp.
References caffe2.perfkernels.hp_emblookup_codegen::fn, inputs, and outputs.
Referenced by call_function().
|
static |
Definition at line 532 of file engine.cpp.
References caffe2.perfkernels.hp_emblookup_codegen::fn, and inputs.
Referenced by call_function().
|
inline |
Definition at line 49 of file VariableTypeUtils.h.
References at::Tensor::_base(), AT_ERROR, torch::autograd::impl::get_autograd_meta(), handle_view_on_rebase(), at::GradMode::is_enabled(), at::Tensor::is_view(), at::Tensor::requires_grad(), fastrnns.scratch::requires_grad, and torch::tensor().
Referenced by check_inplace().
|
inline |
Definition at line 68 of file VariableTypeUtils.h.
References check_inplace(), fastrnns.scratch::requires_grad, torch::tensor(), and caffe2::tensors.
TORCH_API void torch::autograd::check_input_variables (const char *name, const variable_list &inputs, int args, int required_args=-1, bool allow_undefined=false)
Checks that inputs contains exactly args items and that the first required_args items are not nullptr. If not specified, required_args defaults to args.
Definition at line 40 of file utils.cpp.
References compare-fastrnn-results::args, inputs, and name.
Referenced by torch::autograd::AccumulateGrad::apply(), torch::autograd::CopyBackwards::apply(), and torch::autograd::CopySlices::apply().
|
inline |
Definition at line 279 of file VariableTypeUtils.h.
References check_no_requires_grad(), name, torch::tensor(), and caffe2::tensors.
|
inline |
Definition at line 267 of file VariableTypeUtils.h.
References check_no_requires_grad(), name, and torch::tensor().
|
inline |
Definition at line 257 of file VariableTypeUtils.h.
References name, torch::tensor(), and caffe2::var.
Referenced by check_no_requires_grad().
|
inline |
Definition at line 273 of file VariableTypeUtils.h.
References check_no_requires_grad(), name, torch::tensor(), and caffe2::tensors.
TORCH_API void torch::autograd::check_variable_result (const Variable &original, const Variable &result, std::string hook_name)
Definition at line 160 of file custom_function.cpp.
References hook_name().
Referenced by check_single_result().
|
inlinestatic |
Definition at line 111 of file python_variable_indexing.cpp.
References index, and THPUtils_unpackSlice.
Referenced by applySlicing(), THPVariable_getitem(), and THPVariable_setitem().
|
static |
Definition at line 230 of file init.cpp.
References at::autocast::clear_cache(), END_HANDLE_TH_ERRORS, and HANDLE_TH_ERRORS.
edge_list torch::autograd::collect_next_edges | ( | Variables &&... | variables | ) |
Return the next edges of all the given variables, or tuples of variables.
Definition at line 506 of file function.h.
References at::IterArgs< F >::apply(), and torch::autograd::detail::MakeNextFunctionList::next_edges.
Referenced by torch::distributed::autograd::addSendRpcBackward(), torch::autograd::Function< T >::apply(), torch::autograd::Scatter::apply(), torch::autograd::Gather::apply(), torch::autograd::VariableHooks::grad_fn(), unpack_input(), and wrap_outputs().
|
inline |
Definition at line 50 of file utils.h.
References at::IterArgs< F >::apply(), compare-fastrnn-results::args, at::GradMode::is_enabled(), and torch::autograd::ComputeRequiresGrad::out.
Referenced by torch::distributed::autograd::addRecvRpcBackward(), torch::autograd::Scatter::apply(), torch::autograd::Gather::apply(), and torch::distributed::autograd::getMessageWithAutograd().
|
inlinestatic |
Definition at line 56 of file python_variable_indexing.cpp.
References torch::check_has_torch_function(), index, caffe2::int64_t, c10::kBool, c10::kByte, size, THPVariable_Check(), THPVariable_CheckExact(), and caffe2::var.
Referenced by THPVariable_getitem(), and THPVariable_setitem().
PyObject* torch::autograd::CppFunction_pynew (PyTypeObject *type, PyObject *args, PyObject *kwds)
Definition at line 19 of file python_cpp_function.h.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, THPPointer< T >::get(), HANDLE_TH_ERRORS, THPPointer< T >::release(), and type.
|
inline |
Create an Edge between the given variable and the function, which is assumed to be the gradient function of this variable (i.e. the function through which this variable is backpropagated during the backward pass). This sets the grad_fn property of the variable. This function assumes that the Variable is a new input to the gradient function and that its input_nr is thus equal to function->num_inputs(). Additionally, it increments the Node's number of inputs by one. Approximately equivalent to variable.set_gradient_edge(function, function->add_input_metadata(variable.dispatch_type(), variable.sizes())). If you don't want the Node's num_inputs to be incremented, use set_gradient_edge directly.
Definition at line 488 of file function.h.
References torch::autograd::impl::set_gradient_edge(), and variable().
Referenced by wrap_outputs().
PyTypeObject* torch::autograd::createForwardFunctionPyTypeObject (PyTypeObject &type, const char *name, PyGetSetDef *function_properties=nullptr, PyMethodDef *function_methods=nullptr)
Definition at line 56 of file python_cpp_function.h.
References _initFunctionPyTypeObject(), name, and type.
Definition at line 81 of file function.cpp.
References gatherFunctions(), and torch.distributions.constraints::stack.
Referenced by torch::autograd::Function< T >::apply().
|
inline |
Definition at line 69 of file python_torch_functions.cpp.
References at::native::arange(), matmul_dlmc_bench::end, torch::utils::maybe_initialize_cuda(), and caffe2.perfkernels.hp_emblookup_codegen::options.
Definition at line 64 of file python_torch_functions.cpp.
References at::native::arange_out(), and matmul_dlmc_bench::end.
Referenced by THPVariable_arange().
|
inline |
Definition at line 80 of file python_torch_functions.cpp.
References at::native::arange(), matmul_dlmc_bench::end, torch::utils::maybe_initialize_cuda(), caffe2.perfkernels.hp_emblookup_codegen::options, torch.cuda.profiler::start(), and step.
|
inline |
Definition at line 75 of file python_torch_functions.cpp.
References at::native::arange_out(), matmul_dlmc_bench::end, torch.cuda.profiler::start(), and step.
|
static |
Definition at line 229 of file python_variable_methods.cpp.
References at::device_of(), and c10::memory_format().
Referenced by THPVariable_contiguous().
|
static |
Definition at line 272 of file python_variable_methods.cpp.
References at::device_of(), and at::meta::other.
Referenced by THPVariable_copy_().
|
inline |
Definition at line 201 of file python_torch_functions.cpp.
References torch::jit::full, torch::utils::maybe_initialize_cuda(), microbenchmarks::names, caffe2.perfkernels.hp_emblookup_codegen::options, and size.
|
inline |
Definition at line 192 of file python_torch_functions.cpp.
References torch::jit::full, torch::utils::maybe_initialize_cuda(), caffe2.perfkernels.hp_emblookup_codegen::options, and size.
Referenced by THPVariable_full().
|
inline |
Definition at line 211 of file python_torch_functions.cpp.
References torch::jit::full_out, and size.
Definition at line 389 of file python_variable_methods.cpp.
References at::device_of().
Referenced by THPVariable_invert().
|
inline |
Definition at line 764 of file python_variable_methods.cpp.
References c10::memory_format().
Referenced by THPVariable_is_contiguous().
Definition at line 389 of file python_torch_functions.cpp.
References at::device_of().
Referenced by THPVariable_nonzero().
Definition at line 454 of file python_variable_methods.cpp.
References at::device_of().
Definition at line 395 of file python_torch_functions.cpp.
References at::device_of(), and out.
Definition at line 401 of file python_torch_functions.cpp.
References at::device_of().
Referenced by THPVariable_nonzero().
Definition at line 460 of file python_variable_methods.cpp.
References at::device_of().
|
inline |
Definition at line 277 of file python_torch_functions.cpp.
References torch::utils::maybe_initialize_cuda(), caffe2.perfkernels.hp_emblookup_codegen::options, at::native::randint(), and size.
|
inline |
Definition at line 273 of file python_torch_functions.cpp.
References at::native::randint_out(), and size.
Referenced by THPVariable_randint().
|
inline |
Definition at line 286 of file python_torch_functions.cpp.
References torch::utils::maybe_initialize_cuda(), caffe2.perfkernels.hp_emblookup_codegen::options, at::native::randint(), and size.
|
inline |
Definition at line 282 of file python_torch_functions.cpp.
References at::native::randint_out(), and size.
|
inline |
Definition at line 295 of file python_torch_functions.cpp.
References torch::utils::maybe_initialize_cuda(), caffe2.perfkernels.hp_emblookup_codegen::options, at::native::randint(), and size.
|
inline |
Definition at line 291 of file python_torch_functions.cpp.
References at::native::randint_out(), and size.
|
inline |
Definition at line 304 of file python_torch_functions.cpp.
References torch::utils::maybe_initialize_cuda(), caffe2.perfkernels.hp_emblookup_codegen::options, at::native::randint(), and size.
|
inline |
Definition at line 300 of file python_torch_functions.cpp.
References at::native::randint_out(), and size.
|
inline |
Definition at line 150 of file python_torch_functions.cpp.
References matmul_dlmc_bench::end, torch::utils::maybe_initialize_cuda(), caffe2.perfkernels.hp_emblookup_codegen::options, c10::prim::range(), torch.cuda.profiler::start(), and step.
|
inline |
Definition at line 144 of file python_torch_functions.cpp.
References at::device_of(), matmul_dlmc_bench::end, torch.cuda.profiler::start(), and step.
Referenced by THPVariable_range().
|
static |
Definition at line 418 of file python_variable_methods.cpp.
References c10::aten::copy(), c10::memory_format(), and caffe2.perfkernels.hp_emblookup_codegen::options.
|
static |
Definition at line 408 of file python_variable_methods.cpp.
References c10::aten::copy(), device, c10::memory_format(), and caffe2.perfkernels.hp_emblookup_codegen::options.
Referenced by THPVariable_cpu(), THPVariable_cuda(), THPVariable_to(), THPVariable_to_type(), THPVariable_type(), and THPVariable_xpu().
|
static |
Definition at line 429 of file python_variable_methods.cpp.
References c10::aten::copy(), device, and caffe2::dtype.
|
static |
Definition at line 423 of file python_variable_methods.cpp.
References c10::aten::copy(), and caffe2::dtype.
|
static |
Definition at line 324 of file python_variable_methods.cpp.
References at::device_of(), and at::native::numel().
Referenced by THPVariable_item().
|
static |
Definition at line 306 of file python_variable_methods.cpp.
References at::device_of(), and at::native::numel().
Referenced by THPVariable_complex_scalar(), and THPVariable_item().
|
static |
Definition at line 297 of file python_variable_methods.cpp.
References at::device_of(), and at::native::numel().
Referenced by THPVariable_float_scalar(), THPVariable_integral_scalar(), and THPVariable_item().
|
static |
Definition at line 315 of file python_variable_methods.cpp.
References at::device_of(), caffe2::int64_t, and at::native::numel().
Referenced by THPVariable_index_scalar(), THPVariable_integral_scalar(), and THPVariable_item().
std::atomic<EngineStub> torch::autograd::engine_stub | ( | Engine::get_base_engine | ) |
Referenced by torch::autograd::Engine::get_default_engine(), and set_default_engine_stub().
|
inline |
Cast an object into a tuple, if it is not a tuple already.
Returns true if the original object was not a tuple.
Definition at line 61 of file python_function.h.
References THPPointer< T >::get(), and THPPointer< T >::release().
Referenced by torch::autograd::PyNode::apply(), process_outputs(), and THPFunction_do_backward().
|
inline |
Definition at line 192 of file custom_function.h.
References at::IterArgs< F >::apply(), compare-fastrnn-results::args, and c10::prim::list().
Referenced by torch::autograd::Function< T >::apply().
|
inline |
Definition at line 129 of file VariableTypeUtils.h.
References compare-fastrnn-results::args, torch::count_tensors(), Flatten, and out.
Definition at line 202 of file python_cpp_function.cpp.
References cpp_function_types, caffe2.perfkernels.hp_emblookup_codegen::fn, THPPointer< T >::get(), caffe2::it, THPPointer< T >::release(), type, and torch::autograd::DefaultFunctionType::type.
Referenced by torch::autograd::PyAnomalyMetadata::assign_parent(), THPCppFunction_next_functions(), THPFunction_next_functions(), and THPVariable_get_grad_fn().
|
static |
Definition at line 47 of file function.cpp.
References torch::autograd::Node::next_edges(), torch::autograd::Node::release_variables(), and torch.distributions.constraints::stack.
Referenced by deleteNode().
Tuple[torch.Tensor, ...] torch.autograd.grad (_TensorOrTensors outputs, _TensorOrTensors inputs, Optional[_TensorOrTensors] grad_outputs=None, Optional[bool] retain_graph=None, bool create_graph=False, bool only_inputs=True, bool allow_unused=False)
Computes and returns the sum of gradients of outputs w.r.t. the inputs.
``grad_outputs`` should be a sequence of length matching ``output`` containing the "vector" in the Jacobian-vector product, usually the pre-computed gradients w.r.t. each of the outputs. If an output doesn't require_grad, then the gradient can be ``None``.
If ``only_inputs`` is ``True``, the function will only return a list of gradients w.r.t. the specified inputs. If it's ``False``, then gradient w.r.t. all remaining leaves will still be computed, and will be accumulated into their ``.grad`` attribute.
.. note:: If you run any forward ops, create ``grad_outputs``, and/or call ``grad`` in a user-specified CUDA stream context, see :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.
Args:
    outputs (sequence of Tensor): outputs of the differentiated function.
    inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be returned (and not accumulated into ``.grad``).
    grad_outputs (sequence of Tensor): The "vector" in the Jacobian-vector product. Usually gradients w.r.t. each output. None values can be specified for scalar Tensors or ones that don't require grad. If a None value would be acceptable for all grad_tensors, then this argument is optional. Default: None.
    retain_graph (bool, optional): If ``False``, the graph used to compute the grad will be freed. Note that in nearly all cases setting this option to ``True`` is not needed and often can be worked around in a much more efficient way. Defaults to the value of ``create_graph``.
    create_graph (bool, optional): If ``True``, graph of the derivative will be constructed, allowing to compute higher order derivative products. Default: ``False``.
    allow_unused (bool, optional): If ``False``, specifying inputs that were not used when computing outputs (and therefore their grad is always zero) is an error. Defaults to ``False``.
Definition at line 150 of file __init__.py.
References _make_grads(), _tensor_or_tensors_to_tuple(), grad(), torch.handle_torch_function(), torch.overrides.has_torch_function, torch::jit.isinstance(), and c10::aten.len().
Referenced by torch.autograd.functional._autograd_grad(), torch::autograd::generated::details._euclidean_dist_backward(), torch::autograd::generated::details._fused_dropout_backward(), torch.distributions.kl._kl_expfamily_expfamily(), torch::autograd::generated::details._sparse_addmm_sparse_backward(), torch::autograd::generated::details.angle_backward(), torch::autograd::AccumulateGrad.apply(), torch::autograd::CopyBackwards.apply(), torch::autograd::UndefinedGradBackward.apply(), torch::autograd::CopySlices.apply(), torch::autograd::generated::details.as_strided_backward(), torch::autograd::generated::details.atan2_backward(), torch::autograd::generated::details.binary_cross_entropy_double_backward(), torch::autograd::generated::details.binary_cross_entropy_double_backward_grad_output(), torch::autograd::generated::details.cat_tensors_backward(), torch::autograd::generated::details.cholesky_backward(), torch::autograd::generated::details.cholesky_inverse_backward(), torch::autograd::generated::details.clamp_backward(), torch::autograd::generated::details.constant_pad_nd_backward(), torch.nn.grad.conv1d_input(), torch.nn.grad.conv1d_weight(), torch.nn.grad.conv2d_input(), torch.nn.grad.conv2d_weight(), torch.nn.grad.conv3d_input(), torch.nn.grad.conv3d_weight(), torch::autograd::generated::details.copysign_tensor_self_backward(), torch::autograd::generated::details.deg2rad_backward(), torch::autograd::generated::details.det_backward(), torch::autograd::generated::details.div_tensor_other_backward(), torch::autograd::generated::details.div_tensor_self_backward(), torch::autograd::generated::details.elu_double_backward(), torch::autograd::generated::details.embedding_dense_double_backward(), torch.distributions.exp_family.ExponentialFamily.entropy(), torch::autograd::ForwardADLevel.erase(), torch::autograd::generated::details.evenly_distribute_backward(), torch::autograd::generated::details.fft_c2r_backward(), torch::autograd::generated::details.fft_r2c_backward(), torch.autograd.gradcheck.get_analytical_jacobian(), torch::autograd::generated::details.glu_double_backward(), torch::autograd::generated::details.glu_double_backward_grad_output(), grad(), torch.autograd.gradcheck.gradcheck(), torch.autograd.gradcheck.gradgradcheck(), torch::autograd::generated::details.index_backward(), torch::autograd::generated::details.infinitely_differentiable_logit_backward(), torch::autograd::ForwardADLevel.insert(), torch::autograd::generated::details.kl_div_double_backward_grad_output(), torch::autograd::generated::details.l1_loss_double_backward(), torch::autograd::generated::details.l1_loss_double_backward_grad_output(), torch::autograd::generated::details.log1p_backward(), torch::autograd::generated::details.log_sigmoid_double_backward(), torch::autograd::generated::details.log_softmax_double_backward(), torch::autograd::generated::details.logcumsumexp_backward(), torch::autograd::generated::details.logdet_backward(), torch::autograd::generated::details.logsumexp_backward(), torch::autograd::generated::details.masked_scatter_backward(), torch::autograd::generated::details.max_pool_double_backward(), torch::autograd::generated::details.mean_backward(), torch::autograd::generated::details.mm_mat1_backward(), torch::autograd::generated::details.mm_mat2_backward(), torch::autograd::generated::details.mse_loss_double_backward(), torch::autograd::generated::details.mse_loss_double_backward_grad_output(), torch::autograd::generated::details.mul_tensor_backward(), 
torch::autograd::generated::details.mvlgamma_backward(), torch::autograd::generated::details.nansum_backward(), torch::autograd::generated::details.norm_backward(), torch::autograd::utils.obeys_layout_contract(), torch::autograd::generated::details.permute_backwards(), torch::autograd::generated::details.pow_backward(), torch::autograd::generated::details.pow_backward_exponent(), torch::autograd::generated::details.pow_backward_self(), torch::autograd::generated::details.prod_backward(), torch::autograd::generated::details.prod_safe_zeros_backward(), torch::autograd::generated::details.rad2deg_backward(), torch::autograd::generated::details.renorm_backward(), torch::autograd::generated::details.repeat_backward(), torch::autograd::generated::details.scale_grad_by_count(), torch::autograd::generated::details.sgn_backward(), torch::autograd::generated::details.slice_backward_wrapper(), torch::autograd::generated::details.smooth_l1_loss_double_backward(), torch::autograd::generated::details.smooth_l1_loss_double_backward_grad_output(), torch::autograd::generated::details.soft_margin_loss_double_backward(), torch::autograd::generated::details.soft_margin_loss_double_backward_grad_output(), torch::autograd::generated::details.softmax_double_backward(), torch::autograd::generated::details.softplus_double_backward(), torch::autograd::generated::details.solve_backward_A(), torch::autograd::generated::details.solve_backward_self(), torch::autograd::generated::details.sparse_sparse_matmul_backward(), torch::autograd::generated::details.std_backward(), torch::autograd::generated::details.sum_backward(), torch.autograd.gradcheck.test_batched_grad(), validate_outputs(), torch::autograd::generated::details.var_backward(), torch::autograd::generated::details.var_std_mean_backward(), torch.jit._trace.verify(), and torch._vmap_internals.vmap().
TORCH_API variable_list torch::autograd::grad(const variable_list & outputs, const variable_list & inputs, const variable_list & grad_outputs = {}, c10::optional< bool > retain_graph = c10::nullopt, bool create_graph = false, bool allow_unused = false)
Computes and returns the sum of gradients of outputs with respect to the inputs.
grad_outputs should be a sequence of length matching outputs, containing the "vector" in the Jacobian-vector product, usually the pre-computed gradients w.r.t. each of the outputs. If an output doesn't require_grad, then the gradient can be torch::Tensor().
Parameters:
outputs: outputs of the differentiated function.
inputs: Inputs w.r.t. which the gradient will be returned (and not accumulated into at::Tensor::grad).
grad_outputs: The "vector" in the Jacobian-vector product. Usually gradients w.r.t. each output. torch::Tensor() values can be specified for scalar Tensors or ones that don't require grad. If a torch::Tensor() value would be acceptable for all grad_tensors, then this argument is optional. Default: {}.
retain_graph: If false, the graph used to compute the grad will be freed. Note that in nearly all cases setting this option to true is not needed and can often be worked around in a much more efficient way. Defaults to the value of create_graph.
create_graph: If true, the graph of the derivative will be constructed, allowing higher-order derivative products to be computed. Default: false.
allow_unused: If false, specifying inputs that were not used when computing outputs (and therefore whose grad is always zero) is an error. Defaults to false.
Definition at line 143 of file autograd.cpp.
References _make_grads(), inputs, outputs, run_backward(), and c10::optional< T >::value().
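For illustration, a minimal C++ sketch of calling this overload; the tensors, sizes, and values below are made up for the example:

    #include <iostream>
    #include <torch/torch.h>

    int main() {
      // Record an autograd graph: x requires grad.
      auto x = torch::randn({3}, torch::requires_grad());
      auto y = x * x + 2 * x;

      // Jacobian-vector product with a vector of ones, i.e. d(sum(y))/dx.
      auto grads = torch::autograd::grad(
          /*outputs=*/{y},
          /*inputs=*/{x},
          /*grad_outputs=*/{torch::ones_like(y)});

      // grads[0] holds the gradient w.r.t. x: 2 * x + 2.
      std::cout << grads[0] << std::endl;
      return 0;
    }

Since neither create_graph nor retain_graph is set, the graph is freed after the call and the returned gradients do not themselves require grad.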
void torch::autograd::handle_view_on_rebase(DifferentiableViewMeta * diff_view_meta, bool indirect)
See NOTE [ View + Inplace detection ] for justification of the logic below
Definition at line 493 of file variable.cpp.
References c10::str(), TORCH_CHECK, TORCH_INTERNAL_ASSERT, and TORCH_WARN.
Referenced by check_inplace(), and torch::autograd::VariableHooks::grad_fn().
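As user-facing context for the NOTE referenced above, this check runs when an in-place operation touches a differentiable view and the history of the base has to be rebased. A minimal sketch assuming a non-leaf base (tensor names are illustrative; the rebase itself happens inside autograd, not in user code):

    #include <iostream>
    #include <torch/torch.h>

    int main() {
      auto x = torch::zeros({4}, torch::requires_grad());
      auto base = x * 2;                 // non-leaf tensor
      auto view = base.narrow(0, 0, 2);  // differentiable view of `base`

      // In-place write through the view: autograd rewrites the grad_fn of
      // `base` (a CopySlices node). handle_view_on_rebase is part of that
      // machinery and rejects the unsupported cases described in the NOTE.
      view.add_(1);

      base.sum().backward();
      std::cout << x.grad() << std::endl;  // every element is 2
      return 0;
    }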
inline |
Definition at line 116 of file VariableTypeUtils.h.
References torch::autograd::impl::bump_version().
void torch::autograd::init_legacy_variable(PyObject * module)
Definition at line 132 of file python_legacy_variable.cpp.
References module, and THPLegacyVariableType.
Referenced by initModule().
void torch::autograd::initFFTFunctions(PyObject * module)
Definition at line 50 of file python_fft_functions.cpp.
References torch::fft::fft, module, and THPFFTVariableFunctionsModule.
Referenced by initModule().
void torch::autograd::initLinalgFunctions(PyObject * module)
Definition at line 36 of file python_linalg_functions.cpp.
References module, and THPLinalgVariableFunctionsModule.
Referenced by initModule().
void torch::autograd::initNNFunctions(PyObject * module)
Definition at line 81 of file python_nn_functions.cpp.
References module, and THPNNVariableFunctionsModule.
Referenced by initModule().
void torch::autograd::initTensorImplConversion(PyObject * module)
Definition at line 801 of file python_variable.cpp.
References at::cpp_custom_type_hack::cast(), c10::intrusive_ptr< TTarget, NullType >::get(), at::Tensor::getIntrusivePtr(), handle, matmul_dlmc_bench::m, module, diagnose_protobuf::p, ptr, torch::tensor(), TORCH_CHECK, c10::intrusive_ptr< TTarget, NullType >::unsafe_reclaim_from_nonowning(), and at::Tensor::wrap_tensor_impl().
Referenced by THPVariable_initModule().
void torch::autograd::initTorchFunctions(PyObject * module)
Definition at line 539 of file python_torch_functions.cpp.
References module, THPVariableFunctions, and THPVariableFunctionsModule.
Referenced by THPVariable_initModule().
inline static
Definition at line 80 of file python_variable_indexing.cpp.
Referenced by applySlicing().
static |
Definition at line 299 of file init.cpp.
References END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and torch::autograd::AnomalyMode::is_enabled().
Definition at line 220 of file init.cpp.
References END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and at::autocast::is_enabled().
static |
Definition at line 546 of file engine.cpp.
References c10::TensorOptions::device(), c10::TensorOptions::is_sparse(), and c10::TensorOptions::type_equal().
Referenced by validate_outputs().
static |
Definition at line 259 of file init.cpp.
References END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and isForwardADEnabled().
Definition at line 279 of file init.cpp.
References END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and at::GradMode::is_enabled().
bool TORCH_API torch::autograd::isForwardADEnabled()
Definition at line 82 of file forward_grad.cpp.
Referenced by as_view(), and is_forward_AD_enabled().
inline |
Definition at line 294 of file VariableTypeUtils.h.
References c10::fmap(), torch::tensor(), and caffe2::tensors.
inline |
Definition at line 288 of file VariableTypeUtils.h.
References c10::fmap(), torch::tensor(), and caffe2::tensors.
static |
Definition at line 309 of file init.cpp.
References END_HANDLE_TH_ERRORS, torch.autograd.forward_ad::enter_dual_level(), HANDLE_TH_ERRORS, caffe2::int64_t, and torch::autograd::utils::wrap().
static |
Definition at line 317 of file init.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, torch.autograd.forward_ad::exit_dual_level(), HANDLE_TH_ERRORS, and configure::parser.
PyMethodDef * torch::autograd::python_functions()
inline |
Definition at line 102 of file VariableTypeUtils.h.
References at::native::output_nr(), torch::autograd::impl::rebase_history(), and caffe2::var.
Definition at line 95 of file VariableTypeUtils.h.
References torch::autograd::impl::rebase_history(), and caffe2::var.
inline static
Definition at line 130 of file python_variable_indexing.cpp.
References c10::IntType::get(), and torch::jit::tracer::ArgumentStash::stashValue().
Referenced by applySlicing(), THPVariable_getitem(), and THPVariable_setitem().
inline static
Definition at line 117 of file python_variable_indexing.cpp.
References c10::IntType::get(), torch::jit::tracer::ArgumentStash::stashValue(), THPVariable_Check(), and THPVariable_Unpack().
Referenced by applySlicing(), THPVariable_getitem(), and THPVariable_setitem().
void torch::autograd::registerCppFunction(const std::type_info & type, PyTypeObject * pytype)
Definition at line 240 of file python_cpp_function.cpp.
References cpp_function_types, and type.
Referenced by addClass(), and torch::autograd::generated::addClass().
Definition at line 246 of file python_cpp_function.cpp.
References caffe2.perfkernels.hp_emblookup_codegen::fn, THPPointer< T >::get(), handle, torch::res, and THPFunctionClass.
Referenced by THPCppFunction_register_hook(), and THPFunction_register_hook().
variable_list torch::autograd::run_backward(const variable_list & outputs, const variable_list & grad_outputs, bool keep_graph, bool create_graph, const variable_list & inputs, bool allow_unused, bool accumulate_grad)
Definition at line 65 of file autograd.cpp.
References torch::autograd::Engine::execute(), torch::autograd::Edge::function, torch::autograd::Engine::get_default_engine(), torch::autograd::impl::gradient_edge(), caffe2::input, inputs, num_inputs, at::native::metal::mpscnn::output, at::native::output_nr(), outputs, TORCH_CHECK, and torch::autograd::impl::try_get_grad_accumulator().
Referenced by backward(), and grad().
inline static
Definition at line 86 of file python_variable_indexing.cpp.
References torch::utils::indexing_tensor_from_data(), c10::kLong, and c10::nullopt.
Referenced by applySlicing().
static |
Definition at line 289 of file init.cpp.
References setup::arg, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and torch::autograd::AnomalyMode::set_enabled().
static |
Definition at line 210 of file init.cpp.
References setup::arg, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and at::autocast::set_enabled().
TORCH_API void torch::autograd::set_default_engine_stub(EngineStub stub)
Definition at line 994 of file engine.cpp.
References engine_stub().
Referenced by THPEngine_initModule().
void torch::autograd::set_device(int device)
Definition at line 552 of file engine.cpp.
References c10::COMPILE_TIME_MAX_DEVICE_TYPES, CPU_DEVICE, device, c10::impl::device_guard_impl_registry, c10::lib::cpp17::detail_::has_addressof_impl::impl(), and worker_device.
Referenced by torch::distributed::autograd::DistEngine::execute_graph_task_until_ready_queue_empty(), torch::autograd::Engine::execute_with_graph_task(), torch::autograd::Engine::reentrant_thread_init(), and torch::autograd::Engine::thread_init().
static |
Definition at line 249 of file init.cpp.
References setup::arg, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and setForwardADEnabled().
static |
Definition at line 269 of file init.cpp.
References setup::arg, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and at::GradMode::set_enabled().
inline |
Definition at line 57 of file utils.h.
References AT_ASSERT, at::native::output_nr(), torch::autograd::impl::set_gradient_edge(), TORCH_INTERNAL_ASSERT, and variable().
Referenced by _wrap_outputs(), torch::distributed::autograd::addRecvRpcBackward(), torch::autograd::Scatter::apply(), torch::autograd::Gather::apply(), and set_history().
inline |
Definition at line 73 of file utils.h.
References set_history(), and variable().
inline |
Definition at line 81 of file utils.h.
References set_history(), and variable().
void TORCH_API torch::autograd::setForwardADEnabled(bool value)
Definition at line 86 of file forward_grad.cpp.
References value.
Referenced by set_forward_AD_enabled().
PyObject * torch::autograd::THPCppFunction_metadata(THPCppFunction * self, void * _unused)
Definition at line 126 of file python_cpp_function.cpp.
References matmul_dlmc_bench::metadata.
Definition at line 157 of file python_cpp_function.cpp.
References caffe2.perfkernels.hp_emblookup_codegen::fn, and THPUtils_packString().
PyObject * torch::autograd::THPCppFunction_next_functions(THPCppFunction * self, PyObject * hook)
Definition at line 106 of file python_cpp_function.cpp.
References functionToPyObject(), THPPointer< T >::get(), THPPointer< T >::release(), and THPUtils_packUInt32().
Definition at line 151 of file python_cpp_function.cpp.
References caffe2.perfkernels.hp_emblookup_codegen::fn, and registerFunctionHook().
Definition at line 138 of file python_cpp_function.cpp.
References caffe2.perfkernels.hp_emblookup_codegen::fn, THPVariable_Check(), and caffe2::var.
PyObject * torch::autograd::THPCppFunction_requires_grad(THPCppFunction * self, void * unused)
Definition at line 134 of file python_cpp_function.cpp.
Definition at line 55 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and torch::handle_torch_function().
static |
Definition at line 27 of file python_nn_functions.cpp.
References compare-fastrnn-results::args, device, END_HANDLE_TH_ERRORS, torch::getTHPDtype(), HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::autograd::utils::parse_to_conversion(), configure::parser, r, THPDevice_New(), THPMemoryFormat_New(), THPNNVariableFunctionsModule, and torch::autograd::utils::wrap().
static |
Definition at line 417 of file python_torch_functions.cpp.
References torch::utils::_sparse_coo_tensor_unsafe_ctor(), compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, torch::tensors::get_default_dispatch_key(), torch::tensors::get_default_scalar_type(), HANDLE_TH_ERRORS, THPVariable_Wrap(), torch::jit::tracer::warn(), and torch::jit::tracer::WARN_CONSTRUCTOR.
Definition at line 72 of file python_variable_methods.cpp.
References torch::utils::apply_(), setup::arg, compare-fastrnn-results::args, torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, handle, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch.jit._trace::make_tuple(), and THPVariable_Wrap().
static |
Definition at line 86 of file python_torch_functions.cpp.
References compare-fastrnn-results::args, torch::utils::check_out_type_matches(), c10::TensorOptions::device(), dispatch_arange(), c10::TensorOptions::dtype(), matmul_dlmc_bench::end, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), c10::TensorOptions::layout(), caffe2.perfkernels.hp_emblookup_codegen::options, configure::parser, c10::TensorOptions::pinned_memory(), r, c10::TensorOptions::requires_grad(), at::Tensor::set_requires_grad(), torch.cuda.profiler::start(), step, THPVariableFunctionsModule, TORCH_CHECK, and torch::autograd::utils::wrap().
static |
Definition at line 371 of file python_torch_functions.cpp.
References compare-fastrnn-results::args, torch::utils::as_tensor(), END_HANDLE_TH_ERRORS, torch::tensors::get_default_dispatch_key(), torch::tensors::get_default_scalar_type(), HANDLE_TH_ERRORS, THPVariable_Wrap(), torch::jit::tracer::warn(), and torch::jit::tracer::WARN_CONSTRUCTOR.
static |
Definition at line 694 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, c10::BFloat16, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 677 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, c10::Bool, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 541 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 558 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 344 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), dispatch_to_CComplexDouble(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::jit::tracer::warn(), torch::jit::tracer::WARN_PYTHON_DATAFLOW, and torch::autograd::utils::wrap().
static |
Definition at line 235 of file python_variable_methods.cpp.
References torch::jit::tracer::addInputs(), torch::jit::tracer::addOutput(), compare-fastrnn-results::args, at::native::contiguous(), dispatch_contiguous(), END_HANDLE_TH_ERRORS, torch::jit::tracer::getTracingState(), HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::jit::tracer::isTracing(), c10::memory_format(), node, configure::parser, r, torch::jit::tracer::recordSourceLocation(), THPVariable_Wrap(), and THPVariableClass.
static |
Definition at line 278 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, dispatch_copy_(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_Wrap(), and THPVariableClass.
static |
Definition at line 435 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, c10::CPU, dispatch_to(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_Wrap(), and THPVariableClass.
static |
Definition at line 489 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, c10::CUDA, torch::utils::cuda_lazy_init(), device, torch.distributed.pipeline.sync._balance::Device, dispatch_to(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_Wrap(), THPVariableClass, and TORCH_CHECK.
Definition at line 182 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), data_ptr, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), and torch::autograd::utils::wrap().
Definition at line 206 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), and THPUtils_packInt64().
static |
Definition at line 575 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, c10::Double, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 711 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), and THPUtils_packInt64().
static |
Definition at line 592 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, c10::Float, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 333 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), dispatch_to_CDouble(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::jit::tracer::warn(), torch::jit::tracer::WARN_PYTHON_DATAFLOW, and torch::autograd::utils::wrap().
static |
Definition at line 381 of file python_torch_functions.cpp.
References setup::arg, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::utils::tensor_from_numpy(), THPVariable_Wrap(), torch::jit::tracer::warn(), and torch::jit::tracer::WARN_CONSTRUCTOR.
static |
Definition at line 219 of file python_torch_functions.cpp.
References compare-fastrnn-results::args, torch::utils::check_out_type_matches(), c10::TensorOptions::device(), dispatch_full(), c10::TensorOptions::dtype(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), c10::TensorOptions::layout(), microbenchmarks::names, c10::nullopt, caffe2.perfkernels.hp_emblookup_codegen::options, configure::parser, c10::TensorOptions::pinned_memory(), r, size, THPVariableFunctionsModule, TORCH_CHECK, and torch::autograd::utils::wrap().
static |
Definition at line 159 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, at::get_device(), HANDLE_TH_ERRORS, torch::handle_torch_function(), and torch::autograd::utils::wrap().
static |
Definition at line 435 of file python_torch_functions.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, configure::parser, r, and torch::autograd::utils::wrap().
Definition at line 270 of file python_variable_indexing.cpp.
References at::native::alias(), applySlicing(), torch::check_has_torch_function(), checkUnpackSlice(), count_specified_dimensions(), at::device_of(), at::indexing::dispatch_index(), at::indexing::Ellipsis, END_HANDLE_TH_ERRORS, THPPointer< T >::get(), at::indexing::get_item(), HANDLE_TH_ERRORS, torch::handle_torch_function_indexing(), index, caffe2::int64_t, torch.jit._trace::is_tracing(), torch::jit::tracer::isTracing(), at::indexing::None, recordSelectTrace(), recordSliceTrace(), torch.cuda.profiler::start(), step, torch.cuda.profiler::stop(), THPUtils_checkLong(), THPUtils_unpackLong(), THPVariable_Check(), THPVariable_CheckExact(), THPVariable_Unpack(), THPVariable_Wrap(), and wrapTuple().
static |
Definition at line 609 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, c10::Half, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 170 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), at::impl::has_names(), and torch::autograd::utils::wrap().
static |
Definition at line 374 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), dispatch_to_CLong(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), c10::isIntegralType(), and torch::autograd::utils::wrap().
static |
Definition at line 626 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), c10::Int, configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 355 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), dispatch_to_CDouble(), dispatch_to_CLong(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), c10::isFloatingType(), THPUtils_packDoubleAsInt(), torch::jit::tracer::warn(), torch::jit::tracer::WARN_PYTHON_DATAFLOW, and torch::autograd::utils::wrap().
Definition at line 395 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), dispatch_invert(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), c10::isIntegralType(), and THPVariable_Wrap().
static |
Definition at line 769 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, dispatch_is_contiguous(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), c10::memory_format(), configure::parser, r, and torch::autograd::utils::wrap().
Definition at line 789 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, c10::Bool, torch::check_has_torch_function(), dispatch_to_Bool(), dispatch_to_CComplexDouble(), dispatch_to_CDouble(), dispatch_to_CLong(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::jit::tracer::warn(), torch::jit::tracer::WARN_PYTHON_DATAFLOW, and torch::autograd::utils::wrap().
Py_ssize_t torch::autograd::THPVariable_length(PyObject * self)
Definition at line 33 of file python_variable_indexing.cpp.
References torch::check_has_torch_function(), END_HANDLE_TH_ERRORS_RET, HANDLE_TH_ERRORS, torch::handle_torch_function(), and caffe2.contrib.playground.AnyExpOnTerm::ret.
static |
Definition at line 643 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 835 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::utils::map2_(), configure::parser, r, THPVariable_Wrap(), THPVariableClass, bench_ops::x, and at::native::metal::mpscnn::y.
static |
Definition at line 811 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::utils::map_(), at::meta::other, configure::parser, r, at::Tensor::requires_grad(), THPVariable_Wrap(), and THPVariableClass.
static |
Definition at line 858 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), at::device_of(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::utils::legacy_tensor_new(), at::legacyExtractDispatchKey(), and THPVariable_Wrap().
static |
Definition at line 870 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), at::device_of(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), at::legacyExtractDispatchKey(), torch::utils::new_ones(), and THPVariable_Wrap().
static |
Definition at line 882 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), at::device_of(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), at::legacyExtractDispatchKey(), torch::utils::new_tensor(), and THPVariable_Wrap().
static |
static |
Definition at line 466 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, dispatch_nonzero(), dispatch_nonzero_numpy(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariableClass, and torch::autograd::utils::wrap().
Definition at line 218 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), and THPUtils_packInt64().
static |
Definition at line 724 of file python_variable_methods.cpp.
References torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::utils::tensor_to_numpy(), torch::jit::tracer::warn(), and torch::jit::tracer::WARN_PYTHON_DATAFLOW.
static |
Definition at line 15 of file python_legacy_variable.cpp.
References compare-fastrnn-results::args, c10::computeDeviceType(), data, c10::TensorOptions::device(), c10::dispatchKeyToBackend(), at::native::metal::empty(), END_HANDLE_TH_ERRORS, torch::tensors::get_default_dispatch_key(), torch::tensors::get_default_scalar_type(), torch::jit::tracer::getValueTrace(), HANDLE_TH_ERRORS, torch::jit::tracer::isTracing(), c10::TensorOptions::layout(), c10::layout_from_backend(), name, caffe2.perfkernels.hp_emblookup_codegen::options, r, fastrnns.scratch::requires_grad, detail::scalar_type(), torch::autograd::impl::set_name(), torch::jit::tracer::setValueTrace(), THPFunction_Check(), THPVariable_Check(), THPVariable_Wrap(), TORCH_CHECK, and caffe2::var.
static |
Definition at line 310 of file python_torch_functions.cpp.
References compare-fastrnn-results::args, torch::utils::check_out_type_matches(), c10::TensorOptions::device(), device, dispatch_randint(), c10::TensorOptions::dtype(), caffe2::dtype, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), c10::TensorOptions::layout(), caffe2.perfkernels.hp_emblookup_codegen::options, configure::parser, r, c10::TensorOptions::requires_grad(), at::Tensor::set_requires_grad(), size, THPVariableFunctionsModule, and torch::autograd::utils::wrap().
static |
Definition at line 157 of file python_torch_functions.cpp.
References compare-fastrnn-results::args, torch::utils::check_out_type_matches(), c10::TensorOptions::device(), dispatch_range(), c10::TensorOptions::dtype(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, c10::TensorOptions::layout(), caffe2.perfkernels.hp_emblookup_codegen::options, configure::parser, r, c10::TensorOptions::requires_grad(), caffe2.contrib.playground.AnyExpOnTerm::ret, at::Tensor::set_requires_grad(), and torch::autograd::utils::wrap().
static |
Definition at line 736 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, fastrnns.scratch::requires_grad, torch::autograd::utils::requires_grad_leaf_error(), THPVariable_Wrap(), THPVariableClass, and c10::typeMetaToScalarType().
Definition at line 347 of file python_variable_indexing.cpp.
References applySlicing(), torch::check_has_torch_function(), checkUnpackSlice(), at::indexing::copy_to(), count_specified_dimensions(), device, at::device_of(), at::indexing::dispatch_index_put_(), caffe2::dtype, at::indexing::Ellipsis, END_HANDLE_TH_ERRORS_RET, c10::ArrayRef< T >::equals(), THPPointer< T >::get(), HANDLE_TH_ERRORS, torch::handle_torch_function_indexing(), index, caffe2::int64_t, torch.jit._trace::is_tracing(), c10::isQIntType(), torch::jit::tracer::isTracing(), c10::kCPU, c10::kFloat, at::indexing::None, recordSelectTrace(), recordSliceTrace(), caffe2.contrib.playground.AnyExpOnTerm::ret, at::indexing::set_item(), at::indexing::slicePrefix1sSize(), torch.cuda.profiler::start(), step, torch.cuda.profiler::stop(), THPUtils_checkLong(), THPUtils_unpackLong(), THPVariable_Check(), THPVariable_CheckExact(), THPVariable_Unpack(), val, value, valueToTensor(), and wrapTuple().
static |
Definition at line 660 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_to_type(), and THPVariableClass.
static |
Definition at line 89 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, torch::jit::tracer::getSizeOf(), HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::jit::tracer::isTracing(), configure::parser, r, THPSize_New(), THPVariableClass, TORCH_INTERNAL_ASSERT, and torch::autograd::utils::wrap().
static |
Definition at line 409 of file python_torch_functions.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, torch::tensors::get_default_dispatch_key(), torch::tensors::get_default_scalar_type(), HANDLE_TH_ERRORS, torch::utils::sparse_coo_tensor_ctor(), THPVariable_Wrap(), torch::jit::tracer::warn(), and torch::jit::tracer::WARN_CONSTRUCTOR.
Definition at line 894 of file python_variable_methods.cpp.
References torch::check_has_torch_function(), torch::createPyObject(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and torch::handle_torch_function().
static |
Definition at line 194 of file python_variable_methods.cpp.
References torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), and torch::autograd::utils::wrap().
static |
Definition at line 905 of file python_variable_methods.cpp.
References torch::check_has_torch_function(), torch::createPyObject(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), and caffe2::storage.
static |
Definition at line 126 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, strides, THPUtils_packInt64Array(), THPVariableClass, and torch::autograd::utils::wrap().
static |
Definition at line 427 of file python_torch_functions.cpp.
References compare-fastrnn-results::args, END_HANDLE_TH_ERRORS, torch::tensors::get_default_dispatch_key(), torch::tensors::get_default_scalar_type(), HANDLE_TH_ERRORS, torch::utils::tensor_ctor(), THPVariable_Wrap(), torch::jit::tracer::warn(), and torch::jit::tracer::WARN_CONSTRUCTOR.
static |
Definition at line 919 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, c10::aten::copy(), torch::utils::cuda_lazy_init(), device, dispatch_to(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::autograd::utils::parse_to_conversion(), configure::parser, r, THPVariable_Wrap(), and THPVariableClass.
static |
Definition at line 534 of file python_variable_methods.cpp.
References dispatch_to(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, and THPVariable_Wrap().
Referenced by THPVariable_bfloat16(), THPVariable_bool(), THPVariable_byte(), THPVariable_char(), THPVariable_double(), THPVariable_float(), THPVariable_half(), THPVariable_int(), THPVariable_long(), and THPVariable_short().
Definition at line 961 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::check_has_torch_function(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), torch::utils::tensor_to_list(), torch::jit::tracer::warn(), and torch::jit::tracer::WARN_PYTHON_DATAFLOW.
static |
Definition at line 973 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, torch::utils::cuda_lazy_init(), device, torch.distributed.pipeline.sync._balance::Device, caffe2.python.gradient_check_test::device_type, dispatch_to(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), caffe2.perfkernels.hp_emblookup_codegen::options, torch::utils::options_from_string(), torch::utils::options_to_string(), configure::parser, r, detail::scalar_type(), THPDtype_Check(), THPUtils_checkString(), THPUtils_packString(), THPUtils_unpackString(), THPVariable_Wrap(), THPVariableClass, and c10::typeMetaToScalarType().
static |
Definition at line 512 of file python_variable_methods.cpp.
References compare-fastrnn-results::args, device, torch.distributed.pipeline.sync._balance::Device, dispatch_to(), END_HANDLE_TH_ERRORS, HANDLE_TH_ERRORS, torch::handle_torch_function(), configure::parser, r, THPVariable_Wrap(), THPVariableClass, TORCH_CHECK, and c10::XPU.
inline |
Definition at line 80 of file VariableTypeUtils.h.
References name, at::Tensor::requires_grad(), torch::tensor(), and TORCH_CHECK.
Referenced by throw_error_for_complex_autograd().
inline |
Definition at line 87 of file VariableTypeUtils.h.
References name, torch::tensor(), and throw_error_for_complex_autograd().
inline |
Definition at line 74 of file VariableTypeUtils.h.
inline |
Definition at line 312 of file VariableTypeUtils.h.
References caffe2::tensors.
inline |
Definition at line 304 of file VariableTypeUtils.h.
References caffe2::tensors.
std::enable_if<std::is_same<T,variable_list>::value,T&>::type torch::autograd::to_output_type(variable_list & output_list)
Definition at line 197 of file custom_function.h.
std::enable_if<std::is_same<T,Variable>::value,T>::type torch::autograd::to_output_type(variable_list & output_list)
Definition at line 200 of file custom_function.h.
inline static
Definition at line 211 of file python_variable_indexing.cpp.
References index, and THPVariable_Check().
Referenced by wrapTuple().
void torch::autograd::validate_outputs(const edge_list & edges, variable_list & grads, const std::function< std::string(const std::string &)> & format_error)
Definition at line 572 of file engine.cpp.
References AT_ERROR, at::Tensor::defined(), at::Tensor::device(), at::Tensor::dim(), c10::ArrayRef< T >::equals(), grad(), is_compatible_type(), at::is_expandable_to(), c10::isComplexType(), c10::isFloatingType(), matmul_dlmc_bench::metadata, at::Tensor::options(), at::Tensor::scalar_type(), at::Tensor::sizes(), at::sum_to(), TORCH_CHECK, TORCH_INTERNAL_ASSERT, and c10::typeMetaToScalarType().
Referenced by call_function(), torch::autograd::Engine::execute(), and torch::distributed::autograd::DistEngine::validateRootsAndRetrieveEdges().
inline static
Definition at line 90 of file python_variable_indexing.cpp.
References device, caffe2.perfkernels.hp_emblookup_codegen::options, torch::utils::options_to_string(), at::indexing::scalarToTensor(), THPUtils_checkLong(), THPUtils_unpackComplexDouble(), THPUtils_unpackDouble(), THPUtils_unpackLong(), THPVariable_Check(), and value.
Referenced by THPVariable_setitem().
def torch.autograd.variable(*args, **kwargs)
Definition at line 246 of file __init__.py.
References variable().
Referenced by _mark_dirty(), _save_variables(), torch::distributed::autograd::DistAutogradContext.accumulateGrad(), torch::autograd::AccumulateGrad.accumulateGrad(), torch::autograd::generated::details.any_variable_defined(), any_variable_requires_grad(), torch::autograd::AccumulateGrad.apply(), torch::autograd::Gather.apply(), torch::autograd::AccumulateGrad.callHooks(), torch::autograd::utils.clone_obey_contract(), c10d::Reducer.copy_bucket_to_grad(), create_gradient_edge(), c10d::Reducer.finalize_bucket_dense(), c10d::Reducer.initialize_buckets(), torch::jit.initPythonIRBindings(), c10::detail.make_filename(), c10d::Reducer.mark_variable_ready_dense(), c10d::Reducer.mark_variable_ready_sparse(), torch::jit.module_state_to(), torch::jit::tensorexpr::PolynomialTransformer.mutate(), torch::autograd::utils.obeys_layout_contract(), torch::autograd::detail::MakeNextFunctionList.operator()(), c10d::Reducer.Reducer(), torch::distributed::autograd::DistAutogradContext.runGradCallbackForVariable(), c10d::Reducer.runGradCallbackForVariable(), torch::autograd::SavedVariable.SavedVariable(), set_history(), THPEngine_run_backward(), unpack_input(), variable(), and wrap_outputs().
TORCH_API variable_list torch::autograd::wrap_outputs(const variable_list & inputs, tensor_list && outputs, const function_constructor & ctr)
Wraps the tensor outputs in variables, and creates and sets their grad_fn if necessary.
Definition at line 12 of file utils.cpp.
References any_variable_requires_grad(), collect_next_edges(), create_gradient_edge(), inputs, at::GradMode::is_enabled(), at::native::metal::mpscnn::output, outputs, and variable().
Referenced by torch::autograd::DelayedError::apply(), torch::autograd::UndefinedGrad::apply(), and torch::autograd::PyNode::legacy_apply().
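A hedged sketch of the typical call pattern, modeled on how torch::autograd::DelayedError::apply() uses this helper; the wrapper function wrap_with_error is made up for the example, and the in-tree header paths shown may differ for an installed libtorch:

    #include <memory>
    #include <string>
    #include <utility>
    #include <torch/csrc/autograd/functions/basic_ops.h>  // torch::autograd::Error
    #include <torch/csrc/autograd/functions/utils.h>      // torch::autograd::wrap_outputs

    using namespace torch::autograd;

    // Wraps raw tensor outputs so that any backward pass through them throws `msg`.
    variable_list wrap_with_error(const variable_list& inputs,
                                  tensor_list&& raw_outputs,
                                  const std::string& msg) {
      return wrap_outputs(inputs, std::move(raw_outputs),
                          [&](edge_list&& next_edges) {
                            // The Node built by this constructor becomes the
                            // grad_fn of each output that requires grad.
                            return std::make_shared<Error>(msg, std::move(next_edges));
                          });
    }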
inline static
Definition at line 251 of file python_variable_indexing.cpp.
References index, torch::res, and treatSequenceAsTuple().
Referenced by THPVariable_getitem(), and THPVariable_setitem().
torch::autograd::$ |
Definition at line 41 of file python_fft_functions.cpp.
Referenced by torch::autograd::generated::initialize_autogenerated_functions().
Definition at line 24 of file __init__.py.
private |
Definition at line 26 of file __init__.py.
static |
Definition at line 70 of file engine.cpp.
Referenced by call_function(), and torch::autograd::Engine::is_checkpoint_valid().
static |
Definition at line 191 of file python_cpp_function.cpp.
Referenced by functionToPyObject(), and registerCppFunction().
static constexpr
Definition at line 32 of file engine.h.
Referenced by torch::distributed::autograd::DistEngine::execute_graph_task_until_ready_queue_empty(), torch::autograd::Engine::execute_with_graph_task(), torch::autograd::Engine::ready_queue_by_index(), and set_device().
static |
Definition at line 73 of file engine.cpp.
Referenced by torch::autograd::Engine::execute_with_graph_task().
static |
Definition at line 21 of file function.cpp.
Referenced by torch::autograd::Node::assign_parent(), torch::autograd::NodeGuard::NodeGuard(), and torch::autograd::NodeGuard::~NodeGuard().
static |
Definition at line 82 of file engine.cpp.
Referenced by torch::autograd::GraphTaskGuard::GraphTaskGuard(), torch::autograd::Engine::queue_callback(), and torch::autograd::GraphTaskGuard::restore_current_graph_task().
static |
Definition at line 162 of file python_cpp_function.cpp.
Referenced by _initFunctionPyTypeObject().
static |
Definition at line 167 of file python_cpp_function.cpp.
Referenced by _initFunctionPyTypeObject().
TORCH_API const char * torch::autograd::ERR_BACKWARD_TWICE |
Definition at line 122 of file saved_variable.cpp.
Referenced by torch::autograd::CopySlices::apply(), torch::autograd::AutogradContext::get_saved_variables(), torch::autograd::SavedVariable::unpack(), and unpack_saved_variables().
static |
Definition at line 98 of file engine.cpp.
Referenced by torch::autograd::Engine::execute(), torch::autograd::Engine::init_local_ready_queue(), torch::autograd::Engine::reentrant_thread_init(), and torch::autograd::Engine::thread_main().
static constexpr
static |
Definition at line 332 of file init.cpp.
Referenced by python_functions().
static constexpr
Definition at line 31 of file engine.h.
Referenced by torch::autograd::Engine::execute(), and torch::autograd::Engine::execute_with_graph_task().
static |
Definition at line 48 of file python_fft_functions.cpp.
Referenced by initFFTFunctions().
PyTypeObject torch::autograd::THPLegacyVariableType |
Definition at line 91 of file python_legacy_variable.cpp.
Referenced by init_legacy_variable().
static |
Definition at line 34 of file python_linalg_functions.cpp.
Referenced by initLinalgFunctions().
static |
Definition at line 25 of file python_nn_functions.cpp.
Referenced by initNNFunctions(), and THPVariable__parse_to().
static |
Definition at line 498 of file python_torch_functions.cpp.
Referenced by initTorchFunctions().
static |
Definition at line 62 of file python_torch_functions.cpp.
Referenced by initTorchFunctions(), THPVariable_arange(), THPVariable_full(), and THPVariable_randint().
static |
Definition at line 78 of file engine.cpp.
Referenced by torch::autograd::Engine::execute(), torch::autograd::Engine::execute_with_graph_task(), and torch::autograd::Engine::reentrant_thread_init().
extern |
Referenced by THPVariable_initModule().
at::impl::VariableHooksRegisterer registerVariableHooks & torch::autograd::variableHooks |
Definition at line 340 of file variable.cpp.
static |
Definition at line 65 of file engine.cpp.
Referenced by torch::autograd::Engine::execute(), torch::autograd::Engine::execute_with_graph_task(), set_device(), and torch::autograd::Engine::thread_main().