pytorch 1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.
Fossies Dox: pytorch-1.8.2.tar.gz ("unofficial" and experimental doxygen-generated source code documentation)

im2col_op.cc
#include "caffe2/operators/im2col_op.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(Im2Col, Im2ColOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(Col2Im, Col2ImOp<float, CPUContext>);

// The gradient of Im2Col is Col2Im applied to the output gradient; the
// original input is passed along as well so Col2Im knows the image size.
class GetIm2ColGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "Col2Im",
        "",
        std::vector<string>{GO(0), I(0)},
        std::vector<string>{GI(0)});
  }
};
REGISTER_GRADIENT(Im2Col, GetIm2ColGradient);

// Conversely, the gradient of Col2Im is Im2Col applied to the output gradient.
class GetCol2ImGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "Im2Col", "", std::vector<string>{GO(0)}, std::vector<string>{GI(0)});
  }
};
REGISTER_GRADIENT(Col2Im, GetCol2ImGradient);

OPERATOR_SCHEMA(Im2Col)
    .NumInputs(1)
    .NumOutputs(1)
    .SetDoc("The Im2Col operator from Matlab.")
    .TensorInferenceFunction(
        [](const OperatorDef& def, const vector<TensorShape>& in) {
          // Read the operator arguments; the per-dimension variants
          // (kernel_h/kernel_w, etc.) fall back to the scalar ones.
          ArgumentHelper helper(def);
          auto pad = helper.GetSingleArgument<int>("pad", 0);
          auto kernel_h = helper.GetSingleArgument<int>(
              "kernel_h", helper.GetSingleArgument<int>("kernel", 0));
          auto kernel_w = helper.GetSingleArgument<int>(
              "kernel_w", helper.GetSingleArgument<int>("kernel", 0));
          auto dilation_h = helper.GetSingleArgument<int>(
              "dilation_h", helper.GetSingleArgument<int>("dilation", 1));
          auto dilation_w = helper.GetSingleArgument<int>(
              "dilation_w", helper.GetSingleArgument<int>("dilation", 1));
          auto stride_h = helper.GetSingleArgument<int>(
              "stride_h", helper.GetSingleArgument<int>("stride", 1));
          auto stride_w = helper.GetSingleArgument<int>(
              "stride_w", helper.GetSingleArgument<int>("stride", 1));
          auto order = StringToStorageOrder(
              helper.GetSingleArgument<string>("order", "NCHW"));

          // Unpack the input dimensions according to the storage order.
          const TensorShape& X = in[0];
          int N = 0, C = 0, H = 0, W = 0;
          switch (order) {
            case StorageOrder::NCHW:
              N = X.dims(0);
              C = X.dims(1);
              H = X.dims(2);
              W = X.dims(3);
              break;
            case StorageOrder::NHWC:
              N = X.dims(0);
              H = X.dims(1);
              W = X.dims(2);
              C = X.dims(3);
              break;
            default:
              CAFFE_THROW("Unknown storage order: ", order);
          }

          // Effective (dilated) kernel extent, then the output spatial size.
          const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
          const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
          CAFFE_ENFORCE(H >= dkernel_h);
          CAFFE_ENFORCE(W >= dkernel_w);
          const int out_h = (H + 2 * pad - dkernel_h) / stride_h + 1;
          const int out_w = (W + 2 * pad - dkernel_w) / stride_w + 1;

          vector<TensorShape> out(1);
          switch (order) {
            case StorageOrder::NCHW:
              out[0] = CreateTensorShape(
                  vector<int>{N, C * kernel_h * kernel_w, out_h, out_w},
                  TensorProto::FLOAT);
              break;
            case StorageOrder::NHWC:
              out[0] = CreateTensorShape(
                  vector<int>{N, out_h, out_w, kernel_h * kernel_w * C},
                  TensorProto::FLOAT);
              break;
            default:
              CAFFE_THROW("Unknown storage order: ", order);
          }

          return out;
        })
    .Input(0, "X", "4-tensor in NCHW or NHWC.")
    .Output(
        0,
        "Y",
        "4-tensor. For NCHW: N x (C x kH x kW) x outH x outW. "
        "For NHWC: N x outH x outW x (kH x kW x C).");

OPERATOR_SCHEMA(Col2Im).NumInputs(2).NumOutputs(1);

} // namespace caffe2
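
The only nontrivial piece of the schema is the shape-inference lambda: it first computes the dilated kernel extent, then the output spatial size with the usual convolution arithmetic, and finally the column-tensor shape. The standalone sketch below is not part of the caffe2 sources; it simply mirrors that arithmetic with assumed example values (a 3-channel 5x5 input, 3x3 kernel, dilation 2, stride 1, no padding) so the documented NCHW output layout N x (C x kH x kW) x outH x outW can be checked by hand.

// Minimal standalone sketch of the Im2Col output-shape arithmetic above.
// All concrete sizes are assumptions chosen for illustration only.
#include <iostream>

int main() {
  const int C = 3, H = 5, W = 5;               // assumed input: 1 x 3 x 5 x 5 (NCHW)
  const int kernel_h = 3, kernel_w = 3;
  const int dilation_h = 2, dilation_w = 2;
  const int pad = 0;
  const int stride_h = 1, stride_w = 1;

  // Effective kernel extent once dilation is taken into account.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;  // 5
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;  // 5

  // Same integer arithmetic as the TensorInferenceFunction.
  const int out_h = (H + 2 * pad - dkernel_h) / stride_h + 1;  // 1
  const int out_w = (W + 2 * pad - dkernel_w) / stride_w + 1;  // 1

  // NCHW output channels: C * kernel_h * kernel_w.
  std::cout << "columns per position: " << C * kernel_h * kernel_w  // 27
            << ", out_h: " << out_h << ", out_w: " << out_w << std::endl;
  return 0;
}

Compiled with any C++11 compiler (e.g. g++ -std=c++11 on a hypothetical shape_check.cpp), this prints 27 columns per position and a 1 x 1 output grid, i.e. the Im2Col output for the assumed input would be 1 x 27 x 1 x 1.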