pytorch  1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.

spatial_softmax_with_loss_op.cc
#include "spatial_softmax_with_loss_op.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(
    SpatialSoftmaxWithLoss,
    SpatialSoftmaxWithLossOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
    SpatialSoftmaxWithLossGradient,
    SpatialSoftmaxWithLossGradientOp<float, CPUContext>);

// Input: X (logits), T (labels); Output: P (probs), Y
OPERATOR_SCHEMA(SpatialSoftmaxWithLoss)
    .NumInputs(2, 3)
    .NumOutputs(2)
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      ArgumentHelper helper(def);
      vector<TensorShape> out(2);

      auto logits = in[0]; // Tensor with Shape [batch_size, num_classes]
      auto labels = in[1]; // Tensor with shape [batch_size, ]
      auto batch_size = logits.dims().Get(0);
      auto num_classes = logits.dims().Get(1);

      CAFFE_ENFORCE_EQ(logits.dims_size(), 4);
      CAFFE_ENFORCE_EQ(labels.dims_size(), 3);
      out[0].set_data_type(logits.data_type());
      out[0].add_dims(batch_size);
      out[0].add_dims(num_classes);
      out[0].add_dims(in[0].dims(2));
      out[0].add_dims(in[0].dims(3));
      // Output 2 is scalar shape, so no dims added
      return out;
    })
    .SetDoc(R"DOC(
Combined Spatial Softmax and Cross-Entropy loss operator.
Similar to SoftmaxWithLoss, this operator computes the softmax normalized
values independently at each spatial location of the given input, after which
cross-entropy loss is computed. This operator is numerically more stable than
separate Softmax and CrossEntropy ops. The inputs are a 4-D tensor of logits
of size (batch_size x num_classes x height x width) and a 3-D tensor of
integer class labels (ground truth) of size (batch_size x height x width).
Output is a tensor with the probability for each class at each pixel of each
example (N x D x H x W) and the averaged loss (scalar).
If the optional weight tensor is given, the loss is weighted per x,y position
of the input.
)DOC")
    .Input(0, "logits", "Unscaled log probabilities")
    .Input(1, "labels", "Ground truth")
    .Input(
        2,
        "weight_tensor",
        "Optional blob to be used to weight the samples for the loss; "
        "weighting is per x,y position of the input")
    .Output(0, "softmax", "Tensor with per-pixel softmax probabilities")
    .Output(1, "loss", "Average loss");
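As a reading aid (not part of the operator itself), here is a minimal sketch of the shape contract the schema above implies; the helper name flat_nchw and the toy sizes are ours, and the offset formula is the same arithmetic used by the kernels below:

#include <cstdint>
#include <vector>

// Flat offset into an NCHW buffer, matching the idx computation in the kernels.
inline std::int64_t flat_nchw(int i, int c, int y, int x, int D, int H, int W) {
  return static_cast<std::int64_t>(i) * D * H * W +
      static_cast<std::int64_t>(c) * H * W +
      static_cast<std::int64_t>(y) * W + x;
}

int main() {
  const int N = 2, D = 3, H = 4, W = 5;        // batch, classes, height, width
  std::vector<float> logits(N * D * H * W);    // Input 0: N x D x H x W, float
  std::vector<int> labels(N * H * W, 0);       // Input 1: N x H x W, int class ids (-1 = ignore)
  std::vector<float> weights(N * H * W, 1.f);  // Optional Input 2: N x H x W, float
  std::vector<float> softmax(N * D * H * W);   // Output 0: per-pixel class probabilities
  float avg_loss = 0.f;                        // Output 1: scalar average loss
  // Offset of class c=2 at pixel (y=3, x=4) of example i=1:
  std::int64_t off = flat_nchw(1, 2, 3, 4, D, H, W);
  (void)off;
  (void)avg_loss;
  return 0;
}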

// Input: X, T, P, dY; Output: dX
OPERATOR_SCHEMA(SpatialSoftmaxWithLossGradient).NumOutputs(1);

#define DONT_CARE (-1)

template <>
bool SpatialSoftmaxWithLossOp<float, CPUContext>::RunOnDevice() {
  auto& X = Input(0); // Logits
  auto& T = Input(1); // Labels / targets

  int N, D;
  N = X.dim32(0);
  D = X.dim32(1);
  auto* P =
      Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax

  if (!sum_multiplier_.defined()) {
    sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CPU));
    math::Set<float, CPUContext>(
        D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
  } else if (sum_multiplier_.numel() != D) {
    sum_multiplier_.Resize(D);
    math::Set<float, CPUContext>(
        D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
  }

  float* Pdata = P->template mutable_data<float>();
  const float* weights = (InputSize() > 2 ? Input(2).data<float>() : nullptr);
  CAFFE_ENFORCE_EQ(X.dim(), 4);
  CAFFE_ENFORCE_EQ(T.dim(), 3);
  CAFFE_ENFORCE_EQ(T.dim32(0), N);

  int H = X.dim32(2);
  int W = X.dim32(3);

  const float* Xdata = X.data<float>();

  for (int i = 0; i < N; ++i) {
    for (int y = 0; y < H; ++y) {
      for (int x = 0; x < W; ++x) {
        // Subtract max on each cell for numerical reasons
        float max_val = (-1e20f);
        for (int c = 0; c < D; ++c) {
          // TODO optimize
          int idx = i * (H * W * D) + c * (H * W) + y * W + x;
          max_val = std::max(max_val, Xdata[idx]);
        }

        // Exponentiate
        float expsum = 0.0f;
        for (int c = 0; c < D; ++c) {
          int idx = i * (H * W * D) + c * (H * W) + y * W + x;
          float expx = exp(Xdata[idx] - max_val);
          Pdata[idx] = expx;
          expsum += expx;
        }

        // Normalize
        for (int c = 0; c < D; ++c) {
          int idx = i * (H * W * D) + c * (H * W) + y * W + x;
          Pdata[idx] /= expsum;
        }
      }
    }
  }

  // Compute the avg cross-entropy loss
  auto* avg_loss =
      Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss
  float* avg_loss_data = avg_loss->template mutable_data<float>();
  const int* label_data = T.data<int>();

  float sum_label_xent = 0.0f;
  float total_weight = 0.0;

  for (int y = 0; y < H; y++) {
    for (int x = 0; x < W; x++) {
      for (int i = 0; i < N; i++) {
        int label_idx = i * H * W + y * W + x;
        int label = label_data[label_idx];
        if (label != DONT_CARE) {
          CAFFE_ENFORCE(
              label < D && label >= 0,
              "Label seems incorrect:label value larger than number of classes",
              label_data[i],
              " vs ",
              D);
          int idx = i * (H * W * D) + label * (H * W) + y * W + x;
          float w = weights ? weights[label_idx] : 1.0;
          total_weight += w;
          sum_label_xent += -log(std::max(Pdata[idx], 1e-20f)) * w;
        }
      }
    }
  }
  if (total_weight != 0.0) {
    *avg_loss_data = sum_label_xent / total_weight;
  } else {
    *avg_loss_data = 0.0;
  }
  return true;
}
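For reference, a condensed standalone sketch of the per-pixel math the loops above implement, on plain arrays; the function name pixel_loss and its signature are ours and are not part of Caffe2:

#include <algorithm>
#include <cmath>

// Softmax over the D channel values at one (i, y, x) location, followed by the
// weighted cross-entropy term for that pixel. The per-pixel max is subtracted
// before exponentiating so exp() cannot overflow, and the probability is
// clamped at 1e-20f before the log, mirroring the operator code above.
float pixel_loss(const float* X, float* P, int i, int y, int x,
                 int D, int H, int W, int label, float weight) {
  auto idx = [&](int c) { return i * (H * W * D) + c * (H * W) + y * W + x; };
  float max_val = -1e20f;
  for (int c = 0; c < D; ++c)
    max_val = std::max(max_val, X[idx(c)]);
  float expsum = 0.0f;
  for (int c = 0; c < D; ++c) {
    P[idx(c)] = std::exp(X[idx(c)] - max_val);
    expsum += P[idx(c)];
  }
  for (int c = 0; c < D; ++c)
    P[idx(c)] /= expsum;
  if (label < 0) // DONT_CARE pixels contribute neither loss nor weight
    return 0.0f;
  return -std::log(std::max(P[idx(label)], 1e-20f)) * weight;
}
// The operator accumulates this term over all (i, y, x), accumulates the
// weights of the non-ignored pixels into total_weight, and outputs
// sum / total_weight as the scalar average loss.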

template <>
bool SpatialSoftmaxWithLossGradientOp<float, CPUContext>::RunOnDevice() {
  auto& X = Input(0); // Logits
  auto& T = Input(1); // Labels / targets
  // Input(2) is weights if given
  auto& P = Input(InputSize() - 2); // Probabilities from softmax
  auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss

  const float* weights = (InputSize() > 4 ? Input(2).data<float>() : nullptr);
  int N, D;
  N = X.dim32(0);
  D = X.dim32(1);
  auto* dX = Output(0, X.sizes(), at::dtype<float>());
  CAFFE_ENFORCE_EQ(T.dim32(0), N);
  CAFFE_ENFORCE_EQ(X.dim(), 4);
  CAFFE_ENFORCE_EQ(T.dim(), 3);

  int H = X.dim32(2);
  int W = X.dim32(3);

  const float* Pdata = P.data<float>();
  float* dX_data = dX->template mutable_data<float>();
  const int* label_data = T.data<int>();

  // Copy softmax probabilities into dX. All but the neuron
  // corresponding to the correct label has gradient equaling e(x_j)
  // which is the probability under softmax.
  context_.CopyFromCPU<float>(P.numel(), Pdata, dX_data);

  float total_weight = 0.0f;
  for (int y = 0; y < H; ++y) {
    for (int x = 0; x < W; ++x) {
      for (int i = 0; i < N; ++i) {
        int label_idx = i * H * W + y * W + x;
        int label = label_data[label_idx];

        if (label != DONT_CARE) {
          int idx = i * (H * W * D) + label * (H * W) + y * W + x;

          dX_data[idx] = (dX_data[idx] - 1.0);

          if (weights != nullptr) {
            float weight = weights[label_idx];
            for (int c = 0; c < D; ++c) {
              int k = i * (H * W * D) + c * (H * W) + y * W + x;
              dX_data[k] *= weight;
            }
            total_weight += weight;
          } else {
            total_weight += 1.0;
          }
        } else {
          // Set gradient to zero for coordinates where we have dont care
          for (int c = 0; c < D; ++c) {
            int idx = i * (H * W * D) + c * (H * W) + y * W + x;
            dX_data[idx] = 0;
          }
        }
      }
    }
  }

  if (total_weight > 0) {
    math::Scale<float, float, CPUContext>(
        dX->numel(),
        scale_ / total_weight,
        dX->data<float>(),
        dX_data,
        &context_);
  }
  math::Scale<float, float, CPUContext>(
      dX->numel(),
      d_avg_loss.data<float>(),
      dX->data<float>(),
      dX->template mutable_data<float>(),
      &context_);
  return true;
}
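A standalone sketch of the per-pixel gradient rule the loop above applies, before the global scaling by scale_ / total_weight and by the incoming d_avg_loss; the helper name pixel_grad is ours:

// Gradient of the fused softmax-cross-entropy loss w.r.t. the logits at one
// (i, y, x) location: probability minus one-hot, optionally weighted per
// pixel, with DONT_CARE pixels receiving zero gradient.
void pixel_grad(const float* P, float* dX, int i, int y, int x,
                int D, int H, int W, int label, float weight) {
  auto idx = [&](int c) { return i * (H * W * D) + c * (H * W) + y * W + x; };
  for (int c = 0; c < D; ++c) {
    if (label < 0) {
      dX[idx(c)] = 0.0f; // ignored pixel: no gradient, no weight
    } else {
      dX[idx(c)] = (P[idx(c)] - (c == label ? 1.0f : 0.0f)) * weight;
    }
  }
}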

namespace {
class GetSoftmaxWithLossGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    vector<string> blob_names{
        {I(0), I(1), O(0), GO(1)},
    };

    // Add weight blob, if given
    if (def_.input_size() == 3) {
      blob_names.emplace(blob_names.begin() + 2, I(2));
    }
    return SingleGradientDef(
        "SpatialSoftmaxWithLossGradient",
        "",
        blob_names,
        vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(SpatialSoftmaxWithLoss, GetSoftmaxWithLossGradient);
} // namespace
} // namespace caffe2
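For illustration, these are the two input lists the gradient maker above can produce (the blob names are hypothetical); this is why the gradient op reads the softmax output at Input(InputSize() - 2), the loss gradient at Input(InputSize() - 1), and a weight blob only when InputSize() > 4:

#include <string>
#include <vector>

// Forward op has two inputs (no weights): gradient op sees 4 blobs.
std::vector<std::string> grad_inputs_no_weights = {
    "logits", "labels", "softmax", "loss_grad"};
// Forward op has three inputs: the weight blob is spliced in at position 2,
// giving 5 blobs for the gradient op.
std::vector<std::string> grad_inputs_with_weights = {
    "logits", "labels", "weight_tensor", "softmax", "loss_grad"};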