pytorch 1.8.2
About: PyTorch provides tensor computation (like NumPy) with strong GPU acceleration, and deep neural networks in Python built on a tape-based autograd system. This is an LTS (Long Term Support) release.

blob_test.cc
#include <iostream>
#include <memory>
#include <mutex>

#include <gtest/gtest.h>
#include "c10/util/Registry.h"
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/blob_stats.h"
#include "caffe2/core/context.h"
#include "caffe2/core/db.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/qtensor.h"
#include "caffe2/core/qtensor_serialization.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/test_utils.h"
#include "caffe2/core/types.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"

C10_DEFINE_int64(caffe2_test_big_tensor_size, 100000000, "");
C10_DECLARE_int(caffe2_tensor_chunk_size);
C10_DECLARE_bool(caffe2_serialize_fp16_as_bytes);
C10_DECLARE_bool(caffe2_serialize_using_bytes_as_holder);

namespace caffe2 {
using namespace ::caffe2::db;
namespace {
class BlobTestFoo {
 public:
  int32_t val;
};
class BlobTestBar {};
class BlobTestNonDefaultConstructible {
 public:
  BlobTestNonDefaultConstructible() = delete;
  BlobTestNonDefaultConstructible(int x) : val(x) {}
  int32_t val;
};
} // namespace

CAFFE_KNOWN_TYPE(BlobTestFoo);
CAFFE_KNOWN_TYPE(BlobTestBar);
CAFFE_KNOWN_TYPE(BlobTestNonDefaultConstructible);
class BlobTestFooSerializer : public BlobSerializerBase {
 public:
  BlobTestFooSerializer() {}
  ~BlobTestFooSerializer() override {}
  /**
   * Serializes a Blob. Note that this blob has to contain BlobTestFoo,
   * otherwise this function produces a fatal error.
   */
  void Serialize(
      const void* pointer,
      TypeMeta typeMeta,
      const string& name,
      SerializationAcceptor acceptor) override {
    CAFFE_ENFORCE(typeMeta.Match<BlobTestFoo>());

    BlobProto blob_proto;
    blob_proto.set_name(name);
    blob_proto.set_type("BlobTestFoo");
    // For simplicity we will just serialize the 4-byte content as a string.
    blob_proto.set_content(std::string(
        reinterpret_cast<const char*>(
            &static_cast<const BlobTestFoo*>(pointer)->val),
        sizeof(int32_t)));
    acceptor(name, SerializeBlobProtoAsString_EnforceCheck(blob_proto));
  }
};
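
// The matching deserializer reads the 4-byte payload back into
// BlobTestFoo::val.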
class BlobTestFooDeserializer : public BlobDeserializerBase {
 public:
  void Deserialize(const BlobProto& proto, Blob* blob) override {
    blob->GetMutable<BlobTestFoo>()->val =
        reinterpret_cast<const int32_t*>(proto.content().c_str())[0];
  }
};

REGISTER_BLOB_SERIALIZER((TypeMeta::Id<BlobTestFoo>()), BlobTestFooSerializer);
REGISTER_BLOB_DESERIALIZER(BlobTestFoo, BlobTestFooDeserializer);

namespace {

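// A Blob stores exactly one typed value at a time: each GetMutable<T>() with a
// different T discards the old contents, as the IsType<>() checks below show.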
TEST(BlobTest, Blob) {
  Blob blob;

  int* int_unused CAFFE2_UNUSED = blob.GetMutable<int>();
  EXPECT_TRUE(blob.IsType<int>());
  EXPECT_FALSE(blob.IsType<BlobTestFoo>());
  EXPECT_FALSE(BlobIsTensorType(blob, CPU));

  BlobTestFoo* foo_unused CAFFE2_UNUSED = blob.GetMutable<BlobTestFoo>();
  EXPECT_TRUE(blob.IsType<BlobTestFoo>());
  EXPECT_FALSE(blob.IsType<int>());
  EXPECT_FALSE(BlobIsTensorType(blob, CPU));

  Tensor* tensor_unused CAFFE2_UNUSED = BlobGetMutableTensor(&blob, CPU);
  EXPECT_TRUE(BlobIsTensorType(blob, CPU));
  EXPECT_FALSE(blob.IsType<BlobTestFoo>());
  EXPECT_FALSE(blob.IsType<int>());
}

TEST(BlobTest, BlobUninitialized) {
  Blob blob;
  ASSERT_THROW(blob.Get<int>(), EnforceNotMet);
}

TEST(BlobTest, BlobWrongType) {
  Blob blob;
  BlobTestFoo* foo_unused CAFFE2_UNUSED = blob.GetMutable<BlobTestFoo>();
  EXPECT_TRUE(blob.IsType<BlobTestFoo>());
  EXPECT_FALSE(blob.IsType<int>());
  // When not null, we should only call with the right type.
  EXPECT_NE(&blob.Get<BlobTestFoo>(), nullptr);
  ASSERT_THROW(blob.Get<int>(), EnforceNotMet);
}

TEST(BlobTest, BlobReset) {
  Blob blob;
  std::unique_ptr<BlobTestFoo> foo(new BlobTestFoo());
  EXPECT_TRUE(blob.Reset(foo.release()) != nullptr);
  // Also test that Reset works.
  blob.Reset();
}

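// Move construction/assignment transfers ownership: the moved-from Blob is
// left empty and Get() on it throws, while the pointee's address is unchanged.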
TEST(BlobTest, BlobMove) {
  Blob blob1;
  std::unique_ptr<BlobTestFoo> foo(new BlobTestFoo());
  auto* fooPtr = foo.get();
  EXPECT_TRUE(blob1.Reset(foo.release()) != nullptr);
  Blob blob2;
  blob2 = std::move(blob1);
  ASSERT_THROW(blob1.Get<BlobTestFoo>(), EnforceNotMet);
  EXPECT_EQ(&blob2.Get<BlobTestFoo>(), fooPtr);
  Blob blob3{std::move(blob2)};
  EXPECT_EQ(&blob3.Get<BlobTestFoo>(), fooPtr);
}

TEST(BlobTest, BlobNonConstructible) {
  Blob blob;
  ASSERT_THROW(blob.Get<BlobTestNonDefaultConstructible>(), EnforceNotMet);
  // won't work because it's not default constructible
  // blob.GetMutable<BlobTestNonDefaultConstructible>();
  EXPECT_FALSE(
      blob.GetMutableOrNull<BlobTestNonDefaultConstructible>() != nullptr);
  EXPECT_TRUE(blob.Reset(new BlobTestNonDefaultConstructible(42)) != nullptr);
  ASSERT_NO_THROW(blob.Get<BlobTestNonDefaultConstructible>());
  ASSERT_TRUE(
      blob.GetMutableOrNull<BlobTestNonDefaultConstructible>() != nullptr);
  EXPECT_EQ(blob.Get<BlobTestNonDefaultConstructible>().val, 42);
  blob.GetMutableOrNull<BlobTestNonDefaultConstructible>()->val = 37;
  EXPECT_EQ(blob.Get<BlobTestNonDefaultConstructible>().val, 37);
}

TEST(BlobTest, BlobShareExternalPointer) {
  Blob blob;
  std::unique_ptr<BlobTestFoo> foo(new BlobTestFoo());
  EXPECT_EQ(blob.ShareExternal<BlobTestFoo>(foo.get()), foo.get());
  EXPECT_TRUE(blob.IsType<BlobTestFoo>());
  // Also test that Reset works.
  blob.Reset();
}

TEST(BlobTest, BlobShareExternalObject) {
  Blob blob;
  BlobTestFoo foo;
  EXPECT_EQ(blob.ShareExternal<BlobTestFoo>(&foo), &foo);
  EXPECT_TRUE(blob.IsType<BlobTestFoo>());
  // Also test that Reset works.
  blob.Reset();
}

TEST(BlobTest, StringSerialization) {
  const std::string kTestString = "Hello world?";
  Blob blob;
  *blob.GetMutable<std::string>() = kTestString;

  string serialized = SerializeBlob(blob, "test");
  BlobProto proto;
  CHECK(proto.ParseFromString(serialized));
  EXPECT_EQ(proto.name(), "test");
  EXPECT_EQ(proto.type(), "std::string");
  EXPECT_FALSE(proto.has_tensor());
  EXPECT_EQ(proto.content(), kTestString);
}

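// Changing a tensor's dtype in place reuses the existing allocation as long as
// the new type fits in the current capacity and the Storage is not shared.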
TEST(TensorNonTypedTest, TensorChangeType) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);

  auto* ptr = tensor.mutable_data<int>();
  EXPECT_TRUE(ptr != nullptr);
  EXPECT_TRUE(tensor.data<int>() != nullptr);
  EXPECT_TRUE(tensor.dtype().Match<int>());

  // int and float are the same size, so the tensor should retain the pointer.
  // NB: this is only true when the use_count of the underlying Storage is 1; if
  // the underlying Storage is shared between multiple Tensors, we'll create a
  // new Storage when the data type changes.
  EXPECT_TRUE(tensor.mutable_data<float>() == (float*)ptr);
  EXPECT_TRUE(tensor.data<float>() == (const float*)ptr);
  EXPECT_TRUE(tensor.dtype().Match<float>());

  // at::Half is smaller, so it should still share the buffer.
  EXPECT_TRUE(tensor.mutable_data<at::Half>() == (at::Half*)ptr);
  EXPECT_TRUE(tensor.data<at::Half>() == (const at::Half*)ptr);
  EXPECT_TRUE(tensor.dtype().Match<at::Half>());

  // share the data with another tensor so that the pointer won't be reused
  // when we reallocate
  Tensor other_tensor = tensor.Alias();
  // but double is bigger, so it should allocate a new one
  auto* doubleptr = tensor.mutable_data<double>();
  EXPECT_TRUE(doubleptr != (double*)ptr);
  EXPECT_TRUE(doubleptr != nullptr);
  EXPECT_TRUE(tensor.data<double>() != nullptr);
  EXPECT_TRUE(tensor.dtype().Match<double>());
}

TEST(TensorNonTypedTest, NonDefaultConstructible) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);

  // this doesn't compile - good!
  // auto* ptr = tensor.mutable_data<BlobTestNonDefaultConstructible>();
  EXPECT_THROW(
      tensor.raw_mutable_data(
          TypeMeta::Make<BlobTestNonDefaultConstructible>()),
      EnforceNotMet);
}

template <typename T>
class TensorCPUTest : public ::testing::Test {};
template <typename T>
class TensorCPUDeathTest : public ::testing::Test {};
typedef ::testing::Types<char, int, float> TensorTypes;
TYPED_TEST_CASE(TensorCPUTest, TensorTypes);
TYPED_TEST_CASE(TensorCPUDeathTest, TensorTypes);

TYPED_TEST(TensorCPUTest, TensorInitializedEmpty) {
  Tensor tensor(CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.numel(), 0);
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  tensor.Resize(dims);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 2);
  EXPECT_EQ(tensor.dim32(1), 3);
  EXPECT_EQ(tensor.dim32(2), 5);
  EXPECT_EQ(tensor.numel(), 2 * 3 * 5);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
}

TYPED_TEST(TensorCPUTest, TensorInitializedNonEmpty) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 2);
  EXPECT_EQ(tensor.dim32(1), 3);
  EXPECT_EQ(tensor.dim32(2), 5);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
  dims[0] = 7;
  dims[1] = 11;
  dims[2] = 13;
  dims.push_back(17);
  tensor.Resize(dims);
  EXPECT_EQ(tensor.dim(), 4);
  EXPECT_EQ(tensor.dim32(0), 7);
  EXPECT_EQ(tensor.dim32(1), 11);
  EXPECT_EQ(tensor.dim32(2), 13);
  EXPECT_EQ(tensor.dim32(3), 17);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
}

TYPED_TEST(TensorCPUTest, TensorInitializedZeroDim) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 0;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 2);
  EXPECT_EQ(tensor.dim32(1), 0);
  EXPECT_EQ(tensor.dim32(2), 5);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() == nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() == nullptr);
}

TYPED_TEST(TensorCPUTest, TensorResizeZeroDim) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 2);
  EXPECT_EQ(tensor.dim32(1), 3);
  EXPECT_EQ(tensor.dim32(2), 5);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);

  dims[0] = 7;
  dims[1] = 0;
  dims[2] = 13;
  tensor.Resize(dims);
  EXPECT_EQ(tensor.numel(), 0);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 7);
  EXPECT_EQ(tensor.dim32(1), 0);
  EXPECT_EQ(tensor.dim32(2), 13);
  // the output value can be arbitrary, but the call to data() shouldn't crash
  tensor.mutable_data<TypeParam>();
  tensor.data<TypeParam>();
}

TYPED_TEST(TensorCPUTest, TensorInitializedScalar) {
  vector<int> dims;
  Tensor tensor(dims, CPU);
  EXPECT_EQ(tensor.dim(), 0);
  EXPECT_EQ(tensor.numel(), 1);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
}

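// Alias() returns a second Tensor view of the same Storage, so writes through
// either tensor are observable through the other.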
TYPED_TEST(TensorCPUTest, TensorAlias) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  Tensor other_tensor = tensor.Alias();
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
  EXPECT_TRUE(other_tensor.data<TypeParam>() != nullptr);
  EXPECT_EQ(tensor.data<TypeParam>(), other_tensor.data<TypeParam>());
  // Set one value, check the other
  for (int i = 0; i < tensor.numel(); ++i) {
    tensor.mutable_data<TypeParam>()[i] = i;
    EXPECT_EQ(other_tensor.data<TypeParam>()[i], i);
  }
}

TYPED_TEST(TensorCPUTest, TensorShareDataRawPointer) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  std::unique_ptr<TypeParam[]> raw_buffer(new TypeParam[2 * 3 * 5]);
  Tensor tensor(dims, CPU);
  tensor.ShareExternalPointer(raw_buffer.get());
  EXPECT_EQ(tensor.mutable_data<TypeParam>(), raw_buffer.get());
  EXPECT_EQ(tensor.data<TypeParam>(), raw_buffer.get());
  // Set one value, check the other
  for (int i = 0; i < tensor.numel(); ++i) {
    raw_buffer.get()[i] = i;
    EXPECT_EQ(tensor.data<TypeParam>()[i], i);
  }
}

TYPED_TEST(TensorCPUTest, TensorShareDataRawPointerWithMeta) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  std::unique_ptr<TypeParam[]> raw_buffer(new TypeParam[2 * 3 * 5]);
  Tensor tensor(dims, CPU);
  TypeMeta meta = TypeMeta::Make<TypeParam>();
  tensor.ShareExternalPointer(raw_buffer.get(), meta);
  EXPECT_EQ(tensor.mutable_data<TypeParam>(), raw_buffer.get());
  EXPECT_EQ(tensor.data<TypeParam>(), raw_buffer.get());
  // Set one value, check the other
  for (int i = 0; i < tensor.numel(); ++i) {
    raw_buffer.get()[i] = i;
    EXPECT_EQ(tensor.data<TypeParam>()[i], i);
  }
}

TYPED_TEST(TensorCPUTest, TensorAliasCanUseDifferentShapes) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  vector<int> alternate_dims(1);
  alternate_dims[0] = 2 * 3 * 5;
  Tensor tensor(dims, CPU);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  Tensor other_tensor = tensor.Alias();
  other_tensor.Resize(alternate_dims);
  EXPECT_EQ(other_tensor.dim(), 1);
  EXPECT_EQ(other_tensor.dim32(0), alternate_dims[0]);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
  EXPECT_TRUE(other_tensor.data<TypeParam>() != nullptr);
  EXPECT_EQ(tensor.data<TypeParam>(), other_tensor.data<TypeParam>());
  // Set one value, check the other
  for (int i = 0; i < tensor.numel(); ++i) {
    tensor.mutable_data<TypeParam>()[i] = i;
    EXPECT_EQ(other_tensor.data<TypeParam>()[i], i);
  }
}

TYPED_TEST(TensorCPUTest, NoLongerAliassAfterNumelChanges) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  Tensor other_tensor = tensor.Alias();
  EXPECT_EQ(tensor.data<TypeParam>(), other_tensor.data<TypeParam>());
  auto* old_pointer = other_tensor.data<TypeParam>();

  dims[0] = 7;
  tensor.Resize(dims);
  EXPECT_EQ(old_pointer, other_tensor.data<TypeParam>());
  EXPECT_NE(old_pointer, tensor.mutable_data<TypeParam>());
}

TYPED_TEST(TensorCPUTest, NoLongerAliasAfterFreeMemory) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  Tensor other_tensor = tensor.Alias();
  EXPECT_EQ(tensor.data<TypeParam>(), other_tensor.data<TypeParam>());
  auto* old_pointer = other_tensor.data<TypeParam>();

  tensor.FreeMemory();
  EXPECT_EQ(old_pointer, other_tensor.data<TypeParam>());
  EXPECT_NE(old_pointer, tensor.mutable_data<TypeParam>());
}

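// With caffe2_keep_on_shrink enabled, shrinking Resize() calls keep the
// existing allocation (up to caffe2_max_keep_on_shrink_memory bytes), so
// growing again within capacity does not reallocate.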
TYPED_TEST(TensorCPUTest, KeepOnShrink) {
  // Set flags (defaults)
  FLAGS_caffe2_keep_on_shrink = true;
  FLAGS_caffe2_max_keep_on_shrink_memory = LLONG_MAX;

  vector<int> dims{2, 3, 5};
  Tensor tensor(dims, CPU);
  TypeParam* ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(ptr != nullptr);
  // Expanding - will reallocate
  tensor.Resize(3, 4, 6);
  TypeParam* larger_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(larger_ptr != nullptr);

  // This check can fail when malloc() returns the same recently freed address
  // EXPECT_NE(ptr, larger_ptr);

  // Shrinking - will not reallocate
  tensor.Resize(1, 2, 4);
  TypeParam* smaller_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(smaller_ptr != nullptr);
  EXPECT_EQ(larger_ptr, smaller_ptr);
  // resize to 0 in the meantime;
  tensor.Resize(3, 0, 6);
  // Expanding but still under capacity - will not reallocate
  tensor.Resize(2, 3, 5);
  TypeParam* new_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(new_ptr != nullptr);
  EXPECT_EQ(larger_ptr, new_ptr);
}

TYPED_TEST(TensorCPUTest, MaxKeepOnShrink) {
  // Set flags
  FLAGS_caffe2_keep_on_shrink = true;
  FLAGS_caffe2_max_keep_on_shrink_memory = 8 * 4 * sizeof(TypeParam);

  vector<int> dims{1, 8, 8};
  Tensor tensor(dims, CPU);
  TypeParam* ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(ptr != nullptr);
  // Shrinking - will not reallocate
  tensor.Resize(1, 7, 8);
  TypeParam* smaller_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(smaller_ptr != nullptr);
  EXPECT_EQ(ptr, smaller_ptr);
  // Resizing to free more than the maximum shrink allowance should reallocate
  tensor.Resize(1, 1, 8);
  TypeParam* new_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(new_ptr != nullptr);

  // This check can fail when malloc() returns the same recently freed address
  // EXPECT_NE(ptr, new_ptr);

  // Restore default flags
  FLAGS_caffe2_max_keep_on_shrink_memory = LLONG_MAX;
}

TYPED_TEST(TensorCPUDeathTest, CannotAccessRawDataWhenEmpty) {
  Tensor tensor(CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.numel(), 0);
  ASSERT_ANY_THROW(tensor.raw_data());
}

TYPED_TEST(TensorCPUDeathTest, CannotAccessDataWhenEmpty) {
  Tensor tensor(CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.numel(), 0);
  ASSERT_ANY_THROW(tensor.data<TypeParam>());
}

TEST(TensorTest, TensorNonFundamentalType) {
  Tensor tensor(vector<int>{2, 3, 4}, CPU);
  EXPECT_TRUE(tensor.mutable_data<std::string>() != nullptr);
  const std::string* ptr = tensor.data<std::string>();
  for (int i = 0; i < tensor.numel(); ++i) {
    EXPECT_TRUE(ptr[i] == "");
  }
}

TEST(TensorTest, TensorNonFundamentalTypeClone) {
  Tensor tensor(vector<int>{2, 3, 4}, CPU);
  std::string* ptr = tensor.mutable_data<std::string>();
  EXPECT_TRUE(ptr != nullptr);
  for (int i = 0; i < tensor.numel(); ++i) {
    EXPECT_TRUE(ptr[i] == "");
    ptr[i] = "filled";
  }
  Tensor dst_tensor = tensor.Clone();
  const std::string* dst_ptr = dst_tensor.data<std::string>();
  for (int i = 0; i < dst_tensor.numel(); ++i) {
    EXPECT_TRUE(dst_ptr[i] == "filled");
  }
  // Change the original tensor
  for (int i = 0; i < tensor.numel(); ++i) {
    EXPECT_TRUE(ptr[i] == "filled");
    ptr[i] = "changed";
  }
  // Confirm that the cloned tensor is not affected
  for (int i = 0; i < dst_tensor.numel(); ++i) {
    EXPECT_TRUE(dst_ptr[i] == "filled");
  }
}

TEST(TensorTest, Tensor64BitDimension) {
  // Initialize a large tensor.
  int64_t large_number =
      static_cast<int64_t>(std::numeric_limits<int>::max()) + 1;
  Tensor tensor(vector<int64_t>{large_number}, CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.size(0), large_number);
  EXPECT_EQ(tensor.numel(), large_number);
  try {
    EXPECT_TRUE(tensor.mutable_data<char>() != nullptr);
  } catch (const EnforceNotMet& e) {
    string msg = e.what();
    size_t found = msg.find("posix_memalign");
    if (found != string::npos) {
      msg = msg.substr(0, msg.find('\n'));
      LOG(WARNING) << msg;
      LOG(WARNING) << "Out of memory issue with posix_memalign;\n";
      return;
    } else {
      throw e;
    }
  }
  EXPECT_EQ(tensor.nbytes(), large_number * sizeof(char));
  EXPECT_EQ(tensor.itemsize(), sizeof(char));
  // Try to go even larger, but this time we will not call mutable_data because
  // we do not have enough memory.
  tensor.Resize(large_number, 100);
  EXPECT_EQ(tensor.dim(), 2);
  EXPECT_EQ(tensor.size(0), large_number);
  EXPECT_EQ(tensor.size(1), 100);
  EXPECT_EQ(tensor.numel(), large_number * 100);
}

TEST(TensorTest, UndefinedTensor) {
  Tensor x;
  EXPECT_FALSE(x.defined());
}

TEST(TensorTest, CopyAndAssignment) {
  Tensor x(CPU);
  x.Resize(16, 17);
  testing::randomFill(x.template mutable_data<float>(), 16 * 17);
  EXPECT_TRUE(x.defined());

  Tensor y(x);
  Tensor z = x;
  testing::assertTensorEquals(x, y);
  testing::assertTensorEquals(x, z);
}

TEST(TensorDeathTest, CannotCastDownLargeDims) {
  int64_t large_number =
      static_cast<int64_t>(std::numeric_limits<int>::max()) + 1;
  Tensor tensor(vector<int64_t>{large_number}, CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.size(0), large_number);
  ASSERT_THROW(tensor.dim32(0), EnforceNotMet);
}

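// Stamps out two round-trip tests per data type: one for a filled 2x3 tensor
// and one for an empty 0x3 tensor. field_name names the repeated TensorProto
// field that holds the serialized values for that type.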
#define TEST_SERIALIZATION_WITH_TYPE(TypeParam, field_name)               \
  TEST(TensorTest, TensorSerialization_##TypeParam) {                     \
    Blob blob;                                                            \
    Tensor* tensor = BlobGetMutableTensor(&blob, CPU);                    \
    tensor->Resize(2, 3);                                                 \
    for (int i = 0; i < 6; ++i) {                                         \
      tensor->mutable_data<TypeParam>()[i] = static_cast<TypeParam>(i);   \
    }                                                                     \
    string serialized = SerializeBlob(blob, "test");                      \
    BlobProto proto;                                                      \
    CHECK(proto.ParseFromString(serialized));                             \
    EXPECT_EQ(proto.name(), "test");                                      \
    EXPECT_EQ(proto.type(), "Tensor");                                    \
    EXPECT_TRUE(proto.has_tensor());                                      \
    const TensorProto& tensor_proto = proto.tensor();                     \
    EXPECT_EQ(                                                            \
        tensor_proto.data_type(),                                         \
        TypeMetaToDataType(TypeMeta::Make<TypeParam>()));                 \
    EXPECT_EQ(tensor_proto.field_name##_size(), 6);                       \
    for (int i = 0; i < 6; ++i) {                                         \
      EXPECT_EQ(tensor_proto.field_name(i), static_cast<TypeParam>(i));   \
    }                                                                     \
    Blob new_blob;                                                        \
    EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));              \
    EXPECT_TRUE(BlobIsTensorType(new_blob, CPU));                         \
    const TensorCPU& new_tensor = blob.Get<TensorCPU>();                  \
    EXPECT_EQ(new_tensor.dim(), 2);                                       \
    EXPECT_EQ(new_tensor.size(0), 2);                                     \
    EXPECT_EQ(new_tensor.size(1), 3);                                     \
    for (int i = 0; i < 6; ++i) {                                         \
      EXPECT_EQ(                                                          \
          tensor->data<TypeParam>()[i], new_tensor.data<TypeParam>()[i]); \
    }                                                                     \
  }                                                                       \
                                                                          \
  TEST(EmptyTensorTest, TensorSerialization_##TypeParam) {                \
    Blob blob;                                                            \
    TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU);                 \
    tensor->Resize(0, 3);                                                 \
    tensor->mutable_data<TypeParam>();                                    \
    string serialized = SerializeBlob(blob, "test");                      \
    BlobProto proto;                                                      \
    CHECK(proto.ParseFromString(serialized));                             \
    EXPECT_EQ(proto.name(), "test");                                      \
    EXPECT_EQ(proto.type(), "Tensor");                                    \
    EXPECT_TRUE(proto.has_tensor());                                      \
    const TensorProto& tensor_proto = proto.tensor();                     \
    EXPECT_EQ(                                                            \
        tensor_proto.data_type(),                                         \
        TypeMetaToDataType(TypeMeta::Make<TypeParam>()));                 \
    EXPECT_EQ(tensor_proto.field_name##_size(), 0);                       \
    Blob new_blob;                                                        \
    EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));              \
    EXPECT_TRUE(BlobIsTensorType(new_blob, CPU));                         \
    const TensorCPU& new_tensor = blob.Get<TensorCPU>();                  \
    EXPECT_EQ(new_tensor.dim(), 2);                                       \
    EXPECT_EQ(new_tensor.size(0), 0);                                     \
    EXPECT_EQ(new_tensor.size(1), 3);                                     \
  }

TEST_SERIALIZATION_WITH_TYPE(bool, int32_data)
TEST_SERIALIZATION_WITH_TYPE(double, double_data)
TEST_SERIALIZATION_WITH_TYPE(float, float_data)
TEST_SERIALIZATION_WITH_TYPE(int, int32_data)
TEST_SERIALIZATION_WITH_TYPE(int8_t, int32_data)
TEST_SERIALIZATION_WITH_TYPE(int16_t, int32_data)
TEST_SERIALIZATION_WITH_TYPE(uint8_t, int32_data)
TEST_SERIALIZATION_WITH_TYPE(uint16_t, int32_data)
TEST_SERIALIZATION_WITH_TYPE(int64_t, int64_data)

TEST(TensorTest, TensorSerialization_CustomType) {
  Blob blob;
  TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU);
  tensor->Resize(2, 3);
  for (int i = 0; i < 6; ++i) {
    tensor->mutable_data<BlobTestFoo>()[i].val = i;
  }
  string serialized = SerializeBlob(blob, "test");
  BlobProto proto;
  CHECK(proto.ParseFromString(serialized));
  EXPECT_EQ(proto.name(), "test");
  EXPECT_EQ(proto.type(), "Tensor");
  Blob new_blob;
  EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));
  EXPECT_TRUE(BlobIsTensorType(new_blob, CPU));
  const TensorCPU& new_tensor = blob.Get<TensorCPU>();
  EXPECT_EQ(new_tensor.dim(), 2);
  EXPECT_EQ(new_tensor.size(0), 2);
  EXPECT_EQ(new_tensor.size(1), 3);
  for (int i = 0; i < 6; ++i) {
    EXPECT_EQ(
        new_tensor.data<BlobTestFoo>()[i].val,
        tensor->data<BlobTestFoo>()[i].val);
  }
}

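// at::Half round trip: depending on FLAGS_caffe2_serialize_fp16_as_bytes, the
// fp16 payload lands either in byte_data (two bytes per element) or in
// int32_data (one entry per element).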
TEST(TensorTest, Half) {
  const int64_t kSize = 3000000;
  Blob blob;
  TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU);
  tensor->Resize(kSize);
  for (int i = 0; i < tensor->numel(); ++i) {
    tensor->mutable_data<at::Half>()[i].x = i % 10000;
  }
  string serialized = SerializeBlob(blob, "test");
  BlobProto proto;
  CHECK(proto.ParseFromString(serialized));
  EXPECT_EQ(proto.name(), "test");
  EXPECT_EQ(proto.type(), "Tensor");
  EXPECT_TRUE(proto.has_tensor());
  const TensorProto& tensor_proto = proto.tensor();
  EXPECT_EQ(
      tensor_proto.data_type(), TypeMetaToDataType(TypeMeta::Make<at::Half>()));
  if (FLAGS_caffe2_serialize_fp16_as_bytes) {
    EXPECT_EQ(tensor_proto.byte_data().size(), 2 * kSize);
    for (int i = 0; i < kSize; ++i) {
      auto value = tensor->mutable_data<at::Half>()[i].x;
      auto low_bits = static_cast<char>(value & 0xff);
      auto high_bits = static_cast<char>(value >> 8);
      EXPECT_EQ(tensor_proto.byte_data()[2 * i], low_bits);
      EXPECT_EQ(tensor_proto.byte_data()[2 * i + 1], high_bits);
    }
  } else {
    EXPECT_EQ(tensor_proto.int32_data().size(), kSize);
  }
  Blob new_blob;
  EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));
  EXPECT_TRUE(BlobIsTensorType(new_blob, CPU));
  const TensorCPU& new_tensor = blob.Get<TensorCPU>();
  EXPECT_EQ(new_tensor.dim(), 1);
  EXPECT_EQ(new_tensor.size(0), kSize);
  for (int i = 0; i < kSize; ++i) {
    EXPECT_EQ(new_tensor.data<at::Half>()[i].x, i % 10000);
  }
}

TEST(TensorTest, TensorFactory) {
  Tensor a = empty({1, 2, 3}, at::device(CPU).dtype<float>());
  EXPECT_NE(a.data<float>(), nullptr);
  a.mutable_data<float>()[0] = 3.0;
  Tensor b = empty({1, 2, 3}, at::device(CPU).dtype<int>());
  EXPECT_NE(b.data<int>(), nullptr);
  b.mutable_data<int>()[0] = 3;
}

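// QTensor round trip: precision, scale, bias, signedness, and every stored bit
// must survive serialization.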
TEST(QTensorTest, QTensorSerialization) {
  Blob blob;
  QTensor<CPUContext>* qtensor = blob.GetMutable<QTensor<CPUContext>>();
  qtensor->SetPrecision(5);
  qtensor->SetSigned(false);
  qtensor->SetScale(1.337);
  qtensor->SetBias(-1.337);
  qtensor->Resize(std::vector<int>{2, 3});
  // "Randomly" set bits.
  srand(0);
  for (int i = 0; i < 6; ++i) {
    for (int j = 0; j < 5; ++j) {
      qtensor->SetBitAtIndex(j, i, rand() % 2);
    }
  }

  string serialized = SerializeBlob(blob, "test");
  BlobProto proto;
  CHECK(proto.ParseFromString(serialized));
  EXPECT_EQ(proto.name(), "test");
  EXPECT_EQ(proto.type(), "QTensor");
  EXPECT_TRUE(proto.has_qtensor());
  const QTensorProto& qtensor_proto = proto.qtensor();

  EXPECT_EQ(qtensor_proto.precision(), qtensor->precision());
  EXPECT_EQ(qtensor_proto.scale(), qtensor->scale());
  EXPECT_EQ(qtensor_proto.bias(), qtensor->bias());
  EXPECT_EQ(qtensor_proto.is_signed(), qtensor->is_signed());

  Blob new_blob;
  DeserializeBlob(serialized, &new_blob);
  EXPECT_TRUE(new_blob.IsType<QTensor<CPUContext>>());
  const QTensor<CPUContext>& new_qtensor = blob.Get<QTensor<CPUContext>>();
  EXPECT_EQ(new_qtensor.ndim(), 2);
  EXPECT_EQ(new_qtensor.dim32(0), 2);
  EXPECT_EQ(new_qtensor.dim32(1), 3);
  for (int i = 0; i < 6; ++i) {
    for (int j = 0; j < 5; ++j) {
      EXPECT_EQ(qtensor->GetBitAtIndex(j, i), new_qtensor.GetBitAtIndex(j, i));
    }
  }
}

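// An in-memory caffe2 DB implementation: VectorDB serves (key, value) pairs
// registered under a source name, and is used below to feed serialized chunks
// back into a Load operator.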
using StringMap = std::vector<std::pair<string, string>>;

class VectorCursor : public db::Cursor {
 public:
  explicit VectorCursor(StringMap* data) : data_(data) {
    pos_ = 0;
  }
  ~VectorCursor() override {}
  void Seek(const string& /* unused */) override {}
  void SeekToFirst() override {}
  void Next() override {
    ++pos_;
  }
  string key() override {
    return (*data_)[pos_].first;
  }
  string value() override {
    return (*data_)[pos_].second;
  }
  bool Valid() override {
    return pos_ < data_->size();
  }

 private:
  StringMap* data_ = nullptr;
  size_t pos_ = 0;
};

class VectorDB : public db::DB {
 public:
  VectorDB(const string& source, db::Mode mode)
      : DB(source, mode), name_(source) {}
  ~VectorDB() override {
    data_.erase(name_);
  }
  void Close() override {}
  std::unique_ptr<db::Cursor> NewCursor() override {
    return make_unique<VectorCursor>(getData());
  }
  std::unique_ptr<db::Transaction> NewTransaction() override {
    CAFFE_THROW("Not implemented");
  }
  static void registerData(const string& name, StringMap&& data) {
    std::lock_guard<std::mutex> guard(dataRegistryMutex_);
    data_[name] = std::move(data);
  }

 private:
  StringMap* getData() {
    auto it = data_.find(name_);
    CAFFE_ENFORCE(it != data_.end(), "Can't find ", name_);
    return &(it->second);
  }

 private:
  string name_;
  static std::mutex dataRegistryMutex_;
  static std::map<string, StringMap> data_;
};

std::mutex VectorDB::dataRegistryMutex_;
std::map<string, StringMap> VectorDB::data_;

REGISTER_CAFFE2_DB(vector_db, VectorDB);

template <typename TypeParam>
class TypedTensorTest : public ::testing::Test {};
typedef ::testing::
    Types<float, bool, double, int, int8_t, int16_t, uint8_t, uint16_t, int64_t>
        TensorDataTypes;
TYPED_TEST_CASE(TypedTensorTest, TensorDataTypes);

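// End-to-end chunked serialization: fill a large tensor, serialize it chunk by
// chunk into the in-memory vector_db, then load it back via a Load operator
// and verify every element.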
TYPED_TEST(TypedTensorTest, BigTensorSerialization) {
  int64_t d1 = 2;
  int64_t d2 = FLAGS_caffe2_test_big_tensor_size
      ? FLAGS_caffe2_test_big_tensor_size / d1
      : static_cast<int64_t>(std::numeric_limits<int>::max()) + 1;
  int64_t size = d1 * d2;
  string db_source = (string)std::tmpnam(nullptr);
  VLOG(1) << "db_source: " << db_source;

  {
    VLOG(1) << "Test begin";
    Blob blob;
    Tensor* tensor = BlobGetMutableTensor(&blob, CPU);
    VLOG(1) << "Allocating blob";
    tensor->Resize(d1, d2);
    auto mutableData = tensor->mutable_data<TypeParam>();
    VLOG(1) << "Filling out the blob";
    for (int64_t i = 0; i < size; ++i) {
      mutableData[i] = static_cast<TypeParam>(i);
    }
    StringMap data;
    std::mutex mutex;
    /*auto db = CreateDB("minidb", db_source, WRITE);*/
    auto acceptor = [&](const std::string& key, const std::string& value) {
      std::lock_guard<std::mutex> guard(mutex);
      /*db->NewTransaction()->Put(key, value);*/
      data.emplace_back(key, value);
    };
    SerializeBlob(blob, "test", acceptor);
    VectorDB::registerData(db_source, std::move(data));
    VLOG(1) << "finished writing to DB";
  }

  {
    DeviceOption option;
    option.set_device_type(PROTO_CPU);
    Argument db_type_arg = MakeArgument<string>("db_type", "vector_db");
    Argument absolute_path_arg = MakeArgument<bool>("absolute_path", true);
    Argument db_source_arg = MakeArgument<string>("db", db_source);
    auto op_def = CreateOperatorDef(
        "Load",
        "",
        std::vector<string>{},
        std::vector<string>({"test"}),
        std::vector<Argument>{db_type_arg, db_source_arg, absolute_path_arg},
        option,
        "DUMMY_ENGINE");
    Workspace ws;
    auto load_op = CreateOperator(op_def, &ws);
    EXPECT_TRUE(load_op != nullptr);
    VLOG(1) << "Running operator";

    load_op->Run();
    VLOG(1) << "Reading blob from workspace";
    auto new_blob = ws.GetBlob("test");
    EXPECT_TRUE(BlobIsTensorType(*new_blob, CPU));
    const auto& new_tensor = new_blob->Get<TensorCPU>();

    EXPECT_EQ(new_tensor.dim(), d1);
    EXPECT_EQ(new_tensor.size(0), d1);
    EXPECT_EQ(new_tensor.size(1), d2);
    for (int64_t i = 0; i < size; ++i) {
      EXPECT_EQ(static_cast<TypeParam>(i), new_tensor.data<TypeParam>()[i]);
    }
  }
}

struct DummyType {
  /* This struct is used to test serialization and deserialization of huge
   * blobs that are not tensors.
   */

  /* implicit */ DummyType(int n_chunks_init = 0) : n_chunks(n_chunks_init) {}
  std::string serialize(const std::string& name, const int32_t chunk_id) const {
    BlobProto blobProto;
    blobProto.set_name(name);
    blobProto.set_type("DummyType");
    std::string content("");
    blobProto.set_content(content);
    blobProto.set_content_num_chunks(n_chunks);
    blobProto.set_content_chunk_id(chunk_id);
    return blobProto.SerializeAsString();
  }
  void deserialize(const BlobProto& /* unused */) {
    ++n_chunks;
  }
  int n_chunks;
};

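// DummyType serializes itself as n_chunks separate BlobProtos, each emitted
// under its own per-chunk key; deserialization counts the chunks received, so
// a correct round trip restores n_chunks.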
class DummyTypeSerializer : public BlobSerializerBase {
 public:
  DummyTypeSerializer() {}
  ~DummyTypeSerializer() override {}
  void Serialize(
      const void* pointer,
      TypeMeta typeMeta,
      const string& name,
      SerializationAcceptor acceptor) override {
    CAFFE_ENFORCE(typeMeta.Match<DummyType>());
    const auto& container = *static_cast<const DummyType*>(pointer);
    for (int k = 0; k < container.n_chunks; ++k) {
      std::string serialized_chunk = container.serialize(name, k);
      acceptor(c10::str(name, kChunkIdSeparator, k), serialized_chunk);
    }
  }
};

class DummyTypeDeserializer : public BlobDeserializerBase {
 public:
  void Deserialize(const BlobProto& proto, Blob* blob) override {
    auto* container = blob->GetMutable<DummyType>();
    container->deserialize(proto);
  }
};
} // namespace

CAFFE_KNOWN_TYPE(DummyType);

namespace {
REGISTER_BLOB_SERIALIZER((TypeMeta::Id<DummyType>()), DummyTypeSerializer);
C10_REGISTER_TYPED_CLASS(
    BlobDeserializerRegistry,
    "DummyType",
    DummyTypeDeserializer);

TEST(ContentChunks, Serialization) {
  string db_source = (string)std::tmpnam(nullptr);
  VLOG(1) << "db_source: " << db_source;

  {
    VLOG(1) << "Test begin";
    Blob blob;
    DummyType* container = blob.GetMutable<DummyType>();
    VLOG(1) << "Allocating blob";
    container->n_chunks = 10;
    VLOG(1) << "Filling out the blob";
    StringMap data;
    std::mutex mutex;
    auto acceptor = [&](const std::string& key, const std::string& value) {
      std::lock_guard<std::mutex> guard(mutex);
      data.emplace_back(key, value);
    };
    SerializeBlob(blob, "test", acceptor);
    VectorDB::registerData(db_source, std::move(data));
    VLOG(1) << "finished writing to DB";
  }

  {
    DeviceOption option;
    option.set_device_type(PROTO_CPU);
    Argument db_type_arg = MakeArgument<string>("db_type", "vector_db");
    Argument absolute_path_arg = MakeArgument<bool>("absolute_path", true);
    Argument db_source_arg = MakeArgument<string>("db", db_source);
    auto op_def = CreateOperatorDef(
        "Load",
        "",
        std::vector<string>{},
        std::vector<string>({"test"}),
        std::vector<Argument>{db_type_arg, db_source_arg, absolute_path_arg},
        option,
        "DUMMY_ENGINE");
    Workspace ws;
    auto load_op = CreateOperator(op_def, &ws);
    EXPECT_TRUE(load_op != nullptr);
    VLOG(1) << "Running operator";

    load_op->Run();
    VLOG(1) << "Reading blob from workspace";
    auto new_blob = ws.GetBlob("test");
    EXPECT_TRUE(new_blob->IsType<DummyType>());
    const auto& container = new_blob->Get<DummyType>();
    EXPECT_EQ(container.n_chunks, 10);
  }
}

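// The chunk_size argument of SerializeBlob controls how many acceptor calls a
// blob produces: `size` elements per chunk gives one call, `size / 2 + 1`
// gives two, and kNoChunking forces a single chunk.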
TEST(CustomChunkSize, BigTensorSerialization) {
  int64_t d1 = 2;
  int64_t d2 = FLAGS_caffe2_test_big_tensor_size
      ? FLAGS_caffe2_test_big_tensor_size / d1
      : static_cast<int64_t>(std::numeric_limits<int>::max()) + 1;
  int64_t size = d1 * d2;

  Blob blob;
  TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU);
  tensor->Resize(d1, d2);
  tensor->mutable_data<float>();
  std::mutex mutex;
  int counter = 0;
  auto acceptor = [&](const std::string& /*key*/,
                      const std::string& /*value*/) {
    std::lock_guard<std::mutex> guard(mutex);
    counter++;
  };
  SerializeBlob(blob, "test", acceptor, size);
  EXPECT_EQ(counter, 1);

  counter = 0;
  SerializeBlob(blob, "test", acceptor, (size / 2) + 1);
  EXPECT_EQ(counter, 2);

  counter = 0;
  SerializeBlob(blob, "test", acceptor, kNoChunking);
  EXPECT_EQ(counter, 1);
}

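// 2 * 3 * 5 = 30 elements at 3 bits each is 90 bits, which rounds up to the 12
// bytes asserted below.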
TEST(QTensor, QTensorSizingTest) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  QTensor<CPUContext> qtensor(dims, 3);
  EXPECT_TRUE(qtensor.mutable_data() != nullptr);
  EXPECT_EQ(qtensor.nbytes(), 12);
  EXPECT_EQ(qtensor.size(), 30);
}

TEST(BlobTest, CastingMessage) {
  Blob b;
  b.GetMutable<BlobTestFoo>();
  b.Get<BlobTestFoo>();
  try {
    b.Get<BlobTestBar>();
    FAIL() << "Should have thrown";
  } catch (const EnforceNotMet& e) {
    string msg = e.what_without_backtrace();
    LOG(INFO) << msg;
    EXPECT_NE(msg.find("BlobTestFoo"), std::string::npos) << msg;
    EXPECT_NE(msg.find("BlobTestBar"), std::string::npos) << msg;
  }
}

TEST(TensorConstruction, UninitializedCopyTest) {
  Tensor x(CPU);
  Tensor y(x, CPU);
  Tensor z = x.Clone();
  EXPECT_FALSE(x.dtype_initialized());
  EXPECT_FALSE(y.dtype_initialized());
  LOG(INFO) << "z.size()" << z.numel();
  EXPECT_FALSE(z.dtype_initialized());
}

TEST(TensorConstruction, CopyConstructorTest) {
  Tensor x(CPU);
  x.Resize(5);
  x.mutable_data<float>()[0] = 1;
  Tensor y = x.Clone();
  Tensor z(x, CPU);

  EXPECT_EQ(*x.data<float>(), 1);
  EXPECT_EQ(*y.data<float>(), 1);
  EXPECT_EQ(*z.data<float>(), 1);
  x.mutable_data<float>()[0] = 5;
  EXPECT_EQ(*x.data<float>(), 5);
  EXPECT_EQ(*y.data<float>(), 1);
  EXPECT_EQ(*z.data<float>(), 1);
}

TEST(TensorConstruction, MoveAssignmentOpTest) {
  Tensor x(CPU);
  x.Resize(5);
  x.mutable_data<float>()[0] = 1;
  Tensor y(CPU);
  y = std::move(x);

  EXPECT_EQ(*y.data<float>(), 1);
}

TEST(TensorSerialization, MistakenlySerializingDtypeUninitializedTensor) {
  // This test preserves a legacy behavior in which dtype-uninitialized tensors
  // can go through serialization. We want to kill this behavior; when that's
  // done, remove this test
  Blob blob;
  Tensor* x = BlobGetMutableTensor(&blob, CPU);
  x->Resize(0);
  string output;
  SerializeBlob(
      blob,
      "foo",
      [&output](const string& /*blobName*/, const std::string& data) {
        output = data;
      });
  BlobProto b;
  CHECK(b.ParseFromString(output));
  LOG(INFO) << "serialized proto: " << b.DebugString();

  Blob new_blob;
  // Deserializing an empty Tensor gives a {0}-dim, float CPU Tensor
  DeserializeBlob(output, &new_blob);
  const Tensor& new_tensor = new_blob.Get<Tensor>();
  LOG(INFO) << "tensor " << new_tensor.DebugString();
  EXPECT_TRUE(new_tensor.dtype_initialized());
  LOG(INFO) << "dtype:" << new_tensor.dtype();
  EXPECT_EQ(0, new_tensor.numel());
  EXPECT_EQ(1, new_tensor.dim());
}

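// Builds a BlobProto whose TensorProto carries numEl random values of the
// given data type in the int32_data field.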
static caffe2::BlobProto CreateProtoWithInt32Data(
    const caffe2::TensorProto::DataType& dataType,
    size_t numEl,
    bool useCached = true) {
  static std::map<caffe2::TensorProto::DataType, caffe2::BlobProto> protos;
  if (useCached && protos.count(dataType)) {
    return protos[dataType];
  }
  caffe2::BlobProto proto;
  proto.set_type("Tensor");
  auto tensor = proto.mutable_tensor();
  tensor->add_dims(numEl);
  tensor->add_dims(1);
  tensor->set_data_type(dataType);
  tensor->set_name("test_feature");
  tensor->mutable_device_detail()->set_device_type(0);
  tensor->mutable_segment()->set_begin(0);
  tensor->mutable_segment()->set_end(numEl);
  for (size_t i = 0; i < numEl; ++i) {
    int32_t data = 0;
    switch (dataType) {
      case caffe2::TensorProto_DataType_INT32:
        data = static_cast<int32_t>(rand() % 0xffffffff);
        break;
      case caffe2::TensorProto_DataType_BOOL:
        data = static_cast<uint8_t>(rand() % 0x00000001);
        break;
      case caffe2::TensorProto_DataType_UINT8:
        data = static_cast<uint8_t>(rand() % 0x000000ff);
        break;
      case caffe2::TensorProto_DataType_INT8:
        data = static_cast<int8_t>(rand() % 0x000000ff);
        break;
      case caffe2::TensorProto_DataType_UINT16:
        data = static_cast<uint16_t>(rand() % 0x0000ffff);
        break;
      case caffe2::TensorProto_DataType_INT16:
        data = static_cast<int16_t>(rand() % 0x0000ffff);
        break;
      case caffe2::TensorProto_DataType_FLOAT16:
        data = static_cast<uint16_t>(rand() % 0x0000ffff);
        break;
      default:
        continue;
    }
    tensor->add_int32_data(data);
  }
  protos[dataType] = proto;
  return proto;
}

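// Deserializes an int32_data-holder proto, reserializes it with the byte_data
// holder enabled, then reserializes that blob as int32_data again and checks
// that the values are unchanged.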
void TestDataType(
    const caffe2::TensorProto::DataType& dataType,
    std::string dataTypeName) {
  LOG(INFO) << dataTypeName;
  FLAGS_caffe2_serialize_using_bytes_as_holder = true;
  size_t numEl = 1000;
  // Proto with int32
  auto protoInt32 = CreateProtoWithInt32Data(dataType, numEl, false);
  caffe2::Blob blobInt32;
  DeserializeBlob(protoInt32, &blobInt32);
  auto serializedStr = SerializeBlob(blobInt32, protoInt32.name());
  caffe2::BlobProto protoBytes;
  // Proto with bytes
  protoBytes.ParseFromString(serializedStr);
  caffe2::Blob blobBytes;
  DeserializeBlob(protoBytes, &blobBytes);
  FLAGS_caffe2_serialize_using_bytes_as_holder = false;
  // Proto with int32 from proto with bytes
  protoBytes.ParseFromString(SerializeBlob(blobBytes, protoBytes.name()));
  EXPECT_EQ(numEl, protoInt32.tensor().int32_data_size());
  EXPECT_EQ(numEl, protoBytes.tensor().int32_data_size());
  for (int i = 0; i < numEl; ++i) {
    EXPECT_EQ(
        protoInt32.tensor().int32_data(i), protoBytes.tensor().int32_data(i));
  }
}

TEST(TensorSerialization, TestCorrectness) {
  FLAGS_caffe2_serialize_using_bytes_as_holder = true;
  TestDataType(
      caffe2::TensorProto_DataType_INT32, "TensorProto_DataType_INT32");
  TestDataType(caffe2::TensorProto_DataType_BOOL, "TensorProto_DataType_BOOL");
  TestDataType(
      caffe2::TensorProto_DataType_UINT8, "TensorProto_DataType_UINT8");
  TestDataType(caffe2::TensorProto_DataType_INT8, "TensorProto_DataType_INT8");
  TestDataType(
      caffe2::TensorProto_DataType_UINT16, "TensorProto_DataType_UINT16");
  TestDataType(
      caffe2::TensorProto_DataType_INT16, "TensorProto_DataType_INT16");
  TestDataType(
      caffe2::TensorProto_DataType_FLOAT16, "TensorProto_DataType_FLOAT16");
}

} // namespace
} // namespace caffe2