#include <gtest/gtest.h>

#include <climits>
#include <cstdio>
#include <cstdlib>
#include <limits>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <utility>
#include <vector>

#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/db.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/qtensor.h"
#include "caffe2/core/qtensor_serialization.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/test_utils.h"
#include "caffe2/core/types.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"

C10_DEFINE_int64(caffe2_test_big_tensor_size, 100000000, "");
C10_DECLARE_int(caffe2_tensor_chunk_size);
C10_DECLARE_bool(caffe2_serialize_fp16_as_bytes);
C10_DECLARE_bool(caffe2_serialize_using_bytes_as_holder);

namespace caffe2 {
using namespace ::caffe2::db;

namespace {

class BlobTestFoo {
 public:
  int32_t val;
};
class BlobTestBar {};

class BlobTestNonDefaultConstructible {
 public:
  BlobTestNonDefaultConstructible() = delete;
  BlobTestNonDefaultConstructible(int x) : val(x) {}
  int32_t val;
};

} // namespace

CAFFE_KNOWN_TYPE(BlobTestFoo);
CAFFE_KNOWN_TYPE(BlobTestBar);
CAFFE_KNOWN_TYPE(BlobTestNonDefaultConstructible);
class BlobTestFooSerializer : public BlobSerializerBase {
 public:
  BlobTestFooSerializer() {}
  ~BlobTestFooSerializer() override {}
  /**
   * Serializes a Blob holding a BlobTestFoo. For simplicity the 4-byte
   * payload is stored directly in the proto's content string.
   */
  void Serialize(
      const void* pointer,
      TypeMeta typeMeta,
      const string& name,
      SerializationAcceptor acceptor) override {
    CAFFE_ENFORCE(typeMeta.Match<BlobTestFoo>());

    BlobProto blob_proto;
    blob_proto.set_name(name);
    blob_proto.set_type("BlobTestFoo");
    blob_proto.set_content(std::string(
        reinterpret_cast<const char*>(
            &static_cast<const BlobTestFoo*>(pointer)->val),
        sizeof(int32_t)));
    acceptor(name, blob_proto.SerializeAsString());
  }
};

class BlobTestFooDeserializer : public BlobDeserializerBase {
 public:
  void Deserialize(const BlobProto& proto, Blob* blob) override {
    blob->GetMutable<BlobTestFoo>()->val =
        reinterpret_cast<const int32_t*>(proto.content().c_str())[0];
  }
};

REGISTER_BLOB_SERIALIZER((TypeMeta::Id<BlobTestFoo>()), BlobTestFooSerializer);
REGISTER_BLOB_DESERIALIZER(BlobTestFoo, BlobTestFooDeserializer);
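// Editor's sketch (not part of the original suite): with the registrations
// above in effect, SerializeBlob dispatches on TypeMeta::Id<BlobTestFoo>()
// and DeserializeBlob dispatches on the proto's "BlobTestFoo" type string,
// so a round trip should restore `val`.
TEST(BlobTest, BlobTestFooRoundTripSketch) {
  Blob blob;
  blob.GetMutable<BlobTestFoo>()->val = 123;
  const string serialized = SerializeBlob(blob, "sketch");
  Blob restored;
  DeserializeBlob(serialized, &restored);
  EXPECT_EQ(restored.Get<BlobTestFoo>().val, 123);
}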
TEST(BlobTest, Blob) {
  Blob blob;

  blob.GetMutable<int>();
  EXPECT_TRUE(blob.IsType<int>());
  EXPECT_FALSE(blob.IsType<BlobTestFoo>());
  EXPECT_FALSE(BlobIsTensorType(blob, CPU));

  blob.GetMutable<BlobTestFoo>();
  EXPECT_TRUE(blob.IsType<BlobTestFoo>());
  EXPECT_FALSE(blob.IsType<int>());
  EXPECT_FALSE(BlobIsTensorType(blob, CPU));

  BlobGetMutableTensor(&blob, CPU);
  EXPECT_TRUE(BlobIsTensorType(blob, CPU));
  EXPECT_FALSE(blob.IsType<BlobTestFoo>());
  EXPECT_FALSE(blob.IsType<int>());
}
TEST(BlobTest, BlobUninitialized) {
  Blob blob;
  // Accessing an uninitialized blob should throw.
  ASSERT_THROW(blob.Get<int>(), EnforceNotMet);
}
TEST(BlobTest, BlobWrongType) {
  Blob blob;
  blob.GetMutable<BlobTestFoo>();
  EXPECT_TRUE(blob.IsType<BlobTestFoo>());
  EXPECT_FALSE(blob.IsType<int>());
  // Get() should only be called with the stored type.
  EXPECT_NE(&blob.Get<BlobTestFoo>(), nullptr);
  ASSERT_THROW(blob.Get<int>(), EnforceNotMet);
}
TEST(BlobTest, BlobReset) {
  Blob blob;
  std::unique_ptr<BlobTestFoo> foo(new BlobTestFoo());
  // Reset() takes ownership of the released pointer.
  EXPECT_TRUE(blob.Reset(foo.release()) != nullptr);
}
TEST(BlobTest, BlobMove) {
  Blob blob1;
  std::unique_ptr<BlobTestFoo> foo(new BlobTestFoo());
  auto* fooPtr = foo.get();
  EXPECT_TRUE(blob1.Reset(foo.release()) != nullptr);
  Blob blob2;
  blob2 = std::move(blob1);
  // The moved-from blob no longer holds the object ...
  ASSERT_THROW(blob1.Get<BlobTestFoo>(), EnforceNotMet);
  // ... and both move-assignment and move-construction preserve the pointer.
  EXPECT_EQ(&blob2.Get<BlobTestFoo>(), fooPtr);
  Blob blob3{std::move(blob2)};
  EXPECT_EQ(&blob3.Get<BlobTestFoo>(), fooPtr);
}
TEST(BlobTest, BlobNonConstructible) {
  Blob blob;
  ASSERT_THROW(blob.Get<BlobTestNonDefaultConstructible>(), EnforceNotMet);
  // GetMutable() cannot default-construct the object, so the OrNull variant
  // returns nullptr instead.
  EXPECT_FALSE(
      blob.GetMutableOrNull<BlobTestNonDefaultConstructible>() != nullptr);
  EXPECT_TRUE(
      blob.Reset(new BlobTestNonDefaultConstructible(42)) != nullptr);
  ASSERT_NO_THROW(blob.Get<BlobTestNonDefaultConstructible>());
  ASSERT_TRUE(
      blob.GetMutableOrNull<BlobTestNonDefaultConstructible>() != nullptr);
  EXPECT_EQ(blob.Get<BlobTestNonDefaultConstructible>().val, 42);
  blob.GetMutableOrNull<BlobTestNonDefaultConstructible>()->val = 37;
  EXPECT_EQ(blob.Get<BlobTestNonDefaultConstructible>().val, 37);
}
TEST(BlobTest, BlobShareExternalPointer) {
  Blob blob;
  std::unique_ptr<BlobTestFoo> foo(new BlobTestFoo());
  EXPECT_EQ(blob.ShareExternal<BlobTestFoo>(foo.get()), foo.get());
  EXPECT_TRUE(blob.IsType<BlobTestFoo>());
  // Reset the blob before `foo` goes out of scope; the blob never owned it.
  blob.Reset();
}
TEST(BlobTest, BlobShareExternalObject) {
  Blob blob;
  BlobTestFoo foo;
  EXPECT_EQ(blob.ShareExternal<BlobTestFoo>(&foo), &foo);
  EXPECT_TRUE(blob.IsType<BlobTestFoo>());
  // Reset the blob before `foo` goes out of scope; the blob never owned it.
  blob.Reset();
}
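// Editor's sketch (not in the original suite): the ownership contrast in one
// place -- Reset() hands a heap allocation to the Blob, while ShareExternal()
// leaves ownership with the caller, so the shared object must outlive the
// Blob's use of it.
TEST(BlobTest, OwnershipContrastSketch) {
  Blob owned;
  owned.Reset(new BlobTestFoo()); // the Blob will delete this pointer
  EXPECT_TRUE(owned.IsType<BlobTestFoo>());

  Blob shared;
  BlobTestFoo stack_foo; // caller-owned; must outlive `shared`'s use of it
  shared.ShareExternal<BlobTestFoo>(&stack_foo);
  EXPECT_TRUE(shared.IsType<BlobTestFoo>());
  shared.Reset(); // detach before stack_foo is destroyed
}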
TEST(BlobTest, StringSerialization) {
  const std::string kTestString = "Hello world?";
  Blob blob;
  *blob.GetMutable<std::string>() = kTestString;

  string serialized = SerializeBlob(blob, "test");
  BlobProto proto;
  CHECK(proto.ParseFromString(serialized));
  EXPECT_EQ(proto.name(), "test");
  EXPECT_EQ(proto.type(), "std::string");
  EXPECT_FALSE(proto.has_tensor());
  EXPECT_EQ(proto.content(), kTestString);
}
TEST(TensorNonTypedTest, TensorChangeType) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);

  auto* ptr = tensor.mutable_data<int>();
  EXPECT_TRUE(ptr != nullptr);
  EXPECT_TRUE(tensor.data<int>() != nullptr);
  EXPECT_TRUE(tensor.dtype().Match<int>());

  // int and float have the same itemsize, so switching the dtype reuses the
  // existing allocation.
  EXPECT_TRUE(tensor.mutable_data<float>() == (float*)ptr);
  EXPECT_TRUE(tensor.data<float>() == (const float*)ptr);
  EXPECT_TRUE(tensor.dtype().Match<float>());

  // Share the data with another tensor so the buffer cannot simply be
  // recycled when we reallocate.
  Tensor other_tensor = tensor.Alias();
  // double is bigger, so switching to it must allocate a new buffer.
  auto* doubleptr = tensor.mutable_data<double>();
  EXPECT_TRUE(doubleptr != (double*)ptr);
  EXPECT_TRUE(doubleptr != nullptr);
  EXPECT_TRUE(tensor.data<double>() != nullptr);
  EXPECT_TRUE(tensor.dtype().Match<double>());
}
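// Note on the test above: mutable_data<T>() with a different T reuses the
// existing buffer when the new itemsize fits the old allocation (int ->
// float, both 4 bytes) and reallocates when it does not (-> double). Taking
// an Alias first keeps the old buffer alive so the reallocation cannot hand
// back the same address.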
TEST(TensorNonTypedTest, NonDefaultConstructible) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);

  // Raw allocation of a type that cannot be default-constructed must throw.
  EXPECT_THROW(
      tensor.raw_mutable_data(
          TypeMeta::Make<BlobTestNonDefaultConstructible>()),
      EnforceNotMet);
}
template <typename T>
class TensorCPUTest : public ::testing::Test {};
template <typename T>
class TensorCPUDeathTest : public ::testing::Test {};
typedef ::testing::Types<char, int, float> TensorTypes;
TYPED_TEST_CASE(TensorCPUTest, TensorTypes);
TYPED_TEST_CASE(TensorCPUDeathTest, TensorTypes);
TYPED_TEST(TensorCPUTest, TensorInitializedEmpty) {
  Tensor tensor(CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.numel(), 0);
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  tensor.Resize(dims);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 2);
  EXPECT_EQ(tensor.dim32(1), 3);
  EXPECT_EQ(tensor.dim32(2), 5);
  EXPECT_EQ(tensor.numel(), 2 * 3 * 5);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
}
TYPED_TEST(TensorCPUTest, TensorInitializedNonEmpty) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 2);
  EXPECT_EQ(tensor.dim32(1), 3);
  EXPECT_EQ(tensor.dim32(2), 5);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);

  dims.resize(4);
  dims[0] = 7;
  dims[1] = 11;
  dims[2] = 13;
  dims[3] = 17;
  tensor.Resize(dims);
  EXPECT_EQ(tensor.dim(), 4);
  EXPECT_EQ(tensor.dim32(0), 7);
  EXPECT_EQ(tensor.dim32(1), 11);
  EXPECT_EQ(tensor.dim32(2), 13);
  EXPECT_EQ(tensor.dim32(3), 17);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
}
TYPED_TEST(TensorCPUTest, TensorInitializedZeroDim) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 0;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 2);
  EXPECT_EQ(tensor.dim32(1), 0);
  EXPECT_EQ(tensor.dim32(2), 5);
  // A zero-element tensor has no backing storage.
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() == nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() == nullptr);
}
TYPED_TEST(TensorCPUTest, TensorResizeZeroDim) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 2);
  EXPECT_EQ(tensor.dim32(1), 3);
  EXPECT_EQ(tensor.dim32(2), 5);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);

  dims[0] = 7;
  dims[1] = 0;
  dims[2] = 13;
  tensor.Resize(dims);
  EXPECT_EQ(tensor.numel(), 0);
  EXPECT_EQ(tensor.dim(), 3);
  EXPECT_EQ(tensor.dim32(0), 7);
  EXPECT_EQ(tensor.dim32(1), 0);
  EXPECT_EQ(tensor.dim32(2), 13);
  // The returned pointer is arbitrary for a zero-element tensor, but the
  // call itself must not throw.
  tensor.mutable_data<TypeParam>();
}
TYPED_TEST(TensorCPUTest, TensorInitializedScalar) {
  vector<int> dims;
  Tensor tensor(dims, CPU);
  EXPECT_EQ(tensor.dim(), 0);
  EXPECT_EQ(tensor.numel(), 1);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
}
TYPED_TEST(TensorCPUTest, TensorAlias) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  Tensor other_tensor = tensor.Alias();
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
  EXPECT_TRUE(other_tensor.data<TypeParam>() != nullptr);
  EXPECT_EQ(tensor.data<TypeParam>(), other_tensor.data<TypeParam>());
  // Writes through one tensor are visible through the other.
  for (int i = 0; i < tensor.numel(); ++i) {
    tensor.mutable_data<TypeParam>()[i] = i;
    EXPECT_EQ(other_tensor.data<TypeParam>()[i], i);
  }
}
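// Note on the test above: Alias() returns a tensor sharing the same storage,
// so writes through either tensor are visible through the other until one of
// them reallocates (see the NoLongerAlias tests below).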
TYPED_TEST(TensorCPUTest, TensorShareDataRawPointer) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  std::unique_ptr<TypeParam[]> raw_buffer(new TypeParam[2 * 3 * 5]);
  Tensor tensor(dims, CPU);
  tensor.ShareExternalPointer(raw_buffer.get());
  EXPECT_EQ(tensor.mutable_data<TypeParam>(), raw_buffer.get());
  EXPECT_EQ(tensor.data<TypeParam>(), raw_buffer.get());
  // Writes to the raw buffer are visible through the tensor.
  for (int i = 0; i < tensor.numel(); ++i) {
    raw_buffer.get()[i] = i;
    EXPECT_EQ(tensor.data<TypeParam>()[i], i);
  }
}
TYPED_TEST(TensorCPUTest, TensorShareDataRawPointerWithMeta) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  std::unique_ptr<TypeParam[]> raw_buffer(new TypeParam[2 * 3 * 5]);
  Tensor tensor(dims, CPU);
  TypeMeta meta = TypeMeta::Make<TypeParam>();
  tensor.ShareExternalPointer(raw_buffer.get(), meta);
  EXPECT_EQ(tensor.mutable_data<TypeParam>(), raw_buffer.get());
  EXPECT_EQ(tensor.data<TypeParam>(), raw_buffer.get());
  for (int i = 0; i < tensor.numel(); ++i) {
    raw_buffer.get()[i] = i;
    EXPECT_EQ(tensor.data<TypeParam>()[i], i);
  }
}
TYPED_TEST(TensorCPUTest, TensorAliasCanUseDifferentShapes) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  vector<int> alternate_dims(1);
  alternate_dims[0] = 2 * 3 * 5;
  Tensor tensor(dims, CPU);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  Tensor other_tensor = tensor.Alias();
  other_tensor.Resize(alternate_dims);
  EXPECT_EQ(other_tensor.dim(), 1);
  EXPECT_EQ(other_tensor.dim32(0), alternate_dims[0]);
  EXPECT_TRUE(tensor.data<TypeParam>() != nullptr);
  EXPECT_TRUE(other_tensor.data<TypeParam>() != nullptr);
  EXPECT_EQ(tensor.data<TypeParam>(), other_tensor.data<TypeParam>());
  // Reshaping the alias (same numel) does not break the sharing.
  for (int i = 0; i < tensor.numel(); ++i) {
    tensor.mutable_data<TypeParam>()[i] = i;
    EXPECT_EQ(other_tensor.data<TypeParam>()[i], i);
  }
}
TYPED_TEST(TensorCPUTest, NoLongerAliasAfterNumelChanges) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  Tensor other_tensor = tensor.Alias();
  EXPECT_EQ(tensor.data<TypeParam>(), other_tensor.data<TypeParam>());
  auto* old_pointer = other_tensor.data<TypeParam>();

  dims[0] = 7;
  tensor.Resize(dims);
  // After numel changes, the alias keeps the old buffer while the original
  // tensor reallocates.
  EXPECT_EQ(old_pointer, other_tensor.data<TypeParam>());
  EXPECT_NE(old_pointer, tensor.mutable_data<TypeParam>());
}
TYPED_TEST(TensorCPUTest, NoLongerAliasAfterFreeMemory) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  Tensor tensor(dims, CPU);
  EXPECT_TRUE(tensor.mutable_data<TypeParam>() != nullptr);
  Tensor other_tensor = tensor.Alias();
  EXPECT_EQ(tensor.data<TypeParam>(), other_tensor.data<TypeParam>());
  auto* old_pointer = other_tensor.data<TypeParam>();

  tensor.FreeMemory();
  // Freeing the original's memory leaves the alias intact but detaches the
  // original, which reallocates on the next mutable access.
  EXPECT_EQ(old_pointer, other_tensor.data<TypeParam>());
  EXPECT_NE(old_pointer, tensor.mutable_data<TypeParam>());
}
TYPED_TEST(TensorCPUTest, KeepOnShrink) {
  // Set the flags to their default values.
  FLAGS_caffe2_keep_on_shrink = true;
  FLAGS_caffe2_max_keep_on_shrink_memory = LLONG_MAX;

  vector<int> dims{2, 3, 5};
  Tensor tensor(dims, CPU);
  TypeParam* ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(ptr != nullptr);
  // Expanding the tensor reallocates.
  tensor.Resize(3, 4, 6);
  TypeParam* larger_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(larger_ptr != nullptr);

  // Shrinking keeps the existing (larger) buffer.
  tensor.Resize(1, 2, 4);
  TypeParam* smaller_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(smaller_ptr != nullptr);
  EXPECT_EQ(larger_ptr, smaller_ptr);

  // Growing back while still under the retained capacity does not
  // reallocate either.
  tensor.Resize(2, 3, 5);
  TypeParam* new_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(new_ptr != nullptr);
  EXPECT_EQ(larger_ptr, new_ptr);
}
TYPED_TEST(TensorCPUTest, MaxKeepOnShrink) {
  // Set the flags: cap the memory we may waste by keeping a shrunk buffer.
  FLAGS_caffe2_keep_on_shrink = true;
  FLAGS_caffe2_max_keep_on_shrink_memory = 8 * 4 * sizeof(TypeParam);

  vector<int> dims{1, 8, 8};
  Tensor tensor(dims, CPU);
  TypeParam* ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(ptr != nullptr);
  // Shrinking within the allowed budget keeps the buffer.
  tensor.Resize(1, 7, 8);
  TypeParam* smaller_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(smaller_ptr != nullptr);
  EXPECT_EQ(ptr, smaller_ptr);
  // Shrinking past the budget reallocates.
  tensor.Resize(1, 1, 8);
  TypeParam* new_ptr = tensor.mutable_data<TypeParam>();
  EXPECT_TRUE(new_ptr != nullptr);

  // Restore the default flag value.
  FLAGS_caffe2_max_keep_on_shrink_memory = LLONG_MAX;
}
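// Note on the two tests above: with FLAGS_caffe2_keep_on_shrink set, a
// Resize() to a smaller numel keeps the existing allocation, but only while
// the memory wasted by keeping it stays within
// FLAGS_caffe2_max_keep_on_shrink_memory; past that budget the buffer is
// reallocated.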
TYPED_TEST(TensorCPUDeathTest, CannotAccessRawDataWhenEmpty) {
  Tensor tensor(CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.numel(), 0);
  ASSERT_ANY_THROW(tensor.raw_data());
}
TYPED_TEST(TensorCPUDeathTest, CannotAccessDataWhenEmpty) {
  Tensor tensor(CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.numel(), 0);
  ASSERT_ANY_THROW(tensor.data<TypeParam>());
}
TEST(TensorTest, TensorNonFundamentalType) {
  Tensor tensor(vector<int>{2, 3, 4}, CPU);
  EXPECT_TRUE(tensor.mutable_data<std::string>() != nullptr);
  const std::string* ptr = tensor.data<std::string>();
  // Non-fundamental elements are value-initialized.
  for (int i = 0; i < tensor.numel(); ++i) {
    EXPECT_TRUE(ptr[i] == "");
  }
}
TEST(TensorTest, TensorNonFundamentalTypeClone) {
  Tensor tensor(vector<int>{2, 3, 4}, CPU);
  std::string* ptr = tensor.mutable_data<std::string>();
  EXPECT_TRUE(ptr != nullptr);
  for (int i = 0; i < tensor.numel(); ++i) {
    EXPECT_TRUE(ptr[i] == "");
    ptr[i] = "filled";
  }
  Tensor dst_tensor = tensor.Clone();
  const std::string* dst_ptr = dst_tensor.data<std::string>();
  for (int i = 0; i < dst_tensor.numel(); ++i) {
    EXPECT_TRUE(dst_ptr[i] == "filled");
  }
  // Clone is a deep copy: clearing the source must not affect the clone.
  for (int i = 0; i < tensor.numel(); ++i) {
    EXPECT_TRUE(ptr[i] == "filled");
    ptr[i] = "";
  }
  for (int i = 0; i < dst_tensor.numel(); ++i) {
    EXPECT_TRUE(dst_ptr[i] == "filled");
  }
}
TEST(TensorTest, Tensor64BitDimension) {
  // Initialize a tensor whose single dimension does not fit in an int32.
  int64_t large_number =
      static_cast<int64_t>(std::numeric_limits<int>::max()) + 1;
  Tensor tensor(vector<int64_t>{large_number}, CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.size(0), large_number);
  EXPECT_EQ(tensor.numel(), large_number);
  try {
    EXPECT_TRUE(tensor.mutable_data<char>() != nullptr);
  } catch (const EnforceNotMet& e) {
    string msg = e.what();
    size_t found = msg.find("posix_memalign");
    if (found != string::npos) {
      msg = msg.substr(0, msg.find('\n'));
      LOG(WARNING) << "Out of memory issue with posix_memalign;\n" << msg;
      return;
    } else {
      throw e;
    }
  }
  EXPECT_EQ(tensor.nbytes(), large_number * sizeof(char));
  EXPECT_EQ(tensor.itemsize(), sizeof(char));
  // Try an even larger shape, but do not touch mutable_data this time since
  // we cannot expect that much memory to be available.
  tensor.Resize(large_number, 100);
  EXPECT_EQ(tensor.dim(), 2);
  EXPECT_EQ(tensor.size(0), large_number);
  EXPECT_EQ(tensor.size(1), 100);
  EXPECT_EQ(tensor.numel(), large_number * 100);
}
TEST(TensorTest, UndefinedTensor) {
  Tensor x;
  EXPECT_FALSE(x.defined());
}
TEST(TensorTest, CopyAndAssignment) {
  Tensor x(CPU);
  x.Resize(16, 17);
  testing::randomFill(x.template mutable_data<float>(), 16 * 17);
  EXPECT_TRUE(x.defined());

  Tensor y(x);
  Tensor z = x;
  testing::assertTensorEquals(x, y);
  testing::assertTensorEquals(x, z);
}
TEST(TensorDeathTest, CannotCastDownLargeDims) {
  int64_t large_number =
      static_cast<int64_t>(std::numeric_limits<int>::max()) + 1;
  Tensor tensor(vector<int64_t>{large_number}, CPU);
  EXPECT_EQ(tensor.dim(), 1);
  EXPECT_EQ(tensor.size(0), large_number);
  // dim32() must refuse to narrow a dimension that does not fit in int32.
  ASSERT_THROW(tensor.dim32(0), EnforceNotMet);
}
#define TEST_SERIALIZATION_WITH_TYPE(TypeParam, field_name)               \
  TEST(TensorTest, TensorSerialization_##TypeParam) {                     \
    Blob blob;                                                            \
    Tensor* tensor = BlobGetMutableTensor(&blob, CPU);                    \
    tensor->Resize(2, 3);                                                 \
    for (int i = 0; i < 6; ++i) {                                         \
      tensor->mutable_data<TypeParam>()[i] = static_cast<TypeParam>(i);   \
    }                                                                     \
    string serialized = SerializeBlob(blob, "test");                      \
    BlobProto proto;                                                      \
    CHECK(proto.ParseFromString(serialized));                             \
    EXPECT_EQ(proto.name(), "test");                                      \
    EXPECT_EQ(proto.type(), "Tensor");                                    \
    EXPECT_TRUE(proto.has_tensor());                                      \
    const TensorProto& tensor_proto = proto.tensor();                     \
    EXPECT_EQ(                                                            \
        tensor_proto.data_type(),                                         \
        TypeMetaToDataType(TypeMeta::Make<TypeParam>()));                 \
    EXPECT_EQ(tensor_proto.field_name##_size(), 6);                       \
    for (int i = 0; i < 6; ++i) {                                         \
      EXPECT_EQ(tensor_proto.field_name(i), static_cast<TypeParam>(i));   \
    }                                                                     \
    Blob new_blob;                                                        \
    EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));              \
    EXPECT_TRUE(BlobIsTensorType(new_blob, CPU));                         \
    /* Read back from the deserialized blob; reading `blob` here would */ \
    /* compare the source tensor against itself.                       */ \
    const TensorCPU& new_tensor = new_blob.Get<TensorCPU>();              \
    EXPECT_EQ(new_tensor.dim(), 2);                                       \
    EXPECT_EQ(new_tensor.size(0), 2);                                     \
    EXPECT_EQ(new_tensor.size(1), 3);                                     \
    for (int i = 0; i < 6; ++i) {                                         \
      EXPECT_EQ(                                                          \
          tensor->data<TypeParam>()[i], new_tensor.data<TypeParam>()[i]); \
    }                                                                     \
  }                                                                       \
                                                                          \
  TEST(EmptyTensorTest, TensorSerialization_##TypeParam) {                \
    Blob blob;                                                            \
    TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU);                 \
    tensor->Resize(0, 3);                                                 \
    tensor->mutable_data<TypeParam>();                                    \
    string serialized = SerializeBlob(blob, "test");                      \
    BlobProto proto;                                                      \
    CHECK(proto.ParseFromString(serialized));                             \
    EXPECT_EQ(proto.name(), "test");                                      \
    EXPECT_EQ(proto.type(), "Tensor");                                    \
    EXPECT_TRUE(proto.has_tensor());                                      \
    const TensorProto& tensor_proto = proto.tensor();                     \
    EXPECT_EQ(                                                            \
        tensor_proto.data_type(),                                         \
        TypeMetaToDataType(TypeMeta::Make<TypeParam>()));                 \
    EXPECT_EQ(tensor_proto.field_name##_size(), 0);                       \
    Blob new_blob;                                                        \
    EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));              \
    EXPECT_TRUE(BlobIsTensorType(new_blob, CPU));                         \
    const TensorCPU& new_tensor = new_blob.Get<TensorCPU>();              \
    EXPECT_EQ(new_tensor.dim(), 2);                                       \
    EXPECT_EQ(new_tensor.size(0), 0);                                     \
    EXPECT_EQ(new_tensor.size(1), 3);                                     \
  }

TEST_SERIALIZATION_WITH_TYPE(bool, int32_data)
TEST_SERIALIZATION_WITH_TYPE(double, double_data)
TEST_SERIALIZATION_WITH_TYPE(float, float_data)
TEST_SERIALIZATION_WITH_TYPE(int, int32_data)
TEST_SERIALIZATION_WITH_TYPE(int8_t, int32_data)
TEST_SERIALIZATION_WITH_TYPE(int16_t, int32_data)
TEST_SERIALIZATION_WITH_TYPE(uint8_t, int32_data)
TEST_SERIALIZATION_WITH_TYPE(uint16_t, int32_data)
TEST_SERIALIZATION_WITH_TYPE(int64_t, int64_data)
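// Note on the macro above: protobuf stores each element type in a
// differently named repeated field (int32_data, float_data, ...), so the
// field accessor has to be spliced in via the field_name macro argument;
// a template parameter could not name the field.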
TEST(TensorTest, TensorSerialization_CustomType) {
  Blob blob;
  TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU);
  tensor->Resize(2, 3);
  for (int i = 0; i < 6; ++i) {
    tensor->mutable_data<BlobTestFoo>()[i].val = i;
  }
  string serialized = SerializeBlob(blob, "test");
  BlobProto proto;
  CHECK(proto.ParseFromString(serialized));
  EXPECT_EQ(proto.name(), "test");
  EXPECT_EQ(proto.type(), "Tensor");
  Blob new_blob;
  EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));
  EXPECT_TRUE(BlobIsTensorType(new_blob, CPU));
  const TensorCPU& new_tensor = new_blob.Get<TensorCPU>();
  EXPECT_EQ(new_tensor.dim(), 2);
  EXPECT_EQ(new_tensor.size(0), 2);
  EXPECT_EQ(new_tensor.size(1), 3);
  for (int i = 0; i < 6; ++i) {
    EXPECT_EQ(
        new_tensor.data<BlobTestFoo>()[i].val,
        tensor->data<BlobTestFoo>()[i].val);
  }
}
TEST(TensorTest, Half) {
  const int64_t kSize = 3000000;
  Blob blob;
  TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU);
  tensor->Resize(kSize);
  for (int i = 0; i < tensor->numel(); ++i) {
    tensor->mutable_data<at::Half>()[i].x = i % 10000;
  }
  string serialized = SerializeBlob(blob, "test");
  BlobProto proto;
  CHECK(proto.ParseFromString(serialized));
  EXPECT_EQ(proto.name(), "test");
  EXPECT_EQ(proto.type(), "Tensor");
  EXPECT_TRUE(proto.has_tensor());
  const TensorProto& tensor_proto = proto.tensor();
  EXPECT_EQ(
      tensor_proto.data_type(),
      TypeMetaToDataType(TypeMeta::Make<at::Half>()));
  if (FLAGS_caffe2_serialize_fp16_as_bytes) {
    EXPECT_EQ(tensor_proto.byte_data().size(), 2 * kSize);
    for (int i = 0; i < kSize; ++i) {
      auto value = tensor->mutable_data<at::Half>()[i].x;
      auto low_bits = static_cast<char>(value & 0xff);
      auto high_bits = static_cast<char>(value >> 8);
      EXPECT_EQ(tensor_proto.byte_data()[2 * i], low_bits);
      EXPECT_EQ(tensor_proto.byte_data()[2 * i + 1], high_bits);
    }
  } else {
    EXPECT_EQ(tensor_proto.int32_data().size(), kSize);
  }
  Blob new_blob;
  EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));
  EXPECT_TRUE(BlobIsTensorType(new_blob, CPU));
  const TensorCPU& new_tensor = new_blob.Get<TensorCPU>();
  EXPECT_EQ(new_tensor.dim(), 1);
  EXPECT_EQ(new_tensor.size(0), kSize);
}
TEST(TensorTest, TensorFactory) {
  Tensor a = empty({1, 2, 3}, at::device(CPU).dtype<float>());
  EXPECT_NE(a.data<float>(), nullptr);
  a.mutable_data<float>()[0] = 3.0;
  Tensor b = empty({1, 2, 3}, at::device(CPU).dtype<int>());
  EXPECT_NE(b.data<int>(), nullptr);
  b.mutable_data<int>()[0] = 3;
}
TEST(QTensorTest, QTensorSerialization) {
  Blob blob;
  QTensor<CPUContext>* qtensor = blob.GetMutable<QTensor<CPUContext>>();
  qtensor->SetPrecision(5);
  qtensor->SetSigned(false);
  qtensor->SetScale(1.337);
  qtensor->SetBias(-1.337);
  qtensor->Resize(std::vector<int>{2, 3});
  // "Randomly" set bits with a fixed seed so the test is deterministic.
  srand(0);
  for (int i = 0; i < 6; ++i) {
    for (int j = 0; j < 5; ++j) {
      qtensor->SetBitAtIndex(j, i, rand() % 2);
    }
  }

  string serialized = SerializeBlob(blob, "test");
  BlobProto proto;
  CHECK(proto.ParseFromString(serialized));
  EXPECT_EQ(proto.name(), "test");
  EXPECT_EQ(proto.type(), "QTensor");
  EXPECT_TRUE(proto.has_qtensor());
  const QTensorProto& qtensor_proto = proto.qtensor();

  EXPECT_EQ(qtensor_proto.precision(), qtensor->precision());
  EXPECT_EQ(qtensor_proto.scale(), qtensor->scale());
  EXPECT_EQ(qtensor_proto.bias(), qtensor->bias());
  EXPECT_EQ(qtensor_proto.is_signed(), qtensor->is_signed());

  Blob new_blob;
  EXPECT_NO_THROW(DeserializeBlob(serialized, &new_blob));
  EXPECT_TRUE(new_blob.IsType<QTensor<CPUContext>>());
  // Compare against the deserialized copy, not the original blob.
  const QTensor<CPUContext>& new_qtensor =
      new_blob.Get<QTensor<CPUContext>>();
  for (int i = 0; i < 6; ++i) {
    for (int j = 0; j < 5; ++j) {
      EXPECT_EQ(qtensor->GetBitAtIndex(j, i), new_qtensor.GetBitAtIndex(j, i));
    }
  }
}
using StringMap = std::vector<std::pair<string, string>>;

class VectorCursor : public db::Cursor {
 public:
  explicit VectorCursor(StringMap* data) : data_(data) {
    pos_ = 0;
  }
  ~VectorCursor() override {}
  void Seek(const string& /* unused */) override {}
  void SeekToFirst() override {}
  void Next() override {
    ++pos_;
  }
  string key() override {
    return (*data_)[pos_].first;
  }
  string value() override {
    return (*data_)[pos_].second;
  }
  bool Valid() override {
    return pos_ < data_->size();
  }

 private:
  StringMap* data_ = nullptr;
  size_t pos_ = 0;
};
class VectorDB : public db::DB {
 public:
  VectorDB(const string& source, db::Mode mode)
      : DB(source, mode), name_(source) {}
  ~VectorDB() override {
    data_.erase(name_);
  }
  void Close() override {}
  std::unique_ptr<db::Cursor> NewCursor() override {
    return make_unique<VectorCursor>(getData());
  }
  std::unique_ptr<db::Transaction> NewTransaction() override {
    CAFFE_THROW("Not implemented");
  }
  static void registerData(const string& name, StringMap&& data) {
    std::lock_guard<std::mutex> guard(dataRegistryMutex_);
    data_[name] = std::move(data);
  }

 private:
  StringMap* getData() {
    auto it = data_.find(name_);
    CAFFE_ENFORCE(it != data_.end(), "Can't find ", name_);
    return &(it->second);
  }

  string name_;
  static std::mutex dataRegistryMutex_;
  static std::map<string, StringMap> data_;
};

std::mutex VectorDB::dataRegistryMutex_;
std::map<string, StringMap> VectorDB::data_;

REGISTER_CAFFE2_DB(vector_db, VectorDB);
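// Editor's note: VectorDB is a minimal read-only in-memory db::DB used by the
// tests below -- a test first registers serialized chunks under a source name
// via VectorDB::registerData(), then a "Load" operator constructed with
// db_type = "vector_db" and db = <source name> iterates them through
// VectorCursor. Writing is unsupported; NewTransaction() throws.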
template <typename TypeParam>
class TypedTensorTest : public ::testing::Test {};
typedef ::testing::
    Types<float, bool, double, int, int8_t, int16_t, uint8_t, uint16_t, int64_t>
        TensorDataTypes;
TYPED_TEST_CASE(TypedTensorTest, TensorDataTypes);
TYPED_TEST(TypedTensorTest, BigTensorSerialization) {
  int64_t d1 = 2;
  int64_t d2 = FLAGS_caffe2_test_big_tensor_size
      ? FLAGS_caffe2_test_big_tensor_size / d1
      : static_cast<int64_t>(std::numeric_limits<int>::max()) + 1;
  int64_t size = d1 * d2;
  string db_source = (string)std::tmpnam(nullptr);
  VLOG(1) << "db_source: " << db_source;

  {
    VLOG(1) << "Test begin";
    Blob blob;
    VLOG(1) << "Allocating blob";
    Tensor* tensor = BlobGetMutableTensor(&blob, CPU);
    tensor->Resize(d1, d2);
    auto mutableData = tensor->mutable_data<TypeParam>();
    VLOG(1) << "Filling out the blob";
    for (int64_t i = 0; i < size; ++i) {
      mutableData[i] = static_cast<TypeParam>(i);
    }
    StringMap data;
    std::mutex mutex;
    auto acceptor = [&](const std::string& key, const std::string& value) {
      std::lock_guard<std::mutex> guard(mutex);
      data.emplace_back(key, value);
    };
    SerializeBlob(blob, "test", acceptor);
    VLOG(1) << "finished writing to DB";
    VectorDB::registerData(db_source, std::move(data));
  }

  {
    DeviceOption option;
    option.set_device_type(PROTO_CPU);
    Argument db_type_arg = MakeArgument<string>("db_type", "vector_db");
    Argument absolute_path_arg = MakeArgument<bool>("absolute_path", true);
    Argument db_source_arg = MakeArgument<string>("db", db_source);
    auto op_def = CreateOperatorDef(
        "Load",
        "",
        std::vector<string>{},
        std::vector<string>({"test"}),
        std::vector<Argument>{db_type_arg, db_source_arg, absolute_path_arg},
        option,
        "DUMMY_ENGINE");
    Workspace ws;
    auto load_op = CreateOperator(op_def, &ws);
    EXPECT_TRUE(load_op != nullptr);
    VLOG(1) << "Running operator";
    load_op->Run();
    VLOG(1) << "Reading blob from workspace";
    auto new_blob = ws.GetBlob("test");
    EXPECT_TRUE(BlobIsTensorType(*new_blob, CPU));
    const auto& new_tensor = new_blob->Get<TensorCPU>();
    EXPECT_EQ(new_tensor.dim(), 2);
    EXPECT_EQ(new_tensor.size(0), d1);
    EXPECT_EQ(new_tensor.size(1), d2);
    for (int64_t i = 0; i < size; ++i) {
      EXPECT_EQ(static_cast<TypeParam>(i), new_tensor.data<TypeParam>()[i]);
    }
  }
}
struct DummyType {
  DummyType(int n_chunks_init = 0) : n_chunks(n_chunks_init) {}
  std::string serialize(const std::string& name, const int32_t chunk_id)
      const {
    BlobProto blobProto;
    blobProto.set_name(name);
    blobProto.set_type("DummyType");
    std::string content("");
    blobProto.set_content(content);
    blobProto.set_content_num_chunks(n_chunks);
    blobProto.set_content_chunk_id(chunk_id);
    return blobProto.SerializeAsString();
  }
  // Each deserialized chunk bumps the counter, so a full round trip restores
  // the original n_chunks.
  void deserialize(const BlobProto& /* unused */) {
    ++n_chunks;
  }
  int n_chunks;
};
class DummyTypeSerializer : public BlobSerializerBase {
 public:
  DummyTypeSerializer() {}
  ~DummyTypeSerializer() override {}
  void Serialize(
      const void* pointer,
      TypeMeta typeMeta,
      const string& name,
      SerializationAcceptor acceptor) override {
    CAFFE_ENFORCE(typeMeta.Match<DummyType>());
    const auto& container = *static_cast<const DummyType*>(pointer);
    for (int k = 0; k < container.n_chunks; ++k) {
      std::string serialized_chunk = container.serialize(name, k);
      acceptor(c10::str(name, kChunkIdSeparator, k), serialized_chunk);
    }
  }
};

class DummyTypeDeserializer : public BlobDeserializerBase {
 public:
  void Deserialize(const BlobProto& proto, Blob* blob) override {
    auto* container = blob->GetMutable<DummyType>();
    container->deserialize(proto);
  }
};

CAFFE_KNOWN_TYPE(DummyType);

namespace {
REGISTER_BLOB_SERIALIZER((TypeMeta::Id<DummyType>()), DummyTypeSerializer);
C10_REGISTER_TYPED_CLASS(
    BlobDeserializerRegistry,
    "DummyType",
    DummyTypeDeserializer);
} // namespace
TEST(ContentChunks, Serialization) {
  string db_source = (string)std::tmpnam(nullptr);
  VLOG(1) << "db_source: " << db_source;

  {
    VLOG(1) << "Test begin";
    Blob blob;
    DummyType* container = blob.GetMutable<DummyType>();
    VLOG(1) << "Allocating blob";
    container->n_chunks = 10;
    VLOG(1) << "Filling out the blob";
    StringMap data;
    std::mutex mutex;
    auto acceptor = [&](const std::string& key, const std::string& value) {
      std::lock_guard<std::mutex> guard(mutex);
      data.emplace_back(key, value);
    };
    SerializeBlob(blob, "test", acceptor);
    VLOG(1) << "finished writing to DB";
    VectorDB::registerData(db_source, std::move(data));
  }

  {
    DeviceOption option;
    option.set_device_type(PROTO_CPU);
    Argument db_type_arg = MakeArgument<string>("db_type", "vector_db");
    Argument absolute_path_arg = MakeArgument<bool>("absolute_path", true);
    Argument db_source_arg = MakeArgument<string>("db", db_source);
    auto op_def = CreateOperatorDef(
        "Load",
        "",
        std::vector<string>{},
        std::vector<string>({"test"}),
        std::vector<Argument>{db_type_arg, db_source_arg, absolute_path_arg},
        option,
        "DUMMY_ENGINE");
    Workspace ws;
    auto load_op = CreateOperator(op_def, &ws);
    EXPECT_TRUE(load_op != nullptr);
    VLOG(1) << "Running operator";
    load_op->Run();
    VLOG(1) << "Reading blob from workspace";
    auto new_blob = ws.GetBlob("test");
    EXPECT_TRUE(new_blob->IsType<DummyType>());
    const auto& container = new_blob->Get<DummyType>();
    EXPECT_EQ(container.n_chunks, 10);
  }
}
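// Note on the test above: the serializer emits one BlobProto per chunk under
// the key str(name, kChunkIdSeparator, k), and DummyType::deserialize()
// counts one chunk per proto it sees, so n_chunks surviving the round trip
// shows that every chunk was written to and read back from the DB.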
TEST(CustomChunkSize, BigTensorSerialization) {
  int64_t d1 = 2;
  int64_t d2 = FLAGS_caffe2_test_big_tensor_size
      ? FLAGS_caffe2_test_big_tensor_size / d1
      : static_cast<int64_t>(std::numeric_limits<int>::max()) + 1;

  Blob blob;
  TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU);
  tensor->Resize(d1, d2);
  tensor->mutable_data<float>();

  std::mutex mutex;
  int counter = 0;
  auto acceptor = [&](const std::string& /* unused */,
                      const std::string& /* unused */) {
    std::lock_guard<std::mutex> guard(mutex);
    counter++;
  };
  // With chunking disabled, the whole tensor is emitted as a single proto.
  SerializeBlob(blob, "test", acceptor, kNoChunking);
  EXPECT_EQ(counter, 1);
}
TEST(QTensor, QTensorSizingTest) {
  vector<int> dims(3);
  dims[0] = 2;
  dims[1] = 3;
  dims[2] = 5;
  QTensor<CPUContext> qtensor(dims, 3);
  EXPECT_TRUE(qtensor.mutable_data() != nullptr);
  EXPECT_EQ(qtensor.nbytes(), 12);
  EXPECT_EQ(qtensor.size(), 30);
}
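// Arithmetic behind the sizing check above (assuming nbytes() rounds the bit
// count up to whole bytes): 2 * 3 * 5 = 30 elements at 3 bits each is 90
// bits, which rounds up to 12 bytes -- hence nbytes() == 12 while
// size() == 30.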
TEST(BlobTest, CastingMessage) {
  Blob b;
  b.GetMutable<BlobTestFoo>();
  b.Get<BlobTestFoo>();
  try {
    b.Get<BlobTestBar>();
    FAIL() << "Should have thrown";
  } catch (const EnforceNotMet& e) {
    // The error message should name both the stored and the requested type.
    string msg = e.what_without_backtrace();
    EXPECT_NE(msg.find("BlobTestFoo"), std::string::npos) << msg;
    EXPECT_NE(msg.find("BlobTestBar"), std::string::npos) << msg;
  }
}
TEST(TensorConstruction, UninitializedCopyTest) {
  Tensor x(CPU);
  Tensor y(x, CPU);
  Tensor z = x.Clone();
  // Copying a dtype-uninitialized tensor must not initialize the dtype.
  EXPECT_FALSE(x.dtype_initialized());
  EXPECT_FALSE(y.dtype_initialized());
  LOG(INFO) << "z.numel(): " << z.numel();
  EXPECT_FALSE(z.dtype_initialized());
}
TEST(TensorConstruction, CopyConstructorTest) {
  Tensor x(CPU);
  x.Resize(5);
  x.mutable_data<float>()[0] = 1;
  Tensor y = x.Clone();
  Tensor z(x, CPU);

  EXPECT_EQ(*x.data<float>(), 1);
  EXPECT_EQ(*y.data<float>(), 1);
  EXPECT_EQ(*z.data<float>(), 1);
  // Both copies are deep: mutating x must not be visible through y or z.
  x.mutable_data<float>()[0] = 5;
  EXPECT_EQ(*x.data<float>(), 5);
  EXPECT_EQ(*y.data<float>(), 1);
  EXPECT_EQ(*z.data<float>(), 1);
}
TEST(TensorConstruction, MoveAssignmentOpTest) {
  Tensor x(CPU);
  x.Resize(5);
  x.mutable_data<float>()[0] = 1;
  Tensor y(CPU);
  y = std::move(x);

  EXPECT_EQ(*y.data<float>(), 1);
}
TEST(TensorSerialization, MistakenlySerializingDtypeUninitializedTensor) {
  // This preserves a legacy behavior: a tensor whose dtype was never
  // initialized can still go through serialization.
  Blob blob;
  Tensor* x = BlobGetMutableTensor(&blob, CPU);
  x->Resize(0);
  string output;
  SerializeBlob(
      blob,
      "foo",
      [&output](const string& /* blobName */, const std::string& data) {
        output = data;
      });
  BlobProto b;
  CHECK(b.ParseFromString(output));
  LOG(INFO) << "serialized proto: " << b.DebugString();

  Blob new_blob;
  // Deserializing the empty tensor must not throw.
  DeserializeBlob(output, &new_blob);
  const Tensor& new_tensor = new_blob.Get<Tensor>();
  EXPECT_EQ(0, new_tensor.numel());
  EXPECT_EQ(1, new_tensor.dim());
}
static caffe2::BlobProto CreateProtoWithInt32Data(
    const caffe2::TensorProto::DataType& dataType,
    size_t numEl,
    bool useCached = true) {
  static std::map<caffe2::TensorProto::DataType, caffe2::BlobProto> protos;
  if (useCached && protos.count(dataType)) {
    return protos[dataType];
  }
  caffe2::BlobProto proto;
  proto.set_type("Tensor");
  auto tensor = proto.mutable_tensor();
  tensor->add_dims(numEl);
  tensor->add_dims(1);
  tensor->set_data_type(dataType);
  tensor->set_name("test_feature");
  tensor->mutable_device_detail()->set_device_type(0);
  tensor->mutable_segment()->set_begin(0);
  tensor->mutable_segment()->set_end(numEl);
  for (size_t i = 0; i < numEl; ++i) {
    int32_t data = 0;
    switch (dataType) {
      case caffe2::TensorProto_DataType_INT32:
        data = static_cast<int32_t>(rand() % 0xffffffff);
        break;
      case caffe2::TensorProto_DataType_BOOL:
        // rand() % 2 yields both 0 and 1 (the original modulus of 1 could
        // only produce 0).
        data = static_cast<uint8_t>(rand() % 2);
        break;
      case caffe2::TensorProto_DataType_UINT8:
        data = static_cast<uint8_t>(rand() % 0x000000ff);
        break;
      case caffe2::TensorProto_DataType_INT8:
        data = static_cast<int8_t>(rand() % 0x000000ff);
        break;
      case caffe2::TensorProto_DataType_UINT16:
        data = static_cast<uint16_t>(rand() % 0x0000ffff);
        break;
      case caffe2::TensorProto_DataType_INT16:
        data = static_cast<int16_t>(rand() % 0x0000ffff);
        break;
      case caffe2::TensorProto_DataType_FLOAT16:
        data = static_cast<uint16_t>(rand() % 0x0000ffff);
        break;
      default:
        continue;
    }
    tensor->add_int32_data(data);
  }
  protos[dataType] = proto;
  return proto;
}
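// Editor's note: TestDataType() below round-trips a proto through both
// holder formats -- it deserializes the int32_data proto built above,
// re-serializes it with byte_data as the holder, then flips the flag back
// and re-serializes once more, so both protos can be compared element-wise
// as int32_data.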
static void TestDataType(
    const caffe2::TensorProto::DataType& dataType,
    std::string dataTypeName) {
  LOG(INFO) << dataTypeName;
  FLAGS_caffe2_serialize_using_bytes_as_holder = true;
  size_t numEl = 1000;
  // Proto with int32 data.
  auto protoInt32 = CreateProtoWithInt32Data(dataType, numEl, false);
  caffe2::Blob blobInt32;
  DeserializeBlob(protoInt32.SerializeAsString(), &blobInt32);
  auto serializedStr = SerializeBlob(blobInt32, protoInt32.name());
  caffe2::BlobProto protoBytes;
  // Proto with byte data as the holder.
  protoBytes.ParseFromString(serializedStr);
  caffe2::Blob blobBytes;
  DeserializeBlob(protoBytes.SerializeAsString(), &blobBytes);
  FLAGS_caffe2_serialize_using_bytes_as_holder = false;
  // Serialize again, now back into int32 data, and compare element-wise.
  protoBytes.ParseFromString(SerializeBlob(blobBytes, protoBytes.name()));
  EXPECT_EQ(numEl, protoInt32.tensor().int32_data_size());
  EXPECT_EQ(numEl, protoBytes.tensor().int32_data_size());
  for (int i = 0; i < numEl; ++i) {
    EXPECT_EQ(
        protoInt32.tensor().int32_data(i), protoBytes.tensor().int32_data(i));
  }
}
TEST(TensorSerialization, TestCorrectness) {
  FLAGS_caffe2_serialize_using_bytes_as_holder = true;
  TestDataType(
      caffe2::TensorProto_DataType_INT32, "TensorProto_DataType_INT32");
  TestDataType(caffe2::TensorProto_DataType_BOOL, "TensorProto_DataType_BOOL");
  TestDataType(
      caffe2::TensorProto_DataType_UINT8, "TensorProto_DataType_UINT8");
  TestDataType(caffe2::TensorProto_DataType_INT8, "TensorProto_DataType_INT8");
  TestDataType(
      caffe2::TensorProto_DataType_UINT16, "TensorProto_DataType_UINT16");
  TestDataType(
      caffe2::TensorProto_DataType_INT16, "TensorProto_DataType_INT16");
  TestDataType(
      caffe2::TensorProto_DataType_FLOAT16, "TensorProto_DataType_FLOAT16");
  FLAGS_caffe2_serialize_using_bytes_as_holder = false;
}

} // namespace caffe2