pytorch  1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.

blob_test.cc File Reference
#include <iostream>
#include <memory>
#include <mutex>
#include <gtest/gtest.h>
#include "c10/util/Registry.h"
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/db.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/qtensor.h"
#include "caffe2/core/qtensor_serialization.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/test_utils.h"
#include "caffe2/core/types.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"


Classes

class  caffe2::BlobTestFooSerializer
 
class  caffe2::BlobTestFooDeserializer
 

Namespaces

namespace  caffe2
 Copyright (c) 2016-present, Facebook, Inc.
 

Macros

#define TEST_SERIALIZATION_WITH_TYPE(TypeParam, field_name)
 

Functions

 C10_DEFINE_int64 (caffe2_test_big_tensor_size, 100000000, "")
 
 C10_DECLARE_int (caffe2_tensor_chunk_size)
 
 C10_DECLARE_bool (caffe2_serialize_fp16_as_bytes)
 
 C10_DECLARE_bool (caffe2_serialize_using_bytes_as_holder)
 
 caffe2::CAFFE_KNOWN_TYPE (BlobTestFoo)
 
 caffe2::CAFFE_KNOWN_TYPE (BlobTestBar)
 
 caffe2::CAFFE_KNOWN_TYPE (BlobTestNonDefaultConstructible)
 
 caffe2::REGISTER_BLOB_SERIALIZER ((TypeMeta::Id< BlobTestFoo >()), BlobTestFooSerializer)
 
 caffe2::REGISTER_BLOB_DESERIALIZER (BlobTestFoo, BlobTestFooDeserializer)
 
 caffe2::CAFFE_KNOWN_TYPE (DummyType)
 

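The CAFFE_KNOWN_TYPE and REGISTER_BLOB_SERIALIZER / REGISTER_BLOB_DESERIALIZER entries above follow caffe2's usual pattern for making a custom C++ type serializable as a Blob. The following is only a sketch of that pattern, assuming the BlobSerializerBase and BlobDeserializerBase interfaces declared in caffe2/core/blob_serialization.h; MyType, MyTypeSerializer and MyTypeDeserializer are hypothetical stand-ins for BlobTestFoo and its test serializers, not code from this file.

#include <cstdint>
#include <cstring>

#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/proto/caffe2_pb.h"

namespace caffe2 {

// Hypothetical payload type standing in for BlobTestFoo.
struct MyType {
  int32_t val = 0;
};

CAFFE_KNOWN_TYPE(MyType);  // registers MyType with TypeMeta so TypeMeta::Id<MyType>() is available

class MyTypeSerializer : public BlobSerializerBase {
 public:
  void Serialize(
      const void* pointer,
      TypeMeta typeMeta,
      const std::string& name,
      SerializationAcceptor acceptor) override {
    CAFFE_ENFORCE(typeMeta.Match<MyType>());
    const auto* obj = static_cast<const MyType*>(pointer);
    BlobProto proto;
    proto.set_name(name);
    proto.set_type("MyType");
    // Store the raw bytes of val as the proto content.
    proto.set_content(std::string(
        reinterpret_cast<const char*>(&obj->val), sizeof(obj->val)));
    acceptor(name, proto.SerializeAsString());
  }
};

class MyTypeDeserializer : public BlobDeserializerBase {
 public:
  void Deserialize(const BlobProto& proto, Blob* blob) override {
    auto* obj = blob->GetMutable<MyType>();
    std::memcpy(&obj->val, proto.content().data(), sizeof(obj->val));
  }
};

// Tie the type id to the serializer and the type name to the deserializer,
// mirroring the registrations listed above for BlobTestFoo.
REGISTER_BLOB_SERIALIZER((TypeMeta::Id<MyType>()), MyTypeSerializer);
REGISTER_BLOB_DESERIALIZER(MyType, MyTypeDeserializer);

} // namespace caffe2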
Macro Definition Documentation

◆ TEST_SERIALIZATION_WITH_TYPE

#define TEST_SERIALIZATION_WITH_TYPE(TypeParam, field_name)

Definition at line 615 of file blob_test.cc.
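Judging by its name and parameters, the macro generates serialization tests parameterized by an element type and the protobuf field that holds that type's data; its body (line 615) is not reproduced on this page. The hand-written test below is only a sketch of the same round-trip idea for float data, using SerializeBlob/DeserializeBlob from caffe2/core/blob_serialization.h; the test name and structure are assumptions, not the macro's actual expansion.

#include <gtest/gtest.h>

#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/tensor.h"

// Hypothetical round-trip test, not the macro's expansion.
TEST(BlobSerializationSketch, FloatTensorRoundTrip) {
  using namespace caffe2;

  Blob blob;
  Tensor* tensor = BlobGetMutableTensor(&blob, CPU);
  tensor->Resize(2, 3);
  for (int i = 0; i < tensor->numel(); ++i) {
    tensor->mutable_data<float>()[i] = static_cast<float>(i);
  }

  // Serialize the blob to a string-encoded BlobProto...
  std::string serialized = SerializeBlob(blob, "test");

  // ...then deserialize into a fresh blob and compare element-wise.
  Blob restored_blob;
  DeserializeBlob(serialized, &restored_blob);
  const Tensor& restored = restored_blob.Get<Tensor>();

  ASSERT_EQ(restored.numel(), tensor->numel());
  for (int i = 0; i < restored.numel(); ++i) {
    EXPECT_EQ(restored.data<float>()[i], tensor->data<float>()[i]);
  }
}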

Function Documentation

◆ C10_DECLARE_bool() [1/2]

C10_DECLARE_bool(caffe2_serialize_fp16_as_bytes)

◆ C10_DECLARE_bool() [2/2]

C10_DECLARE_bool(caffe2_serialize_using_bytes_as_holder)

◆ C10_DECLARE_int()

C10_DECLARE_int(caffe2_tensor_chunk_size)

◆ C10_DEFINE_int64()

C10_DEFINE_int64(caffe2_test_big_tensor_size, 100000000, "")
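These are the C10 command-line flag macros from c10/util/Flags.h: C10_DEFINE_* creates a flag together with its FLAGS_* variable in exactly one translation unit, while C10_DECLARE_* only makes a flag defined elsewhere visible to the current file. A minimal usage sketch, with a hypothetical reader function:

#include "c10/util/Flags.h"

// Definition: creates the flag and its FLAGS_* variable (one translation unit only).
C10_DEFINE_int64(caffe2_test_big_tensor_size, 100000000, "");

// Declarations: make flags defined elsewhere visible here.
C10_DECLARE_int(caffe2_tensor_chunk_size);
C10_DECLARE_bool(caffe2_serialize_fp16_as_bytes);

// Hypothetical example: flag values are read (and can be overridden) through FLAGS_<name>.
void example_read_flags() {
  const int64_t big_size = FLAGS_caffe2_test_big_tensor_size;
  const int chunk_size = FLAGS_caffe2_tensor_chunk_size;
  const bool fp16_as_bytes = FLAGS_caffe2_serialize_fp16_as_bytes;
  (void)big_size;
  (void)chunk_size;
  (void)fp16_as_bytes;
}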

Variable Documentation

◆ data_

◆ dataRegistryMutex_

std::mutex dataRegistryMutex_
static private

Definition at line 859 of file blob_test.cc.

◆ n_chunks

int n_chunks

Definition at line 961 of file blob_test.cc.

◆ name_

◆ pos_

size_t pos_ = 0
private

Definition at line 828 of file blob_test.cc.
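The members documented above (data_, name_, pos_ and the static dataRegistryMutex_) belong to the in-memory DB and cursor helpers that blob_test.cc uses to exercise DB-backed serialization. As a hedged illustration of that pattern only, and not the actual classes in this file, a minimal caffe2::db::Cursor over an in-memory key/value vector might look like the following; all names other than the caffe2::db::Cursor interface are hypothetical.

#include <cstddef>
#include <map>
#include <mutex>
#include <string>
#include <utility>
#include <vector>

#include "caffe2/core/db.h"

namespace {

using StringMap = std::vector<std::pair<std::string, std::string>>;

// Minimal in-memory cursor, illustrating what members like data_ and pos_ are for.
class InMemoryCursor : public caffe2::db::Cursor {
 public:
  explicit InMemoryCursor(StringMap* data) : data_(data) {}
  void Seek(const std::string& /*key*/) override {}  // seeking not needed for these tests
  void SeekToFirst() override { pos_ = 0; }
  void Next() override { ++pos_; }
  std::string key() override { return (*data_)[pos_].first; }
  std::string value() override { return (*data_)[pos_].second; }
  bool Valid() override { return pos_ < data_->size(); }

 private:
  StringMap* data_;  // backing storage, analogous to data_
  size_t pos_ = 0;   // current position, analogous to pos_
};

// A static mutex such as dataRegistryMutex_ would guard a process-wide
// registry mapping DB names to their in-memory contents.
std::mutex dataRegistryMutex_;
std::map<std::string, StringMap> dataRegistry_;

StringMap* getDataForDB(const std::string& name) {
  std::lock_guard<std::mutex> lock(dataRegistryMutex_);
  return &dataRegistry_[name];
}

}  // namespace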

◆ val

int32_t val

Definition at line 32 of file blob_test.cc.

Referenced by at::native::_grid_sampler_2d_cpu_fallback_backward(), torch::jit::tensorexpr::HashProvider::_hash_combine(), at::_isnan(), torch::optim::_strong_wolfe(), caffe2::NNApi::addFloatOperand(), caffe2::NNApi::addScalarOperand(), torch::jit::logging::LockingLogger::addStatValue(), torch::jit::fuser::cuda::ir_utils::FilterIterator< FilterType, Iterator >::advance(), torch::jit::fuser::cuda::ir_utils::asConstTV(), torch::jit::fuser::cuda::ir_utils::asTV(), at::transformation::bernoulli(), caffe2::BisectPercentileOp< Context >::binary_search(), torch::jit::fuser::cuda::LaunchParams::bind(), torch::jit::tensorexpr::SimpleIREvaluatorImpl::bindVar(), torch::jit::CastAllConstantToFloating(), at::transformation::cauchy(), torch::jit::fuser::cuda::LaunchParams::checkAndSet(), caffe2::BisectPercentileOp< Context >::compute_percentile(), torch::jit::fuser::cuda::FusionExecutor::computeLaunchParams(), caffe2::LarsOp< T, Context >::ComputeLearningRate(), torch::jit::tensorexpr::TensorExprKernel::computeValue(), torch::jit::tensorexpr::TensorExprKernel::constant(), torch::jit::constantFoldedConditionValue(), torch::jit::fuser::cuda::Val::constDispatch(), torch::jit::PythonPrintImpl::containsNonASCIIString(), caffe2::convert(), caffe2::QuantileOp< Context >::CountLowerEq(), caffe2::onnx::Caffe2Backend::CreateDynamicSlice(), caffe2::BlobTestFooDeserializer::Deserialize(), torch::jit::dictConstruct(), torch::jit::fuser::cuda::Val::dispatch(), caffe2::MergeIdListsOp< Context >::DoRunWithType(), caffe2::SelfBinningHistogramOp< Context >::DoRunWithType(), torch::jit::InterpreterStateImpl::dump(), torch::jit::to_ir::emitDelete(), torch::jit::to_ir::emitIsInstance(), torch::jit::to_ir::emitTupleSlice(), torch::jit::to_ir::emitUnaryOp(), c10::Scalar::equal(), at::transformation::exponential(), c10::ShapeSymbol::fromStaticSize(), torch::jit::fuser::cuda::Fusion::Fusion(), torch::jit::fuser::cuda::IrGraphGenerator::generate(), qnnpack::generateBlockCSRMatrix(), torch::jit::fuser::generateKernel(), at::transformation::geometric(), caffe2::TileOp< Context >::GetArgFromTensor(), caffe2::TileGradientOp< Context >::GetArgFromTensor(), torch::jit::AliasDb::getElements(), torch::jit::fuser::getInputDependencies(), torch::jit::SimpleValue::getitem(), torch::jit::fuser::cuda::GpuLower::getLowerValue(), torch::jit::PythonPrintImpl::getOrAddConstant(), torch::jit::fuser::cuda::kir::GridReduction::getPredicateFlagName(), caffe2::QuantileOp< Context >::GetRangeFromInputs(), torch::jit::tracer::TracingState::getValue(), getValueAttr(), torch::jit::getValues(), torch::jit::fuser::cuda::BackwardVisitor::handle(), torch::handle_torch_function_indexing(), torch::jit::fuser::cuda::Fusion::hasInput(), torch::jit::fuser::cuda::Fusion::hasOutput(), torch::jit::fuser::cuda::Fusion::inputsOf(), torch::jit::Graph::insertConstant(), torch::jit::insertConstant(), torch::jit::fuser::cuda::kir::isLoweredScalar(), torch::jit::fuser::cuda::kir::isLoweredVal(), torch::jit::PassManager< DerivedType >::isRegistered(), torch::jit::fuser::cuda::isTV(), torch::jit::fuser::cuda::ir_utils::isTV(), torch::jit::SimpleValue::len(), at::transformation::log_normal(), torch::jit::fuser::cuda::GpuLower::lowerValue(), torch::jit::tensorexpr::Let::make(), caffe2::onnx::MakeAttribute(), torch::jit::ExitTransformer::matchValuesWithUnitialized(), torch::jit::materializeConstant(), torch::jit::fuser::cuda::StatefulExpressionEvaluator::maybeHandle(), torch::jit::MemoryPlanner::MemoryPlanner(), torch::jit::tensorexpr::Vectorizer::mutate(), 
torch::jit::fuser::cuda::Val::mutatorDispatch(), c10d::ProcessGroupNCCL::ncclCommWatchdogInternal(), torch::jit::fuser::cuda::BackwardVisitor::next(), at::transformation::normal(), torch::jit::fuser::cuda::executor_utils::nvrtcCompile(), c10::IValue::HashAliasedIValue::operator()(), c10::operator+(), c10::operator-(), torch::jit::fuser::cuda::Fusion::origin(), c10d::parseEnvVarFlag(), torch::jit::tensorexpr::BlockPrinter::PrintArguments(), torch::jit::fuser::cuda::IrGraphGenerator::printValue(), torch::jit::fuser::cuda::KernelArgumentHolder::push(), torch::jit::Unpickler::readGlobal(), at::native::detail::MinMaxReductionOps< comp_t >::reduce(), torch::jit::fuser::cuda::Fusion::registerLoweredVal(), torch::jit::fuser::cuda::OptOutMutator::registerMutation(), torch::jit::fuser::cuda::OptInMutator::registerMutation(), torch::jit::fuser::cuda::Fusion::registerVal(), torch::jit::fuser::cuda::Fusion::removeVal(), caffe2::RepeatedMaskWithFunctor(), torch::jit::fuser::cuda::GpuLower::replaceSymbolicSizes(), torch::jit::PeepholeOptimizeAliasSensitiveImpl::replaceWithIValue(), torch::jit::fuser::cuda::TransformReplay::replayPasC(), torch::jit::fuser::cuda::ReplayTransformations::ReplayTransformations(), torch::utils::returned_structseq_repr(), torch::jit::fuser::cuda::FusionExecutorCache::runFusionWithInputs(), caffe2::RMACRegionsOp< Context >::RunOnDevice(), caffe2::FunHashOp< T, Context >::RunOnDevice(), caffe2::FunHashGradientOp< T, Context >::RunOnDevice(), caffe2::SparseFunHashOp< T, Context >::RunOnDevice(), caffe2::SparseFunHashGradientOp< T, Context >::RunOnDevice(), caffe2::ClipTensorByScalingOp< Context >::RunOnDevice(), torch::jit::fuser::cuda::TransformRFactor::runReplay(), torch::jit::fuser::cuda::TransformRFactor::runReplay2(), at::native::s_addmm_out_sparse_dense_worker(), caffe2::BlobTestFooSerializer::Serialize(), at::native::setStrided(), torch::jit::setTensorExprFuserEnabled(), torch::jit::fuser::cuda::FusionExecutor::setUsedTVs(), dnnlowp::sgn(), torch::optim::LBFGS::step(), torch::jit::tensorexpr::Placeholder::store(), torch::jit::tensorexpr::Placeholder::storeWithMask(), caffe2::swap_endian(), torch::jit::tensorexpr::HashProvider::te_hash(), torch::autograd::THPVariable_setitem(), torch::jit::fuser::cuda::IterVisitor::traverse_(), torch::jit::tryInsertConstant(), at::transformation::uniform_int(), at::transformation::uniform_int_from_to(), at::transformation::uniform_int_full_range(), at::transformation::uniform_real(), torch::jit::fuser::cuda::Fusion::unordered_uses(), torch::jit::ConcreteSourceRangeUnpickler::unpickle(), torch::jit::fuser::cuda::Fusion::used(), torch::jit::fuser::cuda::validateIr(), vst1_f16_x2(), vst1_f32_x2(), vst1_f64_x2(), vst1_p16_x2(), vst1_p64_x2(), vst1_p8_x2(), vst1_s16_x2(), vst1_s32_x2(), vst1_s64_x2(), vst1_s8_x2(), vst1_u16_x2(), vst1_u32_x2(), vst1_u64_x2(), vst1_u8_x2(), vst1q_f16_x2(), vst1q_f32_x2(), vst1q_f64_x2(), vst1q_p16_x2(), vst1q_p64_x2(), vst1q_p8_x2(), vst1q_s16_x2(), vst1q_s32_x2(), vst1q_s64_x2(), vst1q_s8_x2(), vst1q_u16_x2(), vst1q_u32_x2(), vst1q_u64_x2(), vst1q_u8_x2(), and torch::jit::wrap_maybe().