pytorch  1.8.2
About: PyTorch provides Tensor computation (like NumPy) with strong GPU acceleration and Deep Neural Networks (in Python) built on a tape-based autograd system. LTS (Long Term Support) release.
  Fossies Dox: pytorch-1.8.2.tar.gz ("unofficial" and experimental doxygen-generated source code documentation)

diagnose_protobuf Namespace Reference

Module scripts.diagnose_protobuf. More...

Variables

 python_version = google.protobuf.__version__
 
bool python_protobuf_installed = True
 
string protoc_name = 'protoc.exe'
 
 p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE)
 
 out
 
 err
 
bool native_protobuf_installed = False
 
 tmp = re.search(r'\d\.\d\.\d', out)
 
 native_version = tmp.group(0)
 
string PYTHON_PROTOBUF_NOT_INSTALLED
 
string NATIVE_PROTOBUF_NOT_INSTALLED
 
string VERSION_MISMATCH
 

Detailed Description

Module scripts.diagnose_protobuf.

Variable Documentation

◆ err

diagnose_protobuf.err

Definition at line 36 of file diagnose_protobuf.py.

◆ native_protobuf_installed

bool diagnose_protobuf.native_protobuf_installed = False

Definition at line 41 of file diagnose_protobuf.py.

◆ NATIVE_PROTOBUF_NOT_INSTALLED

string diagnose_protobuf.NATIVE_PROTOBUF_NOT_INSTALLED
Initial value:
1= """
2You have not installed the protoc binary. Protoc is needed to compile Caffe2
3protobuf source files. Depending on the platform you are on, you can install
4protobuf via:
5 (1) Mac: using homebrew and do brew install protobuf.
6 (2) Linux: use apt and do apt-get install libprotobuf-dev
7 (3) Windows: install from source, or from the releases here:
8 https://github.com/google/protobuf/releases/
9"""

Definition at line 63 of file diagnose_protobuf.py.

◆ native_version

diagnose_protobuf.native_version = tmp.group(0)

Definition at line 51 of file diagnose_protobuf.py.

◆ out

diagnose_protobuf.out

Definition at line 36 of file diagnose_protobuf.py.

◆ p

diagnose_protobuf.p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE)

Definition at line 35 of file diagnose_protobuf.py.

Referenced by torch::optim::LBFGS._add_grad(), at::native._cdist_backward(), at::native._cdist_forward(), torch::optim::LBFGS._clone_param(), torch::optim::LBFGS._gather_flat_grad(), at::native._norm(), torch::optim::LBFGS._numel(), at::native._pdist_backward(), at::native._pdist_forward(), torch._lobpcg._polynomial_coefficients_given_roots(), at::native._sobol_engine_initialize_state_(), at::native._sobol_engine_scramble_(), at::native._sspaddmm_out_cpu(), at::native::legacy::cuda._th_renorm(), at::native::legacy::cpu._th_renorm(), at::native::legacy::cuda._th_renorm_(), at::native::legacy::cpu._th_renorm_(), at::native::legacy::cuda._th_renorm_out(), at::native::legacy::cpu._th_renorm_out(), at::native::legacy::cuda._thnn_multi_margin_loss_backward(), at::native::legacy::cuda._thnn_multi_margin_loss_backward_out(), at::native::legacy::cuda._thnn_multi_margin_loss_forward(), at::native::legacy::cuda._thnn_multi_margin_loss_forward_out(), torch::optim::Optimizer.add_param_group(), torch::jit::mobile::SGD.add_param_group(), ShmProcessMutexCheck.addLock(), caffe2::python.addObjectMethods(), caffe2::AllocAligned< T >.alloc(), c10::cuda::CUDACachingAllocator::DeviceCachingAllocator.alloc_block(), torch::autograd::VariableType.allTypesForBackends(), at::native.alpha_dropout(), at::native.alpha_dropout_(), caffe2::PartitionOpBase.ApplyPartition(), torch::jit::SourceImporterImpl.attributeAssignmentSpecialHandlingHack(), torch::jit::StaticRuntime.benchmark(), torch::jit::StaticRuntime.benchmark_individual_ops(), at::native.bernoulli(), at::transformation.bernoulli(), at::native.bernoulli_(), at::native::templates.bernoulli_impl_(), at::native::templates::cuda.bernoulli_kernel(), at::native::templates::cuda.bernoulli_tensor_cuda_kernel(), caffe2::OnnxifiTransformer.blocklistCpuPartition(), caffe2.Brightness(), caffe2::OpenCLContext.BuildKernel(), fake_fp16.CalcSigmoidByLUT(), caffe2::FixedDivisor< std::int32_t >.CalcSignedMagic(), fake_fp16.CalcSwishByLUT(), 
fake_fp16.CalcSwishByLUTCubic(), torch::jit::InterpreterStateImpl.callstack(), at::native.cdist(), at::native.cdist_impl(), at::native.chain_matmul(), torch::jit::CodeTemplate.charAt(), at::native.check_attributes(), dnnlowp::KLDivergenceMinimization.ChooseQuantizationParams(), torch::jit::fuser::cuda::IrCloner.clone(), caffe2.ColorLighting(), caffe2.ColorNormalization(), torch::jit::fuser.compileKernel(), torch::jit::tensorexpr::LoopNest.computeAt(), caffe2::math::utils.ComputeTransposeAxesForReduceOp(), torch::jit.computeUpdatedConvWeightAndBias(), c10d::tcputil.connect(), AlignedAllocator< T, Alignment >.construct(), caffe2::RecurrentNetworkGradientOp< Context >.constructParams(), caffe2.Contrast(), torch::jit::ArgumentSpecCreator.create(), at::native::vulkan::detail::VContext.createInstance(), caffe2::CuDNNWrapper.cudnn_states(), AlignedAllocator< T, Alignment >.deallocate(), nom::nql.deallocTokenStrings(), torch::jit::CompilationUnit.define(), AlignedAllocator< T, Alignment >.destroy(), at::native.dist(), nom::nql::GraphMatcher.doesMatch(), caffe2::RecurrentNetworkGradientOp< Context >.DoRunWithType(), at::native.dropout(), at::native.dropout_(), torch::jit::Module.dump_to_str(), extendFrozenModules(), caffe2::OnnxifiTransformer.extractPartitionInfo(), torch::jit.factorial(), torch::jit::tensorexpr.fast_tanh(), at::native.feature_alpha_dropout(), at::native.feature_alpha_dropout_(), at::native.feature_dropout(), at::native.feature_dropout_(), torch::jit.findChildModule(), torch::jit::tensorexpr::LoopNest.flatten(), torch::nn::detail::RNNImplBase< Derived >.flatten_parameters(), caffe2.FreeDecodedData(), torch::jit::FunctionValue.FunctionValue(), torch::jit::fuser.generateKernel(), GenerateStylizedImage(), at::transformation.geometric(), at::native.geometric_(), at::native::templates.geometric_impl_(), c10::cuda::CUDACachingAllocator::DeviceCachingAllocator.get_free_block(), caffe2.get_op_args(), caffe2.get_tensor_shapes(), getblock32(), getblock64(), 
torch::jit::tensorexpr.getBoundExtents(), at.getDeprecatedTypeProperties(), at::DeprecatedTypePropertiesRegistry.getDeprecatedTypeProperties(), c10::impl.getDeviceGuardImpl(), caffe2::OnnxifiOp< Context >.getExtFunctionPointers(), c10d::DistributedC10d.getGlobalRank(), torch::jit.getInvokedModuleOpt(), nom::nql::GraphMatcher.getMatches(), torch::distributed::rpc::ProcessGroupAgent.handleSend(), at::cuda::detail::CUDAHooks.initCUDA(), at::Context.initCUDAIfNeeded(), at::Context.initHIPIfNeeded(), torch::jit.initJitScriptBindings(), torch::autograd.initTensorImplConversion(), caffe2::math.IntegerLog2(), at::native.is_metal_available(), at::native::ConvParams.is_output_padding_neg(), at::native::ConvParams.is_padded(), at::native::ConvParams.is_padding_neg(), at::native.is_vulkan_available(), c10.isSubtypeOfList(), torch.distributions.kl.kl_divergence(), torch::jit::SourceImporterImpl.LEGACY_import_methods(), c10d::tcputil.listen(), torch::jit.loop(), caffe2.MatchStrings(), torch::autograd::impl.materialize_autograd_meta(), at::native.matmul(), at::native.max_unpooling2d_backward_out_cpu(), at::native.max_unpooling3d_backward_out_cpu(), at::native.max_unpooling3d_forward_out_cpu_frame(), torch::jit::MemoryPlanner.MemoryPlanner(), torch::jit::SubgraphUtils.mergeNodeIntoSubgraph(), at::metal.metal_copy_(), at::native.multi_margin_loss_cpu(), at::native.multi_margin_loss_cpu_backward(), at::native.multi_margin_loss_cpu_backward_out(), at::native.multi_margin_loss_cpu_out(), torch::jit::tensorexpr::PolynomialTransformer.mutate(), at::native.mvlgamma(), at::native.mvlgamma_(), torch::autograd::generated::details.mvlgamma_backward(), at::native.mvlgamma_check(), at::native.norm(), torch::autograd::generated::details.norm_backward(), at::native.norm_out(), at::native.norm_sparse(), torch::jit::tensorexpr::LoopNest.normalize(), at::native.nuclear_norm_out(), c10::IValue.null_to_undefined_tensor(), caffe2::AlignedDeleter< T >.operator()(), at::native::BernoulliStub< RNG 
>.operator()(), at::native::templates::cuda::GeometricKernel< RNG >.operator()(), at::native::templates::cuda::BernoulliKernel< RNG >.operator()(), at::native::GeometricStub< RNG >.operator()(), caffe2::DecodedFrame::avDeleter.operator()(), torch::jit::fuser::cuda.operator<<(), std.operator<<(), torch::jit::tensorexpr.operator<<(), THPPointer< T >.operator=(), at::native.pairwise_distance(), tools.codegen.model.Arguments.parse(), torch::jit::SourceImporterImpl.parseSourceIfNeeded(), torch::jit::SchemaTypeParser.parseType(), torch::jit::ScriptTypeParser.parseType(), torch::nn::functional.pdist(), at::native.pdist(), torch::jit::tensorexpr::Polynomial.Polynomial(), caffe2::dag_utils.prepareOperatorNodes(), torch::jit::tensorexpr.print(), caffe2.ProtoToType(), ratevl(), torch::jit::Unpickler.readGlobal(), torch::jit::tensorexpr.Reduce(), torch::jit.registerPass(), torch::jit::PassManager< DerivedType >.registerPass(), torch::jit.registerPostPass(), torch::jit::RegisterPostPass.RegisterPostPass(), torch::jit.registerPrePass(), caffe2::AllocAligned< T >.release(), torch::autograd::generated::details.renorm_backward(), caffe2::NetObserverReporterPrint.report(), caffe2::ElementwiseLinearOp< T, Context, Engine >.RunOnDevice(), caffe2::ElementwiseLinearGradientOp< T, Context, Engine >.RunOnDevice(), caffe2::OnnxifiOp< Context >.RunOnDevice(), caffe2::BatchMatMulDNNLowPOp< T >.RunOnDevice(), caffe2::PoolOp< T, Context, Functor >.RunOnDeviceWithOrderNCHW(), caffe2::PoolGradientOp< T, Context, Functor >.RunOnDeviceWithOrderNCHW(), caffe2::PoolOp< T, Context, Functor >.RunOnDeviceWithOrderNHWC(), caffe2::PoolGradientOp< T, Context, Functor >.RunOnDeviceWithOrderNHWC(), caffe2.Saturation(), caffe2::RecurrentNetworkExecutorBase.SetMaxParallelTimesteps(), caffe2.signQuantize(), caffe2::BlobStat.sizeBytes(), torch::jit::tensorexpr::LoopNest.sliceHead(), torch::jit::tensorexpr::LoopNest.sliceTail(), torch::jit::tensorexpr::LoopNest.splitWithMask(), 
torch::jit::tensorexpr::LoopNest.splitWithTail(), torch::jit::mobile::SGD.step(), torch::optim::Adagrad.step(), torch::optim::Adam.step(), torch::optim::AdamW.step(), torch::optim::RMSprop.step(), torch::optim::SGD.step(), caffe2::PerfNetObserver.Stop(), caffe2::OnnxifiTransformer.SubnetToOnnxifiOpViaC2(), torch::jit::tensorexpr::Term.Term(), THPPointer< T >.THPPointer(), c10::IValue.toIntrusivePtr(), at::native.triplet_margin_loss(), caffe2.uniformQuantize2b1b(), torch::jit.unpackQuantizedWeightsHelper(), torch::jit::tensorexpr::LoopNest.unroll(), c10d::Reducer.verify_replica0_across_processes(), torch::jit::tensorexpr::CudaAnalysis.visit(), at::vulkan.vulkan_copy_(), torch::optim::Optimizer.zero_grad(), torch::jit::mobile::SGD.zero_grad(), and torch::jit::tensorexpr::KernelArena.~KernelArena().

◆ protoc_name

string diagnose_protobuf.protoc_name = 'protoc.exe'

Definition at line 30 of file diagnose_protobuf.py.

◆ python_protobuf_installed

bool diagnose_protobuf.python_protobuf_installed = True

Definition at line 24 of file diagnose_protobuf.py.

◆ PYTHON_PROTOBUF_NOT_INSTALLED

string diagnose_protobuf.PYTHON_PROTOBUF_NOT_INSTALLED
Initial value:
1= """
2You have not installed python protobuf. Protobuf is needed to run caffe2. You
3can install protobuf via pip or conda (if you are using anaconda python).
4"""

Definition at line 58 of file diagnose_protobuf.py.

◆ python_version

diagnose_protobuf.python_version = google.protobuf.__version__

Definition at line 23 of file diagnose_protobuf.py.

◆ tmp

diagnose_protobuf.tmp = re.search(r'\d\.\d\.\d', out)

Definition at line 49 of file diagnose_protobuf.py.

Referenced by at::native._pad_packed_sequence(), torch::optim._strong_wolfe(), caffe2.AddInput< CUDAContext >(), caffe2::python.addObjectMethods(), caffe2::onnx::OnnxAttributes.AddRewrittenAttribute(), caffe2::OnnxifiOp< Context >.adjustOutputBatchSizes(), caffe2::BatchBoxCoxOp< Context >.BoxCoxNaive(), caffe2::onnx::Caffe2Backend.BuildTensorFillingOp(), caffe2::SpatialBNFakeFp16Op.ComputeFusedParam(), torch::jit::tensorexpr::TensorExprKernel.computeValue(), at::native.conv_tbc_backward(), caffe2::onnx.ConvertIntegralValueToCaffe2(), caffe2::onnx.ConvertIntegralValueToCaffe2<::google::protobuf::uint64 >(), cos256_ps(), caffe2.custom_fp16_gemm(), caffe2::SpatialBNFakeLoweredFp16Op.DoRunWithType(), cl::CommandQueue.enqueueBarrierWithWaitList(), cl::CommandQueue.enqueueFillBuffer(), cl::CommandQueue.enqueueFillImage(), cl::CommandQueue.enqueueMarkerWithWaitList(), cl::CommandQueue.enqueueMigrateMemObjects(), exp256_ps(), c10::detail.f32_from_bits(), torch::autograd::generated::details.glu_double_backward_grad_output(), torch.handle_torch_function_no_python_arg_parser(), torch::jit.lambdaLiftReverse(), log256_ps(), torch::jit::fuser::cuda::TensorDomain.merge(), caffe2.MultiFrameOpticalFlowExtractor(), c10::SparseBitVector< ElementSize >::SparseBitVectorIterator.operator++(), c10::intrusive_ptr< TTarget, NullType >.operator=(), c10::weak_intrusive_ptr< TTarget, NullType >.operator=(), c10.operator>>(), THPPointer< T >.release(), caffe2::onnx::OnnxBackendGraphMap.remove(), caffe2::DotProductWithPaddingOp< T, Context >.RunOnDevice(), caffe2::IntegralImageGradientOp< T, Context >.RunOnDevice(), caffe2::FloatToFused8BitRowwiseQuantizedOp< T, Tsb, convert, HAS_CONVERT, Context >.RunOnDevice(), caffe2::Fused8BitRowwiseQuantizedToFloatOp< T, Tsb, convert, HAS_CONVERT, Context >.RunOnDevice(), caffe2::FloatToFusedNBitRowwiseQuantizedOp< BIT_RATE, T, convert, GREEDY >.RunOnDevice(), caffe2::FusedNBitRowwiseQuantizedToFloatOp< BIT_RATE, T, convert >.RunOnDevice(), 
caffe2::FloatToFusedNBitFakeRowwiseQuantizedOp< BIT_RATE, T, convert, GREEDY >.RunOnDevice(), caffe2::FileStoreHandler.set(), sin256_ps(), sincos256_ps(), at::native.soft_margin_loss_out(), at::native.sparse_transpose_(), c10::basic_string_view< CharT >.swap(), c10::detail_::impl< Ts >.swap(), c10::intrusive_ptr< TTarget, NullType >.swap(), and c10::weak_intrusive_ptr< TTarget, NullType >.swap().

◆ VERSION_MISMATCH

string diagnose_protobuf.VERSION_MISMATCH
Initial value:
1= """
2Your python protobuf is of version {py_ver} but your native protoc version is of
3version {native_ver}. This will cause the installation to produce incompatible
4protobuf files. This is bad in general - consider installing the same version.
5""".format(py_ver=python_version, native_ver=native_version)

Definition at line 73 of file diagnose_protobuf.py.