List: Machine Learning/tf_source (10)
ecsimsw
/// matmul_op.cc #endif // GOOGLE_CUDA template <typename Device, typename T, bool USE_CUBLAS> class MatMulOp : public OpKernel { public: explicit MatMulOp(OpKernelConstruction* ctx) : OpKernel(ctx), algorithms_set_already_(false) { OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_a", &transpose_a_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("transpose_b", &transpose_b_)); LaunchMatMul<Device, T, USE_CUBLAS>::GetBlasGemmAlgorithm( ctx, &algorithms_, &algorithms_set_already_); ..
/// matmul_op.cc 1468 REGISTER_KERNEL_BUILDER #if defined(INTEL_MKL) // math kernel library TF_CALL_float(REGISTER_CPU_EIGEN); #else TF_CALL_float(REGISTER_CPU); #define REGISTER_CPU_EIGEN(T) \ REGISTER_KERNEL_BUILDER( \ Name("MatMul").Device(DEVICE_CPU).TypeConstraint<T>("T").Label("eigen"), \ MatMulOp<CPUDevice, T, false /* cublas, ignored for CPU */>); #define REGISTER_CPU(T) \ REGISTER_KERNEL_BUILDER( \ Name("MatMul").Device(DEVICE_CPU).TypeCon..
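The macros above simply stamp out one REGISTER_KERNEL_BUILDER call per element type. As an illustration of the user-facing side of that pattern, here is a minimal sketch of a hypothetical "ScaleByTwo" CPU kernel registered with the same Name / Device / TypeConstraint builder chain; the op name and class are made up, but the structure follows TensorFlow's documented custom-op API.

// Illustrative sketch: a hypothetical "ScaleByTwo" op registered the same way
// matmul_op.cc registers its CPU kernels (one builder call per supported type).
#include <cstdint>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"

using namespace tensorflow;

REGISTER_OP("ScaleByTwo")
    .Input("x: T")
    .Output("y: T")
    .Attr("T: {float, double}");

template <typename T>
class ScaleByTwoOp : public OpKernel {
 public:
  explicit ScaleByTwoOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}

  void Compute(OpKernelContext* ctx) override {
    const Tensor& input = ctx->input(0);
    Tensor* output = nullptr;
    OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
    auto in = input.flat<T>();
    auto out = output->flat<T>();
    for (int64_t i = 0; i < in.size(); ++i) out(i) = in(i) * static_cast<T>(2);
  }
};

// What REGISTER_CPU(T) / TF_CALL_float expand into for MatMul, written out by hand.
REGISTER_KERNEL_BUILDER(
    Name("ScaleByTwo").Device(DEVICE_CPU).TypeConstraint<float>("T"),
    ScaleByTwoOp<float>);
REGISTER_KERNEL_BUILDER(
    Name("ScaleByTwo").Device(DEVICE_CPU).TypeConstraint<double>("T"),
    ScaleByTwoOp<double>);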
/// REGISTER_KERNEL_BUILDER(Name("SparseApplyAdadelta") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tindices>("Tindices"), \ SparseApplyAdadeltaOp<T, Tindices>); /// #define REGISTER_KERNEL_BUILDER_UNIQ(ctr, kernel_builder, ...) \ constexpr bool should_register_##ctr##__flag = \ SHOULD_REGISTER_OP_KERNEL(#__VA_ARGS__); \ static ::tensorflow::kernel_factory::OpKernelRegistrar \ registrar__body__##ctr..
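What makes REGISTER_KERNEL_BUILDER_UNIQ work is the uniquely named static OpKernelRegistrar: its constructor runs during static initialization, before main(), and records the kernel factory in a global registry. Below is a self-contained sketch of that trick with hypothetical names instead of TensorFlow's real classes (and without the SHOULD_REGISTER_OP_KERNEL guard).

// Sketch of the static-registrar idiom behind REGISTER_KERNEL_BUILDER_UNIQ.
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct Kernel { virtual ~Kernel() = default; };

using KernelFactory = std::function<Kernel*()>;

// Construct-on-first-use global table; intentionally never destroyed.
std::unordered_map<std::string, KernelFactory>& Registry() {
  static auto* registry = new std::unordered_map<std::string, KernelFactory>;
  return *registry;
}

struct KernelRegistrar {
  KernelRegistrar(const std::string& name, KernelFactory factory) {
    Registry().emplace(name, std::move(factory));
  }
};

// __COUNTER__ plays the role of "ctr": token pasting yields a distinct
// variable name for every registration in the translation unit.
#define REGISTER_KERNEL_UNIQ_HELPER(ctr, name, type)       \
  static KernelRegistrar registrar__body__##ctr##__object( \
      name, []() -> Kernel* { return new type; })
#define REGISTER_KERNEL_UNIQ(ctr, name, type) \
  REGISTER_KERNEL_UNIQ_HELPER(ctr, name, type)
#define REGISTER_KERNEL(name, type) \
  REGISTER_KERNEL_UNIQ(__COUNTER__, name, type)

struct MyKernel : Kernel {};
REGISTER_KERNEL("MyKernel", MyKernel);

int main() {
  std::cout << "registered kernels: " << Registry().size() << "\n";
}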
stackexchange: What's the difference between user registers and kernel registers? "Answer" It's simple: when each application program runs, it has access to its own set of registers. When you switch to another application, the register contents are saved to memory, and the registers saved from the other application are loaded, so that application continues its execution. Similarly, the OS has its own register..
///"tensorflow/core/framework/op_kernel.cc" typedef std::unordered_multimap KernelRegistry; void* GlobalKernelRegistry() { static KernelRegistry* global_kernel_registry = new KernelRegistry; return global_kernel_registry; } static KernelRegistry* GlobalKernelRegistryTyped() { return reinterpret_cast(GlobalKernelRegistry()); } /// Conclusion KernelRegistry : Unordered multi_map , includes key, ke..
///"tensorflow/core/framework/op_kernel.cc" // TODO(mrry): Convert to std::make_unique when available. OpKernel::OpKernel(OpKernelConstruction* context) : OpKernel(context, std::unique_ptr(new NodeDef(context->def()))) {} OpKernel::OpKernel(OpKernelConstruction* context, std::unique_ptr node_def) : def_(std::move(node_def)), input_types_(context->input_types().begin(), context->input_types().end..
///"tensorflow/core/framework/op_kernel.h" class OpKernelConstruction { public: OpKernelConstruction(DeviceType device_type, DeviceBase* device, Allocator* allocator, const NodeDef* node_def,const OpDef* op_def, FunctionLibraryRuntime* flib, const DataTypeSlice& input_types, const MemoryTypeSlice& input_memory_types, const DataTypeSlice& output_types, const MemoryTypeSlice& output_memory_types, ..
"CODE" ///"tensorflow/core/platform/file_system.h" class FileSystemRegistry { public: typedef std::function Factory; virtual ~FileSystemRegistry(); virtual Status Register(const string& scheme, Factory factory) = 0; virtual FileSystem* Lookup(const string& scheme) = 0; virtual Status GetRegisteredFileSystemSchemes( std::vector* schemes) = 0; }; /// tensorflow/core/framework/op_kernel.h namespace..