mirror of
https://github.com/saymrwulf/onnxruntime.git
synced 2026-05-14 20:48:00 +00:00
Remove unnecessary const
This commit is contained in:
parent
d6abc38182
commit
afd831ae9e
5 changed files with 11 additions and 11 deletions
|
|
@@ -726,7 +726,7 @@ const std::vector<MLDataType>& DataTypeImpl::AllTensorTypes() {
|
|||
// helper to stream. expected to only be used for error output, so any typeid lookup
|
||||
// cost should be fine. alternative would be to add a static string field to DataTypeImpl
|
||||
// that we set in the register macro to the type name, and output that instead.
|
||||
std::ostream& operator<<(std::ostream& out, const MLDataType data_type) {
|
||||
std::ostream& operator<<(std::ostream& out, const DataTypeImpl* data_type) {
|
||||
if (data_type == nullptr)
|
||||
return out << "(null)";
|
||||
|
||||
|
|
|
|||
|
|
@@ -62,7 +62,7 @@ ExecutionFrame::ExecutionFrame(const std::unordered_map<std::string, MLValue>& f
|
|||
ExecutionFrame::~ExecutionFrame() = default;
|
||||
|
||||
Status ExecutionFrame::AllocateMLValueTensorSelfOwnBuffer(int mlvalue_index,
|
||||
const MLDataType element_type,
|
||||
const DataTypeImpl* element_type,
|
||||
const ONNXRuntimeAllocatorInfo& location,
|
||||
const TensorShape& shape,
|
||||
bool create_fence) {
|
||||
|
|
@@ -71,7 +71,7 @@ Status ExecutionFrame::AllocateMLValueTensorSelfOwnBuffer(int mlvalue_index,
|
|||
}
|
||||
|
||||
Status ExecutionFrame::AllocateMLValueTensorSelfOwnBufferHelper(int mlvalue_index,
|
||||
const MLDataType element_type,
|
||||
const DataTypeImpl* element_type,
|
||||
const ONNXRuntimeAllocatorInfo& location,
|
||||
const TensorShape& shape,
|
||||
bool create_fence) {
|
||||
|
|
@@ -163,7 +163,7 @@ void ExecutionFrame::TraceAllocate(int mlvalue_idx, size_t size) {
|
|||
}
|
||||
|
||||
Status ExecutionFrame::AllocateTensorWithSelfOwnBuffer(const int index,
|
||||
const MLDataType element_type,
|
||||
const DataTypeImpl* element_type,
|
||||
const ONNXRuntimeAllocatorInfo& location,
|
||||
const TensorShape& shape,
|
||||
bool create_fence) {
|
||||
|
|
@@ -173,7 +173,7 @@ Status ExecutionFrame::AllocateTensorWithSelfOwnBuffer(const int index,
|
|||
|
||||
Status ExecutionFrame::AllocateMLValueTensorPreAllocateBuffer(int mlvalue_index_to_allocate,
|
||||
int mlvalue_index_reuse,
|
||||
const MLDataType element_type,
|
||||
const DataTypeImpl* element_type,
|
||||
const ONNXRuntimeAllocatorInfo& location,
|
||||
const TensorShape& shape,
|
||||
bool create_fence) {
|
||||
|
|
@@ -200,7 +200,7 @@ Status ExecutionFrame::AllocateMLValueTensorPreAllocateBuffer(int mlvalue_index_
|
|||
|
||||
Status ExecutionFrame::AllocateTensorWithPreAllocateBufferHelper(MLValue* p_mlvalue,
|
||||
void* pBuffer,
|
||||
const MLDataType element_type,
|
||||
const DataTypeImpl* element_type,
|
||||
const ONNXRuntimeAllocatorInfo& location,
|
||||
const TensorShape& shape) {
|
||||
if (p_mlvalue->IsAllocated()) {
|
||||
|
|
@@ -219,7 +219,7 @@ Status ExecutionFrame::AllocateTensorWithPreAllocateBufferHelper(MLValue* p_mlva
|
|||
|
||||
Status ExecutionFrame::AllocateTensorWithPreAllocateBuffer(const int offset,
|
||||
void* pBuffer,
|
||||
const MLDataType element_type,
|
||||
const DataTypeImpl* element_type,
|
||||
const ONNXRuntimeAllocatorInfo& location,
|
||||
const TensorShape& shape) {
|
||||
ONNXRUNTIME_ENFORCE(offset >= 0 && offset < node_values_.size());
|
||||
|
|
|
|||
|
|
@@ -133,7 +133,7 @@ bool KernelRegistry::VerifyKernelDef(const onnxruntime::Node& node,
|
|||
// valid names (of types or parameters) at the time that kernels are registered.
|
||||
if ((nullptr != actual_type) &&
|
||||
!std::any_of(allowed_types.begin(), allowed_types.end(),
|
||||
[actual_type, &node, &error_str](const MLDataType& expected_type) {
|
||||
[actual_type, &node, &error_str](const DataTypeImpl* expected_type) {
|
||||
bool rc = expected_type->IsCompatible(*actual_type); // for easier debugging
|
||||
if (!rc) {
|
||||
// TODO print type information as well
|
||||
|
|
|
|||
|
|
@@ -19,7 +19,7 @@ namespace python {
|
|||
namespace py = pybind11;
|
||||
using namespace onnxruntime::logging;
|
||||
|
||||
int OnnxRuntimeTensorToNumpyType(const MLDataType& tensor_type) {
|
||||
int OnnxRuntimeTensorToNumpyType(const DataTypeImpl* tensor_type) {
|
||||
static std::map<MLDataType, int> type_map{
|
||||
{DataTypeImpl::GetType<bool>(), NPY_BOOL},
|
||||
{DataTypeImpl::GetType<float>(), NPY_FLOAT},
|
||||
|
|
@@ -42,7 +42,7 @@ int OnnxRuntimeTensorToNumpyType(const MLDataType& tensor_type) {
|
|||
}
|
||||
}
|
||||
|
||||
const MLDataType& NumpyToOnnxRuntimeTensorType(int numpy_type) {
|
||||
const DataTypeImpl* NumpyToOnnxRuntimeTensorType(int numpy_type) {
|
||||
static std::map<int, MLDataType> type_map{
|
||||
{NPY_BOOL, DataTypeImpl::GetType<bool>()},
|
||||
{NPY_FLOAT, DataTypeImpl::GetType<float>()},
|
||||
|
|
|
|||
|
|
@@ -22,7 +22,7 @@ namespace python {
|
|||
|
||||
namespace py = pybind11;
|
||||
|
||||
int OnnxRuntimeTensorToNumpyType(const MLDataType& tensor_type);
|
||||
int OnnxRuntimeTensorToNumpyType(const DataTypeImpl* tensor_type);
|
||||
|
||||
void CreateGenericMLValue(AllocatorPtr alloc, const std::string& name_input, py::object& value, MLValue* p_mlvalue);
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue