#ifndef NuSonic_Triton_TritonData
#define NuSonic_Triton_TritonData

#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <unordered_map>
#include <vector>
//... (project include providing triton_span::Span elided in the original)

#include "grpc_client.h"
#include "grpc_service.pb.h"

//aliases for the local input and output collection types
template <typename DT>
using TritonInput = std::vector<std::vector<DT>>;
template <typename DT>
using TritonOutput = std::vector<triton_span::Span<const DT*>>;

//stores all the information needed for one Triton input or output tensor
template <typename IO>
class TritonData {
public:
  using Result = nvidia::inferenceserver::client::InferResult;
  using TensorMetadata = inference::ModelMetadataResponse_TensorMetadata;
  using ShapeType = std::vector<int64_t>;
  using ShapeView = triton_span::Span<ShapeType::const_iterator>;  //read-only view of a shape (definition inferred; not shown in the original)

  //constructor; noBatch disables the implicit outer batch dimension
  TritonData(const std::string& name, const TensorMetadata& model_info, bool noBatch);

  //modifiers: set the full shape, or one (variable) dimension at a time
  bool setShape(const ShapeType& newShape);
  bool setShape(unsigned loc, int64_t val);

  //io accessors: send input data to the server, retrieve output data from it
  template <typename DT>
  void toServer(std::shared_ptr<TritonInput<DT>> ptr);
  template <typename DT>
  TritonOutput<DT> fromServer() const;

  //const accessors
  const ShapeView& shape() const;
  const std::string& dname() const;
  unsigned batchSize() const;

  //utilities
  bool variableDims() const;
  int64_t sizeShape() const;
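  //--------------------------------------------------------------------------
  //A minimal usage sketch (not part of the original header): how calling code
  //might resolve a variable dimension, send input, and read back output. The
  //owning-client type and its input()/output() accessors are assumptions here.
  //
  //  TritonInputData& nn_input = client.input().at("INPUT0");
  //  nn_input.setShape(0, nrows);  //fix a variable (-1) dimension
  //  auto data = std::make_shared<TritonInput<float>>();
  //  for (unsigned i = 0; i < nn_input.batchSize(); ++i)
  //    data->emplace_back(nn_input.sizeShape(), 0.f);  //fill real values here
  //  nn_input.toServer(data);  //transfers ownership of the data to the client
  //  //...after the inference call completes...
  //  const TritonOutputData& nn_output = client.output().at("OUTPUT0");
  //  TritonOutput<float> out = nn_output.fromServer();  //one Span per batch entry
  //--------------------------------------------------------------------------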
private:
  //helpers: detect variable (negative) dimensions, and compute the product of all dimensions
  bool anyNeg(const ShapeView& vec) const {
    return std::any_of(vec.begin(), vec.end(), [](int64_t i) { return i < 0; });
  }
  int64_t dimProduct(const ShapeView& vec) const {
    return std::accumulate(vec.begin(), vec.end(), 1, std::multiplies<int64_t>());
  }
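  //worked example: for a model shape {-1, 3, 128}, anyNeg() is true, marking a
  //variable dimension, so the entry size must be computed after the shape is
  //fully set; with the first dimension set to 2, dimProduct() = 2*3*128 = 768.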
  //modifiers and factory used internally (by the owning client, e.g. via friendship in the full header)
  void setBatchSize(unsigned bsize);
  void setResult(std::shared_ptr<Result> result);
  void createObject(IO** ioptr) const;

  //members
  inference::DataType dtype_;
  std::shared_ptr<IO> data_;
  std::shared_ptr<Result> result_;
};

//concrete specializations and maps keyed by tensor name
//(IO types inferred from the Triton gRPC client: InferInput for inputs, InferRequestedOutput for outputs)
using TritonInputData = TritonData<nvidia::inferenceserver::client::InferInput>;
using TritonInputMap = std::unordered_map<std::string, TritonInputData>;
using TritonOutputData = TritonData<nvidia::inferenceserver::client::InferRequestedOutput>;
using TritonOutputMap = std::unordered_map<std::string, TritonOutputData>;

#endif
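//A usage sketch (not part of the original header), assuming the owning client
//exposes its tensors through the maps defined above; the input() accessor name
//is hypothetical:
//
//  TritonInputMap& inputs = client.input();
//  for (auto& [name, data] : inputs) {
//    if (data.variableDims())
//      data.setShape(0, 10);  //e.g. fix the first (variable) dimension
//  }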