Commit 4fd831ba authored by Oleg Dzhimiev

1. removed trt h & cpp

2. added project eclipse files
parent 5ae19695
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?fileVersion 4.0.0?><cproject storage_type_id="org.eclipse.cdt.core.XmlProjectDescriptionStorage">
<storageModule moduleId="org.eclipse.cdt.core.settings">
<cconfiguration id="org.eclipse.cdt.core.default.config.90621872">
<storageModule buildSystemId="org.eclipse.cdt.core.defaultConfigDataProvider" id="org.eclipse.cdt.core.default.config.90621872" moduleId="org.eclipse.cdt.core.settings" name="Configuration">
<externalSettings/>
<extensions/>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.externalSettings"/>
</cconfiguration>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.pathentry">
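<!-- index sources from the project root; scan build/ as output, excluding CMake's generated CMakeFiles -->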
<pathentry kind="src" path=""/>
<pathentry excluding="**/CMakeFiles/**" kind="out" path="build"/>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.core.LanguageSettingsProviders"/>
</cproject>
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>tensorflow-feed-from-gpu</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.cdt.core.cBuilder</name>
<triggers>clean,full,incremental,</triggers>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.cdt.core.cnature</nature>
<nature>org.eclipse.cdt.core.ccnature</nature>
<nature>org.eclipse.cdt.cmake.core.cmakeNature</nature>
</natures>
</projectDescription>
cmake_minimum_required(VERSION 3.8)
cmake_minimum_required(VERSION 3.16)
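# point CMake at nvcc via the CUDACXX environment variable before project() enables the CUDA language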
set(ENV{CUDACXX} /usr/local/cuda/bin/nvcc)
project(tf_detector_example LANGUAGES CXX CUDA)
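# CMP0074 OLD keeps the pre-3.12 find_package() behavior of ignoring <PackageName>_ROOT variables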
cmake_policy(SET CMP0074 OLD)
@@ -16,7 +19,6 @@ set(SOURCE_FILES
infer_with_trt.cpp
inference_base.cpp
inference_tf.cpp
inference_trt.cpp
channel_first.cu
)
inference_trt.cpp deleted
#include "inference_trt.h"
using namespace cv;
using namespace std;
int InferenceTensorRT::ReadGraph()
{
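// CreateTrtEngineAndContext() presumably builds or deserializes the TensorRT engine for
// graphFile (INT8 mode when isInt8 is set) and returns a (runtime, engine, context) tuple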
auto runtimeEngineContext = CreateTrtEngineAndContext(graphFile, isInt8);
runtime = std::get<0>(runtimeEngineContext);
engine = std::get<1>(runtimeEngineContext);
context = std::get<2>(runtimeEngineContext);
return 0;
}
int InferenceTensorRT::ReadClassLabels()
{
populateClassLabels(labelsVector, labelsFile);
return 0;
}
int InferenceTensorRT::doInference(cv::cuda::GpuMat &d_frame)
{
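// run the network on the GPU frame; the tuple carries the flattened detection
// array and the per-image detection counts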
auto inferenceTuple = doInferenceWithTrt(d_frame, context, labelsVector);
detections = std::get<0>(inferenceTuple);
numDetections = std::get<1>(inferenceTuple);
return 0;
}
void InferenceTensorRT::visualize(cv::cuda::GpuMat &d_frame, double fps)
{
Mat img;
d_frame.download(img);
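// download() copies the frame from device to host so OpenCV drawing routines can run on it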
for (int p = 0; p < N; ++p)
{
for (int i = 0; i < numDetections[p]; ++i)
{
float *det = &detections[0] + (p * detectionOutputParam.keepTopK + i) * 7;
if (det[2] < visualizeThreshold)
continue;
// Each detection is packed as 7 floats, in this order:
// [image_id, label, confidence, xmin, ymin, xmax, ymax]
assert((int)det[1] < OUTPUT_CLS_SIZE);
std::string storeName = outFileRoot + labelsVector[(int)det[1]] + "-" + std::to_string(det[2]) + ".jpg";
if (debug & 0x2)
{
// det array idxs: (4, 3) = (y0, x0), (6, 5) = (y1, x1)
// dets are in normalized coordinates: 0 <= pt <= 1
drawBoundingBoxOnImage(img, det[4], det[3], det[6], det[5], det[2], labelsVector[(int)det[1]]);
}
}
}
if (debug & 0x2)
{
string framework("TensorRT");
if (isInt8)
{
framework += " (INT8)";
}
auto color = Scalar(0, 255, 255);
drawFrameworkSignature(img, fps, framework, color);
}
}
inference_trt.h deleted
#pragma once
#include <NvInfer.h> // TensorRT types (IRuntime etc.) live here, in namespace nvinfer1; included in case inference_base.h does not pull them in
#include "inference_base.h"
using namespace nvinfer1;
using namespace std;
class InferenceTensorRT : public InferenceBase
{
private:
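// TensorRT objects created by ReadGraph(); released in reverse order in the destructor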
IRuntime *runtime = nullptr;
ICudaEngine *engine = nullptr;
IExecutionContext *context = nullptr;
bool isInt8;
//batch size
const int N = 1;
const float visualizeThreshold = 0.5;
vector<string> labelsVector;
vector<int> numDetections;
vector<float> detections;
string outFileRoot;
protected:
int ReadGraph() override;
int ReadClassLabels() override;
int doInference(cv::cuda::GpuMat &d_frame) override;
void visualize(cv::cuda::GpuMat&, double) override;
public:
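// outFile becomes outFileRoot, the filename prefix used when naming detections in visualize()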
InferenceTensorRT(const string &labelsFile, const string &graphFile, bool isInt8, double threshScore = 0.5, double threshIOU = 0.8, int dbg = 0, string outFile="")
: InferenceBase(labelsFile, graphFile, threshScore, threshIOU, dbg)
, isInt8(isInt8)
, labelsVector()
, numDetections(N)
, detections(N * detectionOutputParam.keepTopK * 7)
, outFileRoot(outFile)
{
}
virtual ~InferenceTensorRT()
{
if(context != nullptr)
{
context->destroy();
}
if(engine != nullptr)
{
engine->destroy();
}
if(runtime != nullptr)
{
runtime->destroy();
}
}
};
\ No newline at end of file
#include "inference_base.h"
#include "inference_tf.h"
#include "inference_trt.h"
#include <cuda_profiler_api.h>
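// declares cudaProfilerStart()/cudaProfilerStop() for delimiting profiler capture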
@@ -22,9 +21,11 @@ int main(int argc, char *argv[])
return -1;
}
cout << "Hello world!\n";
const String keys =
"{d display |1 | view video while objects are detected}"
"{t tensorrt|false | use tensorrt}"
//"{t tensorrt|false | use tensorrt}"
"{i int8|false| use INT8 (requires callibration)}"
"{v video | | video for detection}"
"{graph ||frozen graph location}"
@@ -36,14 +37,12 @@ int main(int argc, char *argv[])
CommandLineParser parser(argc, argv, keys);
int showWindow = parser.get<int>("d");
String video_file = parser.get<String>("v");
bool is_tensor_rt = parser.get<bool>("t");
//bool is_tensor_rt = parser.get<bool>("t");
bool is_int8 = parser.get<bool>("i");
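// NOTE: is_int8 is still parsed but no longer used now that the TensorRT path is gone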
String LABELS = parser.get<String>("labels");
String GRAPH = parser.get<String>("graph");
unique_ptr<InferenceBase> infer(is_tensor_rt ?
(InferenceBase *) new InferenceTensorRT(LABELS, GRAPH, is_int8)
: (InferenceBase *) new InferenceTensorflow(LABELS, GRAPH));
unique_ptr<InferenceBase> infer((InferenceBase *) new InferenceTensorflow(LABELS, GRAPH));
infer->set_debug(showWindow);
@@ -51,4 +50,4 @@ int main(int argc, char *argv[])
infer->RunInferenceOnStream();
return 0;
}
\ No newline at end of file
}