#include <cmath>
#include <string>
#include <vector>

#include <opencv2/dnn.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

using namespace cv;
using namespace cv::dnn;
// Command-line option table consumed by cv::CommandLineParser.
// Each entry is "{ name alias | default value | help text }".
const char* keys =
"{ help h | | Print help message. }"
"{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}"
"{ model m | | Path to a binary .pb file contains trained network.}"
"{ width | 320 | Preprocess input image by resizing to a specific width. It should be multiple by 32. }"
"{ height | 320 | Preprocess input image by resizing to a specific height. It should be multiple by 32. }"
"{ thr | 0.5 | Confidence threshold. }"
"{ nms | 0.4 | Non-maximum suppression threshold. }";
// Forward declaration: converts the EAST network's raw score/geometry maps
// into rotated rectangles with a confidence per detection (defined below).
void decode(
const Mat& scores,
const Mat& geometry,
float scoreThresh,
std::vector<RotatedRect>& detections, std::vector<float>& confidences);
int main(int argc, char** argv)
{
parser.about("Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of "
"EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)");
if (argc == 1 || parser.has("help"))
{
parser.printMessage();
return 0;
}
float confThreshold = parser.get<float>("thr");
float nmsThreshold = parser.get<float>("nms");
int inpWidth = parser.get<int>("width");
int inpHeight = parser.get<int>("height");
if (!parser.check())
{
parser.printErrors();
return 1;
}
if (parser.has("input"))
else
static const std::string kWinName = "EAST: An Efficient and Accurate Scene Text Detector";
std::vector<Mat> outs;
std::vector<String> outNames(2);
outNames[0] = "feature_fusion/Conv_7/Sigmoid";
outNames[1] = "feature_fusion/concat_3";
{
cap >> frame;
if (frame.empty())
{
break;
}
net.setInput(blob);
net.forward(outs, outNames);
std::vector<RotatedRect> boxes;
std::vector<float> confidences;
decode(scores, geometry, confThreshold, boxes, confidences);
std::vector<int> indices;
NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);
Point2f ratio((
float)frame.cols / inpWidth, (
float)frame.rows / inpHeight);
for (size_t i = 0; i < indices.size(); ++i)
{
for (int j = 0; j < 4; ++j)
{
vertices[j].
x *= ratio.x;
vertices[j].
y *= ratio.y;
}
for (int j = 0; j < 4; ++j)
line(frame, vertices[j], vertices[(j + 1) % 4],
Scalar(0, 255, 0), 1);
}
std::vector<double> layersTimes;
double t = net.getPerfProfile(layersTimes) / freq;
std::string label = format("Inference time: %.2f ms", t);
}
return 0;
}
void decode(
const Mat& scores,
const Mat& geometry,
float scoreThresh,
std::vector<RotatedRect>& detections, std::vector<float>& confidences)
{
detections.clear();
const int height = scores.
size[2];
const int width = scores.
size[3];
for (int y = 0; y < height; ++y)
{
const float* scoresData = scores.
ptr<
float>(0, 0, y);
const float* x0_data = geometry.
ptr<
float>(0, 0, y);
const float* x1_data = geometry.
ptr<
float>(0, 1, y);
const float* x2_data = geometry.
ptr<
float>(0, 2, y);
const float* x3_data = geometry.
ptr<
float>(0, 3, y);
const float* anglesData = geometry.
ptr<
float>(0, 4, y);
for (int x = 0; x < width; ++x)
{
float score = scoresData[x];
if (score < scoreThresh)
continue;
float offsetX = x * 4.0f, offsetY = y * 4.0f;
float angle = anglesData[x];
float h = x0_data[x] + x2_data[x];
float w = x1_data[x] + x3_data[x];
Point2f offset(offsetX + cosA * x1_data[x] + sinA * x2_data[x],
offsetY - sinA * x1_data[x] + cosA * x2_data[x]);
detections.push_back(r);
confidences.push_back(score);
}
}
}