Documente Academic
Documente Profesional
Documente Cultură
Compute the grayscale brightness of the previous and current frames, apply a Gaussian blur with a
5-pixel kernel to each, and compute the absolute difference between the resulting frames
(cv::cvtColor, cv::GaussianBlur, cv::absdiff).
Find the contours of closed regions and their convex hulls (cv::convexHull), and initialize a
CarDescriptor for each hull to track its movement.
Match the objects tracked in previous frames against the objects detected in the current frame
(matchCars). This comprises:
* predicting each object's next position from up to 5 points of its position history
(predictNextPosition),
* searching for a current-frame object within a radius of sqrt(w^2 + h^2) * 0.5 around
the predicted point.
Remove objects from the tracked list once they have been absent for 5 consecutive frames.
#define SHOW_STEPS 0
#include <algorithm>
#include <iterator>
#include <vector>
#include "processing.h"
// Default-construct a tracked car: a freshly detected object starts out as
// matched for the current frame, not yet counted, and with no missed frames.
CarDescriptor::CarDescriptor() :
    isMatchFound(true),
    isCounted(false),
    numFramesWithoutMatch(0)
{
}
// Predict the next centre position by extrapolating a weighted average of
// the deltas between up to the last 5 recorded positions; more recent
// deltas receive a larger weight.
//
// Fixes two defects in the original: the loop never advanced its iterators
// (so one single delta was re-summed under every weight), and the delta was
// computed as (older - newer), which pointed the prediction backwards.
void CarDescriptor::predictNextPosition() {
    if (centerPositions.empty())
        return; // nothing to extrapolate from; back() below would be UB
    int account = std::min(5, (int)centerPositions.size());
    // Walk consecutive position pairs from newest to oldest.
    auto newer = centerPositions.rbegin();
    auto older = std::next(newer); // == rend() when size is 1; never dereferenced then
    int deltaX = 0, deltaY = 0, sum = 0;
    // Newest delta gets weight (account - 1), the oldest gets weight 1.
    for (int i = account - 1; i >= 1; --i, ++newer, ++older) {
        deltaX += (newer->x - older->x) * i;
        deltaY += (newer->y - older->y) * i;
        sum += i;
    }
    if (sum > 0) {
        deltaX /= sum;
        deltaY /= sum;
    }
    predictedNextPos.x = centerPositions.back().x + deltaX;
    predictedNextPos.y = centerPositions.back().y + deltaY;
}
#if SHOW_STEPS
// Debug helper: draws the tracked cars' contours, filled white on a black
// canvas, in an OpenCV window named `title`.
// NOTE: guarded with `#if` (not `#ifdef`) so the helper is compiled out when
// SHOW_STEPS is 0, matching the `#if SHOW_STEPS` guards at the call sites;
// with `#ifdef` this was dead code whenever SHOW_STEPS was defined as 0.
void show(const cv::Size& imageSize, const std::list<CarDescriptor>& cars, const std::string& title)
{
    cv::Mat image(imageSize, CV_8UC3, BLACK);
    std::vector<std::vector<cv::Point>> contours;
    contours.reserve(cars.size());
    for (const auto &car : cars)
        contours.push_back(car.contour);
    // -1, -1: draw every contour, filled.
    cv::drawContours(image, contours, -1, WHITE, -1);
    cv::imshow(title, image);
}
#endif
// Construct the detection filter: recursive mutex guards shared state, the
// car counter starts at zero, and a 3x3 rectangular kernel is prepared for
// the morphological operations used during detection.
DetectFilter::DetectFilter(QObject* parent) :
    AbstractFilter(parent),
    _mutex(QMutex::Recursive),
    _carsCount(0)
{
    const cv::Size kernelSize(3, 3);
    _structuringElement = cv::getStructuringElement(cv::MORPH_RECT, kernelSize);
}
// (fragment of a frame-processing function -- its signature is not visible here)
// Build the convex hull of every detected contour; the hulls smooth out the
// ragged outlines of the motion blobs before tracking.
std::vector<std::vector<cv::Point>> convexHulls(contours.size());
for (int i = 0; i < contours.size(); i++)
cv::convexHull(contours[i], convexHulls[i]);
#if SHOW_STEPS
// NOTE(review): show() as declared above takes std::list<CarDescriptor>, but
// convexHulls is a vector of point vectors -- this debug call looks like it
// would not compile with SHOW_STEPS enabled; verify against show()'s overloads.
show(imgThresh.size(), convexHulls, "convexHulls");
#endif
// For the first couple of frames there is no history to match against, so the
// current detections simply become the tracked set.
if (_frameCount <= 2)
_cars.swap(_currentFrameCars);
else {
// Associate previously tracked cars with this frame's detections.
matchCars(_cars, _currentFrameCars);
}
#if SHOW_STEPS
show(imgThresh.size(), _cars, "trackedCars");
#endif
// Keep the current frame for next iteration's differencing, then continue on a
// private copy so the drawing below does not modify the stored frame.
_prevFrame = currentFrame;
currentFrame = currentFrame.clone();
// prepare visualization
drawCarsInfo(_cars, currentFrame);
#include <QGraphicsScene>
#include <QGraphicsSceneMouseEvent>
#include <QPainter>
// Destructor: releases the context menu owned by this point item.
// (deleting a null pointer is a no-op, so an unset menu is safe)
GraphicsItemPolylinePoint::~GraphicsItemPolylinePoint() {
delete _menu;
}
// Context-menu handler: forwards the "delete point" request as a signal so
// the owning polyline can remove this point.
void GraphicsItemPolylinePoint::menuDeletePoint() {
emit deletePoint();
}
// (fragment of a constructor -- the enclosing definition is not visible here)
// Register this item with the scene and configure its drawing pens; only the
// right mouse button is accepted, presumably to drive the context menu.
scene->addItem(this);
_pen.setWidth(3);
_penInverted.setWidth(3);
setAcceptedMouseButtons(Qt::RightButton);
// Destructor: detaches the child point items from the scene and releases the
// context menu.
// Fix: scene() returns null when the item is not (or no longer) attached to a
// scene; the original dereferenced it unconditionally, which would crash for a
// detached polyline.
GraphicsItemPolyline::~GraphicsItemPolyline() {
    if (auto *s = scene()) {
        for (auto &pt : _points)
            s->removeItem(pt.object);
    }
    delete _menu;
}
// Bounding rectangle of the polyline: the union of the (position-adjusted)
// bounding rects of all point items.
// NOTE(review): _boundRect is accumulated with united() and never reset, so
// it grows monotonically -- the reported rect can never shrink even after
// points move inward or are deleted. This may be deliberate (Qt requires
// prepareGeometryChange() before boundingRect changes), but verify; if not,
// the rect should be recomputed from scratch each call.
QRectF GraphicsItemPolyline::boundingRect() const {
QMutexLocker lock(&_mutex);
for (auto &item : _points) {
auto rect = item.object->boundingRect();
auto newCenter = item.object->pos();
rect.moveCenter(newCenter);
_boundRect = _boundRect.united(rect);
}
return _boundRect;
}
// Slot invoked when one of the point items moves: locate the sender among our
// points and, if updating it changed the polyline, trigger a repaint.
void GraphicsItemPolyline::itemPosChanged() {
    const int index = IndexOfPoint(sender());
    if (index < 0)
        return; // sender is not one of our points
    if (UpdatePoint(index))
        update();
}
void GraphicsItemPolyline::menuAddPoint() {
int pos = IndexOfSegment(_menuPoint);
InsertPoint(pos + 1, _menuPoint);
}
// Context-menu handler: delete the point whose item sent this signal, but
// never drop below the 2 points required for a polyline.
// Fix: IndexOfPoint can return a negative index when the sender is not found
// (itemPosChanged guards for exactly this); the original passed that negative
// index straight to DeletePoint.
void GraphicsItemPolyline::menuDeletePoint() {
    QMutexLocker lock(&_mutex);
    int pos = IndexOfPoint(sender());
    if (pos >= 0 && _points.size() > 2)
        DeletePoint(pos);
}
// Context-menu handler: toggle the counting direction of the segment that was
// clicked, then notify listeners and repaint.
// Fix: guard against a negative index from IndexOfSegment -- the original
// indexed _points[pos] unconditionally, which is out-of-bounds when the click
// position matches no segment.
void GraphicsItemPolyline::menuInvertSegment() {
    QMutexLocker lock(&_mutex);
    int pos = IndexOfSegment(_menuPoint);
    if (pos < 0)
        return;
    auto &point = _points[pos];
    point.invertDirection = !point.invertDirection;
    emit segmentsUpdated(segments());
    update();
}
#include <QTimerEvent>
#include "QtUtility.h"
// class QtCVImage
#ifdef VIDEOFRAME_SUPPORT
// Wrap a QVideoFrame as a QImage without copying pixel data: the frame is
// stored in _frame, mapped read-only, and _image aliases its mapped buffer.
// NOTE(review): _image is only valid while _frame stays mapped -- there is no
// matching unmap() visible here, and re-assigning maps the new frame without
// unmapping the previous one; verify the frame's unmap happens elsewhere (or
// consider _image = QImage(...).copy() plus an immediate unmap).
QtCVImage& QtCVImage::operator=(const QVideoFrame& frame) {
_frame = frame;
if (_frame.map(QAbstractVideoBuffer::ReadOnly)) {
// Translate the frame's pixel format to the matching QImage format.
QImage::Format format =
QVideoFrame::imageFormatFromPixelFormat(frame.pixelFormat());
_image = QImage(frame.bits(), frame.width(), frame.height(), frame.bytesPerLine(),
format);
}
return *this;
}
#endif
// class AbstractFilter
// Start the periodic frame-processing timer; does nothing when it is already
// running. The frame counter is reset so the pipeline restarts cleanly.
void AbstractFilter::start() {
    if (_timer.isActive())
        return;
    _frameCount = 0;
    _timer.start((int)(1001 / 24), this); // ~42 ms period, i.e. roughly 24 fps
}
// Stop the periodic frame-processing timer; a no-op if it is not running.
void AbstractFilter::stop() {
_timer.stop();
}
// (fragment -- the enclosing function's signature is outside this view)
// Kick off the processing timer and report success to the caller.
start();
return true;
}
// (fragment of a segment-intersection test, classic orientation algorithm for
// segments p1q1 and p2q2 -- the remainder of the function is outside this view)
// Find the four orientations needed for general and special cases
int o1 = orientation(p1, q1, p2);
int o2 = orientation(p1, q1, q2);
int o3 = orientation(p2, q2, p1);
int o4 = orientation(p2, q2, q1);
// Side channel: record the crossing direction from the orientation of p1
// relative to p2q2. Presumably orientation() returns 0 = collinear,
// 1 / 2 = the two turn directions (TODO confirm against its definition).
directionDown = o3 == 2;
// General case
if (o1 != o2 && o3 != o4)
return true;
// Special Cases
// p1, q1 and p2 are colinear and p2 lies on segment p1q1
if (o1 == 0 && onSegment(p1, p2, q1)) return true;