@import url(http://www.cppblog.com/CuteSoft_Client/CuteEditor/Load.ashx?type=style&file=SyntaxHighlighter.css);@import url(/css/cuteeditor.css);
买了这个板子有好久了,去年放着没心思搞,今天觉得该干点事了。
我的目标是要使用tx1上的摄像头来抓取视频帧并做识别。
首先,jetson tx-1的板载摄像头是不提供默认v4l2的驱动的,所以我自己给它找了驱动,但是这个驱动是基于gstreamer的。
所以,我整合了一套gstreamer的管道命令,如下:
# Destination host for the UDP stream (loopback = view on the board itself).
export CLIENT_IP=127.0.0.1
# Pipeline: nvcamerasrc grabs frames at 30 fps -> nvvidconv flips/scales to
# 960x540 I420 -> omxh264enc encodes h264 (CBR, 4 Mbit/s) -> h264parse/queue ->
# omxh264dec decodes it back -> videoconvert -> jpegenc (quality 30) ->
# rtpjpegpay packs RTP/JPEG -> udpsink sends to $CLIENT_IP:5000.
# NOTE(review): the h264 encode/decode round-trip looks redundant — the wire
# format is RTP/JPEG, not h264; presumably kept to exercise the HW codec.
gst-launch-1.0 nvcamerasrc fpsRange="30 30" intent=3 ! nvvidconv flip-method=6 ! 'video/x-raw(memory:NVMM), width=(int)960, height=(int)540, format=(string)I420, framerate=(fraction)30/1' ! omxh264enc control-rate=2 bitrate=4000000 ! 'video/x-h264, stream-format=(string)byte-stream' ! h264parse ! queue ! omxh264dec ! nvvidconv ! 'video/x-raw, format=(string)UYVY' ! videoconvert ! jpegenc quality=30 ! rtpjpegpay ! udpsink host=$CLIENT_IP port=5000 sync=false async=false
这段命令把摄像头上的内容抓取并压缩为 960×540、30 帧的视频:先编码为 h264 再解码回来,最终以 RTP/JPEG 格式通过 udp 协议丢出到板卡的 5000 号端口上。
以上是gstreamer的服务器端,接下来,可以将内容抓取下来看看。
客户端的命令我就不写了,直接使用代码。这段代码是从youtube上的jetson的openCV教程组装来的,自己找了一下opencv处理gstreamer的API,搭上就可以用。
#include <iostream>
#include <vector>

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>

using namespace cv;
int main(int, char**)
{
VideoCapture input("./stream.sdp");
if(!input.isOpened()){ // check if we succeeded
std::cout<< "open failed" << std::endl;
return -1;
}
Mat img, img_gray;
OrbFeatureDetector detector(7000);
vector<KeyPoint> img_keypoints, car_keypoints;
Mat img_descriptors, car_descriptors;
input.read(img);
Mat car;
img(Rect(400, 320, 150, 100)).copyTo(car);
detector(car, Mat(), car_keypoints, car_descriptors);
drawKeypoints(car, car_keypoints, car);
for(;;)
{
if(!input.read(img))
break;
detector(img, Mat(), img_keypoints, img_descriptors);
drawKeypoints(img, img_keypoints, img);
BFMatcher matcher;
vector<DMatch> matches;
matcher.match(car_descriptors, img_descriptors, matches);
vector<Point2f> car_points, img_points;
for(int i=0; i < matches.size(); ++i){
car_points.push_back(car_keypoints[matches[i].queryIdx].pt);
img_points.push_back(img_keypoints[matches[i].queryIdx].pt);
}
std::cout<<"car points count = " << car_points.size() << std::endl;
if(car_points.size() >= 4){
Matx33f H = findHomography(car_points, img_points, CV_RANSAC);
vector<Point> car_border, img_border;
car_border.push_back(Point(0, 0));
car_border.push_back(Point(0, car.rows));
car_border.push_back(Point(car.cols, car.rows));
car_border.push_back(Point(car.cols, 0));
for (size_t i = 0; i < car_border.size(); ++i){
Vec3f p = H * Vec3f(car_border[i].x, car_border[i].y, 1);
img_border.push_back(Point(p[0]/p[2], p[1]/p[2]));
}
polylines(img, img_border, true, CV_RGB(255, 255, 0));
Mat img_matches;
drawMatches(car, car_keypoints, img, img_keypoints, matches, img_matches);
imshow("img_matches", img_matches);
}
// imshow("car", car);
// imshow("img", img);
if(waitKey(27) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
编译的makefile会麻烦一点:先写一个cmake的配置文件CMakeLists.txt
# Minimal build script: builds hello.cpp into the cv_hello executable,
# linked against whatever OpenCV installation find_package locates.
cmake_minimum_required(VERSION 2.8)
project(hello)
# Fail configuration outright if OpenCV is not installed.
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(cv_hello hello.cpp)
target_link_libraries(cv_hello ${OpenCV_LIBS})
执行命令:cmake ./ && make
然后生成cv_hello,可以执行。
关键配置文件是stream.sdp文件,这个文件帮我减少了命令行的麻烦。内容如下:
[stream.sdp]
c=IN IP4 127.0.0.1
m=video 5000 RTP/AVP 96
a=rtpmap:96 JPEG/90000
(注:RTP/JPEG 的时钟频率按 RFC 2435 固定为 90000,原文误写成了码率 4000000。)
全部内容结束。可以看到摄像头的视频内容了