OpenCVサンプルメモ
OpenCV-CookBookで紹介されている画像処理をGR-PEACH、GR-LYCHEEで動かす際のメモ。
ベースとなるサンプルはGR-Boards_NonContactMouseで、mbed_app.jsonを下記のように設定して使用する。
mbed_app.json
{
"config": {
"camera":{
"help": "0:disable 1:enable",
"value": "1"
},
"lcd":{
"help": "0:disable 1:enable",
"value": "1"
}
}
}
サンプルのmain.cppをコピペして使うことでその動作を試せる。
画像を平滑化する(ぼかす)
メディアンフィルタを用いた平滑化を行います。
USER_BUTTON0を押すとフィルタの係数が変化します。
main.cpp
#include "mbed.h"
#include "EasyAttach_CameraAndLCD.h"
#include "opencv.hpp"
#define VIDEO_PIXEL_HW (480u) /* WQVGA */
#define VIDEO_PIXEL_VW (272u) /* WQVGA */
/*! Frame buffer stride: Frame buffer stride should be set to a multiple of 32 or 128
in accordance with the frame buffer burst transfer mode. */
#define DATA_SIZE_PER_PIC (4u)
#define FRAME_BUFFER_STRIDE (((VIDEO_PIXEL_HW * DATA_SIZE_PER_PIC) + 31u) & ~31u)
#define FRAME_BUFFER_HEIGHT (VIDEO_PIXEL_VW)
static uint8_t FrameBuffer_Video[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Lcd[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static DisplayBase Display;
static cv::Mat src_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC4, FrameBuffer_Video);
static cv::Mat dst_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC4, FrameBuffer_Lcd);
static InterruptIn btn0(USER_BUTTON0);
static int btn0_num = 1;
// Configure the camera capture path and start streaming RGB888 frames
// (4 bytes/pixel) into the non-cacheable FrameBuffer_Video.
static void camera_start(void) {
    // Camera
    EasyAttach_Init(Display, VIDEO_PIXEL_HW, VIDEO_PIXEL_VW);
    // Video capture setting (progressive form fixed)
    Display.Video_Write_Setting(
        DisplayBase::VIDEO_INPUT_CHANNEL_0,
        DisplayBase::COL_SYS_NTSC_358,      // color system for analog input
        (void *)FrameBuffer_Video,          // capture destination buffer
        FRAME_BUFFER_STRIDE,                // bytes per line (32-byte aligned)
        DisplayBase::VIDEO_FORMAT_RGB888,
        DisplayBase::WR_RD_WRSWA_32BIT,     // byte-swap mode for 32-bit writes
        VIDEO_PIXEL_VW,                     // vertical pixels
        VIDEO_PIXEL_HW                      // horizontal pixels
    );
    EasyAttach_CameraStart(Display, DisplayBase::VIDEO_INPUT_CHANNEL_0);
}
// Attach graphics layer 0 to the filtered output buffer (dst_img, RGB888)
// and turn the LCD backlight on.
static void lcd_start(void) {
    DisplayBase::rect_t rect;
    // GRAPHICS_LAYER_0: full-screen rectangle
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_0,
        (void *)&dst_img.data[0],           // display the median-filtered image
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_RGB888,
        DisplayBase::WR_RD_WRSWA_32BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_0);
    Thread::wait(50);                       // let the first frame settle before backlight on
    EasyAttach_LcdBacklight(true);
}
// USER_BUTTON0 ISR: step the median-filter aperture through the odd sizes
// 1, 3, ..., 31 and then wrap back to 1.  cv::medianBlur requires an odd
// ksize, which the +2 stepping preserves.
static void btn0_fall(void) {
    if (btn0_num < 30) {
        btn0_num += 2;
    } else {
        btn0_num = 1;
    }
    // Fix: output string said "pram" instead of "param".
    printf("param:%d\r\n", btn0_num);
}
// Entry point: start the camera and LCD, then continuously median-filter
// the captured frame (src_img) into the displayed frame (dst_img).
int main() {
    // button setting
    btn0.fall(&btn0_fall);
    // Start camera capture
    camera_start();
    // Start LCD output
    lcd_start();
    while (1) {
        // ksize (btn0_num) is always odd; changed by the button ISR
        cv::medianBlur(src_img, dst_img, btn0_num);
    }
}
楕円フィッティングを行う
画像から輪郭を検出し、その輪郭に対して楕円フィッティングを行う。
USER_BUTTON0を押すとLCD表示が「カメラ画像 -> グレースケール -> 2値化データ」の順に変化します。
main.cpp
#include "mbed.h"
#include "EasyAttach_CameraAndLCD.h"
#include "opencv.hpp"
#define VIDEO_PIXEL_HW (480u) /* WQVGA */
#define VIDEO_PIXEL_VW (272u) /* WQVGA */
/*! Frame buffer stride: Frame buffer stride should be set to a multiple of 32 or 128
in accordance with the frame buffer burst transfer mode. */
#define DATA_SIZE_PER_PIC (2u)
#define FRAME_BUFFER_STRIDE (((VIDEO_PIXEL_HW * DATA_SIZE_PER_PIC) + 31u) & ~31u)
#define FRAME_BUFFER_HEIGHT (VIDEO_PIXEL_VW)
static uint8_t FrameBuffer_Video[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Lcd[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Result[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static DisplayBase Display;
static cv::Mat src_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Video);
static cv::Mat dst_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Lcd);
static cv::Mat result_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Result);
static InterruptIn btn0(USER_BUTTON0);
static int btn0_type = 0;
static bool btn0_change = false;
#define BTN0_TYPE_MAX (2)
// Configure the camera capture path and start streaming YCbCr422 frames
// (2 bytes/pixel) into the non-cacheable FrameBuffer_Video.
static void camera_start(void) {
    // Camera
    EasyAttach_Init(Display, VIDEO_PIXEL_HW, VIDEO_PIXEL_VW);
    // Video capture setting (progressive form fixed)
    Display.Video_Write_Setting(
        DisplayBase::VIDEO_INPUT_CHANNEL_0,
        DisplayBase::COL_SYS_NTSC_358,      // color system for analog input
        (void *)FrameBuffer_Video,          // capture destination buffer
        FRAME_BUFFER_STRIDE,                // bytes per line (32-byte aligned)
        DisplayBase::VIDEO_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,  // byte-swap mode
        VIDEO_PIXEL_VW,                     // vertical pixels
        VIDEO_PIXEL_HW                      // horizontal pixels
    );
    EasyAttach_CameraStart(Display, DisplayBase::VIDEO_INPUT_CHANNEL_0);
}
// Attach layer 0 to the camera buffer (YCbCr422) and layer 2 to the
// ARGB4444 detection-result overlay, start both layers, then enable the
// backlight.
static void lcd_start(void) {
    DisplayBase::rect_t rect;
    // GRAPHICS_LAYER_0: camera image, full screen
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_0,
        (void *)&src_img.data[0],
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_0);
    // GRAPHICS_LAYER_2: result overlay (ARGB4444, per-pixel alpha)
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_2,
        (void *)&result_img.data[0],
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_ARGB4444,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_2);
    Thread::wait(50);   // let the first frame settle before backlight on
    EasyAttach_LcdBacklight(true);
}
// Expand an 8-bit grayscale image into YCbCr422 by writing each luma byte
// followed by a neutral chroma byte (0x80).  At most
// min(src pixels, dst pixels) pixels are converted so neither buffer is
// overrun.
static void convert_gray2yuv422(cv::Mat& src, cv::Mat& dst) {
    const uint32_t pixels_src = (uint32_t)(src.cols * src.rows);
    const uint32_t pixels_dst = (uint32_t)(dst.cols * dst.rows);
    const uint32_t pixels = (pixels_src < pixels_dst) ? pixels_src : pixels_dst;
    uint8_t *out = dst.data;
    for (uint32_t i = 0; i < pixels; i++) {
        *out++ = src.data[i];   // Y (luma)
        *out++ = 0x80;          // Cb/Cr neutral value
    }
}
// USER_BUTTON0 ISR: advance the display mode 0 -> 1 -> 2 -> 0 and flag
// the main loop that the mode changed.
static void btn0_fall(void) {
    btn0_type = (btn0_type < BTN0_TYPE_MAX) ? (btn0_type + 1) : 0;
    btn0_change = true;
}
// Entry point: binarize the camera image (Otsu), find contours, fit an
// ellipse to each contour of suitable length and draw it on the ARGB4444
// overlay layer.  USER_BUTTON0 cycles the base layer between camera /
// grayscale / binarized views.
int main() {
    cv::Mat gray_img;
    cv::Mat bin_img;
    std::vector<std::vector<cv::Point> > contours;
    // button setting
    btn0.fall(&btn0_fall);
    // Start camera capture
    camera_start();
    // Start LCD output
    lcd_start();
    while (1) {
        // Convert to grayscale (capture format is YUY2 / YCbCr422)
        cv::cvtColor(src_img, gray_img, cv::COLOR_YUV2GRAY_YUY2);
        // Binarize with Otsu's automatic threshold
        cv::threshold(gray_img, bin_img, 0, 255, cv::THRESH_BINARY|cv::THRESH_OTSU);
        // Contour detection
        cv::findContours(bin_img, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
        // Clear the result buffer (note: cleared while the layer is being
        // displayed, so brief flicker is possible)
        memset(FrameBuffer_Result, 0, sizeof(FrameBuffer_Result));
        // Draw the detection results
        for (int i = 0; i < contours.size(); i++) {
            size_t count = contours[i].size();
            if (count < 150 || count > 1000) continue; // skip contours that are too short or too long
            cv::Mat pointsf;
            cv::Mat(contours[i]).convertTo(pointsf, CV_32F);
            // Ellipse fitting
            cv::RotatedRect box = cv::fitEllipse(pointsf);
            // Draw the ellipse (the ARGB4444 buffer is treated as a 2-channel
            // 8-bit image, so Scalar(0x00, 0xFF) writes those two bytes per pixel)
            cv::ellipse(result_img, box, cv::Scalar(0x00, 0xFF), 2, cv::LINE_AA);
        }
        // Base-layer display selection
        switch (btn0_type) {
        case 0: // original camera data
            if (btn0_change) {
                btn0_change = false;
                Display.Graphics_Read_Change(DisplayBase::GRAPHICS_LAYER_0, &src_img.data[0]);
            }
            break;
        case 1: // grayscale
            convert_gray2yuv422(gray_img, dst_img);
            if (btn0_change) {
                btn0_change = false;
                Display.Graphics_Read_Change(DisplayBase::GRAPHICS_LAYER_0, &dst_img.data[0]);
            }
            break;
        case 2: // binarized data
            convert_gray2yuv422(bin_img, dst_img);
            break;
        default:
            break;
        }
    }
}
輪郭の検出
USER_BUTTON0を押すとLCD表示が「カメラ画像 -> グレースケール -> 2値化データ」の順に変化します。
main.cpp
#include "mbed.h"
#include "EasyAttach_CameraAndLCD.h"
#include "opencv.hpp"
#define VIDEO_PIXEL_HW (480u) /* WQVGA */
#define VIDEO_PIXEL_VW (272u) /* WQVGA */
/*! Frame buffer stride: Frame buffer stride should be set to a multiple of 32 or 128
in accordance with the frame buffer burst transfer mode. */
#define DATA_SIZE_PER_PIC (2u)
#define FRAME_BUFFER_STRIDE (((VIDEO_PIXEL_HW * DATA_SIZE_PER_PIC) + 31u) & ~31u)
#define FRAME_BUFFER_HEIGHT (VIDEO_PIXEL_VW)
static uint8_t FrameBuffer_Video[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Lcd[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Result0[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Result1[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static DisplayBase Display;
static cv::Mat src_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Video);
static cv::Mat dst_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Lcd);
static cv::Mat result_img0(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Result0);
static cv::Mat result_img1(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Result1);
static int result_idx = 0;
static cv::Mat * p_result_img[2] = {&result_img0, &result_img1};
static InterruptIn btn0(USER_BUTTON0);
static int btn0_type = 0;
static bool btn0_change = false;
#define BTN0_TYPE_MAX (2)
// Configure the camera capture path and start streaming YCbCr422 frames
// (2 bytes/pixel) into the non-cacheable FrameBuffer_Video.
static void camera_start(void) {
    // Camera
    EasyAttach_Init(Display, VIDEO_PIXEL_HW, VIDEO_PIXEL_VW);
    // Video capture setting (progressive form fixed)
    Display.Video_Write_Setting(
        DisplayBase::VIDEO_INPUT_CHANNEL_0,
        DisplayBase::COL_SYS_NTSC_358,      // color system for analog input
        (void *)FrameBuffer_Video,          // capture destination buffer
        FRAME_BUFFER_STRIDE,                // bytes per line (32-byte aligned)
        DisplayBase::VIDEO_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,  // byte-swap mode
        VIDEO_PIXEL_VW,                     // vertical pixels
        VIDEO_PIXEL_HW                      // horizontal pixels
    );
    EasyAttach_CameraStart(Display, DisplayBase::VIDEO_INPUT_CHANNEL_0);
}
// Attach layer 0 to the camera buffer (YCbCr422) and layer 2 to the
// ARGB4444 overlay.  Layer 2 initially shows result_img0; the main loop
// flips between result_img0/result_img1 (double buffering) via
// Graphics_Read_Change.
static void lcd_start(void) {
    DisplayBase::rect_t rect;
    // GRAPHICS_LAYER_0: camera image, full screen
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_0,
        (void *)&src_img.data[0],
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_0);
    // GRAPHICS_LAYER_2: result overlay (ARGB4444, per-pixel alpha)
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_2,
        (void *)&result_img0.data[0],
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_ARGB4444,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_2);
    Thread::wait(50);   // let the first frame settle before backlight on
    EasyAttach_LcdBacklight(true);
}
// Expand an 8-bit grayscale image into YCbCr422: each output pixel is the
// source luma byte followed by a neutral chroma byte (0x80).  The pixel
// count is clamped to the smaller of the two images.
static void convert_gray2yuv422(cv::Mat& src, cv::Mat& dst) {
    uint32_t n_src = (uint32_t)(src.cols * src.rows);
    uint32_t n_dst = (uint32_t)(dst.cols * dst.rows);
    uint32_t remaining = (n_src < n_dst) ? n_src : n_dst;
    const uint8_t *in = src.data;
    uint8_t *out = dst.data;
    while (remaining--) {
        *out++ = *in++;   // Y (luma)
        *out++ = 0x80;    // neutral chroma
    }
}
// USER_BUTTON0 ISR: advance the display mode 0 -> 1 -> 2 -> 0 and notify
// the main loop via btn0_change.
static void btn0_fall(void) {
    int next = btn0_type + 1;
    if (next > BTN0_TYPE_MAX) {
        next = 0;
    }
    btn0_type = next;
    btn0_change = true;
}
// Entry point: binarize the camera image (Otsu), detect contours with
// hierarchy, and draw them on a double-buffered ARGB4444 overlay.
// USER_BUTTON0 cycles the base layer: camera / grayscale / binarized.
int main() {
    cv::Mat gray_img;
    cv::Mat bin_img;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    // button setting
    btn0.fall(&btn0_fall);
    // Start camera capture
    camera_start();
    // Start LCD output
    lcd_start();
    while (1) {
        // Convert to grayscale (capture format is YUY2 / YCbCr422)
        cv::cvtColor(src_img, gray_img, cv::COLOR_YUV2GRAY_YUY2);
        // Binarize with Otsu's automatic threshold
        cv::threshold(gray_img, bin_img, 0, 255, cv::THRESH_BINARY|cv::THRESH_OTSU);
        // Contour detection (binary image, contours out, hierarchy out,
        // retrieval mode, approximation method)
        cv::findContours(bin_img, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
        // Clear the back (not currently displayed) result buffer
        *p_result_img[result_idx] = cv::Scalar(0x00, 0x00);
        // Draw contours (image, contours, index -1 = all, color, thickness,
        // line type, hierarchy, max draw level)
        cv::drawContours(*p_result_img[result_idx], contours, -1, cv::Scalar(0xBE, 0xA0), 2, 8, hierarchy, 1);
        // Show the freshly drawn buffer
        Display.Graphics_Read_Change(DisplayBase::GRAPHICS_LAYER_2, (void *)(*p_result_img[result_idx]).data);
        // Swap the detection-result buffers
        result_idx ^= 1;
        // Base-layer display selection
        switch (btn0_type) {
        case 0: // camera image
            if (btn0_change) {
                btn0_change = false;
                Display.Graphics_Read_Change(DisplayBase::GRAPHICS_LAYER_0, &src_img.data[0]);
            }
            break;
        case 1: // grayscale
            convert_gray2yuv422(gray_img, dst_img);
            if (btn0_change) {
                btn0_change = false;
                Display.Graphics_Read_Change(DisplayBase::GRAPHICS_LAYER_0, &dst_img.data[0]);
            }
            break;
        case 2: // binarized data
            convert_gray2yuv422(bin_img, dst_img);
            break;
        default:
            break;
        }
    }
}
微分画像・エッジ画像を求める
USER_BUTTON0を押すとLCD表示が「カメラ画像 -> Cannyエッジ -> カメラ画像とCannyエッジの重ね画像」の順に変化します。
main.cpp
#include "mbed.h"
#include "EasyAttach_CameraAndLCD.h"
#include "opencv.hpp"
#include "dcache-control.h"
#define VIDEO_PIXEL_HW (480u) /* WQVGA */
#define VIDEO_PIXEL_VW (272u) /* WQVGA */
/*! Frame buffer stride: Frame buffer stride should be set to a multiple of 32 or 128
in accordance with the frame buffer burst transfer mode. */
#define DATA_SIZE_PER_PIC (2u)
#define FRAME_BUFFER_STRIDE (((VIDEO_PIXEL_HW * DATA_SIZE_PER_PIC) + 31u) & ~31u)
#define FRAME_BUFFER_HEIGHT (VIDEO_PIXEL_VW)
static uint8_t FrameBuffer_Video[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Result[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((aligned(32)));
static DisplayBase Display;
static cv::Mat src_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Video);
static cv::Mat result_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Result);
static InterruptIn btn0(USER_BUTTON0);
static int btn0_type = 0;
static bool btn0_change = false;
#define BTN0_TYPE_MAX (2)
// Configure the camera capture path and start streaming YCbCr422 frames
// (2 bytes/pixel) into the non-cacheable FrameBuffer_Video.
static void camera_start(void) {
    // Camera
    EasyAttach_Init(Display, VIDEO_PIXEL_HW, VIDEO_PIXEL_VW);
    // Video capture setting (progressive form fixed)
    Display.Video_Write_Setting(
        DisplayBase::VIDEO_INPUT_CHANNEL_0,
        DisplayBase::COL_SYS_NTSC_358,      // color system for analog input
        (void *)FrameBuffer_Video,          // capture destination buffer
        FRAME_BUFFER_STRIDE,                // bytes per line (32-byte aligned)
        DisplayBase::VIDEO_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,  // byte-swap mode
        VIDEO_PIXEL_VW,                     // vertical pixels
        VIDEO_PIXEL_HW                      // horizontal pixels
    );
    EasyAttach_CameraStart(Display, DisplayBase::VIDEO_INPUT_CHANNEL_0);
}
// Attach layer 0 to the camera buffer (YCbCr422) and layer 2 to the
// ARGB4444 edge-image overlay (result_img), then enable the backlight.
// Note: result_img's backing buffer is in cacheable RAM here, hence the
// dcache_clean() calls in main().
static void lcd_start(void) {
    DisplayBase::rect_t rect;
    // GRAPHICS_LAYER_0: camera image, full screen
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_0,
        (void *)&src_img.data[0],
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_0);
    // GRAPHICS_LAYER_2: edge overlay (ARGB4444, per-pixel alpha)
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_2,
        (void *)&result_img.data[0],
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_ARGB4444,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_2);
    Thread::wait(50);   // let the first frame settle before backlight on
    EasyAttach_LcdBacklight(true);
}
// Convert a binary (zero / non-zero) mask into little-endian ARGB4444
// pixels: non-zero -> A=F, R=0, G=0, B=F (opaque), zero -> all nibbles 0
// (fully transparent, so the camera layer shows through).  Pixel count is
// clamped to the smaller of the two images.
static void convert_bin2RGB4444(cv::Mat& src, cv::Mat& dst) {
    const uint32_t n_src = (uint32_t)(src.cols * src.rows);
    const uint32_t n_dst = (uint32_t)(dst.cols * dst.rows);
    const uint32_t n = (n_src < n_dst) ? n_src : n_dst;
    uint8_t *out = dst.data;
    for (uint32_t i = 0; i < n; i++) {
        const bool on = (src.data[i] != 0);
        *out++ = on ? 0x0F : 0x00;   // G:4 B:4
        *out++ = on ? 0xF0 : 0x00;   // A:4 R:4
    }
}
// Convert a binary mask into ARGB4444 with an opaque background:
// non-zero -> 0xFF 0xFF (opaque white), zero -> 0x00 0xF0 (opaque black),
// so the overlay fully hides the camera layer.  Pixel count is clamped to
// the smaller of the two images.
static void convert_bin2RGB4444_mono(cv::Mat& src, cv::Mat& dst) {
    const uint32_t n_src = (uint32_t)(src.cols * src.rows);
    const uint32_t n_dst = (uint32_t)(dst.cols * dst.rows);
    const uint32_t n = (n_src < n_dst) ? n_src : n_dst;
    uint8_t *out = dst.data;
    for (uint32_t i = 0; i < n; i++) {
        if (src.data[i] != 0) {
            *out++ = 0xFF;   // G:4 B:4
            *out++ = 0xFF;   // A:4 R:4
        } else {
            *out++ = 0x00;   // G:4 B:4
            *out++ = 0xF0;   // A:4 R:4 (alpha still opaque)
        }
    }
}
// USER_BUTTON0 ISR: cycle the display mode 0 -> 1 -> 2 -> 0 and notify
// the main loop.  btn0_type is only modified here and starts at 0, so the
// modulo form is equivalent to the bounds-checked increment.
static void btn0_fall(void) {
    btn0_type = (btn0_type + 1) % (BTN0_TYPE_MAX + 1);
    btn0_change = true;
}
// Entry point: show the camera image on layer 0 and a Canny edge image on
// the ARGB4444 overlay.  Modes: 0 = camera only, 1 = edges on an opaque
// background, 2 = edges overlaid on the camera image.
int main() {
    cv::Mat gray_img;
    cv::Mat canny_img;
    // FrameBuffer_Result lives in normal (cacheable) RAM in this sample,
    // so the data cache must be cleaned after every CPU write for the
    // display hardware to see the new contents.
    memset(&result_img.data[0], 0, FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT);
    dcache_clean(&result_img.data[0], FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT);
    // button setting
    btn0.fall(&btn0_fall);
    // Start camera capture
    camera_start();
    // Start LCD output
    lcd_start();
    while (1) {
        switch (btn0_type) {
        case 0: // camera image only
            if (btn0_change) {
                btn0_change = false;
                // clear the overlay so only the camera image is visible
                memset(&result_img.data[0], 0, FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT);
                dcache_clean(&result_img.data[0], FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT);
            }
            break;
        case 1: // Canny only
        case 2: // camera image with Canny overlay
            // Convert to grayscale (YUY2 capture format)
            cv::cvtColor(src_img, gray_img, cv::COLOR_YUV2GRAY_YUY2);
            // Canny edge detection (low/high thresholds 100/200)
            cv::Canny(gray_img, canny_img, 100, 200);
            if (btn0_type == 1) {
                convert_bin2RGB4444_mono(canny_img, result_img); // opaque background
            } else {
                convert_bin2RGB4444(canny_img, result_img);      // transparent background
            }
            dcache_clean(&result_img.data[0], FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT);
            break;
        default:
            break;
        }
    }
}
人検出
少し改造しました。
main.cpp
#include "mbed.h"
#include "EasyAttach_CameraAndLCD.h"
#include "opencv.hpp"
#define VIDEO_PIXEL_HW (480u) /* WQVGA */
#define VIDEO_PIXEL_VW (272u) /* WQVGA */
/*! Frame buffer stride: Frame buffer stride should be set to a multiple of 32 or 128
in accordance with the frame buffer burst transfer mode. */
#define DATA_SIZE_PER_PIC (2u)
#define FRAME_BUFFER_STRIDE (((VIDEO_PIXEL_HW * DATA_SIZE_PER_PIC) + 31u) & ~31u)
#define FRAME_BUFFER_HEIGHT (VIDEO_PIXEL_VW)
static uint8_t FrameBuffer_Video[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Lcd[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Result0[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static uint8_t FrameBuffer_Result1[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
static DisplayBase Display;
static cv::Mat src_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Video);
static cv::Mat dst_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Lcd);
static cv::Mat result_img0(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Result0);
static cv::Mat result_img1(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Result1);
static int result_idx = 0;
static cv::Mat * p_result_img[2] = {&result_img0, &result_img1};
static InterruptIn btn0(USER_BUTTON0);
static int btn0_type = 0;
static bool btn0_change = false;
#define BTN0_TYPE_MAX (1)
// Pre-fill the capture buffer with YCbCr "black" (Y=0x10, Cb/Cr=0x80) so
// the screen starts dark, then configure and start YCbCr422 capture
// (2 bytes/pixel) into FrameBuffer_Video.
static void camera_start(void) {
    // Initialize the background to black
    for (uint32_t i = 0; i < sizeof(FrameBuffer_Video); i += 2) {
        FrameBuffer_Video[i + 0] = 0x10;   // Y (black level)
        FrameBuffer_Video[i + 1] = 0x80;   // Cb/Cr (neutral)
    }
    // Camera
    EasyAttach_Init(Display, VIDEO_PIXEL_HW, VIDEO_PIXEL_VW);
    // Video capture setting (progressive form fixed)
    Display.Video_Write_Setting(
        DisplayBase::VIDEO_INPUT_CHANNEL_0,
        DisplayBase::COL_SYS_NTSC_358,      // color system for analog input
        (void *)FrameBuffer_Video,          // capture destination buffer
        FRAME_BUFFER_STRIDE,                // bytes per line (32-byte aligned)
        DisplayBase::VIDEO_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,  // byte-swap mode
        VIDEO_PIXEL_VW,                     // vertical pixels
        VIDEO_PIXEL_HW                      // horizontal pixels
    );
    EasyAttach_CameraStart(Display, DisplayBase::VIDEO_INPUT_CHANNEL_0);
}
// Attach layer 0 to the camera buffer (YCbCr422) and layer 2 to the
// ARGB4444 detection overlay.  Layer 2 starts on result_img0; the main
// loop double-buffers between result_img0/result_img1.
static void lcd_start(void) {
    DisplayBase::rect_t rect;
    // GRAPHICS_LAYER_0: camera image, full screen
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_0,
        (void *)&src_img.data[0],
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_0);
    // GRAPHICS_LAYER_2: detection-result overlay (ARGB4444)
    rect.vs = 0;
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = 0;
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_2,
        (void *)&result_img0.data[0],
        FRAME_BUFFER_STRIDE,
        DisplayBase::GRAPHICS_FORMAT_ARGB4444,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_2);
    Thread::wait(50);   // let the first frame settle before backlight on
    EasyAttach_LcdBacklight(true);
}
// Expand an 8-bit grayscale image into YCbCr422: each source byte becomes
// a (Y, 0x80) pair in the destination.  Conversion stops at the smaller
// of the two images' pixel counts.
static void convert_gray2yuv422(cv::Mat& src, cv::Mat& dst) {
    const uint32_t pixels_src = (uint32_t)(src.cols * src.rows);
    const uint32_t pixels_dst = (uint32_t)(dst.cols * dst.rows);
    const uint32_t pixels = (pixels_src < pixels_dst) ? pixels_src : pixels_dst;
    uint8_t *out = dst.data;
    for (uint32_t i = 0; i < pixels; i++) {
        *out++ = src.data[i];   // Y (luma)
        *out++ = 0x80;          // neutral chroma
    }
}
// USER_BUTTON0 ISR: toggle the display mode between 0 and 1
// (BTN0_TYPE_MAX is 1 in this sample) and notify the main loop.
static void btn0_fall(void) {
    int next = btn0_type + 1;
    if (next > BTN0_TYPE_MAX) {
        next = 0;
    }
    btn0_type = next;
    btn0_change = true;
}
// Entry point: HOG-based people detection using the Daimler 48x96
// detector on a half-resolution grayscale image.  Detections are drawn as
// rectangles on a double-buffered ARGB4444 overlay.
int main() {
    cv::Mat gray_img;
    // button setting
    btn0.fall(&btn0_fall);
    // Start camera capture
    camera_start();
    // Start LCD output
    lcd_start();
    cv::HOGDescriptor hog;
    // hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    hog.winSize = cv::Size(48, 96);   // Daimler detector window size
    hog.setSVMDetector(hog.getDaimlerPeopleDetector());
    double scale = 2.0;               // detection runs at 1/scale resolution
    cv::Mat smallImg(cv::saturate_cast<int>(src_img.rows/scale), cv::saturate_cast<int>(src_img.cols/scale), CV_8UC1);
    while (1) {
        // Convert to grayscale (YUY2 capture format)
        cv::cvtColor(src_img, gray_img, cv::COLOR_YUV2GRAY_YUY2);
        // Shrink the image to reduce processing time
        cv::resize(gray_img, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR);
        cv::equalizeHist(smallImg, smallImg);
        std::vector<cv::Rect> found;
        // hog.detectMultiScale(smallImg, found, 0, cv::Size(8,8), cv::Size(0,0), 1.05, 5);
        hog.detectMultiScale(smallImg, found);
        // Clear the back (not currently displayed) result buffer
        *p_result_img[result_idx] = cv::Scalar(0x00, 0x00);
        std::vector<cv::Rect>::const_iterator it = found.begin();
        for (; it!=found.end(); ++it) {
            cv::Rect r = *it;
            // Shrink the detection rectangle slightly before drawing
            // (scaled back up to full resolution)
            r.x = cvRound(r.x*scale + r.width*scale*0.15);
            r.width = cvRound(r.width*scale*0.7);
            r.y = cvRound(r.y*scale + r.height*scale*0.10);
            r.height = cvRound(r.height*scale*0.8);
            cv::rectangle(*p_result_img[result_idx], r.tl(), r.br(), cv::Scalar(0x0F, 0xA0), 2);
        }
        // Show the freshly drawn buffer
        Display.Graphics_Read_Change(DisplayBase::GRAPHICS_LAYER_2, (void *)(*p_result_img[result_idx]).data);
        // Swap the detection-result buffers
        result_idx ^= 1;
        // Base-layer display selection
        switch (btn0_type) {
        case 0: // original camera data
            if (btn0_change) {
                btn0_change = false;
                Display.Graphics_Read_Change(DisplayBase::GRAPHICS_LAYER_0, &src_img.data[0]);
            }
            break;
        case 1: // grayscale
            convert_gray2yuv422(gray_img, dst_img);
            if (btn0_change) {
                btn0_change = false;
                Display.Graphics_Read_Change(DisplayBase::GRAPHICS_LAYER_0, &dst_img.data[0]);
            }
            break;
        default:
            break;
        }
    }
}
非接触マウス(指検出版)
手を握る(グー)と「マウス左ボタン押し込み」、手を開く(パー)と「マウス左ボタンを離す」動作を行います。
ソース中のコメントアウト箇所を有効にすると、使えるようになります。
コメントアウト箇所
// mouse.press(MOUSE_LEFT); // mouse.release(MOUSE_LEFT);
誤作動が多いので、自己責任でご利用ください。(PCで試すとファイルがどこかに行ってしまったりする)
main.cpp
#include "mbed.h"
#include "opencv.hpp"
#include "EasyAttach_CameraAndLCD.h"
#include "USBMouse.h"
#define PLOT_INTERVAL (30)
#define DIST_SCALE_FACTOR_X (6.0)
#define DIST_SCALE_FACTOR_Y (6.0)
/*! Frame buffer stride: Frame buffer stride should be set to a multiple of 32 or 128
in accordance with the frame buffer burst transfer mode. */
#define VIDEO_PIXEL_HW (160u) /* HQVGA */
#define VIDEO_PIXEL_VW (120u) /* HQVGA */
#define FRAME_BUFFER_STRIDE (((VIDEO_PIXEL_HW * 4) + 31u) & ~31u)
#define FRAME_BUFFER_HEIGHT (VIDEO_PIXEL_VW)
#if defined(__ICCARM__)
#pragma data_alignment=32
static uint8_t user_frame_buffer0[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]@ ".mirrorram";
#else
static uint8_t user_frame_buffer0[FRAME_BUFFER_STRIDE * FRAME_BUFFER_HEIGHT]__attribute((section("NC_BSS"),aligned(32)));
#endif
static volatile int Vfield_Int_Cnt = 0;
DisplayBase Display;
DigitalOut led1(LED1);
USBMouse mouse;
// V-field (frame-complete) interrupt callback: decrement the pending-frame
// counter so wait_new_image() unblocks.  int_type is unused.
static void IntCallbackFunc_Vfield(DisplayBase::int_type_t int_type) {
    if (Vfield_Int_Cnt > 0) {
        Vfield_Int_Cnt--;
    }
}
// Block (polling in 1 ms steps) until the V-field interrupt signals that a
// new camera frame has been captured, then re-arm the counter for the
// next frame.
static void wait_new_image(void) {
    while (Vfield_Int_Cnt > 0) {
        Thread::wait(1);
    }
    Vfield_Int_Cnt = 1;
}
// Register the V-field (frame-complete) interrupt handler and start
// capturing RGB888 frames (4 bytes/pixel) into user_frame_buffer0.
static void Start_Video_Camera(void) {
    // Field end signal for recording function in scaler 0
    Display.Graphics_Irq_Handler_Set(DisplayBase::INT_TYPE_S0_VFIELD, 0, IntCallbackFunc_Vfield);
    // Video capture setting (progressive form fixed)
    Display.Video_Write_Setting(
        DisplayBase::VIDEO_INPUT_CHANNEL_0,
        DisplayBase::COL_SYS_NTSC_358,      // color system for analog input
        (void *)user_frame_buffer0,         // capture destination buffer
        FRAME_BUFFER_STRIDE,                // bytes per line (32-byte aligned)
        DisplayBase::VIDEO_FORMAT_RGB888,
        DisplayBase::WR_RD_WRSWA_32BIT,     // byte-swap mode
        VIDEO_PIXEL_VW,                     // vertical pixels
        VIDEO_PIXEL_HW                      // horizontal pixels
    );
    EasyAttach_CameraStart(Display, DisplayBase::VIDEO_INPUT_CHANNEL_0);
}
#if MBED_CONF_APP_LCD
// Expand an 8-bit grayscale image into YCbCr422 by pairing each luma byte
// with a neutral chroma byte (0x80).  Copies at most
// min(src pixels, dst pixels) pixels so neither buffer is overrun.
static void convert_gray2yuv422(cv::Mat& src, cv::Mat& dst) {
    uint32_t src_size = src.cols * src.rows;
    uint32_t dst_size = dst.cols * dst.rows;
    uint32_t size;
    uint32_t idx = 0;
    if (src_size < dst_size) {
        size = src_size;
    } else {
        size = dst_size;
    }
    // Fix: the loop index was a signed int compared against the unsigned
    // `size` (sign/unsigned mismatch); use uint32_t like the identical
    // copies of this helper in the other samples.
    for (uint32_t i = 0; i < size; i++) {
        dst.data[idx++] = src.data[i];   // Y (luma)
        dst.data[idx++] = 0x80;          // neutral chroma
    }
}
static uint8_t FrameBuffer_Lcd[FRAME_BUFFER_STRIDE/2 * FRAME_BUFFER_HEIGHT]__attribute((aligned(32)));
static cv::Mat dst_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC2, FrameBuffer_Lcd);
// Show the YCbCr422 preview image (dst_img, the skin mask) on graphics
// layer 0: centered vertically, placed at the right edge of the LCD.
// The stride is half the capture stride (2 bytes/pixel vs 4).
static void Start_LCD_Display(void) {
    DisplayBase::rect_t rect;
    rect.vs = (LCD_PIXEL_HEIGHT - VIDEO_PIXEL_VW) / 2;   // vertical centering
    rect.vw = VIDEO_PIXEL_VW;
    rect.hs = (LCD_PIXEL_WIDTH - VIDEO_PIXEL_HW);        // flush right
    rect.hw = VIDEO_PIXEL_HW;
    Display.Graphics_Read_Setting(
        DisplayBase::GRAPHICS_LAYER_0,
        &dst_img.data[0],
        FRAME_BUFFER_STRIDE/2,
        DisplayBase::GRAPHICS_FORMAT_YCBCR422,
        DisplayBase::WR_RD_WRSWA_32_16BIT,
        &rect
    );
    Display.Graphics_Start(DisplayBase::GRAPHICS_LAYER_0);
    Thread::wait(50);   // let the first frame settle before backlight on
    EasyAttach_LcdBacklight(true);
}
#endif
// Entry point: detect a hand by skin color, classify "fist" vs "open
// hand" by counting fingers from convexity defects (virtual mouse press /
// release), and move the USB mouse cursor from the optical flow of a
// fixed 3x3 tracking-point grid.
int main() {
    cv::Mat prev_image;
    cv::Mat curr_image;
    std::vector<cv::Point2f> prev_pts;
    std::vector<cv::Point2f> curr_pts;
    cv::Point2f point;
    int16_t x = 0;
    int16_t y = 0;
    EasyAttach_Init(Display);
    Start_Video_Camera();
#if MBED_CONF_APP_LCD
    Start_LCD_Display();
#endif
    // Initialization of optical flow: a 3x3 grid of tracking points around
    // the image center, PLOT_INTERVAL pixels apart.
    point.y = (VIDEO_PIXEL_VW / 2) + (PLOT_INTERVAL * 1);
    for (int32_t i = 0; i < 3; i++) {
        point.x = (VIDEO_PIXEL_HW / 2) - (PLOT_INTERVAL * 1);
        for (int32_t j = 0; j < 3; j++) {
            prev_pts.push_back(point);
            point.x += PLOT_INTERVAL;
        }
        point.y -= PLOT_INTERVAL;
    }
    cv::Mat hsv, skin;
    bool click_last = false;   // true while the (virtual) left button is held
    int close_cnt = 0;         // consecutive "fist" frames
    int open_cnt = 0;          // consecutive "open hand" frames
    int init_cnt = 0;          // brightness stabilization counter
    int click_wait = 0;        // frames to ignore motion after press/release
    while (1) {
        // Wait for image input
        wait_new_image();
        // The capture buffer is RGB888 (4 bytes/pixel): derive grayscale
        // (for optical flow) and HSV (for skin masking) images from it.
        // (Fix: the original comment claimed the source was YUV422.)
        cv::Mat src_img(VIDEO_PIXEL_VW, VIDEO_PIXEL_HW, CV_8UC4, user_frame_buffer0);
        cv::cvtColor(src_img, curr_image, cv::COLOR_BGR2GRAY);
        cv::cvtColor(src_img, hsv, CV_BGR2HSV);
        cv::inRange(hsv, cv::Scalar(0, 20, 20), cv::Scalar(25, 255, 255), skin);
        // Smooth the extracted skin mask
        cv::Mat structElem = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
        cv::morphologyEx(skin, skin, cv::MORPH_CLOSE, structElem);
        // Select the skin contour with the largest area
        vector<vector<cv::Point> > contours;
        vector<cv::Vec4i> hierarchy;
        cv::findContours(skin, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
        if (contours.empty()) {
            // Fix: the original indexed contours[0] even when findContours
            // returned nothing (out-of-range access whenever no skin was in
            // view).  Treat an empty result as "no gesture seen".
            close_cnt = 0;
            open_cnt = 0;
        } else {
            double largestArea = 0.0;
            int largestContourIndex = 0;
            for (size_t i = 0; i < contours.size(); ++i) {
                double a = cv::contourArea(contours[i], false);
                if (a > largestArea) {
                    largestArea = a;
                    largestContourIndex = (int)i;
                }
            }
            // Convexity defects of the selected contour
            vector<vector<int> > hulls(1);
            cv::convexHull(contours[largestContourIndex], hulls[0], false, false);
            std::vector<cv::Vec4i> defects;
            cv::convexityDefects(contours[largestContourIndex], hulls[0], defects);
            // Count fingers, excluding defects that are too shallow or whose
            // endpoints are too close together / too far apart
            int fingerCount = 0;
            for (size_t i = 0; i < defects.size(); i++) {
                int start_index = defects[i][0];
                CvPoint start_point = contours[largestContourIndex][start_index];
                int end_index = defects[i][1];
                CvPoint end_point = contours[largestContourIndex][end_index];
                double d1 = (end_point.x - start_point.x);
                double d2 = (end_point.y - start_point.y);
                double distance = sqrt((d1 * d1) + (d2 * d2));
                int depth = defects[i][3] / 1000;   // scaled defect depth (heuristic)
                if (depth > 5 && distance > 2.0 && distance < 100.0) {
                    fingerCount++;
                }
            }
            // Fist or open hand?
            if (init_cnt < 100) {
                init_cnt++; // wait for the camera brightness to stabilize
            } else if ((click_last == false) && (fingerCount <= 1)) {
                open_cnt = 0;
                close_cnt++;
                if (close_cnt >= 3) {   // 3 consecutive "fist" frames -> press
                    click_last = true;
                    // mouse.press(MOUSE_LEFT);
                    printf("Close\r\n");
                    close_cnt = 0;
                    click_wait = 10;
                }
            } else if ((click_last != false) && (fingerCount >= 2)) {
                close_cnt = 0;
                open_cnt++;
                if (open_cnt >= 3) {    // 3 consecutive "open" frames -> release
                    click_last = false;
                    // mouse.release(MOUSE_LEFT);
                    printf("Open\r\n");
                    open_cnt = 0;
                    click_wait = 10;
                }
            } else {
                close_cnt = 0;
                open_cnt = 0;
            }
        }
#if MBED_CONF_APP_LCD
        // Fix: dst_img / convert_gray2yuv422 only exist when the LCD is
        // enabled, but the original called this unconditionally and did not
        // compile with MBED_CONF_APP_LCD == 0.
        convert_gray2yuv422(skin, dst_img);
#endif
        if ((close_cnt != 0) || (open_cnt != 0)) {
            continue;   // gesture in progress: freeze the cursor
        }
        if (click_wait > 0) {
            click_wait--;
            continue;   // debounce period after a press/release event
        }
        point = cv::Point2f(0, 0);
        if ((!curr_image.empty()) && (!prev_image.empty())) {
            // Optical flow between the previous and current frames
            std::vector<uchar> status;
            std::vector<float> err;
            cv::calcOpticalFlowPyrLK(prev_image, curr_image, prev_pts, curr_pts, status, err, cv::Size(21, 21), 0);
            // Collect the movement vector of each successfully tracked point
            std::vector<cv::Scalar> samples;
            for (size_t i = 0; i < (size_t)status.size(); i++) {
                if (status[i]) {
                    cv::Point2f vec = curr_pts[i] - prev_pts[i];
                    cv::Scalar sample = cv::Scalar(vec.x, vec.y);
                    samples.push_back(sample);
                }
            }
            // Use the mean motion only when enough points agree (low stddev)
            if (samples.size() >= 6) {
                cv::Scalar mean;
                cv::Scalar stddev;
                cv::meanStdDev((cv::InputArray)samples, mean, stddev);
                // printf("%d, stddev=%lf, %lf\r\n", samples.size(), stddev[0], stddev[1]); // for debug
                if ((stddev[0] < 10.0) && (stddev[1] < 10.0)) {
                    point.x = mean[0];
                    point.y = mean[1];
                }
            }
        }
        cv::swap(prev_image, curr_image);
        // Scale the motion to cursor movement (x axis mirrored)
        x = (int16_t)(point.x * DIST_SCALE_FACTOR_X) * -1;
        y = (int16_t)(point.y * DIST_SCALE_FACTOR_Y);
        if ((x != 0) || (y != 0)) {
            led1 = 1;
            // printf("x=%d, y=%d\r\n", x, y); // for debug
            mouse.move(x, y);
        } else {
            led1 = 0;
        }
    }
}
Please log in to post comments.
