
Yolov5 Visual Following

Project Video

This project builds on my other blog post: Yolov5 Depth Camera Demo.

Preface

Once the target is detected and its x, y, and depth coordinates in the frame are obtained, PID controllers drive the robot so that the recognized object stays at the exact center of the image while the distance to it is held at a given setpoint.

Vision (PC Side)

The vision side again uses the Kinect. Building on the detection demo (Yolov5 Depth Camera Demo), a serial-output step is added: after each inference, the center x, y, and depth distance of the first detected object are sent over the serial port to the MCU. The PC-side source code is as follows:

# coding: utf-8
import struct
import sys
import time
import random

import serial
import numpy as np
import cv2
import torch
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame
from models.yolo import Model

ser = None  # serial port handle, opened in run()

def pack_and_send(x, y, depth, sn):
    # Frame layout: 0xFF header, 1-byte point index, then x, y, depth as
    # native-order floats (little-endian on x86, matching the MCU) -- 14 bytes total
    head = struct.pack("B", 0xff)
    point_num = struct.pack("B", sn)
    x = struct.pack("f", x)
    y = struct.pack("f", y)
    depth = struct.pack("f", depth)
    data = head + point_num + x + y + depth
    ser.write(data)  # send the frame to the MCU


def custom(path_or_model='path/to/model.pt'):
    model = torch.load(path_or_model) if isinstance(
        path_or_model, str) else path_or_model  # load checkpoint
    if isinstance(model, dict):
        model = model['model']  # load model
    hub_model = Model(model.yaml).to(next(model.parameters()).device)  # create
    hub_model.load_state_dict(model.float().state_dict())  # load state_dict
    hub_model.names = model.names  # class names
    return hub_model


def get_mid_pos(frame, box, depth_data, randnum):
    distance_list = []
    mid_pos = [(box[0] + box[2]) // 2, (box[1] + box[3]) // 2]  # center pixel used to index the depth map
    min_val = min(abs(box[2] - box[0]), abs(box[3] - box[1]))  # search range for depth samples
    for i in range(randnum):
        bias = random.randint(-min_val // 4, min_val // 4)
        dist = depth_data[int(mid_pos[1] + bias), int(mid_pos[0] + bias)]
        cv2.circle(
            frame, (int(mid_pos[0] + bias), int(mid_pos[1] + bias)), 4, (255, 0, 0), -1)
        if dist:
            distance_list.append(dist)  # keep only valid (non-zero) readings
    distance_list = np.array(distance_list)
    distance_list = np.sort(distance_list)[
        randnum // 2 - randnum // 4:randnum // 2 + randnum // 4]  # sort and keep the middle half (median-style filter)
    return np.mean(distance_list), mid_pos


def dectshow(org_img, boxs, depth_data, fps):
    img = org_img.copy()
    cnt = 0
    for box in boxs:
        cv2.rectangle(img, (int(box[0]), int(box[1])),
                      (int(box[2]), int(box[3])), (0, 255, 0), 2)
        dist, mid_pos = get_mid_pos(org_img, box, depth_data, 24)
        dist_m = dist / 1000  # depth is in millimetres; convert to metres
        cv2.circle(img, (int(mid_pos[0]), int(mid_pos[1])), 5, (0, 0, 255), -1)
        cv2.putText(img, box[-1] + str(dist_m)[:4] + 'm',
                    (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(img, str(mid_pos[0]) + 'x' + str(mid_pos[1]),
                    (int(mid_pos[0]) + 10, int(mid_pos[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
        if cnt == 0:
            pack_and_send(mid_pos[0], mid_pos[1], dist_m, cnt)  # only the first detection is sent
        cnt = cnt + 1
    cv2.putText(img, 'fps:' + str(round(fps, 1)),
                (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (61, 84, 234), 2)
    cv2.imshow('dec_img', img)


def run():
    global ser
    try:
        # Port: /dev/ttyUSB0 etc. on GNU/Linux, COM3 etc. on Windows
        portx = "/dev/ttyUSB0"
        # Baud rate, one of the standard values: 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200, 38400, 57600, 115200
        bps = 115200
        # Timeout: None waits forever, 0 returns immediately, other values are seconds
        timex = 5
        # Open the serial port and get the port object
        ser = serial.Serial(portx, bps, timeout=timex)
        print(ser.port)      # name of the opened port
        print(ser.baudrate)  # baud rate
    except Exception as e:
        print("---serial exception---:", e)
        sys.exit(1)

    # model = custom(path_or_model='yolov5l6-box-best.pt')
    # model = custom(path_or_model='yolov5x-box-best.pt')
    model = custom(path_or_model='yolov5s-box-best.pt')
    model = model.autoshape()  # for PIL/cv2/np inputs and NMS

    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)
    device_serial = fn.getDeviceSerialNumber(0)  # renamed so the serial module is not shadowed
    device = fn.openDevice(device_serial, pipeline=pipeline)
    listener = SyncMultiFrameListener(
        FrameType.Color | FrameType.Ir | FrameType.Depth)
    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)
    device.start()
    # NOTE: must be called after device.start()
    registration = Registration(
        device.getIrCameraParams(), device.getColorCameraParams())
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    try:
        while True:
            start = time.time()
            # Wait for a coherent pair of frames: depth and color
            frames = listener.waitForNewFrame()
            color_frame = frames["color"]
            depth_frame = frames["depth"]
            registration.apply(color_frame, depth_frame, undistorted, registered,
                               bigdepth=None, color_depth_map=None)
            # Convert images to numpy arrays
            depth_image = cv2.resize(
                depth_frame.asarray(), (int(800 / 1), int(400 / 1)))
            color_image = cv2.resize(
                color_frame.asarray(), (int(800 / 1), int(400 / 1)))
            listener.release(frames)
            results = model(color_image)
            boxs = results.pandas().xyxy[0].values
            end = time.time()
            seconds = end - start
            fps = 1 / seconds
            dectshow(color_image, boxs, depth_image, fps)
            key = cv2.waitKey(1)
            # Press Esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        # Stop streaming
        device.stop()
        device.close()
        ser.close()  # close the serial port


if __name__ == "__main__":
    run()
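
One wire-format detail worth noting: struct.pack("f", ...) uses the host's native byte order, which on a typical x86 PC is little-endian and matches the little-endian Cortex-M MCU that later memcpy's these bytes into a float union. Packing each field separately also avoids the alignment padding a single native format string would insert. A quick sanity check (illustrative values, not from the original):

import struct

# 0xFF header, point index 0, then x, y, depth as native-order floats
frame = struct.pack("B", 0xFF) + struct.pack("B", 0) \
    + struct.pack("f", 400.0) + struct.pack("f", 200.0) + struct.pack("f", 2.2)
assert len(frame) == 14               # 1 + 1 + 3 * 4 bytes, no padding
assert struct.calcsize("Bfff") == 16  # a combined native format would pad to alignment (on typical x86)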

Control (MCU Side)

The MCU side is built on the DJI RM-A development board and runs the RT-Thread embedded real-time operating system, so the code below fits the RT-Thread platform directly; other platforms will require porting.

Data Reception

This part receives and parses the data over the serial port.

Code flow:

  1. Initialize the serial port, enable reception in DMA + idle-interrupt mode, and set the idle-interrupt callback
  2. Parse the received data inside the idle-interrupt callback
  3. In the parser, copy the bytes into the union's raw field; other threads then read val to get the float values
#include <rtthread.h>
#include <rtdevice.h>

typedef enum _vision_frame_e
{
    X = 0, // center x in the image
    Y,     // center y in the image
    Z,     // depth distance
} vision_frame_e;

typedef struct
{
    uint16_t num; // number of points in the frame
    union
    {
        uint8_t raw[12]; // raw bytes as received
        float val[3];    // X, Y, Z as floats (same memory)
    } point[5];
} vision_frame_t;

#define VF_UART_NAME "uart3"

vision_frame_t vision_frame;
static uint8_t vf_buffer[RT_SERIAL_RB_BUFSZ + 1];

/* serial device handle and configuration */
static rt_device_t vf_serial;
static struct serial_configure vf_serial_config = {
    .baud_rate = 115200,       // baud rate
    .data_bits = DATA_BITS_8,  // data bits
    .stop_bits = STOP_BITS_1,  // stop bits
    .bufsz     = 128,          // receive buffer size
    .parity    = PARITY_NONE,  // parity
    .bit_order = BIT_ORDER_LSB // bit order
};

// Frame: header | point count | data            | checksum (reserved, not checked here)
//        FF     | NUM         | X Y Z as floats | CS
// Data: (center X, center Y, center Z) ... repeated per point
/* parse and handle a received frame */
static void vf_frame_parse(vision_frame_t *vf, uint8_t *buf)
{
    if (buf[0] == 0xFF)
    {
        vf->num = buf[1];
        /* the union shares memory: copy the raw bytes in, then read val[]
           to get the floats (only the first point is sent by the PC) */
        rt_memcpy(&(vf->point[0].raw[0]), &(buf[2]), 12);
    }
}

/* rx-indicate callback, fired by the DMA idle interrupt */
static rt_err_t vf_dma_callback(rt_device_t dev, rt_size_t size)
{
    rt_device_read(dev, 0, vf_buffer, size);
    vf_frame_parse(&vision_frame, vf_buffer);
    return RT_EOK;
}

int vision_recv_init(void)
{
    rt_err_t ret = RT_EOK;

    /* find the serial device */
    vf_serial = rt_device_find(VF_UART_NAME);
    if (!vf_serial)
    {
        rt_kprintf("find %s failed!\n", VF_UART_NAME);
        return RT_ERROR;
    }
    /* apply the configuration through the device control interface */
    ret = rt_device_control(vf_serial, RT_DEVICE_CTRL_CONFIG, &vf_serial_config);
    if (ret != RT_EOK)
    {
        rt_kprintf("rt_device_control failed!\n");
        return RT_ERROR;
    }
    /* open in DMA receive (idle interrupt) mode */
    ret = rt_device_open(vf_serial, RT_DEVICE_FLAG_DMA_RX);
    if (ret != RT_EOK)
    {
        rt_kprintf("rt_device_open failed!\n");
        return RT_ERROR;
    }
    /* register the receive callback */
    ret = rt_device_set_rx_indicate(vf_serial, vf_dma_callback);
    if (ret != RT_EOK)
    {
        rt_kprintf("rt_device_set_rx_indicate failed!\n");
        return RT_ERROR;
    }

    return ret;
}
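
A minimal usage sketch, assuming the init function is registered via RT-Thread's INIT_APP_EXPORT macro so it runs during startup (this wiring is not shown in the original); any thread can then read the most recently parsed values through the union:

/* hypothetical wiring: run vision_recv_init automatically at startup */
INIT_APP_EXPORT(vision_recv_init);

/* elsewhere, e.g. in the chassis control thread */
static void follow_task(void)
{
    float x     = vision_frame.point[0].val[X]; /* center x in pixels */
    float depth = vision_frame.point[0].val[Z]; /* distance in metres */
    /* rt_kprintf has no %f, so print as fixed-point (positive values assumed) */
    rt_kprintf("target: x=%d.%02d depth=%d.%02d\n",
               (int)x, (int)(x * 100) % 100,
               (int)depth, (int)(depth * 100) % 100);
}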

Motion Control

This part uses two PID controllers. The first adjusts the robot's yaw rate to keep the object at the horizontal center of the image (x = 400 px in the 800-pixel-wide frame).

The second adjusts the forward speed so the distance between the object and the robot is held at the setpoint (2.2 m).

The following is pseudocode rather than a complete program; it illustrates the basic computation:

/* yaw: drive the object's image x towards the frame center (400 px) */
chassis.out.wz = simple_pid_update(&follow_yaw_pid, 400.0f, vision_frame.point[0].val[0], 0.01f);
chassis.out.wz = -float_deadline(chassis.out.wz, -0.01f, 0.01f);
/* forward speed: hold the measured depth at 2.2 m */
chassis.out.vx = simple_pid_update(&follow_dis_pid, 2.2f, vision_frame.point[0].val[2], 0.01f);
chassis.out.vx = -float_deadline(chassis.out.vx, -0.03f, 0.03f);
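
simple_pid_update and float_deadline are not shown in the post. Below is a minimal sketch of what they might look like, assuming simple_pid_update is a textbook positional PID taking (setpoint, measurement, period) and float_deadline is a dead-band that zeroes out small outputs to suppress jitter near the setpoint:

typedef struct
{
    float kp, ki, kd;
    float integral;
    float last_err;
} simple_pid_t;

/* one PID step: set is the setpoint, get the measurement, dt the period in seconds */
static float simple_pid_update(simple_pid_t *pid, float set, float get, float dt)
{
    float err = set - get;
    pid->integral += err * dt;
    float out = pid->kp * err
              + pid->ki * pid->integral
              + pid->kd * (err - pid->last_err) / dt;
    pid->last_err = err;
    return out;
}

/* dead-band: values inside (min, max) are treated as zero */
static float float_deadline(float value, float min, float max)
{
    return (value > min && value < max) ? 0.0f : value;
}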