paddleDetection前置

环境

GPU

  • NVIDIA 3090*2

  • 显卡驱动 515.43.04

  • CUDA版本 11.7

  • CUDAtoolkit (cuda_11.7.0_515.43.04_linux)

  • cuDNN (v8.4.1)

  • paddlepaddle 多卡训练需要NCCL支持 (nccl v2.12.12 cuda11.7)

paddlepaddle版本

  • v2.4.2

paddleDetection版本

  • v2.6.0

python环境

  • CentOS7.9

  • anaconda3

  • python3.8

普通视频处理

h264格式视频被opencv解析时,若帧率超过65535则报错

源码:

./deploy/pipeline/pipeline.py predict_video

1
2
3
# NOTE(review): excerpt from predict_video. `fps` comes straight from the
# capture source; cv2.VideoWriter errors out when it exceeds 65535 (see note above
# in the article), and 'mp4v' output grows much larger than the h264 input.
out_path = os.path.join(self.output_dir, video_out_name + ".mp4")
fourcc = cv2.VideoWriter_fourcc(* 'mp4v')
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))

输入:cv2.VideoCapture()

输出:cv2.VideoWriter()

本GPU模式下对时长5分钟的行人检测视频处理时间约20分钟(高精度模型),视频体积增大13倍(100M->1.3G)

视频流

问题

  1. h264格式存在问题(需要重新编译opencv),实时帧数过高(>65535)cv2.VideoWriter报错,无法保存视频

    问题原因:libx264基于GPL许可,ffmpeg编码器要使用libx264必须开启--enable-gpl,而opencv使用的是MIT许可,默认不包含libx264

  2. 如果视频流传输加载较慢,获取不到视频报错(修改pipeline.py predict_video)

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    class FrameQueueEmptyException(Exception):
        """Raised when no frame could be read from the frame queue.

        Args:
            time: number of seconds we waited without receiving a frame.
        """

        def __init__(self, time):
            self.time = time

        def __str__(self):
            # BUG FIX: __str__ must RETURN a string. The original body called
            # print(...) and implicitly returned None, so str(exc) — and any
            # traceback rendering of the exception — raised a TypeError.
            return f"{self.time}秒无法获取frame队列,请检查摄像头是否正常运转"

    # Poll the frame queue for up to 10 seconds before giving up on the source.
    for i in range(1, 11):
        if not framequeue.empty():
            break
        time.sleep(1)
        print(f"Couldn't catch framequeue for {i} seconds")
    else:
        # BUG FIXES vs. the original excerpt:
        # 1. `raise FrameQueueEmptyException` raised the class without the
        #    required `time` argument, failing with a TypeError at raise time.
        # 2. The original checked `if i == 10` after the loop, which also raised
        #    when a frame arrived on the 10th attempt; for/else only triggers
        #    when the loop completed without ever breaking.
        raise FrameQueueEmptyException(10)

摄像头 & 应用

以liveCMS应用为例(国标设备见文档:LiveGBS国标GB-T28181视频流媒体平台)

每26天需更新一次

  • liveCMS应用安装在192.168.9.165机器

  • 国标设备服务端口:192.168.9.165:10005账号:admin,密码:A12345678

  • 近端设备地址:192.168.9.207

  • 设备主板ip:192.168.177.177账户:root,密码:admin

  • 摄像头设备地址:192.168.1.131、192.168.1.133

摄像头推流

摄像头推流api:http://<livecms_host>:<port>/api/v1/stream/list

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
{
"StreamCount": 1,
"Streams": [
{
"AudioEnable": false,
"CDN": "",
"CascadeSize": 0,
"ChannelID": "34020000001320000131",
"ChannelName": "192.168.1.131",
"CloudRecord": false,
"DeviceID": "34020000002000000207",
"Duration": 20520,
"FLV": "http://192.168.9.165:10005/sms/34020000002020000001/flv/hls/34020000002000000207_34020000001320000131.flv",
"HLS": "http://192.168.9.165:10005/sms/34020000002020000001/hls/34020000002000000207_34020000001320000131/live.m3u8",
"InBitRate": 914,
"InBytes": 2196788210,
"NumOutputs": 1,
"Ondemand": true,
"OutBytes": 2308731853,
"RTMP": "rtmp://192.168.9.165:11935/hls/34020000002000000207_34020000001320000131",
"RTPCount": 2049812,
"RTPLostCount": 3526,
"RTPLostRate": 0.6256656017039404,
"RTSP": "rtsp://192.168.9.165:554/34020000002000000207_34020000001320000131",
"RecordStartAt": "",
"RelaySize": 0,
"SMSID": "34020000002020000001",
"SnapURL": "/sms/34020000002020000001/snap/34020000002000000207/34020000001320000131.jpg?t=1690332332290541800",
"SourceAudioCodecName": "aac",
"SourceAudioSampleRate": 8000,
"SourceVideoCodecName": "h264",
"SourceVideoFrameRate": 30,
"SourceVideoHeight": 720,
"SourceVideoWidth": 1280,
"StartAt": "2023-07-26 08:45:30",
"StreamID": "stream:34020000002000000207:34020000001320000131",
"Transport": "UDP",
"VideoFrameCount": 605632,
"WEBRTC": "webrtc://192.168.9.165:10005/sms/34020000002020000001/rtc/34020000002000000207_34020000001320000131",
"WS_FLV": "ws://192.168.9.165:10005/sms/34020000002020000001/ws-flv/hls/34020000002000000207_34020000001320000131.flv"
}
]
}

执行paddleDetection --rtsp指令

1
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml --rtsp http://192.168.9.165:10005/sms/34020000002020000001/flv/hls/34020000002000000207_34020000001320000131.flv --device=gpu

解决rtsp视频流中断问题(样例)

pipeline.py - class PipePredictor

1
self.rtsp = args.rtsp

pipeline.py - predict_video

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
# NOTE(review): excerpt from predict_video; indentation was lost when the
# snippet was pasted into the page, so this block is illustrative rather than
# runnable as-is. Overall intent: when the frame queue runs dry on an RTSP
# source, reopen the capture, restart the reader thread, and poll for up to
# 10 seconds before giving up.
while True:
flag = False
if not framequeue.empty():
...
else:
if self.rtsp:
count = 1
# Reconnect to the stream after the queue ran dry.
capture = cv2.VideoCapture(video_file)
try:
fps = int(capture.get(cv2.CAP_PROP_FPS))
except OverflowError:
# Reported FPS can overflow int conversion; fall back to a default.
fps = 20
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print("restart camera fps: %d, frame_count: %d" % (fps, frame_count))

framequeue = queue.Queue(10)
# Restart the background capture thread that feeds the queue.
thread = threading.Thread(
target=self.capturevideo, args=(capture, framequeue, execute_time))
thread.start()
time.sleep(1)
while True:

if framequeue.empty():
time.sleep(1)
print(f"Couldn't catch framequeue for {count} seconds")
count += 1
if count == 11:
# Still no frames after ~10 s: mark the source as dead.
flag = True
break
else:
break
else:
flag = True
if self.dbconfig:
if self.dbconfig.rtsp:
# Assemble the saved frames into an output video in a worker thread.
frame_thread_ = threading.Thread(
target=self.video_composition, args=(self.frame_dir, self.out_path, video_fps)
)
if not os.listdir(os.path.dirname(self.out_path)):
if not self.frame_flag:
frame_thread_.start()
if flag:
break

使用Python脚本

pipeline/pipeline.py

在对应config文件中将MOT的tracker_config修改为绝对路径

1
2
3
4
5
MOT:
# Pretrained tracking model archive (downloaded automatically by the pipeline).
model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.yml
tracker_config: /exp/work/video/PaddleDetection/deploy/pipeline/config/tracker_config.yml
batch_size: 1
enable: True

修改pipeline.py启动脚本

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# Launcher excerpt from deploy/pipeline/pipeline.py: the usual CLI flags are
# overridden in code so the script can be started as plain `python pipeline.py`.
if __name__ == '__main__':
paddle.enable_static()

# parse params from command
parser = argsparser()
FLAGS = parser.parse_args()
# Hard-coded overrides replacing --device/--config/--video_file/--output_dir.
FLAGS.device = 'gpu'
FLAGS.config = '/exp/work/video/PaddleDetection/deploy/pipeline/config/examples/infer_cfg_human_attr.yml'
FLAGS.video_file = '/exp/work/video/PaddleDetection/demo_input/t2.mp4'
FLAGS.output_dir = '/exp/work/video/PaddleDetection/demo_output/'
FLAGS.device = FLAGS.device.upper()
assert FLAGS.device in ['CPU', 'GPU', 'XPU', 'NPU'
], "device should be CPU, GPU, XPU or NPU"

main()

修改cfg_utils.py argsparser;parse_args,修改和注释掉config作为必要条件

1
2
3
4
5
6
# Excerpt from cfg_utils.py (ArgsParser): the assert requiring --config is
# commented out so pipeline.py can be launched with the config path set in code.
def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
# assert args.config is not None, \
# "Please specify --config=configure_file_path."
args.opt = self._parse_opt(args.opt)
return args
1
2
3
4
5
6
# --config is made optional (required True -> False); the path is assigned
# programmatically in the launcher instead of on the command line.
parser.add_argument(
"--config",
type=str,
default=None,
help=("Path of configure"),
required=False) # True ——> False

启动脚本:

1
python pipeline.py

或将其封装为接口

python/infer.py

导出预测模型

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# Export the YOLOv3 detection model
python tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --output_dir=./inference_model \
-o weights=https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams

# Export the HigherHRNet (bottom-up) keypoint detection model
python tools/export_model.py -c configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml -o weights=https://paddledet.bj.bcebos.com/models/keypoint/higherhrnet_hrnet_w32_512.pdparams

# Export the HRNet (top-down) keypoint detection model
python tools/export_model.py -c configs/keypoint/hrnet/hrnet_w32_384x288.yml -o weights=https://paddledet.bj.bcebos.com/models/keypoint/hrnet_w32_384x288.pdparams

# Export the FairMOT multi-object tracking model
python tools/export_model.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams

# Export the ByteTrack multi-object tracking model (only the detector is exported)
python tools/export_model.py -c configs/mot/bytetrack/detector/ppyoloe_crn_l_36e_640x640_mot17half.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/ppyoloe_crn_l_36e_640x640_mot17half.pdparams

预测

修改deploy/python/infer.py启动脚本

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
# Launcher excerpt from deploy/python/infer.py: flags are overridden in code so
# single-model inference can run without command-line arguments.
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
FLAGS = parser.parse_args()
# Hard-coded overrides: device, input video, output dir, exported model dir.
FLAGS.device = 'gpu'
FLAGS.video_file = '/exp/work/video/PaddleDetection/demo_input/t2.mp4'
FLAGS.output_dir = '/exp/work/video/PaddleDetection/demo_output/'
FLAGS.model_dir = '/exp/work/video/PaddleDetection/inference_model/yolov3_darknet53_270e_coco'
print_arguments(FLAGS)
FLAGS.device = FLAGS.device.upper()
assert FLAGS.device in ['CPU', 'GPU', 'XPU', 'NPU'
], "device should be CPU, GPU, XPU or NPU"
assert not FLAGS.use_gpu, "use_gpu has been deprecated, please use --device"

assert not (
FLAGS.enable_mkldnn == False and FLAGS.enable_mkldnn_bfloat16 == True
), 'To enable mkldnn bfloat, please turn on both enable_mkldnn and enable_mkldnn_bfloat16'

main()

pp-human

行人检测模块,对应配置文件位置:deploy/pipeline/config/examples/infer_cfg_pphuman.yml

通过命令启动检测脚本

1
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml --device=gpu --video_file=demo_input/t1.mp4 --output_dir=demo_output/

pp-vehicle

车辆检测模块,对应配置文件位置:deploy/pipeline/config/examples/infer_cfg_ppvehicle.yml

通过命令启动检测脚本

1
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml --video_file=demo_input/car.mp4 --device=gpu --output_dir=demo_output

Powered by Hexo and Hexo-theme-hiker

Copyright © 2017 - 2024 青域 All Rights Reserved.

UV : | PV :