YOLOv8 Fire Detection

Environment

GPU

  • NVIDIA GeForce RTX 3090 × 2
  • GPU driver 535.104.05
  • CUDA version 12.2
  • CUDA Toolkit (cuda_12.2.2_535.104.05_linux)
  • cuDNN (v8.9.7)

YOLO version

  • v8.1.5 (Ultralytics YOLOv8)

PyTorch version

  • v2.1.2

Python environment

  • CentOS7.9
  • anaconda3
  • python3.9
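
Before installing anything, it can help to confirm that this PyTorch/CUDA stack actually sees both GPUs. A minimal sketch, assuming PyTorch 2.1.2 is already installed in the conda environment listed above:

import torch

# Quick environment check: versions and visible GPUs
print(torch.__version__)          # expected: 2.1.2 (cu121 build)
print(torch.version.cuda)         # CUDA version PyTorch was built against
print(torch.cuda.is_available())  # True if the driver/CUDA setup works
print(torch.cuda.device_count())  # expected: 2 for the dual RTX 3090 setup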

Installation

Source repository: https://github.com/ultralytics/ultralytics

Official documentation: https://docs.ultralytics.com/zh

Clone the source code

git clone https://hub.nuaa.cf/ultralytics/ultralytics.git

Install dependencies

pip install ultralytics -i https://mirror.baidu.com/pypi/simple

Environment verification

Python

import ultralytics
ultralytics.checks()

CLI

yolo predict model=yolov8n.pt source=ultralytics/assets/zidane.jpg

After execution, the output is as follows:

(py39_yolov8) [root@jdz yolov8]# yolo predict model=yolov8n.pt source=ultralytics/assets/zidane.jpg 
Ultralytics YOLOv8.1.5 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs

image 1/1 /exp/work/video/yolov8/ultralytics/assets/zidane.jpg: 384x640 2 persons, 1 tie, 216.9ms
Speed: 7.3ms preprocess, 216.9ms inference, 762.4ms postprocess per image at shape (1, 3, 384, 640)
Results saved to runs/detect/predict
💡 Learn more at https://docs.ultralytics.com/modes/predict

The output can be found in the runs/detect/predict directory.

At this point, the base environment has been verified to work.
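
The same smoke test can also be run through the Python API instead of the CLI; a minimal sketch, using the same pretrained weights and sample image as above:

from ultralytics import YOLO

# Same check as the CLI command above, via the Python API
model = YOLO('yolov8n.pt')
results = model.predict('ultralytics/assets/zidane.jpg', save=True)
print(results[0].boxes)  # boxes detected in the sample image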

Training dataset

Go to Kaggle and look for a fire training dataset. Here the Fire and Smoke Dataset is used (https://www.kaggle.com/datasets/dataclusterlabs/fire-and-smoke-dataset).

The dataset contains 100 images of smoke and fire in total.

Data annotation

Annotation tool: labelme

Mark the flames and smoke in each image with rectangular boxes, dividing the labels into two classes: 0 = fire, 1 = smoke.

Data format conversion

Convert the labelme annotations to YOLO format.
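
Before the full script below, here is a minimal sketch of the conversion for a single rectangle, to make the coordinate math explicit (the pixel values are made up for illustration):

# One labelme rectangle -> one YOLO label line (illustrative values only)
x1, y1, x2, y2 = 120.0, 80.0, 360.0, 300.0  # rectangle corners in pixels (hypothetical)
img_w, img_h = 640, 480                     # image size in pixels (hypothetical)

cx = (x1 + x2) / 2 / img_w  # normalized center x
cy = (y1 + y2) / 2 / img_h  # normalized center y
w = (x2 - x1) / img_w       # normalized width
h = (y2 - y1) / img_h       # normalized height

print(f'0 {cx:.4f} {cy:.4f} {w:.4f} {h:.4f}')  # class 0 = fire

The full dataset-splitting and conversion script: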

import os
import json
import shutil
import random
import numpy as np
from tqdm import tqdm
from settings import PR

# Dataset root directory
Dataset_root = os.path.join(PR, 'datasets/fire')

os.chdir(fr'{Dataset_root}/images')
test_frac = 0.2   # fraction of images used for the validation set
random.seed(123)  # random seed, for reproducibility

img_paths = os.listdir(fr'{Dataset_root}/images')
random.shuffle(img_paths)  # shuffle randomly

val_number = int(len(img_paths) * test_frac)  # number of validation files
train_files = img_paths[val_number:]          # training file names
val_files = img_paths[:val_number]            # validation file names

print('Total number of files in the dataset:', len(img_paths))
print('Number of training files:', len(train_files))
print('Number of validation files:', len(val_files))

if not os.path.exists('train'):
    os.mkdir('train')
for each in tqdm(train_files):
    shutil.move(each, 'train')

if not os.path.exists('val'):
    os.mkdir('val')
for each in tqdm(val_files):
    shutil.move(each, 'val')

os.chdir(fr'{Dataset_root}/labelme_jsons')

if not os.path.exists('train'):
    os.mkdir('train')
for each in tqdm(train_files):
    src_path = each.split('.')[0] + '.json'
    shutil.move(src_path, 'train')

if not os.path.exists('val'):
    os.mkdir('val')
for each in tqdm(val_files):
    src_path = each.split('.')[0] + '.json'
    shutil.move(src_path, 'val')


classes = {
    'fire': 0,
    'smoke': 1
}

os.chdir(Dataset_root)

with open('classes.txt', 'w', encoding='utf-8') as f:
    for each in list(classes.keys()):
        f.write(each + '\n')

if not os.path.exists('labels'):
    os.mkdir('labels')
if not os.path.exists('labels/train'):
    os.mkdir('labels/train')
if not os.path.exists('labels/val'):
    os.mkdir('labels/val')


def process_single_json(labelme_path, save_folder=fr'{Dataset_root}/labels/train'):
    # Load the labelme-format JSON annotation file
    with open(labelme_path, 'r', encoding='utf-8') as f:
        labelme = json.load(f)

    img_width = labelme['imageWidth']    # image width
    img_height = labelme['imageHeight']  # image height

    # Generate the YOLO-format txt file
    stem = labelme_path.split('.')[-2]
    yolo_txt_path = stem + '.txt'

    with open(yolo_txt_path, 'w', encoding='utf-8') as f:
        for each_ann in labelme['shapes']:  # iterate over every annotation

            if each_ann['shape_type'] == 'rectangle':  # keep only rectangle boxes

                # Class ID
                bbox_class_id = classes[each_ann['label']]

                # Top-left and bottom-right pixel coordinates
                bbox_top_left_x = int(min(each_ann['points'][0][0], each_ann['points'][1][0]))
                bbox_bottom_right_x = int(max(each_ann['points'][0][0], each_ann['points'][1][0]))
                bbox_top_left_y = int(min(each_ann['points'][0][1], each_ann['points'][1][1]))
                bbox_bottom_right_y = int(max(each_ann['points'][0][1], each_ann['points'][1][1]))

                # Pixel coordinates of the box center
                bbox_center_x = int((bbox_top_left_x + bbox_bottom_right_x) / 2)
                bbox_center_y = int((bbox_top_left_y + bbox_bottom_right_y) / 2)

                # Box width
                bbox_width = bbox_bottom_right_x - bbox_top_left_x

                # Box height
                bbox_height = bbox_bottom_right_y - bbox_top_left_y

                # Normalized center coordinates
                bbox_center_x_norm = bbox_center_x / img_width
                bbox_center_y_norm = bbox_center_y / img_height

                # Normalized width
                bbox_width_norm = bbox_width / img_width
                # Normalized height
                bbox_height_norm = bbox_height / img_height

                # One YOLO-format annotation line, rounded to four decimal places
                bbox_yolo_str = '{} {:.4f} {:.4f} {:.4f} {:.4f}'.format(bbox_class_id, bbox_center_x_norm,
                                                                        bbox_center_y_norm, bbox_width_norm,
                                                                        bbox_height_norm)
                # Write the line to the txt file
                f.write(bbox_yolo_str + '\n')

    shutil.move(yolo_txt_path, save_folder)
    print('{} --> {} conversion complete'.format(labelme_path, yolo_txt_path))


os.chdir(fr'{Dataset_root}/labelme_jsons/train')

save_folder = fr'{Dataset_root}/labels/train'
for path in os.listdir():
    process_single_json(path, save_folder=save_folder)
print('YOLO-format txt annotations saved to', save_folder)

os.chdir(fr'{Dataset_root}/labelme_jsons/val')
save_folder = fr'{Dataset_root}/labels/val'
for path in os.listdir():
    process_single_json(path, save_folder=save_folder)
print('YOLO-format txt annotations saved to', save_folder)

Create the training yaml file

Create a fire.yaml file in the dataset root directory to record the dataset information.

train: /exp/work/video/yolov8/datasets/fire/data/train/images # training set
val: /exp/work/video/yolov8/datasets/fire/data/val/images # validation set
test: /exp/work/video/yolov8/datasets/fire/data/test/images # test set
nc: 2 # number of classes

names: [fire, smoke] # class names
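
A quick sanity check that the directory layout matches what fire.yaml declares can save a failed run; a hedged sketch (Ultralytics derives label paths by replacing "images" with "labels", so each split is assumed to have sibling images/ and labels/ folders; adjust the root path if your layout differs):

import os

# Hypothetical sanity check for the dataset layout referenced by fire.yaml
root = '/exp/work/video/yolov8/datasets/fire/data'
for split in ('train', 'val'):
    img_dir = os.path.join(root, split, 'images')
    lbl_dir = os.path.join(root, split, 'labels')
    imgs = os.listdir(img_dir)
    missing = [f for f in imgs
               if not os.path.exists(os.path.join(lbl_dir, os.path.splitext(f)[0] + '.txt'))]
    print(f'{split}: {len(imgs)} images, {len(missing)} without a label file')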

Training

Create a training script yolov8_train.py in the project root and set up dual-GPU training.

from ultralytics import YOLO

# Load the model
model = YOLO('yolov8n.pt')  # load a pretrained model

# Dual-GPU training
model.train(
    data='datasets/fire/data/fire.yaml',
    epochs=300,
    device=[0, 1])

# Run validation
model.val()
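
If a run is interrupted, it can usually be resumed from the most recent checkpoint; a minimal sketch (the run directory runs/detect/train9 is taken from the logs below and will differ between runs):

from ultralytics import YOLO

# Resume an interrupted run from its last checkpoint (run directory assumed)
model = YOLO('runs/detect/train9/weights/last.pt')
model.train(resume=True)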

Run the training script to start training:

python yolov8_train.py

The configuration information is printed:

Ultralytics YOLOv8.1.5 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
CUDA:1 (NVIDIA GeForce RTX 3090, 24260MiB)
engine/trainer: task=detect, mode=train, model=yolov8n.pt, data=datasets/fire/data/fire.yaml, epochs=300, time=None, patience=50, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=[0, 1], workers=8, project=None, name=train9, exist_ok=False, pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, multi_scale=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, vid_stride=1, stream_buffer=False, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, embed=None, show=False, save_frames=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, show_boxes=True, line_width=None, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, auto_augment=randaugment, erasing=0.4, crop_fraction=1.0, cfg=None, tracker=botsort.yaml, save_dir=runs/detect/train9
Overriding model.yaml nc=80 with nc=2
                   from  n    params  module                                       arguments                     
  0                  -1  1       464  ultralytics.nn.modules.conv.Conv             [3, 16, 3, 2]
  1                  -1  1      4672  ultralytics.nn.modules.conv.Conv             [16, 32, 3, 2]
  2                  -1  1      7360  ultralytics.nn.modules.block.C2f             [32, 32, 1, True]
  3                  -1  1     18560  ultralytics.nn.modules.conv.Conv             [32, 64, 3, 2]
  4                  -1  2     49664  ultralytics.nn.modules.block.C2f             [64, 64, 2, True]
  5                  -1  1     73984  ultralytics.nn.modules.conv.Conv             [64, 128, 3, 2]
  6                  -1  2    197632  ultralytics.nn.modules.block.C2f             [128, 128, 2, True]
  7                  -1  1    295424  ultralytics.nn.modules.conv.Conv             [128, 256, 3, 2]
  8                  -1  1    460288  ultralytics.nn.modules.block.C2f             [256, 256, 1, True]
  9                  -1  1    164608  ultralytics.nn.modules.block.SPPF            [256, 256, 5]
 10                  -1  1         0  torch.nn.modules.upsampling.Upsample         [None, 2, 'nearest']
 11             [-1, 6]  1         0  ultralytics.nn.modules.conv.Concat           [1]
 12                  -1  1    148224  ultralytics.nn.modules.block.C2f             [384, 128, 1]
 13                  -1  1         0  torch.nn.modules.upsampling.Upsample         [None, 2, 'nearest']
 14             [-1, 4]  1         0  ultralytics.nn.modules.conv.Concat           [1]
 15                  -1  1     37248  ultralytics.nn.modules.block.C2f             [192, 64, 1]
 16                  -1  1     36992  ultralytics.nn.modules.conv.Conv             [64, 64, 3, 2]
 17            [-1, 12]  1         0  ultralytics.nn.modules.conv.Concat           [1]
 18                  -1  1    123648  ultralytics.nn.modules.block.C2f             [192, 128, 1]
 19                  -1  1    147712  ultralytics.nn.modules.conv.Conv             [128, 128, 3, 2]
 20             [-1, 9]  1         0  ultralytics.nn.modules.conv.Concat           [1]
 21                  -1  1    493056  ultralytics.nn.modules.block.C2f             [384, 256, 1]
 22        [15, 18, 21]  1    751702  ultralytics.nn.modules.head.Detect           [2, [64, 128, 256]]
Model summary: 225 layers, 3011238 parameters, 3011222 gradients, 8.2 GFLOPs

Transferred 319/355 items from pretrained weights
DDP: debug command /root/anaconda3/envs/py39_yolov8/bin/python -m torch.distributed.run --nproc_per_node 2 --master_port 45027 /root/.config/Ultralytics/DDP/_temp_bfxihrb7140614302513712.py
WARNING:__main__:
*****************************************
Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
*****************************************
Ultralytics YOLOv8.1.5 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
CUDA:1 (NVIDIA GeForce RTX 3090, 24260MiB)
Overriding model.yaml nc=80 with nc=2
Transferred 319/355 items from pretrained weights
Freezing layer 'model.22.dfl.conv.weight'
AMP: running Automatic Mixed Precision (AMP) checks with YOLOv8n...
AMP: checks passed ✅

Recorded model output:

Plotting labels to runs/detect/train9/labels.jpg... 
optimizer: 'optimizer=auto' found, ignoring 'lr0=0.01' and 'momentum=0.937' and determining best 'optimizer', 'lr0' and 'momentum' automatically...
optimizer: AdamW(lr=0.000714, momentum=0.9) with parameter groups 57 weight(decay=0.0), 64 weight(decay=0.0005), 63 bias(decay=0.0)
Image sizes 640 train, 640 val
Using 16 dataloader workers
Logging results to runs/detect/train9

Training starts:

Starting training for 300 epochs...

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
1/300 1.38G 2.132 4.599 2.13 6 640: 100%|██████████| 3/3 [00:04<00:00, 1.53s/it]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 3.10it/s]
all 41 62 0.00191 0.349 0.0223 0.0121

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
2/300 1.44G 1.984 4.085 1.942 18 640: 100%|██████████| 3/3 [00:00<00:00, 5.08it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 11.57it/s]
all 41 62 0.00209 0.373 0.0247 0.0119

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
3/300 1.48G 2.421 4.407 2.348 14 640: 100%|██████████| 3/3 [00:00<00:00, 5.78it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 10.23it/s]
all 41 62 0.00205 0.373 0.0335 0.0165

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
4/300 1.48G 1.795 3.787 1.724 18 640: 100%|██████████| 3/3 [00:00<00:00, 5.61it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 9.82it/s]
all 41 62 0.00239 0.434 0.0445 0.0219

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
5/300 1.48G 1.707 3.619 1.785 17 640: 100%|██████████| 3/3 [00:00<00:00, 5.49it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 8.87it/s]
all 41 62 0.003 0.495 0.0735 0.0283

......

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
295/300 1.5G 0.3742 0.5917 0.8247 7 640: 100%|██████████| 3/3 [00:00<00:00, 7.21it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 11.40it/s]
all 41 62 0.996 1 0.995 0.946

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
296/300 1.44G 0.4263 0.6716 0.8004 7 640: 100%|██████████| 3/3 [00:00<00:00, 6.86it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 11.97it/s]
all 41 62 0.996 1 0.995 0.945

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
297/300 1.5G 0.473 0.6775 0.8499 7 640: 100%|██████████| 3/3 [00:00<00:00, 7.20it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 11.12it/s]
all 41 62 0.996 1 0.995 0.945

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
298/300 1.51G 0.4877 0.6956 0.8936 11 640: 100%|██████████| 3/3 [00:00<00:00, 6.73it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 11.03it/s]
all 41 62 0.996 1 0.995 0.942

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
299/300 1.5G 0.4303 0.622 0.8264 6 640: 100%|██████████| 3/3 [00:00<00:00, 6.89it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 11.62it/s]
all 41 62 0.996 1 0.995 0.941

Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size
300/300 1.44G 0.4593 0.6413 0.8419 6 640: 100%|██████████| 3/3 [00:00<00:00, 6.31it/s]
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 11.83it/s]
all 41 62 0.995 1 0.995 0.94

Training finishes and validation runs:

300 epochs completed in 0.101 hours.
Optimizer stripped from runs/detect/train9/weights/last.pt, 6.3MB
Optimizer stripped from runs/detect/train9/weights/best.pt, 6.3MB

Validating runs/detect/train9/weights/best.pt...
Ultralytics YOLOv8.1.5 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
CUDA:1 (NVIDIA GeForce RTX 3090, 24260MiB)
Model summary (fused): 168 layers, 3006038 parameters, 0 gradients, 8.1 GFLOPs
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:00<00:00, 10.62it/s]
all 41 62 0.996 1 0.995 0.946
fire 41 41 0.999 1 0.995 0.938
smoke 41 21 0.992 1 0.995 0.955
Speed: 0.1ms preprocess, 1.4ms inference, 0.0ms loss, 1.0ms postprocess per image
Results saved to runs/detect/train9
Ultralytics YOLOv8.1.5 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
CUDA:1 (NVIDIA GeForce RTX 3090, 24260MiB)
Model summary (fused): 168 layers, 3006038 parameters, 0 gradients, 8.1 GFLOPs
Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 3/3 [00:04<00:00, 1.41s/it]
all 41 62 0.996 1 0.995 0.941
fire 41 41 0.999 1 0.995 0.939
smoke 41 21 0.992 1 0.995 0.942
Speed: 0.2ms preprocess, 14.3ms inference, 0.0ms loss, 16.8ms postprocess per image
Results saved to runs/detect/train92

Evaluation

Open the folder where the training results are saved. The weights subfolder contains the best and the most recent model checkpoints, while the rest of the folder stores the training records, evaluation curves, and other artifacts.
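
Since fire.yaml also declares a test split, the best checkpoint can be evaluated on it separately; a minimal sketch (assumes the test images and labels actually exist at the paths given in fire.yaml):

from ultralytics import YOLO

# Evaluate the best checkpoint on the held-out test split declared in fire.yaml
model = YOLO('runs/detect/train9/weights/best.pt')
metrics = model.val(data='datasets/fire/data/fire.yaml', split='test')
print(metrics.box.map50, metrics.box.map)  # mAP50 and mAP50-95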

The results.csv file records the detailed results of all 300 epochs.
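
The per-epoch losses and metrics in results.csv can be plotted for a quick look at convergence; a hedged sketch using pandas and matplotlib (column names are taken from a YOLOv8.1-era results.csv and may differ slightly between versions):

import pandas as pd
import matplotlib.pyplot as plt

# Plot training box loss and validation mAP50 across the 300 epochs (run directory assumed)
df = pd.read_csv('runs/detect/train9/results.csv')
df.columns = df.columns.str.strip()  # column names are padded with spaces in the CSV

fig, ax = plt.subplots(1, 2, figsize=(10, 4))
ax[0].plot(df['epoch'], df['train/box_loss'])
ax[0].set_title('train/box_loss')
ax[1].plot(df['epoch'], df['metrics/mAP50(B)'])
ax[1].set_title('metrics/mAP50(B)')
plt.tight_layout()
plt.savefig('training_curves.png')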

Prediction

Create a prediction script yolov8_test.py in the project root:

from ultralytics import YOLO

model = YOLO('runs/detect/train9/weights/best.pt')  # use the best model
model.predict('video/fire.mp4', save=True, classes=[0, 1])  # restrict output to the two classes

Run the script:

video 1/1 (1/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 208.5ms
video 1/1 (2/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 9.6ms
video 1/1 (3/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.9ms
video 1/1 (4/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.9ms
video 1/1 (5/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 8.0ms
video 1/1 (6/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.8ms
video 1/1 (7/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.7ms
video 1/1 (8/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.6ms
video 1/1 (9/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.9ms
video 1/1 (10/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.8ms
video 1/1 (11/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 8.1ms
video 1/1 (12/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 8.1ms
video 1/1 (13/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 8.2ms
video 1/1 (14/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 8.0ms
video 1/1 (15/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.7ms
video 1/1 (16/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.8ms
video 1/1 (17/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 (no detections), 7.6ms
video 1/1 (18/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.6ms
video 1/1 (19/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.8ms
video 1/1 (20/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.6ms
video 1/1 (21/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.8ms
video 1/1 (22/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 2 fires, 7.7ms
video 1/1 (23/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.6ms
video 1/1 (24/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.5ms
video 1/1 (25/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.7ms
video 1/1 (26/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.5ms
video 1/1 (27/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.4ms
video 1/1 (28/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.6ms
video 1/1 (29/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.8ms
video 1/1 (30/306) /exp/work/video/yolov8/video/fire.mp4: 384x640 1 fire, 7.7ms
...
Speed: 2.2ms preprocess, 8.3ms inference, 3.8ms postprocess per image at shape (1, 3, 384, 640)
Results saved to runs/detect/predict13
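
Beyond saving the annotated video, the results can also be consumed frame by frame, for example to trigger an alarm; a minimal sketch using the same weights and video (stream=True keeps memory use low on long videos):

from ultralytics import YOLO

# Count fire/smoke detections per frame instead of only saving the annotated video
model = YOLO('runs/detect/train9/weights/best.pt')
for i, r in enumerate(model.predict('video/fire.mp4', stream=True, classes=[0, 1])):
    labels = [model.names[int(c)] for c in r.boxes.cls]  # class names for this frame
    if labels:
        print(f'frame {i}: {len(labels)} detections -> {labels}')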
