yolov8-seg: Real-Time Belt Conveyor Deviation Detection

Environment & Installation

Same as the earlier post "yolov8: fire detection".

The model used is yolov8n-seg.
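
(For reference, the core dependency can be installed with pip install ultralytics; the exact package versions follow the fire-detection post.)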

Data Annotation

Annotation tool: labelme

Label each segmentation target with a polygon.

Data Format Conversion

Convert the labelme polygon annotations to the yolo-seg label format. A general-purpose conversion script:

import os
import json


def convert_labelme_to_yolo(json_file, output_txt_file, label_to_class_id):
    with open(json_file, 'r') as file:
        data = json.load(file)

    image_width = data['imageWidth']
    image_height = data['imageHeight']

    with open(output_txt_file, 'w') as file:
        for shape in data['shapes']:
            points = shape['points']
            class_id = label_to_class_id.get(shape['label'], -1)
            normalized_points = []
            for x, y in points:
                nx = round(x / image_width, 6)
                ny = round(y / image_height, 6)
                normalized_points.append(f"{nx} {ny}")
            file.write(f"{class_id} " + " ".join(normalized_points) + "\n")


def batch_convert(json_folder, output_folder, label_to_class_id):
    # Make sure the output folder exists
    os.makedirs(output_folder, exist_ok=True)

    # Iterate over every JSON file in the folder
    for filename in os.listdir(json_folder):
        if filename.endswith('.json'):
            json_file = os.path.join(json_folder, filename)
            output_txt_file = os.path.join(output_folder, filename.replace('.json', '.txt'))
            convert_labelme_to_yolo(json_file, output_txt_file, label_to_class_id)


def create_label_file(label_dict, output):
    sorted_labels = [label for label, _ in sorted(label_dict.items(), key=lambda item: item[1])]

    with open(output, 'w') as file:
        for label in sorted_labels:
            file.write(f"{label}\n")


if __name__ == '__main__':
    # Example usage
    label_map = {
        'belt': 0,
    }

    json_path = r'D:\pycharmproject_2\yolo\ultralytics\datasets\belt_seg\json'
    output_path = r'D:\pycharmproject_2\yolo\ultralytics\datasets\belt_seg\images'

    batch_convert(json_path, output_path, label_map)
    # Write the class list, one class per line, sorted by the values in label_map
    create_label_file(label_map, r'D:\pycharmproject_2\yolo\ultralytics\datasets\belt_seg\yolo-label.txt')
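
Each line of a converted label file describes one polygon: the class id followed by the normalized vertex coordinates. For example (values illustrative, assuming a 640x360 image), a four-vertex belt instance becomes:

0 0.41875 0.011111 0.226563 0.863889 0.753125 0.863889 0.559375 0.011111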

Once conversion is done, tidy things up and split the dataset into train/test/val at a 5:1:1 ratio, as in the sketch below.
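
A minimal split sketch (the directory layout, file extensions, and fixed seed are assumptions; adapt the paths to your own dataset):

import os
import random
import shutil


def split_dataset(image_dir, label_dir, out_dir, ratios=(5, 1, 1), seed=0):
    # Shuffle deterministically, then cut into train/test/val by ratio
    images = sorted(f for f in os.listdir(image_dir)
                    if f.lower().endswith(('.jpg', '.png')))
    random.Random(seed).shuffle(images)

    total = sum(ratios)
    n_train = len(images) * ratios[0] // total
    n_test = len(images) * ratios[1] // total
    splits = {
        'train': images[:n_train],
        'test': images[n_train:n_train + n_test],
        'val': images[n_train + n_test:],
    }

    for split, files in splits.items():
        img_out = os.path.join(out_dir, split, 'images')
        lbl_out = os.path.join(out_dir, split, 'labels')
        os.makedirs(img_out, exist_ok=True)
        os.makedirs(lbl_out, exist_ok=True)
        for name in files:
            shutil.copy(os.path.join(image_dir, name), img_out)
            # Copy the matching yolo-seg label if it exists
            label_path = os.path.join(label_dir, os.path.splitext(name)[0] + '.txt')
            if os.path.exists(label_path):
                shutil.copy(label_path, lbl_out)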

Create the Training YAML File

Modeled on yolov8n-seg.yaml:

train: /exp/work/video/ultralytics/datasets/belt_seg/data/train/images
val: /exp/work/video/ultralytics/datasets/belt_seg/data/val/images
test: /exp/work/video/ultralytics/datasets/belt_seg/data/test/images
nc: 1

names: [belt]

Training

Code:

from ultralytics import YOLO

# Load a pretrained model (recommended for training)
model = YOLO('yolov8n-seg.pt')

# Train on two GPUs
model.train(
    data='datasets/belt_seg/belt_seg.yaml',
    epochs=300,
    device=[0, 1],
    save_dir='runs/segment/belt')  # NB: the log below shows results in runs/segment/train6, i.e. save_dir was overridden; project/name are the usual knobs
# Validate the model
model.val()
(py39_yolov8) [root@jdz ultralytics]# python yolov8_train.py 
Ultralytics YOLOv8.2.41 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
CUDA:1 (NVIDIA GeForce RTX 3090, 24260MiB)
engine/trainer: task=segment, mode=train, model=yolov8n-seg.pt, data=datasets/belt_seg/belt_seg.yaml, epochs=300, time=None, patience=100, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=[0, 1], workers=8, project=None, name=train6, exist_ok=False, pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, multi_scale=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, vid_stride=1, stream_buffer=False, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, embed=None, show=False, save_frames=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, show_boxes=True, line_width=None, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, bgr=0.0, mosaic=1.0, mixup=0.0, copy_paste=0.0, auto_augment=randaugment, erasing=0.4, crop_fraction=1.0, cfg=None, tracker=botsort.yaml, save_dir=runs/segment/train6
Overriding model.yaml nc=80 with nc=1

from n params module arguments
0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2]
1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2]
2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True]
3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2]
4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True]
5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2]
6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True]
7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2]
8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True]
9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5]
10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1]
12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1]
13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest']
14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1]
15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1]
16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2]
17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1]
18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1]
19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2]
20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1]
21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1]
22 [15, 18, 21] 1 1004275 ultralytics.nn.modules.head.Segment [1, 32, 64, [64, 128, 256]]
YOLOv8n-seg summary: 261 layers, 3263811 parameters, 3263795 gradients, 12.1 GFLOPs

Transferred 381/417 items from pretrained weights
DDP: debug command /root/anaconda3/envs/py39_yolov8/bin/python -m torch.distributed.run --nproc_per_node 2 --master_port 48598 /root/.config/Ultralytics/DDP/_temp_p1zi2y0s140608071072736.py
Ultralytics YOLOv8.2.41 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
CUDA:1 (NVIDIA GeForce RTX 3090, 24260MiB)
Overriding model.yaml nc=80 with nc=1
Transferred 381/417 items from pretrained weights
Freezing layer 'model.22.dfl.conv.weight'
AMP: running Automatic Mixed Precision (AMP) checks with YOLOv8n...
AMP: checks passed ✅
train: Scanning /exp/work/video/ultralytics/datasets/belt_seg/data/train/labels... 100 images, 0 backgrounds, 0 corrupt: 100%|██████████| 100/100 [00:00<00:00, 681.30it/s]
train: WARNING ⚠️ /exp/work/video/ultralytics/datasets/belt_seg/data/train/images/04221812151.jpg: corrupt JPEG restored and saved
train: WARNING ⚠️ /exp/work/video/ultralytics/datasets/belt_seg/data/train/images/1549055475952230.jpg: corrupt JPEG restored and saved
train: New cache created: /exp/work/video/ultralytics/datasets/belt_seg/data/train/labels.cache
val: Scanning /exp/work/video/ultralytics/datasets/belt_seg/data/val/labels... 20 images, 0 backgrounds, 0 corrupt: 100%|██████████| 20/20 [00:00<00:00, 557.79it/s]
val: New cache created: /exp/work/video/ultralytics/datasets/belt_seg/data/val/labels.cache
Plotting labels to runs/segment/train6/labels.jpg...
optimizer: 'optimizer=auto' found, ignoring 'lr0=0.01' and 'momentum=0.937' and determining best 'optimizer', 'lr0' and 'momentum' automatically...
optimizer: AdamW(lr=0.000714, momentum=0.9) with parameter groups 66 weight(decay=0.0), 77 weight(decay=0.0005), 76 bias(decay=0.0)
Image sizes 640 train, 640 val
Using 16 dataloader workers
Logging results to runs/segment/train6
Starting training for 300 epochs...

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
1/300 1.57G 0.9085 3.003 2.743 1.368 6 640: 100%|██████████| 7/7 [00:05<00:00, 1.35it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 2.54it/s]
all 20 21 0.0035 1 0.179 0.087 0.0035 1 0.113 0.0414

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
2/300 1.6G 0.7924 2.131 2.561 1.221 6 640: 100%|██████████| 7/7 [00:01<00:00, 4.83it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 8.52it/s]
all 20 21 0.0035 1 0.753 0.548 0.0035 1 0.753 0.499

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
3/300 1.63G 0.5945 1.036 1.887 1.12 3 640: 100%|██████████| 7/7 [00:01<00:00, 4.93it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 8.46it/s]
all 20 21 0.0035 1 0.886 0.581 0.0035 1 0.886 0.616

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
4/300 1.62G 0.5529 0.7711 1.401 1.065 7 640: 100%|██████████| 7/7 [00:01<00:00, 5.25it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 7.88it/s]
all 20 21 0.606 0.952 0.819 0.582 0.606 0.952 0.819 0.641
...
Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
295/300 1.61G 0.1653 0.1045 0.2493 0.8894 3 640: 100%|██████████| 7/7 [00:01<00:00, 5.45it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 9.24it/s]
all 20 21 0.982 1 0.995 0.926 0.982 1 0.995 0.912

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
296/300 1.63G 0.2021 0.1132 0.2765 0.895 2 640: 100%|██████████| 7/7 [00:01<00:00, 5.70it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 9.00it/s]
all 20 21 0.974 1 0.995 0.933 0.974 1 0.995 0.917

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
297/300 1.63G 0.1777 0.1075 0.2616 0.8351 2 640: 100%|██████████| 7/7 [00:01<00:00, 5.51it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 7.61it/s]
all 20 21 0.969 1 0.995 0.927 0.969 1 0.995 0.915

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
298/300 1.63G 0.1736 0.1195 0.2461 0.8778 2 640: 100%|██████████| 7/7 [00:01<00:00, 5.64it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 8.73it/s]
all 20 21 0.968 1 0.995 0.935 0.968 1 0.995 0.916

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
299/300 1.61G 0.2036 0.6821 0.2815 0.8217 2 640: 100%|██████████| 7/7 [00:01<00:00, 5.33it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 8.39it/s]
all 20 21 0.962 1 0.995 0.933 0.962 1 0.995 0.905

Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size
300/300 1.63G 0.155 0.1236 0.2409 0.8592 2 640: 100%|██████████| 7/7 [00:01<00:00, 5.38it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 8.70it/s]
all 20 21 0.977 1 0.995 0.927 0.977 1 0.995 0.917

300 epochs completed in 0.181 hours.
Optimizer stripped from runs/segment/train6/weights/last.pt, 6.8MB
Optimizer stripped from runs/segment/train6/weights/best.pt, 6.8MB

Validating runs/segment/train6/weights/best.pt...
Ultralytics YOLOv8.2.41 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
CUDA:1 (NVIDIA GeForce RTX 3090, 24260MiB)
YOLOv8n-seg summary (fused): 195 layers, 3258259 parameters, 0 gradients, 12.0 GFLOPs
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:00<00:00, 9.32it/s]
all 20 21 0.995 1 0.995 0.948 0.995 1 0.995 0.944
Speed: 0.2ms preprocess, 1.5ms inference, 0.0ms loss, 0.9ms postprocess per image
Results saved to runs/segment/train6
Ultralytics YOLOv8.2.41 🚀 Python-3.9.18 torch-2.1.2+cu121 CUDA:0 (NVIDIA GeForce RTX 3090, 24260MiB)
CUDA:1 (NVIDIA GeForce RTX 3090, 24260MiB)
YOLOv8n-seg summary (fused): 195 layers, 3258259 parameters, 0 gradients, 12.0 GFLOPs
val: Scanning /exp/work/video/ultralytics/datasets/belt_seg/data/val/labels.cache... 20 images, 0 backgrounds, 0 corrupt: 100%|██████████| 20/20 [00:00<?, ?it/s]
Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 2/2 [00:01<00:00, 1.42it/s]
all 20 21 0.995 1 0.995 0.948 0.995 1 0.995 0.944
Speed: 0.3ms preprocess, 24.2ms inference, 0.0ms loss, 32.2ms postprocess per image
Results saved to runs/segment/train62
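
After 300 epochs on 100 training images, best.pt reaches box mAP50-95 ≈ 0.948 and mask mAP50-95 ≈ 0.944; the validation split is only 20 images, though, so treat these numbers as optimistic.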

Prediction

Imports, model loading & GPU setup:

import cv2
import time

from ultralytics import YOLO

model2 = YOLO('runs/segment/train6/weights/best.pt')
t = time.time()
results = model2.predict('video/belt.mp4', stream=False, save=False, device=[0])

for res in results:
    print(res)
print(time.time() - t)
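
Each Results object exposes the predictions directly. A short sketch (assuming at least one mask is present) of reading out each instance's polygon and centroid, mirroring what the streaming code below does:

for res in results:
    if res.masks is None:
        continue
    for cls_id, poly in zip(res.boxes.cls.tolist(), res.masks.xy):
        # poly is an (N, 2) numpy array of mask-outline points in pixel coordinates
        cx, cy = poly[:, 0].mean(), poly[:, 1].mean()
        print(res.names[int(cls_id)], round(float(cx), 1), round(float(cy), 1))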

Media Stream Pulling & Pushing

Main entry (multiprocessing):

import cv2
import time
import threading
import multiprocessing
import subprocess as sp

from ultralytics import YOLO
from visualize.segment import *
from settings import *


class StreamingSegmentServer:
    def __init__(self, stream_src, stream_dst, cordon):
        self.pipe = None
        self.model = None
        self.capture = None

        self.fps = 0
        self.width = 0
        self.height = 0
        self.frame_id = 0
        self.solve_id = 0

        self.cordon = cordon
        self.stream_src = stream_src
        self.stream_dst = stream_dst

        # Geometric center and average width of the safety zone
        self.center = self.calculate_geometric_center()
        self.avg_width = (self.cordon[3][0] - self.cordon[0][0] + self.cordon[2][0] - self.cordon[1][0]) / 2

    def calculate_geometric_center(self):
        if len(self.cordon) == 0:
            return None
        sum_x = sum([point[0] for point in self.cordon])
        sum_y = sum([point[1] for point in self.cordon])
        center_x = sum_x / len(self.cordon)
        center_y = sum_y / len(self.cordon)
        return center_x, center_y

    def run(self):
        framequeue = multiprocessing.Queue(10)
        self.capture = cv2.VideoCapture(self.stream_src)
        self.fps = int(self.capture.get(cv2.CAP_PROP_FPS))
        self.width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # ffmpeg process that re-encodes raw BGR frames and pushes them out over RTSP
        command = ['ffmpeg',
                   '-y',
                   '-f', 'rawvideo',
                   '-vcodec', 'rawvideo',
                   '-pix_fmt', 'bgr24',
                   '-s', "{}x{}".format(self.width, self.height),
                   '-r', str(self.fps),
                   '-i', '-',
                   '-c:v', 'libx264',
                   '-pix_fmt', 'yuv420p',
                   '-preset', 'ultrafast',
                   '-f', 'rtsp',
                   self.stream_dst]
        pipe = sp.Popen(command, stdin=sp.PIPE)

        capture_thread = threading.Thread(target=self.capture_video, args=(framequeue,))
        capture_thread.start()

        # Shared counter that keeps output frames in order across worker processes
        solve_id = multiprocessing.Value('i', 1)
        for _ in range(worker.get('worker_threads')):
            process_thread = multiprocessing.Process(target=self.process_video, args=(pipe, framequeue, solve_id))
            process_thread.start()

    def capture_video(self, framequeue):
        while True:
            if framequeue.full():
                time.sleep(0.05)
            else:
                ret, frame = self.capture.read()
                if not ret:
                    continue
                image_bgr = frame
                self.frame_id += 1
                framequeue.put((image_bgr, self.frame_id))

    def process_video(self, pipe, framequeue, solve_id):
        model = YOLO('runs/segment/train6/weights/best.pt')
        while True:
            if not framequeue.empty():
                frame_bgr, frame_id = framequeue.get()
                results = model.predict(
                    frame_bgr,
                    stream=False,
                    save=False,
                    device=[0],
                    verbose=False
                )
                names = results[0].names
                clses = [int(i) for i in results[0].boxes.cls.tolist()]
                masks = [i.tolist() for i in results[0].masks.xy]
                name_set = [names.get(i) for i in clses]

                frame_bgr = visualize_seg(
                    frame_bgr,
                    masks,
                    name_set,
                    self.cordon,
                    self.center,
                    self.avg_width)

                # Busy-wait until it is this frame's turn, so output frames stay in order
                while True:
                    time.sleep(0.0001)
                    if solve_id.value == frame_id:
                        pipe.stdin.write(frame_bgr.tobytes())
                        solve_id.value += 1
                        break


if __name__ == '__main__':
    rtsp_url = 'rtsp://192.168.9.164:8554/video'
    rtsp_stream = 'rtsp://192.168.9.164:8554/live'
    outline = [[268, 4], [145, 311],..., [482, 311], [358, 4]]  # custom safety zone
    sss = StreamingSegmentServer(rtsp_url, rtsp_stream, outline)
    sss.run()
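
Note that ffmpeg here pushes to an RTSP URL rather than serving one, so an RTSP server (e.g., mediamtx / rtsp-simple-server) is assumed to be listening at stream_dst; stream_src can be anything OpenCV's VideoCapture accepts.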

Visualization & deviation algorithm (compute the x-axis offset between the segmentation mask and the center of the safety zone):

import os
import cv2
import math
import numpy as np
from PIL import Image, ImageDraw, ImageFile, ImageFont

from settings import PR

font_file = os.path.join(PR, 'visualize/SourceHanSansCN-Medium.otf')
ImageFile.LOAD_TRUNCATED_IMAGES = True


def calculate_geometric_center(points):
    if len(points) == 0:
        return None
    sum_x = sum([point[0] for point in points])
    sum_y = sum([point[1] for point in points])
    center_x = sum_x / len(points)
    center_y = sum_y / len(points)
    return center_x, center_y


def visualize_seg(im, masks, names, cordon, center, avg_width):
    if isinstance(im, str):
        im = Image.open(im)
        im = np.ascontiguousarray(np.copy(im))
        im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    else:
        im = np.ascontiguousarray(np.copy(im))

    im = Image.fromarray(im)
    im = im.convert('RGBA')
    size = im.size

    layer = Image.new('RGBA', size)
    draw = ImageDraw.Draw(im)
    draw2 = ImageDraw.Draw(layer)

    for i, name in enumerate(names):
        mask = masks[i]
        mask = [tuple(j) for j in mask]
        text = name
        center_rt = calculate_geometric_center(mask)
        # offset = math.sqrt(pow(center_rt[0] - center[0], 2) + pow(center_rt[1] - center[1], 2)) / avg_width  # Euclidean-distance variant of the offset
        offset = (center_rt[0] - center[0]) / avg_width

        draw2.polygon(mask,
                      fill=(128, 0, 128, 160),
                      outline=(255, 255, 255, 255))
        im.paste(layer, mask=layer)
        draw.text(
            (mask[0][0], mask[0][1]),
            text,
            font=ImageFont.truetype(font_file, size=13),
            fill=(0, 0, 0, 255))
        draw.text(
            (mask[0][0], mask[0][1] + 20),
            'offset: ' + str(offset),
            font=ImageFont.truetype(font_file, size=13),
            fill=(0, 0, 0, 255))

    draw.polygon([tuple(i) for i in cordon],
                 fill=None,
                 outline=(255, 255, 0, 255))
    im = im.convert('RGB')
    im = np.ascontiguousarray(np.copy(im))
    return im
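
As a worked example of the offset formula (using only the four visible corners of the safety zone from the main program; the elided points are ignored): the zone's center x is (268 + 145 + 482 + 358) / 4 = 313.25 and avg_width = ((358 - 268) + (482 - 145)) / 2 = 213.5. A mask centroid at x = 356 then gives offset = (356 - 313.25) / 213.5 ≈ 0.20, i.e. the belt sits roughly 20% of the zone's average width to the right of center; a threshold on |offset| can then drive a deviation alarm.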

Configuration

import os

PR = os.path.dirname(__file__)

worker = {
    'worker_threads': 12,
    'worker_queue_len': 10
}
