yolo

yolov8图像识别

官方网址

https://docs.ultralytics.com/

安装


# 使用清华大学源加速
# https://pypi.tuna.tsinghua.edu.cn/simple/ 

# 查看已经安装的模块
pip list

# 卸载模块
pip uninstall <package-Name>

# 安装opencv (注意: 包名是 opencv-python, 不是 python-opencv)
pip install opencv-python -i https://pypi.tuna.tsinghua.edu.cn/simple/

# 安装opencv 扩展
pip install opencv-contrib-python  -i https://pypi.tuna.tsinghua.edu.cn/simple/

# 安装pytorch
# https://pytorch.org/get-started/locally/
pip install torch torchvision torchaudio -i https://pypi.tuna.tsinghua.edu.cn/simple/

# 安装yolov8
# https://docs.ultralytics.com/quickstart/
pip install ultralytics -i https://pypi.tuna.tsinghua.edu.cn/simple/

使用命令行

# 命令行格式
yolo TASK MODE ARGS

# TASK:[detect : 侦测], [segment :分割], [classify :分类], [pose :姿态]
# MODE :[train:训练], [val:验证], [predict:预测/测试], [export:导出], [track:跟踪]
# 使用yolov8n.pt 预测图片
yolo detect predict model="./yolo/yolov8n.pt" source="./images/1.jpg"

# 使用yolov8n-seg.pt 分割图片
yolo segment predict model="./yolo/yolov8n-seg.pt" source="./images/1.jpg"

训练模型

# 使用coco128数据集进行模型训练
yolo detect train data=./yolo/coco128.yaml model=./yolo/yolov8n.pt epochs=100 imgsz=640
# 数据集实际上是一个yaml配置文件
# 文件示例:https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/datasets
# coco128数据集

path: ../datasets/coco128 # dataset root dir
train: images/train2017 # train images (relative to 'path') 128 images
val: images/train2017 # val images (relative to 'path') 128 images
test: # test images (optional)

# Classes
names:
  0: person
  1: bicycle
 
download: https://ultralytics.com/assets/coco128.zip

python代码

from ultralytics import YOLO

# Load a pretrained detection model and fine-tune it on the COCO128 dataset.
model = YOLO('./yolo/yolov8n.pt')  # load a pretrained model (recommended for training)
# NOTE(review): the data path here (./images/coco128.yaml) differs from the CLI
# example above (./yolo/coco128.yaml) — verify which location actually holds the yaml.
results = model.train(data='./images/coco128.yaml', epochs=100, imgsz=640)

yolo读取视频并检测&cv

这是一段示例代码,用于跑通基础验证,不可用于生产,除非你的视频路数少于4路



from time import sleep
import cv2 as cv, torch
from ultralytics import YOLO
import logging
import time

# Configure root logging: timestamped INFO-level messages to the console.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
log = logging.getLogger(__name__)

# log.info("-- OpenCV Info --")
# log.info(cv.getBuildInformation())
# log.info("-" * 50)

# device = "cuda:0" if torch.cuda.is_available() else "cpu"
# log.info(f"Using device: {device}")

# Load the YOLO model weights and pin the inference image size.
yolo_model = YOLO("./yolo_model/yolo11s.pt")
image_size = 640
yolo_model.overrides["imgsz"] = image_size

# Subset of COCO class ids to detect (id -> human-readable label).
# Only the keys are passed to the model as a class filter below.
cls_map = {
    0: "person",
    1: "bicycle",
    2: "car",
    3: "motorcycle",
    5: "bus",
    7: "truck",
}

# Target display frame rate.
FPS_TARGET = 12
# Minimum interval between displayed frames (seconds).
T_INTERVAL = 1.0 / FPS_TARGET
# Timestamp of the last processed frame (used by the commented-out throttle).
last_t = time.time()
# Run detection only once every `detect_every` frames.
detect_every = 3
# Frame counter used for the detection throttle.
detect_count = 0

# Open the demo video stream over HTTP.
cap = cv.VideoCapture(
    "https://smart.saas.vppark.cn/oss/1.mp4",
)
 

# Main loop: read frames, run detection every `detect_every` frames,
# map boxes back to the original resolution, and display the result.
# Press 'q' in the window to quit.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # End of stream or read failure — stop processing.
        break

    detect_count += 1
    # Only run the (expensive) detector every `detect_every` frames;
    # the frames in between are displayed untouched.
    if detect_count % detect_every == 0:
        # Manually resize so the frame fits inside image_size x image_size
        # while preserving the aspect ratio; `scale` is remembered so the
        # detections can be mapped back onto the full-size frame.
        h0, w0 = frame.shape[:2]
        scale = min(image_size / h0, image_size / w0)
        h1, w1 = int(h0 * scale), int(w0 * scale)
        frame_resize = cv.resize(frame, (w1, h1), interpolation=cv.INTER_LINEAR)

        # Run inference, restricted to the class ids we care about.
        results = yolo_model(
            frame_resize, imgsz=image_size, classes=list(cls_map.keys())
        )
        # Scale the box coordinates back to the original resolution and
        # draw them on the original (unresized) frame.
        for result in results:
            boxes_data = result.boxes.data.clone()
            boxes_data[..., :4] /= scale
            result.boxes.data = boxes_data
            react_frame = result.plot(img=frame, line_width=2)
            cv.imshow("frame", react_frame)
    else:
        # Non-detection frame: show it as-is.
        cv.imshow("frame", frame)
    if cv.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv.destroyAllWindows()