0
点赞
收藏
分享

微信扫一扫

66、python爬虫数据和预处理标签数据和yolov8训练

高子歌 2024-01-03 阅读 11


基本思想:需要使用爬虫代码,预先爬取一些数据并进行标注,这里只做简单记录,不做具体意图探讨

一、爬虫数据,然后进行部分筛选

# -*- coding: utf-8 -*-
import requests
import os
import re


def get_images_from_baidu(keyword, page_num, save_dir, timeout=10):
    """Download thumbnail images from Baidu image search.

    Fetches ``page_num`` result pages (30 thumbnails per page) for
    ``keyword`` from Baidu's ``acjson`` endpoint and stores them as
    sequentially numbered JPEG files (``000000.jpg``, ``000001.jpg``, ...)
    in ``save_dir``.

    :param keyword: search term, used for both ``queryWord`` and ``word``.
    :param page_num: number of result pages to scrape (30 images each).
    :param save_dir: output directory; created if it does not exist.
    :param timeout: per-request timeout in seconds (default 10) so a
        stalled connection cannot hang the scrape forever.
    """
    # UA spoofing: pretend to be a regular browser so the endpoint
    # returns the normal JSON payload.
    # (Find a real header via F12 -> Network -> any request -> Response Headers.)
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
    # Endpoint used by Baidu image search for paginated JSON results.
    url = 'https://image.baidu.com/search/acjson?'

    # Create the target directory once, up front (previously re-checked
    # inside the per-page download loop).
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    n = 0  # running index used to name saved files
    for pn in range(0, 30 * page_num, 30):
        # Request parameters reverse-engineered from the browser request.
        param = {'tn': 'resultjson_com',
                 # 'logid': '7603311155072595725',
                 'ipn': 'rj',
                 'ct': 201326592,
                 'is': '',
                 'fp': 'result',
                 'queryWord': keyword,
                 'cl': 2,
                 'lm': -1,
                 'ie': 'utf-8',
                 'oe': 'utf-8',
                 'adpicid': '',
                 'st': -1,
                 'z': '',
                 'ic': '',
                 'hd': '',
                 'latest': '',
                 'copyright': '',
                 'word': keyword,
                 's': '',
                 'se': '',
                 'tab': '',
                 'width': '',
                 'height': '',
                 'face': 0,
                 'istype': 2,
                 'qc': '',
                 'nc': '1',
                 'fr': '',
                 'expermode': '',
                 'force': '',
                 'cg': '',  # undocumented but required parameter
                 'pn': pn,  # result offset: 30-60-90...
                 'rn': '30',  # 30 results per page
                 'gsm': '1e',
                 '1618827096642': ''
                 }
        try:
            response = requests.get(url=url, headers=header, params=param,
                                    timeout=timeout)
        except requests.RequestException as exc:
            print(f'Page request failed (pn={pn}): {exc}')
            continue
        if response.status_code != 200:
            # Previously a non-200 page was still parsed; skip it instead.
            print(f'Request failed with status {response.status_code}.')
            continue
        print('Request success.')
        response.encoding = 'utf-8'
        # Extract thumbnail URLs from the raw response with a regex.
        image_url_list = re.findall('"thumbURL":"(.*?)",', response.text, re.S)
        print(image_url_list)

        for image_url in image_url_list:
            # One broken thumbnail must not abort the whole scrape.
            try:
                image_data = requests.get(url=image_url, headers=header,
                                          timeout=timeout).content
            except requests.RequestException as exc:
                print(f'Image download failed ({image_url}): {exc}')
                continue
            with open(os.path.join(save_dir, f'{n:06d}.jpg'), 'wb') as fp:
                fp.write(image_data)
            n = n + 1


if __name__ == '__main__':
    # Scrape thumbnails for the search term into C:\Downloads\<keyword>.
    search_term = '蓝色工业箱子'
    download_root = "C:\\Downloads"
    target_dir = os.path.join(download_root, search_term)
    # Each page actually holds 30 images, so this downloads page_num * 30 files.
    page_num = 60
    get_images_from_baidu(search_term, page_num, target_dir)
    print('Get images finished.')

二、将条形码贴到白底的图片上,生成labelImg标签

# Paste barcode crops onto white 1920x1080 canvases and write a
# labelImg/Pascal-VOC XML annotation file next to each generated image.
import glob
import os.path
import random
from xml.dom.minidom import Document

dest_dir = "output"
# Canvas (and therefore annotation) dimensions.
label_w = 1920
label_h = 1080
# Directory holding the five source barcode crops: 0.png ... 4.png.
arcode_root = "/home/ubuntu/PycharmProjects/pythonProject9/arcode_dir"


def _append_text(doc, parent, tag, text):
    """Append ``<tag>text</tag>`` under *parent*; return the new element."""
    el = doc.createElement(tag)
    el.appendChild(doc.createTextNode(str(text)))
    parent.appendChild(el)
    return el


def build_voc_annotation(filename, img_w, img_h, xmin, ymin, xmax, ymax,
                         name='2'):
    """Build a Pascal-VOC (labelImg) annotation Document for one box.

    :param filename: image file name written to <filename> and <path>.
    :param img_w: image width written to <size>/<width>.
    :param img_h: image height written to <size>/<height>.
    :param xmin, ymin, xmax, ymax: bounding box corners in pixels.
    :param name: class label written to <object>/<name> (default '2',
        matching the original script's hard-coded class).
    :return: an ``xml.dom.minidom.Document`` ready for ``writexml``.
    """
    doc = Document()
    annotation = doc.createElement('annotation')
    doc.appendChild(annotation)

    _append_text(doc, annotation, 'folder', 'JPEGImages')
    _append_text(doc, annotation, 'filename', filename)
    _append_text(doc, annotation, 'path', filename)

    source = doc.createElement('source')
    # NOTE: 'Unknow' (sic) kept byte-for-byte from the original output.
    _append_text(doc, source, 'database', 'Unknow')
    annotation.appendChild(source)

    size = doc.createElement('size')
    _append_text(doc, size, 'width', img_w)
    _append_text(doc, size, 'height', img_h)
    _append_text(doc, size, 'depth', 3)
    annotation.appendChild(size)

    _append_text(doc, annotation, 'segmented', 0)

    obj = doc.createElement('object')
    _append_text(doc, obj, 'name', name)
    _append_text(doc, obj, 'pose', 'Unspecified')
    _append_text(doc, obj, 'truncated', 0)
    bndbox = doc.createElement('bndbox')
    for tag, value in (('xmin', xmin), ('ymin', ymin),
                       ('xmax', xmax), ('ymax', ymax)):
        _append_text(doc, bndbox, tag, value)
    obj.appendChild(bndbox)
    annotation.appendChild(obj)
    return doc


def main(num_images=500):
    """Generate *num_images* synthetic images plus VOC XML labels."""
    # PIL is only needed for the image generation itself; imported locally
    # so the XML helper stays importable without Pillow installed.
    from PIL import Image

    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)

    for idx in range(num_images):
        arcode_id = idx % 5  # cycle through the 5 barcode source images
        image_path = os.path.join(arcode_root, str(arcode_id) + ".png")
        patch = Image.open(image_path)
        w_, h_ = patch.size
        print((h_, w_))

        # Build the white canvas directly in memory as RGB. The original
        # wrote a single-channel template.jpg to disk and re-read it, so
        # the canvas opened as grayscale and the pasted barcode silently
        # lost its color while the XML still claimed depth=3.
        canvas = Image.new('RGB', (label_w, label_h), (255, 255, 255))

        # Random top-left position that keeps the patch fully inside.
        x_ = random.randint(0, label_w - w_)
        y_ = random.randint(0, label_h - h_)
        canvas.paste(patch, (x_, y_))

        image_name = str(idx) + "_arcode.jpg"
        canvas.save(os.path.join(dest_dir, image_name))

        doc = build_voc_annotation(image_name, label_w, label_h,
                                   x_, y_, x_ + w_, y_ + h_)
        # Open with an explicit utf-8 encoding to match the declaration
        # writexml emits, and close deterministically via the context manager.
        with open(os.path.join(dest_dir, str(idx) + '_arcode.xml'),
                  'w', encoding='utf-8') as f:
            doc.writexml(f, indent='\t', newl='\n', addindent='\t',
                         encoding='utf-8')


if __name__ == '__main__':
    main()

三、数据集组织方式和yolov5方式是一样的

数据集组织方式可参考《32、YOLO5训练自己的模型以及转ncnn模型》,然后训练模型

ubuntu@ubuntu:~/ultralytics$ yolo task=detect mode=train model=/home/ubuntu/ultralytics/yolov8n.pt epochs=1000 batch=16 data=/home/ubuntu/ultralytics/ultralytics/datasets/trainData.yaml

四、转onnx

ubuntu@ubuntu:~/ultralytics$ python3 export.py --weights /home/ubuntu/ultralytics/runs/detect/train/weights/best.pt --include  onnx

五、转TensorRT

ubuntu@ubuntu:~/NVIDIA_CUDA-11.1_Samples/TensorRT-8.6.1.6/bin$ ./trtexec --onnx=/home/ubuntu/ultralytics/runs/detect/train/weights/best.onnx --saveEngine=/home/ubuntu/ultralytics/runs/detect/train/weights/best.engine

测试代码:待补充

举报

相关推荐

0 条评论