Day 5 of Learning PySide2

2023-10-27 18:38
Tags: pyside2, day 5


After five days of study, I have built an interface.

I call it yolox-qt 1.0.

Here is what the interface looks like. (Screenshot omitted.)

Now let's test the features. (Screenshots omitted.)

When a run finishes, the input path is cleared automatically. (Screenshot omitted.)

The features basically work.

During testing I did find one problem: the path used when displaying the result image is hard-coded. You can improve that yourselves; one possible fix is sketched right after the UI code below.

First, the interface code. It was generated from 1.ui with pyuic5 and then extended by hand (note that despite the series title, the generated file imports PyQt5, not PySide2):

# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '1.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.

import os
import sys
import time

import pyautogui
import pyperclip
from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_Form(object):
    def setupUi(self, Form):
        Form.setObjectName("Form")
        Form.resize(716, 598)
        # Text box for the image path
        self.plainTextEdit = QtWidgets.QPlainTextEdit(Form)
        self.plainTextEdit.setGeometry(QtCore.QRect(180, 110, 381, 31))
        self.plainTextEdit.setObjectName("plainTextEdit")
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(10, 110, 181, 20))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(160, 0, 251, 61))
        self.label_2.setAlignment(
            QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
        )
        self.label_2.setObjectName("label_2")
        # Label that shows the detection result image
        self.label_3 = QtWidgets.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(50, 310, 471, 231))
        self.label_3.setObjectName("label_3")
        # Radio buttons for choosing the model
        self.widget = QtWidgets.QWidget(Form)
        self.widget.setGeometry(QtCore.QRect(40, 170, 341, 43))
        self.widget.setObjectName("widget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.radioButton = QtWidgets.QRadioButton(self.widget)
        self.radioButton.setMinimumSize(QtCore.QSize(91, 41))
        self.radioButton.setObjectName("radioButton")
        self.buttonGroup = QtWidgets.QButtonGroup(Form)
        self.buttonGroup.setObjectName("buttonGroup")
        self.buttonGroup.addButton(self.radioButton)
        self.horizontalLayout.addWidget(self.radioButton)
        self.radioButton_2 = QtWidgets.QRadioButton(self.widget)
        self.radioButton_2.setObjectName("radioButton_2")
        self.buttonGroup.addButton(self.radioButton_2)
        self.horizontalLayout.addWidget(self.radioButton_2)
        # Test / show / quit buttons
        self.widget1 = QtWidgets.QWidget(Form)
        self.widget1.setGeometry(QtCore.QRect(40, 220, 401, 71))
        self.widget1.setObjectName("widget1")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget1)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.pushButton = QtWidgets.QPushButton(self.widget1)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout_2.addWidget(self.pushButton)
        self.pushButton_2 = QtWidgets.QPushButton(self.widget1)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout_2.addWidget(self.pushButton_2)
        self.pushButton_3 = QtWidgets.QPushButton(self.widget1)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout_2.addWidget(self.pushButton_3)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "请输入  图片  的  路径"))  # "enter the image path"
        self.label_2.setText(_translate("Form", "       yolox     可视化    界面 "))  # "yolox visualisation UI"
        self.label_3.setText(_translate("Form", "                           显示界面中"))  # "display area"
        self.radioButton.setText(_translate("Form", "yolox_s"))
        self.radioButton_2.setText(_translate("Form", "yolox_m"))
        self.buttonGroup.buttonClicked.connect(self.qz)
        self.pushButton.setText(_translate("Form", "测试"))  # "test"
        self.pushButton.clicked.connect(self.ceshi)
        self.pushButton_2.setText(_translate("Form", "显示"))  # "show"
        self.pushButton_2.clicked.connect(self.xianshi)
        self.pushButton_3.setText(_translate("Form", "关闭软件"))  # "quit"
        self.pushButton_3.clicked.connect(self.guanbi)

    def ceshi(self):
        """Run detection by typing the demo.py command into a cmd window."""
        global a, c
        self.pushButton.setEnabled(False)
        print("测试中")  # testing...
        app_dir = r"C:\Windows\system32\cmd.exe"
        b = self.plainTextEdit.toPlainText()  # e.g. C:/Users/pc/Desktop/new/qt/yolox/dog.jpg
        # Build the command list for the selected model
        if a == 1:
            cmds = ["python demo.py image -n yolox-s -c C:/Users/pc/Desktop/new/qt/yolox/w/yolox_s.pth --path " + b + " --conf 0.25 --nms 0.45 --tsize 320 --save_result --device gpu"]
        elif a == 2:
            cmds = ["python demo.py image -n yolox-m -c C:/Users/pc/Desktop/new/qt/yolox/w/yolox_m.pth --path " + b + " --conf 0.25 --nms 0.45 --tsize 640 --save_result --device gpu"]
        else:
            # No model selected yet, so there is no command to run
            QtWidgets.QMessageBox.critical(widget, "错误", "请先选择模型")
            self.pushButton.setEnabled(True)
            return
        # Open the Windows cmd application
        time.sleep(5)
        os.startfile(app_dir)
        pyautogui.typewrite(["enter"], 0.25)
        # Get the target coordinates from the current mouse position
        # (the mouse must be hovering over the cmd window)
        x, y = pyautogui.position()
        # Enter each command
        for i in cmds:
            # CMD cannot receive Chinese text via typewrite,
            # so copy the command and paste it with a right-click
            pyperclip.copy(i)
            pyautogui.click(x=x, y=y, button='right')
            time.sleep(0.5)
            pyautogui.typewrite(["enter"], 0.25)
        os.system("taskkill /f /im cmd.exe")
        # Clear the path automatically once the run is done
        self.plainTextEdit.clear()
        c = 1
        self.pushButton.setEnabled(True)
        print("训练完成")  # run finished
        return c

    def xianshi(self):
        """Show the saved result image. NOTE: the paths below are hard-coded."""
        global a, c
        print("显示")  # show
        if c == 1:
            if a == 1:
                print("显示s")
                jpg = QtGui.QPixmap(
                    "C:\\Users\\31319\\Desktop\\qt\\yolox\\YOLOX_outputs\\yolox_s\\vis_res\\2022\\dog.jpg"
                ).scaled(310, 200)
                self.label_3.setPixmap(jpg)
            elif a == 2:
                print("显示m")
                jpg = QtGui.QPixmap(
                    "C:\\Users\\31319\\Desktop\\qt\\yolox\\YOLOX_outputs\\yolox_m\\vis_res\\2022\\dog.jpg"
                ).scaled(310, 200)
                self.label_3.setPixmap(jpg)
        else:
            QtWidgets.QMessageBox.critical(widget, "错误", "你还没有测试")  # "you have not run a test yet"

    def guanbi(self):
        print("软件已关闭")  # closed
        sys.exit(0)

    def qz(self):
        """Remember which model the user picked."""
        global a
        self.neir = self.buttonGroup.checkedButton().text()
        print("你选择的模型是", self.neir)  # selected model
        if self.neir == 'yolox_s':
            a = 1
        elif self.neir == 'yolox_m':
            a = 2
        return a


if __name__ == "__main__":
    c = 0  # becomes 1 once a test run has finished
    a = 3  # 1 = yolox_s, 2 = yolox_m, 3 = nothing selected yet
    app = QtWidgets.QApplication(sys.argv)
    widget = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(widget)
    widget.show()
    sys.exit(app.exec_())
Next, the detection script. This is essentially tools/demo.py from the YOLOX repository, with the save-folder name simplified to the year:

#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import argparse
import os
import time

from loguru import logger

import cv2
import torch

from yolox.data.data_augment import ValTransform
from yolox.data.datasets import COCO_CLASSES
from yolox.exp import get_exp
from yolox.utils import fuse_model, get_model_info, postprocess, vis

IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]


def make_parser():
    parser = argparse.ArgumentParser("YOLOX Demo!")
    parser.add_argument("demo", default="image", help="demo type, eg. image, video and webcam")
    parser.add_argument("-expn", "--experiment-name", type=str, default=None)
    parser.add_argument("-n", "--name", type=str, default=None, help="model name")
    parser.add_argument("--path", default="./assets/dog.jpg", help="path to images or video")
    parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id")
    parser.add_argument(
        "--save_result",
        action="store_true",
        help="whether to save the inference result of image/video",
    )
    # exp file
    parser.add_argument(
        "-f",
        "--exp_file",
        default=None,
        type=str,
        help="pls input your experiment description file",
    )
    parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
    parser.add_argument(
        "--device",
        default="cpu",
        type=str,
        help="device to run our model, can either be cpu or gpu",
    )
    parser.add_argument("--conf", default=0.3, type=float, help="test conf")
    parser.add_argument("--nms", default=0.3, type=float, help="test nms threshold")
    parser.add_argument("--tsize", default=None, type=int, help="test img size")
    parser.add_argument(
        "--fp16",
        dest="fp16",
        default=False,
        action="store_true",
        help="Adopting mix precision evaluating.",
    )
    parser.add_argument(
        "--legacy",
        dest="legacy",
        default=False,
        action="store_true",
        help="To be compatible with older versions",
    )
    parser.add_argument(
        "--fuse",
        dest="fuse",
        default=False,
        action="store_true",
        help="Fuse conv and bn for testing.",
    )
    parser.add_argument(
        "--trt",
        dest="trt",
        default=False,
        action="store_true",
        help="Using TensorRT model for testing.",
    )
    return parser


def get_image_list(path):
    image_names = []
    for maindir, subdir, file_name_list in os.walk(path):
        for filename in file_name_list:
            apath = os.path.join(maindir, filename)
            ext = os.path.splitext(apath)[1]
            if ext in IMAGE_EXT:
                image_names.append(apath)
    return image_names


class Predictor(object):
    def __init__(
        self,
        model,
        exp,
        cls_names=COCO_CLASSES,
        trt_file=None,
        decoder=None,
        device="cpu",
        fp16=False,
        legacy=False,
    ):
        self.model = model
        self.cls_names = cls_names
        self.decoder = decoder
        self.num_classes = exp.num_classes
        self.confthre = exp.test_conf
        self.nmsthre = exp.nmsthre
        self.test_size = exp.test_size
        self.device = device
        self.fp16 = fp16
        self.preproc = ValTransform(legacy=legacy)
        if trt_file is not None:
            from torch2trt import TRTModule

            model_trt = TRTModule()
            model_trt.load_state_dict(torch.load(trt_file))

            x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
            self.model(x)
            self.model = model_trt

    def inference(self, img):
        img_info = {"id": 0}
        if isinstance(img, str):
            img_info["file_name"] = os.path.basename(img)
            img = cv2.imread(img)
        else:
            img_info["file_name"] = None

        height, width = img.shape[:2]
        img_info["height"] = height
        img_info["width"] = width
        img_info["raw_img"] = img

        ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])
        img_info["ratio"] = ratio

        img, _ = self.preproc(img, None, self.test_size)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.float()
        if self.device == "gpu":
            img = img.cuda()
            if self.fp16:
                img = img.half()  # to FP16

        with torch.no_grad():
            t0 = time.time()
            outputs = self.model(img)
            if self.decoder is not None:
                outputs = self.decoder(outputs, dtype=outputs.type())
            outputs = postprocess(
                outputs, self.num_classes, self.confthre,
                self.nmsthre, class_agnostic=True
            )
            logger.info("Infer time: {:.4f}s".format(time.time() - t0))
        return outputs, img_info

    def visual(self, output, img_info, cls_conf=0.35):
        ratio = img_info["ratio"]
        img = img_info["raw_img"]
        if output is None:
            return img
        output = output.cpu()

        bboxes = output[:, 0:4]

        # preprocessing: resize
        bboxes /= ratio

        cls = output[:, 6]
        scores = output[:, 4] * output[:, 5]

        vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
        return vis_res


def image_demo(predictor, vis_folder, path, current_time, save_result):
    if os.path.isdir(path):
        files = get_image_list(path)
    else:
        files = [path]
    files.sort()
    for image_name in files:
        outputs, img_info = predictor.inference(image_name)
        result_image = predictor.visual(outputs[0], img_info, predictor.confthre)
        if save_result:
            save_folder = os.path.join(vis_folder, time.strftime("%Y", current_time))
            os.makedirs(save_folder, exist_ok=True)
            save_file_name = os.path.join(save_folder, os.path.basename(image_name))
            logger.info("Saving detection result in {}".format(save_file_name))
            cv2.imwrite(save_file_name, result_image)
        ch = cv2.waitKey(0)
        if ch == 27 or ch == ord("q") or ch == ord("Q"):
            break


def imageflow_demo(predictor, vis_folder, current_time, args):
    cap = cv2.VideoCapture(args.path if args.demo == "video" else args.camid)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
    fps = cap.get(cv2.CAP_PROP_FPS)
    save_folder = os.path.join(vis_folder, time.strftime("%Y_%m_%d", current_time))
    os.makedirs(save_folder, exist_ok=True)
    if args.demo == "video":
        save_path = os.path.join(save_folder, args.path.split("/")[-1])
    else:
        save_path = os.path.join(save_folder, "camera.mp4")
    logger.info(f"video save_path is {save_path}")
    vid_writer = cv2.VideoWriter(
        save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
    )
    while True:
        ret_val, frame = cap.read()
        if ret_val:
            outputs, img_info = predictor.inference(frame)
            result_frame = predictor.visual(outputs[0], img_info, predictor.confthre)
            if args.save_result:
                vid_writer.write(result_frame)
            ch = cv2.waitKey(1)
            if ch == 27 or ch == ord("q") or ch == ord("Q"):
                break
        else:
            break


def main(exp, args):
    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    file_name = os.path.join(exp.output_dir, args.experiment_name)
    os.makedirs(file_name, exist_ok=True)

    vis_folder = None
    if args.save_result:
        vis_folder = os.path.join(file_name, "vis_res")
        os.makedirs(vis_folder, exist_ok=True)

    if args.trt:
        args.device = "gpu"

    logger.info("Args: {}".format(args))

    if args.conf is not None:
        exp.test_conf = args.conf
    if args.nms is not None:
        exp.nmsthre = args.nms
    if args.tsize is not None:
        exp.test_size = (args.tsize, args.tsize)

    model = exp.get_model()
    logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))

    if args.device == "gpu":
        model.cuda()
        if args.fp16:
            model.half()  # to FP16
    model.eval()

    if not args.trt:
        if args.ckpt is None:
            ckpt_file = os.path.join(file_name, "best_ckpt.pth")
        else:
            ckpt_file = args.ckpt
        logger.info("loading checkpoint")
        ckpt = torch.load(ckpt_file, map_location="cpu")
        # load the model state dict
        model.load_state_dict(ckpt["model"])
        logger.info("loaded checkpoint done.")

    if args.fuse:
        logger.info("\tFusing model...")
        model = fuse_model(model)

    if args.trt:
        assert not args.fuse, "TensorRT model is not support model fusing!"
        trt_file = os.path.join(file_name, "model_trt.pth")
        assert os.path.exists(
            trt_file
        ), "TensorRT model is not found!\n Run python3 tools/trt.py first!"
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
        logger.info("Using TensorRT to inference")
    else:
        trt_file = None
        decoder = None

    predictor = Predictor(
        model, exp, COCO_CLASSES, trt_file, decoder,
        args.device, args.fp16, args.legacy,
    )
    current_time = time.localtime()
    if args.demo == "image":
        image_demo(predictor, vis_folder, args.path, current_time, args.save_result)
    elif args.demo == "video" or args.demo == "webcam":
        imageflow_demo(predictor, vis_folder, current_time, args)


if __name__ == '__main__':
    args = make_parser().parse_args()
    exp = get_exp(args.exp_file, args.name)

    main(exp, args)
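As a side note, driving cmd.exe with pyautogui and pyperclip is fragile: it depends on window focus, the current mouse position, and fixed sleeps. A sturdier alternative is to run the same command with subprocess. This is only a sketch of that alternative, not what my code above does; run_demo is my own name, and the flags are the ones used in ceshi:

import subprocess

def run_demo(model, ckpt, image_path, tsize):
    # The same command ceshi types into cmd.exe, run without UI automation.
    cmd = [
        "python", "demo.py", "image",
        "-n", model,              # "yolox-s" or "yolox-m"
        "-c", ckpt,               # e.g. .../w/yolox_s.pth
        "--path", image_path,
        "--conf", "0.25", "--nms", "0.45",
        "--tsize", str(tsize),    # 320 for yolox-s, 640 for yolox-m above
        "--save_result", "--device", "gpu",
    ]
    # Blocks until inference finishes; raises if demo.py exits non-zero.
    subprocess.run(cmd, check=True)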

Link: https://pan.baidu.com/s/1Z08uL6-sRe-VX-1GywCT8w
Extraction code: gope

Environment (screenshots omitted).
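Since the environment screenshots are gone, here is roughly what the imports in the two scripts above require; this package list is my inference from the code, not the original list, and versions are unspecified. YOLOX itself (the yolox package and demo.py) comes from installing the Megvii YOLOX repository:

pip install pyqt5 pyautogui pyperclip loguru opencv-python torch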

 

 

