Python pose recognition + TensorFlow 1.12 + PyQt5 + UI

2024-06-07 18:12

This article presents a Python pose-recognition application built on TensorFlow 1.12 with a PyQt5 user interface. It walks through the complete main program, which will hopefully serve as a useful reference for developers working on similar problems.


import datetime

from PyQt5.QtCore import QCoreApplication
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog
from PyQt5 import QtCore, QtGui, QtWidgets

import cv2
import numpy as np

from vedio import vedio
from HumanPoseRec import Ui_MainWindow

'''
Example of usage:

(1) Test on video file:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type video \
    --data_path data_test/exercise.avi \
    --output_folder output

(2) Test on a folder of images:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type folder \
    --data_path data_test/apple/ \
    --output_folder output

(3) Test on web camera:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type webcam \
    --data_path 0 \
    --output_folder output
'''

SRC_DATA_TYPE = "webcam"
SRC_DATA_PATH = "0"
SRC_MODEL_PATH = r'D:\pysave2023\Action-Recognition\model\trained_classifier.pickle'
output_folder = "output/"
ith_img = -1
predict_label = {}
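The usage notes in the docstring are inherited from the original command-line script; in this UI version the same parameters are hard-coded in the constants above (webcam input and an absolute Windows path to the trained classifier). If you want the command-line behavior back, a minimal argparse sketch matching the documented flags (my assumption; it is not part of this program) could look like:

import argparse

parser = argparse.ArgumentParser(
    description="Test action recognition on a video, a folder of images, or a webcam.")
parser.add_argument("--model_path", default="model/trained_classifier.pickle")
parser.add_argument("--data_type", choices=["video", "folder", "webcam"], default="webcam")
parser.add_argument("--data_path", default="0")
parser.add_argument("--output_folder", default="output")
args = parser.parse_args()  # e.g. args.data_type would replace SRC_DATA_TYPE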
if True:  # Include project path
    import sys
    import os
    ROOT = os.path.dirname(os.path.abspath(__file__)) + "/../"
    CURR_PATH = os.path.dirname(os.path.abspath(__file__)) + "/"
    sys.path.append(ROOT)

    import utils.lib_images_io as lib_images_io
    import utils.lib_plot as lib_plot
    import utils.lib_commons as lib_commons
    from utils.lib_openpose import SkeletonDetector
    from utils.lib_tracker import Tracker
    from utils.lib_classifier import ClassifierOnlineTest
    from utils.lib_classifier import *  # Import all sklearn related libraries


# -- Helper: derive the output folder name from the data source

def get_dst_folder_name(src_data_type, src_data_path):
    global folder_name
    try:
        if src_data_type == "video":  # /root/data/video.avi --> video
            folder_name = os.path.basename(src_data_path).split(".")[-2]
        elif src_data_type == "folder":  # /root/data/video/ --> video
            folder_name = src_data_path.rstrip("/").split("/")[-1]
        elif src_data_type == "webcam":
            # month-day-hour-minute-seconds, e.g.: 02-26-15-51-12
            folder_name = lib_commons.get_time_string()
    except:
        pass
    return folder_name


class MultiPersonClassifier(object):
    ''' A wrapper around ClassifierOnlineTest
    for recognizing the actions of multiple people.
    '''

    def __init__(self, model_path, classes):
        self.dict_id2clf = {}  # human id -> classifier of this person

        # Define a function for creating a classifier for new people.
        self._create_classifier = lambda human_id: ClassifierOnlineTest(
            model_path, classes, WINDOW_SIZE, human_id)

    def classify(self, dict_id2skeleton):
        ''' Classify the action type of each skeleton in dict_id2skeleton. '''

        # Clear people no longer in view
        old_ids = set(self.dict_id2clf)
        cur_ids = set(dict_id2skeleton)
        humans_not_in_view = list(old_ids - cur_ids)
        for human in humans_not_in_view:
            del self.dict_id2clf[human]

        # Predict each person's action
        id2label = {}
        for id, skeleton in dict_id2skeleton.items():
            if id not in self.dict_id2clf:  # add this new person
                self.dict_id2clf[id] = self._create_classifier(id)
            classifier = self.dict_id2clf[id]
            id2label[id] = classifier.predict(skeleton)  # predict label
        return id2label

    def get_classifier(self, id):
        ''' Get the classifier for a person id.
        Arguments:
            id {int or "min"}
        '''
        if len(self.dict_id2clf) == 0:
            return None
        if id == 'min':
            id = min(self.dict_id2clf.keys())
        return self.dict_id2clf[id]


def remove_skeletons_with_few_joints(skeletons):
    ''' Remove bad skeletons before sending them to the tracker. '''
    good_skeletons = []
    for skeleton in skeletons:
        px = skeleton[2:2 + 13 * 2:2]
        py = skeleton[3:2 + 13 * 2:2]
        num_valid_joints = len([x for x in px if x != 0])
        num_leg_joints = len([x for x in px[-6:] if x != 0])
        total_size = max(py) - min(py)
        # IF JOINTS ARE MISSING, TRY CHANGING THESE VALUES:
        if num_valid_joints >= 5 and total_size >= 0.1 and num_leg_joints >= 0:
            # Keep this skeleton only when all requirements are satisfied
            good_skeletons.append(skeleton)
    return good_skeletons
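The slicing in remove_skeletons_with_few_joints implies the skeleton format: a flat 36-element list [x0, y0, x1, y1, ..., x17, y17] of 18 OpenPose joints, with coordinates normalized to roughly [0, 1] and 0.0 marking an undetected joint. A small self-contained check (the synthetic skeleton below is my own; real skeletons come from SkeletonDetector.humans_to_skels_list):

# Hypothetical skeleton: joints 1..7 detected, all other joints missing.
fake_skeleton = [0.0] * 36
for joint in range(1, 8):
    fake_skeleton[2 * joint] = 0.5              # x coordinate
    fake_skeleton[2 * joint + 1] = 0.1 * joint  # y coordinate

# 7 valid joints and a y-extent of 0.7 pass the thresholds, so it is kept.
print(len(remove_skeletons_with_few_joints([fake_skeleton])))  # -> 1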
# -- Load configs
cfg_all = lib_commons.read_yaml(ROOT + "config/config.yaml")
cfg = cfg_all["s5_test.py"]

CLASSES = np.array(cfg_all["classes"])
SKELETON_FILENAME_FORMAT = cfg_all["skeleton_filename_format"]

# Action recognition: number of frames used to extract features.
WINDOW_SIZE = int(cfg_all["features"]["window_size"])

dict_id2label = {}
scale_h = 0

# Output folder
DST_FOLDER_NAME = get_dst_folder_name(SRC_DATA_TYPE, SRC_DATA_PATH)
DST_FOLDER = output_folder + "/" + DST_FOLDER_NAME + "/"
DST_SKELETON_FOLDER_NAME = cfg["output"]["skeleton_folder_name"]
DST_VIDEO_NAME = cfg["output"]["video_name"]
# Frame rate of the output video.avi
DST_VIDEO_FPS = float(cfg["output"]["video_fps"])

# Video settings
# If data_type is webcam, set the max frame rate.
SRC_WEBCAM_MAX_FPS = float(cfg["settings"]["source"]["webcam_max_framerate"])
# If data_type is video, set the sampling interval.
# For example, if it's 3, the video is read 3 times faster.
SRC_VIDEO_SAMPLE_INTERVAL = int(cfg["settings"]["source"]["video_sample_interval"])

# OpenPose settings
OPENPOSE_MODEL = cfg["settings"]["openpose"]["model"]
OPENPOSE_IMG_SIZE = cfg["settings"]["openpose"]["img_size"]

# Display settings
img_disp_desired_rows = int(cfg["settings"]["display"]["desired_rows"])

# -- Detector, tracker, classifier
skeleton_detector = SkeletonDetector(OPENPOSE_MODEL, OPENPOSE_IMG_SIZE)
multiperson_tracker = Tracker()
multiperson_classifier = MultiPersonClassifier(SRC_MODEL_PATH, CLASSES)

# -- Output folders and video writers
os.makedirs(DST_FOLDER, exist_ok=True)
os.makedirs(DST_FOLDER + DST_SKELETON_FOLDER_NAME, exist_ok=True)

# Note: both writers target the same file, so camera mode and video mode
# overwrite each other's output.
video_writer = lib_images_io.VideoWriter(DST_FOLDER + DST_VIDEO_NAME, DST_VIDEO_FPS)
video_writer2 = lib_images_io.VideoWriter(DST_FOLDER + DST_VIDEO_NAME, DST_VIDEO_FPS)
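The program only starts if config/config.yaml provides every key read above. The sketch below shows the minimum shape the file must have, written as the Python dict that lib_commons.read_yaml would return; the keys are exactly those the script reads, while all the values are placeholders of mine:

cfg_all_expected = {
    "classes": ["stand", "walk", "run", "jump", "sit",
                "squat", "kick", "punch", "wave"],  # order matters, see below
    "skeleton_filename_format": "{:05d}.txt",
    "features": {"window_size": 5},
    "s5_test.py": {
        "output": {
            "skeleton_folder_name": "skeletons/",
            "video_name": "video.avi",
            "video_fps": 10.0,
        },
        "settings": {
            "source": {"webcam_max_framerate": 10.0, "video_sample_interval": 1},
            "openpose": {"model": "cmu", "img_size": "656x368"},
            "display": {"desired_rows": 480},
        },
    },
}

The order of "classes" is significant: the code below converts a label to label_index through the key order of ENGLISH_TO_CHINESE and uses that index into the sssr score list, so the classifier's class order, the YAML list, and the mapping must all agree.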
sssr = []

# Map the classifier's English labels to the Chinese exercise names shown in
# the UI (English glosses added for reference).
ENGLISH_TO_CHINESE = {
    'stand': '站姿推举',   # standing shoulder press
    'walk': '摆手',        # arm swings
    'run': '平板支撑',     # plank
    'jump': '高抬腿',      # high knees
    'sit': '扎马步',       # horse stance
    'squat': '深蹲',       # deep squat
    'kick': '俯身飞鸟',    # bent-over reverse fly
    'punch': '招财猫',     # "lucky cat" arm raise
    'wave': '侧平举',      # lateral raise
}


def draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
                    skeleton_detector, multiperson_classifier):
    ''' Draw skeletons, labels, and prediction scores onto the image for display. '''
    global sssr

    # Resize to a proper size for display
    r, c = img_disp.shape[0:2]
    desired_cols = int(1.0 * c * (img_disp_desired_rows / r))
    img_disp = cv2.resize(img_disp,
                          dsize=(desired_cols, img_disp_desired_rows))

    # Draw all people's skeletons
    skeleton_detector.draw(img_disp, humans)

    # Draw the bounding box and label of each person
    if len(dict_id2skeleton):
        for id, label in dict_id2label.items():
            skeleton = dict_id2skeleton[id]
            # Scale the y data back to the original size
            skeleton[1::2] = skeleton[1::2] / scale_h
            lib_plot.draw_action_result(img_disp, id, skeleton, label)

    cv2.putText(img_disp, "Frame:" + str(ith_img),
                (20, 20), fontScale=1.5, fontFace=cv2.FONT_HERSHEY_PLAIN,
                color=(0, 0, 0), thickness=2)

    # Draw the prediction scores of one person only
    if len(dict_id2skeleton):
        classifier_of_a_person = multiperson_classifier.get_classifier(id='min')
        # It seems to get stuck here sometimes
        sssr = classifier_of_a_person.draw_scores_onto_image(img_disp)
        print('-------------------------------------------------')
        print(sssr)
    return img_disp


def get_the_skeleton_data_to_save_to_disk(dict_id2skeleton):
    '''
    For each skeleton in the image, save:
        human_id, label, and the skeleton positions of length 18*2,
    so the total length per row is 2 + 36 = 38.
    '''
    skels_to_save = []
    for human_id in dict_id2skeleton.keys():
        label = dict_id2label[human_id]
        skeleton = dict_id2skeleton[human_id]
        skels_to_save.append([[human_id, label] + skeleton.tolist()])
    return skels_to_save


def log_action_record(action_name, score):
    ''' Append "time, action, score" to data.txt. The first 19 characters of
    a record are its "%Y-%m-%d %H:%M:%S" timestamp, and a record is skipped
    when it matches the last line's timestamp, so at most one record is
    written per second. '''
    formatted_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    data_point = f"{formatted_time}, {action_name}, {score}\n"
    with open("data.txt", 'a+') as file:
        if file.tell() == 0:  # the file is empty
            file.write(data_point)
        else:
            # Move the file pointer to the beginning and read all records
            file.seek(0)
            lines = file.readlines()
            # Append only if the timestamp differs from the last record's
            if not lines or lines[-1][:19] != data_point[:19]:
                file.write(data_point)


def report_action_of_min_id(dict_id2skeleton):
    ''' Print and log the recognized action of the person with the smallest id. '''
    if not len(dict_id2skeleton):
        return
    min_id = min(dict_id2skeleton.keys())
    label = dict_id2label[min_id]
    if label == 'LABEL_UNKNOWN' or label not in ENGLISH_TO_CHINESE:
        return
    label_index = list(ENGLISH_TO_CHINESE.keys()).index(label)
    s = ENGLISH_TO_CHINESE[label]
    log_action_record(s, sssr[label_index])
    print("当前动作 :", s)
    print('动作分数:', sssr[label_index])


class Main(Ui_MainWindow, QMainWindow):
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.vedio = vedio()
        self.timer_camera = QtCore.QTimer()
        self.timer_video = QtCore.QTimer()
        # Timer callbacks
        self.timer_camera.timeout.connect(self.show_camera)
        self.timer_video.timeout.connect(self.show_video)
        self.button()
        self.label.setPixmap(QtGui.QPixmap('img.png').scaled(
            self.label.width(), self.label.height()))
        self.pushButton.clicked.connect(self.playVedio)

    def playVedio(self):
        self.vedio.show()
        self.vedio.slotStart()

    def setscore(self):
        ''' Show the per-class scores (as percentages) in the nine UI labels. '''
        try:
            score_labels = [self.actionname1, self.actionname2, self.actionname3,
                            self.actionname4, self.actionname5, self.actionname6,
                            self.actionname7, self.actionname8, self.actionname9]
            for i, score_label in enumerate(score_labels):
                score_label.setText(str(float(sssr[i]) * 100))
        except:
            pass

    def camera_init(self):
        # Open the webcam via DirectShow
        videoSourceIndex = 0
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW + videoSourceIndex)
        self.CAM_NUM = 0

    def camera(self):
        ''' Toggle the camera and its timer. '''
        if not self.timer_camera.isActive():
            flag = self.cap.open(self.CAM_NUM)
            if not flag:
                QtWidgets.QMessageBox.warning(
                    self, u"Warning", u"请检测相机与电脑是否连接正确",
                    buttons=QtWidgets.QMessageBox.Ok,
                    defaultButton=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_camera.start(50)
        else:
            self.timer_camera.stop()
            self.cap.release()

    def show_camera(self):
        ''' Timer callback: read one webcam frame, run the recognition
        pipeline, then display and save the result. '''
        global dict_id2label, scale_h, ith_img
        try:
            # -- Read one video frame
            flag, img = self.cap.read()
            ith_img += 1
            img_disp = img.copy()
            print(f"\nProcessing {ith_img}th image ...")

            # -- Detect skeletons
            humans = skeleton_detector.detect(img)
            skeletons, scale_h = skeleton_detector.humans_to_skels_list(humans)
            skeletons = remove_skeletons_with_few_joints(skeletons)

            # -- Track people
            dict_id2skeleton = multiperson_tracker.track(skeletons)  # int id -> np.array() skeleton

            # -- Recognize the action of each person
            if len(dict_id2skeleton):
                dict_id2label = multiperson_classifier.classify(dict_id2skeleton)

            # -- Draw onto the processed image
            img_disp = draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
                                       skeleton_detector, multiperson_classifier)

            # -- Print and log the recognized action
            report_action_of_min_id(dict_id2skeleton)

            # -- Display the image on the UI and write it to video.avi.
            # This may be where the problem is, since img_disp always has data.
            show = cv2.cvtColor(img_disp, cv2.COLOR_BGR2RGB)
            showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                                     QtGui.QImage.Format_RGB888)
            self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
            self.setscore()
            video_writer.write(img_disp)

            # -- Get the skeleton data and save it to file
            skels_to_save = get_the_skeleton_data_to_save_to_disk(dict_id2skeleton)
            lib_commons.save_listlist(
                DST_FOLDER + DST_SKELETON_FOLDER_NAME +
                SKELETON_FILENAME_FORMAT.format(ith_img),
                skels_to_save)
        finally:
            pass

    def show_video(self):
        ''' Timer callback: the same pipeline as show_camera, but frames come
        from images_loader and go to video_writer2. '''
        global dict_id2label, scale_h, ith_img
        if images_loader.has_image():
            ith_img += 1
            try:
                # -- Read image
                img = images_loader.read_image()
                img_disp = img.copy()
                print(f"\nProcessing {ith_img}th image ...")

                # -- Detect skeletons
                humans = skeleton_detector.detect(img)
                skeletons, scale_h = skeleton_detector.humans_to_skels_list(humans)
                skeletons = remove_skeletons_with_few_joints(skeletons)

                # -- Track people
                dict_id2skeleton = multiperson_tracker.track(skeletons)

                # -- Recognize the action of each person
                if len(dict_id2skeleton):
                    dict_id2label = multiperson_classifier.classify(dict_id2skeleton)

                # -- Draw
                img_disp = draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
                                           skeleton_detector, multiperson_classifier)

                # -- Print and log the recognized action
                report_action_of_min_id(dict_id2skeleton)

                # -- Display the image and write it to video.avi
                show = cv2.cvtColor(img_disp, cv2.COLOR_BGR2RGB)
                showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                                         QtGui.QImage.Format_RGB888)
                self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
                self.setscore()
                video_writer2.write(img_disp)

                # -- Get the skeleton data and save it to file
                skels_to_save = get_the_skeleton_data_to_save_to_disk(dict_id2skeleton)
                lib_commons.save_listlist(
                    DST_FOLDER + DST_SKELETON_FOLDER_NAME +
                    SKELETON_FILENAME_FORMAT.format(ith_img),
                    skels_to_save)
            finally:
                pass

    def button(self):
        ''' Connect the menu actions. '''
        self.action_3.triggered.connect(self.videoMode)
        self.action_4.triggered.connect(self.cameraMode)
        self.action_2.triggered.connect(self.reset)
        self.action_5.triggered.connect(self.photoMode)
        self.action.triggered.connect(self.save)
        self.actionexit.triggered.connect(QCoreApplication.instance().quit)

    def save(self):
        video_writer.stop()
        video_writer2.stop()
        print("Program ends")

    def videoMode(self):
        ''' Let the user pick a video file and start the video timer. '''
        global ith_img, images_loader, SRC_DATA_type
        ith_img = -1
        try:
            self.timer_camera.stop()
            self.cap.release()
        except:
            pass
        # Output folder
        SRC_DATA_type = "video"
        SRC_DATA_path = QFileDialog.getOpenFileNames(self, '选择动作视频', '', '')[0]
        try:
            DST_FOLDER_name = get_dst_folder_name(SRC_DATA_type, str(SRC_DATA_path[0]))
            DST_folder = output_folder + "/" + DST_FOLDER_name + "/"
            DST_SKELETON_FOLDER_name = cfg["output"]["skeleton_folder_name"]
            os.makedirs(DST_folder, exist_ok=True)
            os.makedirs(DST_folder + DST_SKELETON_FOLDER_name, exist_ok=True)
            images_loader = lib_images_io.ReadFromVideo(
                SRC_DATA_path[0],
                sample_interval=SRC_VIDEO_SAMPLE_INTERVAL)
            self.timer_video.start(30)
        except:
            pass

    def cameraMode(self):
        global ith_img
        ith_img = -1
        try:
            self.timer_video.stop()
        except:
            pass
        self.camera_init()
        self.camera()

    def photoMode(self):
        ''' Let the user pick a single image and run the pipeline once. '''
        global ith_img, dict_id2label, scale_h
        ith_img = 0
        dict_id2label = {}
        scale_h = 0
        try:
            self.timer_camera.stop()
            self.cap.release()
        except:
            pass
        try:
            self.timer_video.stop()
        except:
            pass
        try:
            self.filename = QFileDialog.getOpenFileNames(
                self, "打开图片", "./", "*.jpg;;*.png;;All Files(*)")[0][0]
            print(self.filename)
            img = cv2.imread(self.filename)
            img_disp = img.copy()

            # -- Detect skeletons
            humans = skeleton_detector.detect(img)
            skeletons, scale_h = skeleton_detector.humans_to_skels_list(humans)
            skeletons = remove_skeletons_with_few_joints(skeletons)

            # -- Track people
            dict_id2skeleton = multiperson_tracker.track(skeletons)

            # -- Recognize the action of each person
            if len(dict_id2skeleton):
                dict_id2label = multiperson_classifier.classify(dict_id2skeleton)

            # -- Draw
            img_disp = draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
                                       skeleton_detector, multiperson_classifier)

            # -- Print and log the recognized action
            report_action_of_min_id(dict_id2skeleton)

            cv2.imwrite('photoMode.png', img_disp)
            photo = QtGui.QPixmap('photoMode.png').scaled(
                self.label.width(), self.label.height())
            self.label.setPixmap(photo)
            self.setscore()
        except:
            pass

    def reset(self):
        ''' Stop all timers and restore the placeholder image. '''
        global ith_img, dict_id2label, scale_h
        ith_img = 0
        dict_id2label = {}
        scale_h = 0
        try:
            self.timer_camera.stop()
            self.cap.release()
        except:
            pass
        try:
            self.timer_video.stop()
        except:
            pass
        self.label.setPixmap(QtGui.QPixmap('img.png'))


if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = Main()
    win.show()
    sys.exit(app.exec_())
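The data.txt log written by log_action_record can be read back in a few lines. A minimal sketch, assuming every line has the "timestamp, action, score" shape produced above (the file is opened without an explicit encoding, matching the writer):

with open("data.txt") as f:
    for line in f:
        timestamp, action, score = [part.strip() for part in line.split(",", 2)]
        print(timestamp, action, float(score))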

This concludes the article on Python pose recognition + TensorFlow 1.12 + PyQt5 + UI. We hope it proves helpful to other developers!


