Compare commits


3 Commits

SHA1        Message                                            Date
cf167180b5  feat: add dual lane-following regression servers   2024-07-13 15:24:03 +08:00
d81d0d7cff  update                                             2024-07-04 17:49:41 +08:00
65aceec3a2  update .gitignore                                  2024-07-03 18:29:07 +08:00
11 changed files with 212 additions and 25 deletions

.gitignore vendored
View File

@@ -200,3 +200,6 @@ yolo_server/*.zip
 # task-recognition model path
 person_yolo_server/model
+*.pdiparams
+*.pdiparams.info
+*.pdmodel

View File

@@ -7,6 +7,7 @@ logger_format = "{time} {level} {message}"
 lane_infer_port = 6666
 yolo_infer_port = 6667
 ocr_infer_port = 6668
+lane_infer_port1 = 6669
 [camera]
 front_camera_port = 5555
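
The new lane_infer_port1 key gives the second lane-regression server its own REP port (6669) alongside the existing 6666/6667/6668 endpoints. A minimal client-side sketch, assuming the same [server] table layout and a config file in the working directory (the path and variable names are illustrative, not part of this change):

import toml
import zmq

cfg = toml.load('cfg_infer_server.toml')       # assumed location, adjust as needed
port1 = cfg['server']['lane_infer_port1']      # 6669, the new second lane server

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(f"tcp://localhost:{port1}")     # request/reply against the new port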

View File

@@ -2,7 +2,8 @@ import paddle.inference as paddle_infer
 import numpy as np
 import paddle.vision.transforms as T
 class Lane_model_infer:
-    def __init__(self, model_dir="./lane_model"):
+    # def __init__(self, model_dir="./lane_model/7_12_6"):
+    def __init__(self, model_dir="./lane_model/6_9"):
         # initialize paddle inference
         self.model_dir = model_dir
         self.config = paddle_infer.Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
@@ -21,6 +22,9 @@ class Lane_model_infer:
         self.normalize_transform = T.Normalize(mean=[127.5], std=[127.5])
         # print(self.config.summary())
     def infer(self,src) -> np.ndarray:
+        # 7_12_3 [60:240,:]
+        # crop_src = src[60:240,:]
+        # image = self.normalize_transform(crop_src)
         image = self.normalize_transform(src)
         image = image.transpose(2, 0, 1)
         image = np.expand_dims(image, axis=0)

View File

@@ -7,6 +7,7 @@ from infer import Lane_model_infer
 import numpy as np
 import cv2
 lock = threading.Lock()
 response = {'code': 0, 'data': 0}
@@ -15,8 +16,8 @@ response = {'code': 0, 'data': 0}
 # handle server response data
 def server_resp(lane_infer_port):
-    logger.info("lane server thread init success")
     global response
+    logger.info("lane server thread init success")
     context = zmq.Context()
     # start the server
@@ -65,6 +66,18 @@ def lane_infer_server_main(queue):
                                 daemon=True)
     mythread.start()
+    import signal
+    import sys
+
+    def signal_handler(signum, frame):
+        logger.info("Received signal, exiting...")
+        camera_socket.close()
+        context.term()
+        sys.exit(0)
+
+    # register the signal handlers
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
     while True:
         camera_socket.send_string("")
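
This lane server (and, further down, the new lane server, the OCR server, and the YOLO server) now installs SIGTERM/SIGINT handlers that close the ZMQ sockets before exiting. A speculative sketch of why this matters, assuming each *_server_main is launched in its own multiprocessing.Process (an assumption about the launcher, which is not shown in this diff): Process.terminate() delivers SIGTERM, which now runs the cleanup handler in the child.

import multiprocessing as mp
import signal
import sys
import time

def worker(queue):
    # stand-in for lane_infer_server_main; the print replaces the real socket cleanup
    def handler(signum, frame):
        print("child: received SIGTERM, cleaning up")
        sys.exit(0)
    signal.signal(signal.SIGTERM, handler)
    while True:
        time.sleep(0.1)

if __name__ == "__main__":
    p = mp.Process(target=worker, args=(None,), daemon=True)
    p.start()
    time.sleep(0.5)
    p.terminate()   # SIGTERM -> handler runs before the child exits
    p.join()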

View File

@@ -0,0 +1,99 @@
+import toml
+import threading
+from loguru import logger
+import logging
+import zmq
+from infer import Lane_model_infer
+import numpy as np
+import cv2
+
+lock = threading.Lock()
+response = {'code': 0, 'data': 0}
+
+# handle server response data
+def server_resp(lane_infer_port):
+    global response
+    global model_id
+    logger.info("lane server1 thread init success")
+    context = zmq.Context()
+    # start the server
+    socket = context.socket(zmq.REP)
+    socket.bind(f"tcp://*:{lane_infer_port}")
+    logger.info("lane infer1 server init success")
+    while True:
+        message = socket.recv_string()
+        with lock:
+            socket.send_pyobj(response)
+
+def lane_infer1_server_main(queue):
+    # context2 = zmq.Context()
+    # socket_server = context2.socket(zmq.PUB)
+    # socket_server.setsockopt(zmq.SNDHWM,10)
+    # socket_server.bind("tcp://*:7778")
+    if queue != None:
+        class Handler(logging.Handler):
+            def emit(self, record):
+                log_entry = self.format(record)
+                queue.put({'level': record.levelname.lower(), 'content': log_entry})
+        # logger.remove()
+        handler = Handler()
+        logger.add(handler, format="{time:MM-DD HH:mm:ss} {message}", level="DEBUG")
+    cfg = toml.load('/home/evan/Workplace/project_infer/cfg_infer_server.toml')
+    # configure log output
+    logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention = 5, level="INFO")
+    # connect to the camera server; lane following only needs the front camera
+    context = zmq.Context()
+    camera_socket = context.socket(zmq.REQ)
+    camera_socket.connect(f"tcp://localhost:{cfg['camera']['front_camera_port']}")
+    logger.info("connect camera success")
+    # initialize the paddle predictor
+    predictor = Lane_model_infer(model_dir="./lane_model/7_12_6")
+    logger.info("lane model load success")
+    # start the lane_infer_server thread
+    mythread = threading.Thread(target=server_resp,
+                                args=(cfg['server']['lane_infer_port1'],),
+                                daemon=True)
+    mythread.start()
+    import signal
+    import sys
+
+    def signal_handler(signum, frame):
+        logger.info("Received signal, exiting...")
+        camera_socket.close()
+        context.term()
+        sys.exit(0)
+
+    # register the signal handlers
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
+
+    while True:
+        camera_socket.send_string("")
+        message = camera_socket.recv()
+        np_array = np.frombuffer(message, dtype=np.uint8)
+        frame = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
+        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+        frame = frame[60:240,:]
+        result = predictor.infer(frame)
+        with lock:
+            response['data'] = result
+        if cv2.waitKey(1) == 27:
+            break
+    mythread.join()
+    logger.info("lane infer1 server exit")
+
+if __name__ == "__main__":
+    lane_infer1_server_main(None)
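
The new server keeps the latest regression result in `response` under a lock and answers any REP request with it. A minimal polling-client sketch, assuming the server is already running on lane_infer_port1 = 6669 and that an empty request string is enough (which is what the server loop above expects); the loop bounds and sleep interval are illustrative:

import time
import zmq

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:6669")   # lane_infer_port1 from cfg_infer_server.toml

for _ in range(100):
    socket.send_string("")               # any request returns the latest result
    reply = socket.recv_pyobj()          # {'code': 0, 'data': <model output>}
    print(reply['data'])
    time.sleep(0.05)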

View File

@@ -0,0 +1,4 @@
+Intersection (cross-marking) augmentation
+Supplemented intersection data; the original data is backed up
+Similar to the first version: cuts inside on right-angle turns, handles intersections normally, rides the line on the curve after the red ball

View File

@@ -0,0 +1 @@
+crop_frame = frame[60:240,:] (this is the variant that works)

View File

@@ -0,0 +1,8 @@
+The model in the repository root is the earliest version and has not been tuned for a long time
+2024-04-28-19-19-15
+6_9: 2024-04-28-19-19-15 data supplemented with intersections, mobilenet v3 small, full image
+7_10_2: newly annotated dataset, mobilenet v3 small, full image
+7_12_3: same dataset as 6_9, mobilenet v3 large, top 60 pixel rows cropped off [60:240,:]
+7_12_6: same dataset as 6_9, mobilenet v3 small, top 60 pixel rows cropped off [60:240,:]
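
These notes separate full-image models (6_9, 7_10_2) from models trained on frames with the top 60 rows removed (7_12_3, 7_12_6). A small sketch of that preprocessing split, assuming 240-row input frames as implied by the [60:240,:] slice; the helper name and frame width are illustrative, not from the repository:

import numpy as np

def preprocess_for_model(frame: np.ndarray, cropped_model: bool) -> np.ndarray:
    # 6_9 / 7_10_2 expect the full frame; 7_12_3 / 7_12_6 expect the crop.
    if cropped_model:
        return frame[60:240, :]   # drop the top 60 pixel rows
    return frame

# example: feed a dummy 240x320 BGR frame through both paths
dummy = np.zeros((240, 320, 3), dtype=np.uint8)
print(preprocess_for_model(dummy, cropped_model=True).shape)    # (180, 320, 3)
print(preprocess_for_model(dummy, cropped_model=False).shape)   # (240, 320, 3)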

View File

@@ -5,6 +5,7 @@ import cv2
 import numpy as np
 import requests
 import base64
+import datetime
@@ -31,8 +32,8 @@ def get_access_token():
 def ocr_api_request(image_base64):
     # url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic"  # high accuracy
-    # url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate"  # high accuracy with location
-    url = "https://aip.baidubce.com/rest/2.0/ocr/v1/general"  # standard accuracy with location
+    url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate"  # high accuracy with location
+    # url = "https://aip.baidubce.com/rest/2.0/ocr/v1/general"  # standard accuracy with location
     headers = {
         'Content-Type': 'application/json'
     }
@@ -58,16 +59,22 @@ def ocr_api_request(image_base64):
 if __name__ == "__main__":
-    cfg = toml.load('../cfg_infer_server.toml')
+    logger.info("ocr server 开始加载")
+    cfg = toml.load('/home/evan/Workplace/project_infer/cfg_infer_server.toml')
     # configure log output
     logger.add(cfg['debug']['logger_filename'], format=cfg['debug']['logger_format'], retention = 5, level="INFO")
-    context1 = zmq.Context()
-    camera_socket = context1.socket(zmq.REQ)
-    camera_socket.connect(f"tcp://localhost:{cfg['camera']['camera2_port']}")
-    logger.info("connect camera success")
+    # context1 = zmq.Context()
+    # camera_socket = context1.socket(zmq.REQ)
+    # camera_socket.connect(f"tcp://localhost:{cfg['camera']['camera2_port']}")
+    # logger.info("connect camera success")
+    # cap = cv2.VideoCapture(20)
+    # cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))
+    # cap.set(cv2.CAP_PROP_FPS, 20)
+    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
+    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
     # initialize the server
     context = zmq.Context()
@@ -75,26 +82,61 @@ if __name__ == "__main__":
     socket = context.socket(zmq.REP)
     socket.bind(f"tcp://*:{cfg['server']['ocr_infer_port']}")
+    import signal
+    import sys
+
+    def signal_handler(signum, frame):
+        logger.info(f"接收到退出信号 {signum}, 退出中")
+        socket.close()
+        context.term()
+        sys.exit(0)
+
+    # register the signal handlers
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
+
+    logger.info("ocr server 加载完成")
     while True:
         message1 = socket.recv_string()
-        logger.info("recv client request")
+        logger.info("收到客户端请求")
+        logger.info("构造摄像头")
+        cap = cv2.VideoCapture(20)
+        cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M','J','P','G'))
+        cap.set(cv2.CAP_PROP_FPS, 20)
+        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
+        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
         for _ in range(5):
-            camera_socket.send_string("")
-            message = camera_socket.recv()
-        np_array = np.frombuffer(message, dtype=np.uint8)
-        image = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
-        output_file_path = 'output_image.jpg'
-        success = cv2.imwrite(output_file_path, image)
-        encoded_image = base64.b64encode(message).decode('utf-8')
-        result = ocr_api_request(encoded_image)
-        print(result)
-        if result != None:
-            socket.send_pyobj({'code': 0, 'content': result.get('words_result')})
+            ret, frame = cap.read()
+            cv2.waitKey(1)
+        if ret:
+            frame = frame[:,0:480]
+            frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+            now = datetime.datetime.now()
+            timestamp = now.strftime("%Y%m%d_%H%M%S")
+            output_file_path = f"./saved_picture/{timestamp}.jpg"
+            success = cv2.imwrite(output_file_path, frame)
+            _, frame = cv2.imencode('.jpg', frame)
+            encoded_image = base64.b64encode(frame).decode('utf-8')
+            result = ocr_api_request(encoded_image)
+            # print(result)
+            if result != None:
+                socket.send_pyobj({'code': 0, 'content': result.get('words_result')})
+                logger.info(f"ocr 返回 {result.get('words_result')}")
+            else:
+                socket.send_pyobj({'code': -1, 'content': " ocr 没找到文字"})
+                logger.error("ocr 没找到文字")
         else:
-            socket.send_pyobj({'code': -1, 'content': None})
+            socket.send_pyobj({'code': -1, 'content': "ocr 摄像头读取出错"})
+            logger.critical("ocr 摄像头读取出错")
+        cap.release()
         if cv2.waitKey(1) == 27:
             break
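
On the wire, the reworked OCR endpoint still answers each request with a single pyobj: 'code' 0 with the Baidu words_result list on success, and 'code' -1 with an error string when no text is found or the camera read fails. A client-side sketch, assuming the standard Baidu words_result item layout with a 'words' field and a locally available copy of the config file (both assumptions, not shown in this diff):

import toml
import zmq

cfg = toml.load('cfg_infer_server.toml')      # assumed local path
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(f"tcp://localhost:{cfg['server']['ocr_infer_port']}")

socket.send_string("ocr")                     # any string triggers a capture + OCR call
reply = socket.recv_pyobj()
if reply['code'] == 0:
    for item in reply['content']:             # words_result entries
        print(item.get('words'))
else:
    print("ocr failed:", reply['content'])    # no text found, or camera read error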

yolo_server/readme.txt Normal file
View File

@@ -0,0 +1 @@
+Model last used: 0622

View File

@@ -129,6 +129,17 @@ def yolo_infer_server_main(queue):
     mythread2 = threading.Thread(target=camera_resp, args=(cfg['camera']['camera1_port'], cfg['camera']['front_camera_port']), daemon=True)
     mythread1.start()
     mythread2.start()
+    import signal
+    import sys
+
+    def signal_handler(signum, frame):
+        logger.info("Received signal, exiting...")
+        sys.exit(0)
+
+    # register the signal handlers
+    signal.signal(signal.SIGTERM, signal_handler)
+    signal.signal(signal.SIGINT, signal_handler)
     while not exit_event.is_set():
         with lock3:
             if start: