diff --git a/cfg_main.toml b/cfg_main.toml
index a0dd885..a0861a0 100644
--- a/cfg_main.toml
+++ b/cfg_main.toml
@@ -1,6 +1,6 @@
 [debug]
 logger_filename = "log/file_{time}.log"
-logger_format = "{time} {level} {message}"
+logger_format = "[{level}] {file}:{line} <{time}> {message}"
diff --git a/cfg_subtask.toml b/cfg_subtask.toml
index 3cce9f4..8a02906 100644
--- a/cfg_subtask.toml
+++ b/cfg_subtask.toml
@@ -20,17 +20,17 @@
 pid_ki = 0
 pid_kd = 0
 
 [get_rball]
-pid_kp = 1.5
+pid_kp = 1.0
 pid_ki = 0
 pid_kd = 0
 
 [put_bball]
-pid_kp = 2.0
+pid_kp = 1.5
 pid_ki = 0
 pid_kd = 0
 
 [put_hanoi1]
-pid_kp = 0.7
+pid_kp = 0.5
 pid_ki = 0
 pid_kd = 0
 
@@ -42,7 +42,7 @@
 pos_gap = 160
 first_target = "mp"
 
 [put_hanoi3]
-pid_kp = 1.7
+pid_kp = 1.5
 pid_ki = 0
 pid_kd = 0
diff --git a/subtask.py b/subtask.py
index 507856b..382ee0c 100644
--- a/subtask.py
+++ b/subtask.py
@@ -1288,7 +1288,10 @@ class move_area1():
         if counts >= 2:
             var.skip_llm_task_flag = True
             return
-        logger.error(var.llm_text)
+        logger.error(f"OCR detected text: \"{var.llm_text}\"")
+
+        llm_bot.request(var.llm_text)
+
        if len(var.llm_text) < 3:
             var.skip_llm_task_flag = True
             return
@@ -1511,7 +1514,7 @@ class move_area2():
                 resp_commands = json5.loads(json_text[0])
 
-                logger.info(resp_commands)
+                logger.info(f"Parsed action sequence: {resp_commands}")
 
                 if len(resp_commands) == 0:
                     return
                 action_list = resp_commands
@@ -1524,7 +1527,7 @@ class move_area2():
                     time.sleep(0.5)
                     self.reset()
             except:
-                logger.warning("Task parsing failed, exiting; ERNIE Bot is truly useless")
+                logger.warning("Task parsing failed, exiting; ERNIE Bot is truly useless (without a doubt)")
                 pass
 
             else:
diff --git a/utils.py b/utils.py
index d772586..4d5835c 100644
--- a/utils.py
+++ b/utils.py
@@ -375,6 +375,9 @@ class label_filter:
         return (False, 0)
 class LLM_deepseek:
     def __init__(self):
+        self.response = None
+        self.status = False
+        self.chat = ''
         self.client = OpenAI(api_key="sk-c2e1073883304143981a9750b97c3518", base_url="https://api.deepseek.com")
         self.prompt = '''
 You are a robot action planner: translate my words into a robot action plan and generate the corresponding JSON result. Note that you may only use the actions specified below; do not invent new actions:
@@ -421,17 +424,42 @@
 To emphasize: please ignore the instruction '离开' (leave); this is very important to me!
 '''
-    def get_command_json(self,chat):
-        response = self.client.chat.completions.create(
-            model="deepseek-chat",
-            messages=[
-                {"role": "system", "content": self.prompt},
-                {"role": "user", "content": 'My words are as follows: ' + chat},
-            ],
-            stream=False,
-            temperature=0.7
-        )
-        return response.choices[0].message.content
+    def request_thread(self):
+        logger.info("LLM: requesting remote server (request_thread)")
+        try:
+            self.response = self.client.chat.completions.create(
+                model="deepseek-chat",
+                messages=[
+                    {"role": "system", "content": self.prompt},
+                    {"role": "user", "content": 'My words are as follows: ' + self.chat},
+                ],
+                stream=False,
+                temperature=0.7
+            )
+            logger.info("LLM: remote server returned normally (request_thread)")
+        except Exception:
+            logger.warning("LLM: request failed or returned an error; check the network connection first (request_thread)")
+        self.status = True
+    def request(self, _chat):
+        self.chat = _chat
+        thread = threading.Thread(target=self.request_thread, daemon=True)
+        thread.start()
+        logger.info("LLM: request thread started")
+    def get_command_json(self, chat=''):
+        # response = self.client.chat.completions.create(
+        #     model="deepseek-chat",
+        #     messages=[
+        #         {"role": "system", "content": self.prompt},
+        #         {"role": "user", "content": 'My words are as follows: ' + chat},
+        #     ],
+        #     stream=False,
+        #     temperature=0.7
+        # )
+        logger.info("LLM: blocking until the server responds")
+        while not self.status:  # FIXME: is a blocking busy-wait appropriate here?
+            pass
+        logger.info("LLM: response received")
+        return self.response.choices[0].message.content
 class LLM:
     def __init__(self):
         self.init_done_flag = False
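
The new `logger_format` puts the level and source location first, which makes filtering robot logs by severity easier than the old time-first layout. Below is a minimal sketch of how the `[debug]` values might be wired up, assuming the project uses loguru (whose format fields include `{file}` and `{line}`, and which also expands `{time}` in the sink path) and Python 3.11's `tomllib`; the config-loading code itself is not part of the patch.

```python
import tomllib  # Python 3.11+; older versions can use the third-party `toml` package

from loguru import logger

with open("cfg_main.toml", "rb") as f:
    debug_cfg = tomllib.load(f)["debug"]

# loguru substitutes {time}, {level}, {file}, and {line} in the format string,
# and expands {time} in the file name, producing e.g. log/file_2024-05-01.log
logger.add(debug_cfg["logger_filename"], format=debug_cfg["logger_format"])
logger.info("logging configured")  # -> [INFO] main.py:12 <...> logging configured
```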
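The `request`/`request_thread`/`get_command_json` split makes the DeepSeek call asynchronous: `move_area1` fires the request early via `llm_bot.request(...)`, and `get_command_json` later blocks until `self.status` flips. The FIXME is warranted: `while not self.status: pass` spins a CPU core, and because `self.status` is set even when the request fails, `self.response.choices` can raise on a `None` response. Below is a hedged sketch of one alternative (not the patch's implementation, and with a stand-in `fetch` callable in place of the real API call) using `threading.Event`, which sleeps instead of spinning and supports a timeout.

```python
import threading

class AsyncLLMRequest:
    """Sketch of an Event-based wait; `fetch` stands in for the real API call."""

    def __init__(self, fetch):
        self.fetch = fetch              # callable performing the blocking request
        self.response = None
        self.done = threading.Event()

    def _worker(self):
        try:
            self.response = self.fetch()
        finally:
            self.done.set()             # wake the waiter even if fetch() raised

    def request(self):
        threading.Thread(target=self._worker, daemon=True).start()

    def wait_for_response(self, timeout=30.0):
        # Event.wait() blocks without burning CPU, unlike `while not flag: pass`
        if not self.done.wait(timeout=timeout):
            raise TimeoutError("LLM request did not complete in time")
        return self.response            # may be None if fetch() raised; caller checks

req = AsyncLLMRequest(lambda: "stubbed completion")
req.request()
print(req.wait_for_response())          # -> stubbed completion
```

Reusing one object for several requests would also need `self.done.clear()` before each new request, a reset the boolean-flag version needs just the same.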