From 3b7a83b0f52a84cc3242941b9f036c7ffaccaced Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Tue, 16 May 2023 18:57:10 +0800 Subject: [PATCH 01/68] fix(quickstart): set temporary env instead of permanent --- .../go-cqhttp/scripts/\345\220\257\345\212\250ChatGPT.cmd" | 2 +- .../go-cqhttp/scripts/\345\220\257\345\212\250go-cqhttp.cmd" | 2 +- .../windows/mirai/scripts/\345\220\257\345\212\250ChatGPT.cmd" | 2 +- .../windows/mirai/scripts/\345\220\257\345\212\250Mirai.cmd" | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git "a/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250ChatGPT.cmd" "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250ChatGPT.cmd" index 553a36ca..82396b39 100644 --- "a/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250ChatGPT.cmd" +++ "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250ChatGPT.cmd" @@ -1,7 +1,7 @@ @ECHO OFF @CHCP 65001 -setx /M PATH "%cd%\ffmpeg\bin;%PATH%" +SET PATH="%cd%\ffmpeg\bin;%PATH%" TITLE [ChatGPT for QQ] ChatGPT 端正在运行... cd chatgpt && ..\python3.11\python.exe bot.py diff --git "a/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250go-cqhttp.cmd" "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250go-cqhttp.cmd" index f7f6fbed..0b552350 100644 --- "a/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250go-cqhttp.cmd" +++ "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250go-cqhttp.cmd" @@ -3,7 +3,7 @@ TITLE [ChatGPT for QQ] go-cqhttp 端正在运行... -setx /M PATH "%cd%\ffmpeg\bin;%PATH%" +SET PATH="%cd%\ffmpeg\bin;%PATH%" cd go-cqhttp && go-cqhttp -faststart TITLE [ChatGPT for QQ] go-cqhttp 端已停止运行 diff --git "a/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250ChatGPT.cmd" "b/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250ChatGPT.cmd" index c3782e45..4878dfa0 100644 --- "a/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250ChatGPT.cmd" +++ "b/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250ChatGPT.cmd" @@ -3,7 +3,7 @@ TITLE [ChatGPT for QQ] ChatGPT 端正在运行... -setx /M PATH "%cd%\ffmpeg\bin;%PATH%" +SET PATH="%cd%\ffmpeg\bin;%PATH%" cd chatgpt && ..\python3.11\python.exe bot.py TITLE [ChatGPT for QQ] ChatGPT 端已停止运行 diff --git "a/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250Mirai.cmd" "b/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250Mirai.cmd" index 8eb5a18d..1c12d4ba 100644 --- "a/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250Mirai.cmd" +++ "b/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250Mirai.cmd" @@ -3,7 +3,7 @@ TITLE [ChatGPT for QQ] Mirai 端正在运行... 
-setx /M PATH "%cd%\ffmpeg\bin;%PATH%" +SET PATH="%cd%\ffmpeg\bin;%PATH%" cd mirai && mcl TITLE [ChatGPT for QQ] Mirai 端已停止运行 From 9fec2048ea3ce5ed5dd57f88a3ac106f63649a77 Mon Sep 17 00:00:00 2001 From: IceThunder Date: Tue, 16 May 2023 23:27:38 +0800 Subject: [PATCH 02/68] Update poe-claude100k,poe-gpt4,poe-neevaai --- adapter/quora/poe.py | 4 +++- constants.py | 2 ++ manager/bot.py | 5 +++++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/adapter/quora/poe.py b/adapter/quora/poe.py index f31f0b51..8f0344aa 100644 --- a/adapter/quora/poe.py +++ b/adapter/quora/poe.py @@ -11,13 +11,15 @@ class PoeBot(Enum): - """Poe 支持的机器人:{'capybara': 'Sage', 'beaver': 'GPT-4', 'a2_2': 'Claude+','a2': 'Claude', 'chinchilla': 'ChatGPT', + """Poe 支持的机器人:{'capybara': 'Sage', 'beaver': 'GPT-4', 'a2_2': 'Claude+','a2': 'Claude','a2_100k': 'Claude100k', 'chinchilla': 'ChatGPT', 'hutia': 'NeevaAI', 'nutria': 'Dragonfly'} """ Sage = "capybara" GPT4 = "beaver" Claude2 = "a2_2" Claude = "a2" + Claude100k = "a2_100k" ChatGPT = "chinchilla" + NeevaAI = "hutia" Dragonfly = "nutria" @staticmethod diff --git a/constants.py b/constants.py index a3965a49..9d6f6b2c 100644 --- a/constants.py +++ b/constants.py @@ -15,8 +15,10 @@ class LlmName(Enum): PoeGPT4 = "poe-gpt4" PoeClaude2 = "poe-claude2" PoeClaude = "poe-claude" + PoeClaude100k = "poe-claude100k" PoeChatGPT = "poe-chatgpt" PoeDragonfly = "poe-dragonfly" + PoeNeevaAI = "poe-neevaai" ChatGPT_Web = "chatgpt-web" ChatGPT_Api = "chatgpt-api" Bing = "bing" diff --git a/manager/bot.py b/manager/bot.py index 50597dff..97ead534 100644 --- a/manager/bot.py +++ b/manager/bot.py @@ -484,8 +484,13 @@ def bots_info(self): bot_info += f"* {LlmName.ChatGLM.value} : 清华 ChatGLM-6B (本地)\n" if len(self.bots['poe-web']) > 0: bot_info += f"* {LlmName.PoeSage.value} : POE Sage 模型\n" + bot_info += f"* {LlmName.PoeGPT4.value} : POE ChatGPT4 模型\n" bot_info += f"* {LlmName.PoeClaude.value} : POE Claude 模型\n" + bot_info += f"* {LlmName.PoeClaude2.value} : POE Claude+ 模型\n" + bot_info += f"* {LlmName.PoeClaude100k.value} : POE Claude 100k 模型\n" bot_info += f"* {LlmName.PoeChatGPT.value} : POE ChatGPT 模型\n" + bot_info += f"* {LlmName.PoeDragonfly.value} : POE Dragonfly 模型\n" + bot_info += f"* {LlmName.PoeNeevaAI.value} : POE NeevaAI 模型\n" if len(self.bots['slack-accesstoken']) > 0: bot_info += f"* {LlmName.SlackClaude.value} : Slack Claude 模型\n" return bot_info From 7e40a0c30cfaded51db4d9c54b0ef613a0de7ae0 Mon Sep 17 00:00:00 2001 From: Dark Litss <8984680+lss233@users.noreply.github.com> Date: Mon, 22 May 2023 21:26:53 +0800 Subject: [PATCH 03/68] Update docker-compose.go-cqhttp.yaml --- docker-compose.go-cqhttp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.go-cqhttp.yaml b/docker-compose.go-cqhttp.yaml index f0239e94..3a4675b0 100644 --- a/docker-compose.go-cqhttp.yaml +++ b/docker-compose.go-cqhttp.yaml @@ -1,7 +1,7 @@ version: '3.4' services: gocqhttp: - image: ghcr.io/mrs4s/go-cqhttp:master + image: silicer/go-cqhttp:latest restart: always environment: LANG: 'C.UTF-8' From 72b932e10bdb6b2d20b5945cf4cfd0b02b6e04c7 Mon Sep 17 00:00:00 2001 From: FelixFeli Date: Tue, 23 May 2023 15:01:30 +0800 Subject: [PATCH 04/68] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=9B=BE=E7=94=9F?= =?UTF-8?q?=E5=9B=BEinit=5Fimages=E5=AD=97=E6=AE=B5=E4=B8=BA=E7=A9=BA?= =?UTF-8?q?=E7=9A=84bug=20=E8=BF=87=E6=BB=A4=E5=9B=BE=E7=94=9F=E5=9B=BE?= =?UTF-8?q?=E6=8F=90=E7=A4=BA=E8=AF=8D=E4=B8=AD=E7=9A=84=E9=A2=9D=E5=A4=96?= =?UTF-8?q?=E5=AD=97=E7=AC=A6?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- drawing/sdwebui.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drawing/sdwebui.py b/drawing/sdwebui.py index 6ed7fc6c..e07107db 100644 --- a/drawing/sdwebui.py +++ b/drawing/sdwebui.py @@ -58,6 +58,10 @@ async def text_to_img(self, prompt): return [Image(base64=i) for i in r.get('images', [])] async def img_to_img(self, init_images: List[Image], prompt=''): + # 需要调用get_bytes方法,才能获取到base64字段内容 + for x in init_images: await x.get_bytes() + # 消息链显示字符串中有“[图片]”字样,需要过滤 + prompt = prompt.replace("[图片]", "") payload = { 'init_images': [x.base64 for x in init_images], 'enable_hr': 'false', From 4862152ec5dd26ad94ee4865a3d9b427c523de88 Mon Sep 17 00:00:00 2001 From: Milin <417156994@qq.com> Date: Tue, 23 May 2023 10:20:20 +0800 Subject: [PATCH 05/68] Add config to fix conversation error. Reset conversation after timeout. --- config.py | 3 +++ conversation.py | 15 +++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/config.py b/config.py index 5c24b78c..7d7dd464 100644 --- a/config.py +++ b/config.py @@ -434,6 +434,9 @@ class System(BaseModel): accept_friend_request: bool = False """自动接收好友请求""" + auto_reset_timeout_seconds: int = 8 * 3600 + """会话闲置多长时间后会重置, -1 不重置""" + class BaiduCloud(BaseModel): check: bool = False diff --git a/conversation.py b/conversation.py index c2293044..f412268b 100644 --- a/conversation.py +++ b/conversation.py @@ -1,5 +1,6 @@ import asyncio import contextlib +import time from datetime import datetime from typing import List, Dict, Optional @@ -73,6 +74,8 @@ def __init__(self, _type: str, session_id: str): self.last_resp = '' + self.last_resp_time = -1 + self.switch_renderer() if config.text_to_speech.always: @@ -144,10 +147,12 @@ def switch_renderer(self, mode: Optional[str] = None): async def reset(self): await self.adapter.on_reset() self.last_resp = '' + self.last_resp_time = -1 yield config.response.reset @retry((httpx.ConnectError, httpx.ConnectTimeout, TimeoutError)) async def ask(self, prompt: str, chain: MessageChain = None, name: str = None): + await self.check_and_reset() # 检查是否为 画图指令 for prefix in config.trigger.prefix_image: if prompt.startswith(prefix) and not isinstance(self.adapter, YiyanAdapter): @@ -190,6 +195,7 @@ async def ask(self, prompt: str, chain: MessageChain = None, name: str = None): else: yield await self.renderer.render(item) self.last_resp = item or '' + self.last_resp_time = int(time.time()) yield await self.renderer.result() async def rollback(self): @@ -235,6 +241,15 @@ def delete_message(self, respond_msg): # TODO: adapt to all platforms pass + async def check_and_reset(self): + timeout_seconds = config.system.auto_reset_timeout_seconds + current_time = time.time() + if timeout_seconds == -1 or self.last_resp_time == -1 or current_time - self.last_resp_time < timeout_seconds: + return + logger.debug(f"Reset conversation({self.session_id}) after {current_time - self.last_resp_time} seconds.") + async for _resp in self.reset(): + logger.debug(_resp) + class ConversationHandler: """ From d43663575f0691eb72b05a6e28a26f13af9b35ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 02:10:38 +0000 Subject: [PATCH 06/68] chore(deps): update openai requirement from ~=0.27.4 to ~=0.27.7 Updates the requirements on [openai](https://github.com/openai/openai-python) to permit the latest version. 
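For context on PATCH 05/68 above: it records the timestamp of the last response and reinitializes an idle session once `system.auto_reset_timeout_seconds` has elapsed (default 8 hours, `-1` disables the feature). Below is a minimal, standard-library-only sketch of that idle check; the class and method names are illustrative rather than the project's own.

```python
import time

AUTO_RESET_TIMEOUT_SECONDS = 8 * 3600  # default from config.py; -1 disables the reset


class IdleTracker:
    """Tracks when a conversation last produced a response."""

    def __init__(self) -> None:
        self.last_resp_time = -1  # -1 means no response has been recorded yet

    def mark_response(self) -> None:
        # Mirrors `self.last_resp_time = int(time.time())` after a successful reply.
        self.last_resp_time = int(time.time())

    def should_reset(self, timeout_seconds: int = AUTO_RESET_TIMEOUT_SECONDS) -> bool:
        # Never reset when the feature is disabled or nothing has been answered yet.
        if timeout_seconds == -1 or self.last_resp_time == -1:
            return False
        return time.time() - self.last_resp_time >= timeout_seconds


if __name__ == "__main__":
    tracker = IdleTracker()
    tracker.mark_response()
    print(tracker.should_reset(timeout_seconds=1))  # False right after a reply
```

In the patch itself the equivalent check runs at the top of `Conversation.ask`, so a stale session is reset transparently before the next prompt is processed.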
- [Release notes](https://github.com/openai/openai-python/releases) - [Commits](https://github.com/openai/openai-python/compare/v0.27.4...v0.27.7) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3794aa23..3d736ed7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ python-markdown-math~=0.8 pygments~=2.15.1 imgkit~=1.2.3 qrcode~=7.4.2 -openai~=0.27.4 +openai~=0.27.7 EdgeGPT==0.3.8.1 aiohttp~=3.8.4 OpenAIAuth~=0.3.6 From fd5feb2218a2fdd7a6cdce17393575e35b9d2409 Mon Sep 17 00:00:00 2001 From: Bitwise <134407644+B17w153@users.noreply.github.com> Date: Fri, 26 May 2023 00:54:53 +0800 Subject: [PATCH 07/68] =?UTF-8?q?doc:=20=E6=8F=90=E4=BE=9B=E4=B8=80?= =?UTF-8?q?=E7=A7=8D=E6=96=B0=E7=9A=84=E9=83=A8=E7=BD=B2=E6=80=9D=E8=B7=AF?= =?UTF-8?q?=EF=BC=8C=E6=96=B9=E5=BC=8F=E5=92=8C=E4=B8=80=E9=94=AE=E9=83=A8?= =?UTF-8?q?=E7=BD=B2=E8=84=9A=E6=9C=AC=E3=80=82=20(#878)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/README.md b/README.md index db689e11..1741e94c 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,21 @@ 如果你是手机党,可以看这个纯用手机的部署教程(使用 Linux 服务器):https://www.bilibili.com/video/av949514538 + +
+ AidLux: 仅使用旧安卓手机进行部署 +执行下面这行命令启动自动安装脚本。 + +```bash +bash -c "$(wget -O- https://gist.githubusercontent.com/B17w153/f77c2726c4eca4e05b488f9af58823a5/raw/4410356eba091d3259c48506fb68112e68db729b/install_bot_aidlux.sh)" +``` +[部署教程](https://github.com/lss233/chatgpt-for-bot-docs/tree/main/bu-shu-jiao-cheng/kuai-su-bu-shu-jiao-cheng/linux-yi-jian-bu-shu-jiao-cheng.md) + + +
+ + +
Linux: 通过快速部署脚本部署 (新人推荐) 执行下面这行命令启动自动部署脚本。 From 6008bf9688d2e318c692bddaf6cf6808e3ceb96c Mon Sep 17 00:00:00 2001 From: Cloxl <88774611+Cloxl@users.noreply.github.com> Date: Tue, 30 May 2023 18:17:45 +0800 Subject: [PATCH 08/68] fix poe gpt 4 --- adapter/quora/poe.py | 1 + constants.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/adapter/quora/poe.py b/adapter/quora/poe.py index 8f0344aa..04b6659d 100644 --- a/adapter/quora/poe.py +++ b/adapter/quora/poe.py @@ -32,6 +32,7 @@ def parse(bot_name: str): if str(bot.name).lower() == tmp_name or str(bot.value).lower() == tmp_name or f"poe-{str(bot.name).lower()}" == tmp_name + or f"poe-{str(bot.value).lower()}" == tmp_name ), None, ) diff --git a/constants.py b/constants.py index 9d6f6b2c..d1c87898 100644 --- a/constants.py +++ b/constants.py @@ -12,7 +12,7 @@ class LlmName(Enum): SlackClaude = "slack-claude" PoeSage = "poe-sage" - PoeGPT4 = "poe-gpt4" + PoeGPT4 = "poe-beaver" PoeClaude2 = "poe-claude2" PoeClaude = "poe-claude" PoeClaude100k = "poe-claude100k" From 6a854a264f28b451230d5c6c6b3d334f60fcbd80 Mon Sep 17 00:00:00 2001 From: Cloxl <88774611+Cloxl@users.noreply.github.com> Date: Tue, 30 May 2023 18:32:37 +0800 Subject: [PATCH 09/68] =?UTF-8?q?poe:beaver=E2=86=92gpt4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/constants.py b/constants.py index d1c87898..9d6f6b2c 100644 --- a/constants.py +++ b/constants.py @@ -12,7 +12,7 @@ class LlmName(Enum): SlackClaude = "slack-claude" PoeSage = "poe-sage" - PoeGPT4 = "poe-beaver" + PoeGPT4 = "poe-gpt4" PoeClaude2 = "poe-claude2" PoeClaude = "poe-claude" PoeClaude100k = "poe-claude100k" From abbf4513054a139afe56ef2ed0b66eece8911424 Mon Sep 17 00:00:00 2001 From: Dark Litss <8984680+lss233@users.noreply.github.com> Date: Wed, 31 May 2023 18:22:32 +0800 Subject: [PATCH 10/68] feat-upgrade-poe-api --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3d736ed7..9fd4dbbb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ tls-client python-dateutil~=2.8.2 discord.py azure-cognitiveservices-speech -poe-api~=0.3.1 +poe-api~=0.4.3 regex~=2023.3.23 httpx From 2fea9e302251064d3d7f8439676067216219bb91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 02:04:20 +0000 Subject: [PATCH 11/68] chore(deps): update bingimagecreator requirement from ~=0.1.5 to ~=0.4.1 Updates the requirements on [bingimagecreator](https://github.com/acheong08/BingImageCreator) to permit the latest version. - [Release notes](https://github.com/acheong08/BingImageCreator/releases) - [Commits](https://github.com/acheong08/BingImageCreator/compare/0.1.5...0.4.1) --- updated-dependencies: - dependency-name: bingimagecreator dependency-type: direct:production ... 
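The extra branch added in PATCH 08/68 lets a Poe bot be resolved by its codename with the `poe-` prefix (e.g. `poe-beaver`), in addition to the enum name and the bare codename. The sketch below reproduces that matching logic with a reduced enum; only a few of the models are included for brevity.

```python
from enum import Enum
from typing import Optional


class PoeBot(Enum):
    # A few of the codename pairs from adapter/quora/poe.py
    GPT4 = "beaver"
    Claude100k = "a2_100k"
    ChatGPT = "chinchilla"

    @staticmethod
    def parse(bot_name: str) -> Optional["PoeBot"]:
        tmp_name = bot_name.lower()
        return next(
            (
                bot
                for bot in PoeBot
                if str(bot.name).lower() == tmp_name
                or str(bot.value).lower() == tmp_name
                or f"poe-{str(bot.name).lower()}" == tmp_name
                or f"poe-{str(bot.value).lower()}" == tmp_name  # the branch PATCH 08 adds
            ),
            None,
        )


print(PoeBot.parse("poe-gpt4"))    # resolved via the enum name
print(PoeBot.parse("poe-beaver"))  # resolved via the codename, thanks to the new branch
```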
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9fd4dbbb..fae7bb62 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,7 +19,7 @@ EdgeGPT==0.3.8.1 aiohttp~=3.8.4 OpenAIAuth~=0.3.6 urllib3~=1.26.15 -BingImageCreator~=0.1.5 +BingImageCreator~=0.4.1 requests~=2.28.2 uuid~=1.30 From 1eff396e1f23791dba888b91ba703d9ba3d9a4fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 02:07:27 +0000 Subject: [PATCH 12/68] chore(deps): update requests requirement from ~=2.28.2 to ~=2.31.0 Updates the requirements on [requests](https://github.com/psf/requests) to permit the latest version. - [Release notes](https://github.com/psf/requests/releases) - [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md) - [Commits](https://github.com/psf/requests/compare/v2.28.2...v2.31.0) --- updated-dependencies: - dependency-name: requests dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fae7bb62..d9d14cd9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ OpenAIAuth~=0.3.6 urllib3~=1.26.15 BingImageCreator~=0.4.1 -requests~=2.28.2 +requests~=2.31.0 uuid~=1.30 python-telegram-bot==20.3 From 0c1ffe94a62604f4bc30e14947f3931769ff4534 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 02:04:08 +0000 Subject: [PATCH 13/68] chore(deps): bump edgegpt from 0.3.8.1 to 0.8.1 Bumps [edgegpt](https://github.com/acheong08/EdgeGPT) from 0.3.8.1 to 0.8.1. - [Release notes](https://github.com/acheong08/EdgeGPT/releases) - [Commits](https://github.com/acheong08/EdgeGPT/compare/0.3.8.1...0.8.1) --- updated-dependencies: - dependency-name: edgegpt dependency-type: direct:production update-type: version-update:semver-minor ... 
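The dependabot commits in this series widen `~=` (compatible-release) pins, and the pin text has to change because a compatible-release specifier only admits patch releases of the pinned minor version: `requests~=2.28.2` does not cover `2.31.0`. A quick way to verify that, assuming the third-party `packaging` library is available:

```python
from packaging.specifiers import SpecifierSet

old_pin = SpecifierSet("~=2.28.2")  # equivalent to >=2.28.2, ==2.28.*
new_pin = SpecifierSet("~=2.31.0")  # equivalent to >=2.31.0, ==2.31.*

print(old_pin.contains("2.28.5"))   # True  - patch releases stay inside the pin
print(old_pin.contains("2.31.0"))   # False - hence the requirement rewrite
print(new_pin.contains("2.31.0"))   # True
```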
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d9d14cd9..6e0aec70 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ pygments~=2.15.1 imgkit~=1.2.3 qrcode~=7.4.2 openai~=0.27.7 -EdgeGPT==0.3.8.1 +EdgeGPT==0.8.1 aiohttp~=3.8.4 OpenAIAuth~=0.3.6 urllib3~=1.26.15 From e0c523355e032f6636e3631432512ceea18a0985 Mon Sep 17 00:00:00 2001 From: Elijah Tan <60286615+Elijah-0616@users.noreply.github.com> Date: Sat, 10 Jun 2023 12:10:04 +0800 Subject: [PATCH 14/68] upgrade-poe-api --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6e0aec70..91d10fc2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ tls-client python-dateutil~=2.8.2 discord.py azure-cognitiveservices-speech -poe-api~=0.4.3 +poe-api~=0.4.6 regex~=2023.3.23 httpx From b30390067033474d24d0a50995f4cf40e0010bec Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sat, 10 Jun 2023 19:50:39 +0800 Subject: [PATCH 15/68] fix: bing drawing --- adapter/ms/bing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/adapter/ms/bing.py b/adapter/ms/bing.py index 1c322110..0c20882e 100644 --- a/adapter/ms/bing.py +++ b/adapter/ms/bing.py @@ -129,8 +129,8 @@ async def text_to_img(self, prompt: str): logger.debug(f"[Bing Image] Prompt: {prompt}") try: async with ImageGenAsync( - next((cookie['value'] for cookie in self.bot.cookies if cookie['name'] == '_U'), None), - False + all_cookies=self.bot.chat_hub.cookies, + quiet=True ) as image_generator: images = await image_generator.get_images(prompt) From cfcb8c37e6aaa24fb9ac4cd4b94d2740dd67c125 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sat, 10 Jun 2023 21:58:51 +0800 Subject: [PATCH 16/68] feat: password login is back --- manager/bot.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/manager/bot.py b/manager/bot.py index 97ead534..b57740dc 100644 --- a/manager/bot.py +++ b/manager/bot.py @@ -5,7 +5,7 @@ from typing import List, Dict from urllib.parse import urlparse -import OpenAIAuth +import httpx import openai import regex import requests @@ -312,8 +312,8 @@ async def login_openai(self): # sourcery skip: raise-specific-error bot.account = account logger.success("登录成功!", i=i + 1) counter = counter + 1 - except OpenAIAuth.Error as e: - logger.error("登录失败! 请检查 IP 、代理或者账号密码是否正确{exc}", exc=e) + except httpx.HTTPStatusError as e: + logger.error("登录失败! 可能是账号密码错误,或者 Endpoint 不支持 该登录方式。{exc}", exc=e) except (ConnectTimeout, RequestException, SSLError, urllib3.exceptions.MaxRetryError, ClientConnectorError) as e: logger.error("登录失败! 
连接 OpenAI 服务器失败,请更换代理节点重试!{exc}", exc=e) except APIKeyNoFundsError: @@ -431,18 +431,26 @@ def get_access_token(): if cached_account.get('password'): logger.info("尝试使用 email + password 登录中...") - logger.warning("警告:该方法已不推荐使用,建议使用 access_token 登录。") config.pop('access_token', None) config.pop('session_token', None) config['email'] = cached_account.get('email') config['password'] = cached_account.get('password') - bot = V1Chatbot(config=config) - self.__save_login_cache(account=account, cache={ - "session_token": bot.config.get('session_token'), - "access_token": get_access_token() - }) - if await __V1_check_auth(): - return ChatGPTBrowserChatbot(bot, account.mode) + async with httpx.AsyncClient(proxies=config.get('proxy', None)) as client: + resp = await client.post( + url=f"{V1.BASE_URL}login", + json={ + "username": config['email'], + "password": config['password'], + }, + ) + resp.raise_for_status() + config['access_token'] = resp.json().get('accessToken') + self.__save_login_cache(account=account, cache={ + "access_token": config['access_token'] + }) + bot = V1Chatbot(config=config) + if await __V1_check_auth(): + return ChatGPTBrowserChatbot(bot, account.mode) # Invalidate cache self.__save_login_cache(account=account, cache={}) raise Exception("All login method failed") From 3fa9fbbe707aa096da7877d71f6fa41e74c95cfa Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sat, 10 Jun 2023 22:11:07 +0800 Subject: [PATCH 17/68] fix: remaining time calcuation --- adapter/chatgpt/web.py | 14 ++++++++++---- chatbot/chatgpt.py | 2 ++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/adapter/chatgpt/web.py b/adapter/chatgpt/web.py index 1d7c3603..fc50b9b9 100644 --- a/adapter/chatgpt/web.py +++ b/adapter/chatgpt/web.py @@ -1,6 +1,7 @@ import datetime from typing import Generator, Union +import revChatGPT from loguru import logger from adapter.botservice import BotAdapter @@ -40,6 +41,9 @@ def __init__(self, session_id: str = "unknown"): if self.bot.account.paid: self.supported_models.append('text-davinci-002-render-paid') self.supported_models.append('gpt-4') + self.supported_models.append('gpt-4-mobile') + self.supported_models.append('gpt-4-browsing') + self.supported_models.append('gpt-4-plugins') async def switch_model(self, model_name): if ( @@ -92,20 +96,22 @@ async def ask(self, prompt: str) -> Generator[str, None, None]: # 确保是当前的会话,才更新 parent_id if self.conversation_id == resp["conversation_id"]: self.parent_id = resp["parent_id"] + logger.debug("resp = " + str(resp)) yield resp["message"] if last_response: logger.debug(f"[ChatGPT-Web] {last_response['conversation_id']} - {last_response['message']}") except AttributeError as e: if str(e).startswith("'str' object has no attribute 'get'"): yield "出现故障,请发送”{reset}“重新开始!".format(reset=config.trigger.reset_command) - except V1Error as e: - if e.code == 2: + except revChatGPT.typings.Error as e: + if e.code == 429: current_time = datetime.datetime.now() self.bot.refresh_accessed_at() logger.debug(f"[ChatGPT-Web] accessed at: {str(self.bot.accessed_at)}") first_accessed_at = self.bot.accessed_at[0] if len(self.bot.accessed_at) > 0 \ - else current_time - datetime.timedelta(hours=1) - remaining = divmod(current_time - first_accessed_at, datetime.timedelta(seconds=60)) + else current_time + next_available_time = first_accessed_at + datetime.timedelta(hours=1) + remaining = divmod(next_available_time - current_time, datetime.timedelta(seconds=60)) minute = remaining[0] second = remaining[1].seconds raise 
BotRatelimitException(f"{minute}分{second}秒") from e diff --git a/chatbot/chatgpt.py b/chatbot/chatgpt.py index b26bafad..85572688 100644 --- a/chatbot/chatgpt.py +++ b/chatbot/chatgpt.py @@ -44,6 +44,8 @@ def refresh_accessed_at(self): current_time = datetime.datetime.now() while len(self.accessed_at) > 0 and current_time - self.accessed_at[0] > datetime.timedelta(hours=1): self.accessed_at.pop(0) + if len(self.accessed_at) == 0: + self.accessed_at.append(current_time) async def delete_conversation(self, conversation_id): await self.bot.delete_conversation(conversation_id) From 471491ff5fbc9ce5f247310884889fa67e638539 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sat, 10 Jun 2023 22:12:26 +0800 Subject: [PATCH 18/68] chore(deps): bump revChatGPT to 6.1.4 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 91d10fc2..a19c264b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ graia-ariadne==0.11.5 graiax-silkcoder -revChatGPT==4.2.3 +revChatGPT==6.1.4 toml~=0.10.2 Pillow>=9.3.0 tinydb~=4.7.1 From 157249a7904fc357f8b8d1260428a882a6f2f6f6 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sat, 10 Jun 2023 22:37:30 +0800 Subject: [PATCH 19/68] fix: yiyan is working again --- adapter/baidu/yiyan.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/adapter/baidu/yiyan.py b/adapter/baidu/yiyan.py index b947031e..9ce6157e 100644 --- a/adapter/baidu/yiyan.py +++ b/adapter/baidu/yiyan.py @@ -81,6 +81,7 @@ async def new_conversation(self, prompt: str): json={ "sessionName": prompt, "timestamp": get_ts(), + "plugins": [], "deviceType": "pc" } ) @@ -118,6 +119,9 @@ async def ask(self, prompt) -> Generator[str, None, None]: "deviceType": "pc", "code": 0, "msg": "", + "plugins": [], + "pluginInfo": "", + "jt": "", "sign": self.client.headers['Acs-Token'] } ) @@ -184,7 +188,7 @@ def __check_response(self, resp): async def get_sign(self): # 目前只需要这一个参数来计算 Acs-Token self.acs_client.headers['Cookie'] = f"BAIDUID={self.account.BAIDUID};" - req = await self.acs_client.get("https://chatgpt-proxy.lss233.com/yiyan-api/acs") + req = await self.acs_client.get("https://chatgpt-proxy.lss233.com/yiyan-api/acs", timeout=30) return req.json()['acs'] async def __download_image(self, url: str) -> bytes: From cccf0045cf2aa3617696d5f7e76633f0c06400e2 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sat, 10 Jun 2023 23:54:26 +0800 Subject: [PATCH 20/68] feat: support xinghuo --- adapter/xunfei/xinghuo.py | 117 ++++++++++++++++++++++++++++++++++++++ config.py | 18 +++++- constants.py | 1 + conversation.py | 3 + manager/bot.py | 30 +++++++++- 5 files changed, 167 insertions(+), 2 deletions(-) create mode 100644 adapter/xunfei/xinghuo.py diff --git a/adapter/xunfei/xinghuo.py b/adapter/xunfei/xinghuo.py new file mode 100644 index 00000000..14d09709 --- /dev/null +++ b/adapter/xunfei/xinghuo.py @@ -0,0 +1,117 @@ +from io import BytesIO + +from typing import Generator + +from adapter.botservice import BotAdapter +from config import XinghuoCookiePath +from constants import botManager +from exceptions import BotOperationNotSupportedException +from loguru import logger +import httpx +import base64 +from PIL import Image + + +class XinghuoAdapter(BotAdapter): + """ + Credit: https://github.com/dfvips/xunfeixinghuo + """ + account: XinghuoCookiePath + client: httpx.AsyncClient + + def __init__(self, session_id: 
str = ""): + super().__init__(session_id) + self.session_id = session_id + self.account = botManager.pick('xinghuo-cookie') + self.client = httpx.AsyncClient(proxies=self.account.proxy) + self.__setup_headers(self.client) + self.conversation_id = None + self.parent_chat_id = '' + + async def delete_conversation(self, session_id): + return await self.client.post("https://xinghuo.xfyun.cn/iflygpt/u/chat-list/v1/del-chat-list", json={ + 'chatListId': session_id + }) + + async def rollback(self): + raise BotOperationNotSupportedException() + + async def on_reset(self): + await self.client.aclose() + self.client = httpx.AsyncClient(proxies=self.account.proxy) + self.__setup_headers(self.client) + self.conversation_id = None + self.parent_chat_id = 0 + + def __setup_headers(self, client): + client.headers['Cookie'] = f"ssoSessionId={self.account.ssoSessionId};" + client.headers[ + 'User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36' + client.headers['Sec-Fetch-User'] = '?1' + client.headers['Sec-Fetch-Mode'] = 'navigate' + client.headers['Sec-Fetch-Site'] = 'none' + client.headers['Sec-Ch-Ua-Platform'] = '"Windows"' + client.headers['Sec-Ch-Ua-Mobile'] = '?0' + client.headers['Sec-Ch-Ua'] = '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"' + client.headers['Origin'] = 'https://xinghuo.xfyun.cn' + client.headers['Referer'] = 'https://xinghuo.xfyun.cn/desk' + client.headers['Connection'] = 'keep-alive' + client.headers['X-Requested-With'] = 'XMLHttpRequest' + + async def new_conversation(self): + req = await self.client.post( + url="https://xinghuo.xfyun.cn/iflygpt/u/chat-list/v1/create-chat-list", + json={} + ) + req.raise_for_status() + self.__check_response(req.json()) + self.conversation_id = req.json()['data']['id'] + self.parent_chat_id = 0 + + async def ask(self, prompt) -> Generator[str, None, None]: + if not self.conversation_id: + await self.new_conversation() + + full_response = '' + async with self.client.stream( + "POST", + url="https://xinghuo.xfyun.cn/iflygpt/u/chat_message/chat", + data={ + 'fd': self.account.fd, + 'chatId': self.conversation_id, + 'text': prompt, + 'GtToken': self.account.GtToken, + 'clientType': '1' + }, + ) as req: + async for line in req.aiter_lines(): + if not line: + continue + if line == 'data:': + break + encoded_data = line[len("data:"):] + missing_padding = len(encoded_data) % 4 + if missing_padding != 0: + encoded_data += '=' * (4 - missing_padding) + decoded_data = base64.b64decode(encoded_data).decode('utf-8') + if encoded_data != 'zw': + decoded_data = decoded_data.replace('\n\n', '\n') + full_response += decoded_data + yield full_response + + logger.debug(f"[Xinghuo] {self.conversation_id} - {full_response}") + + async def preset_ask(self, role: str, text: str): + if role.endswith('bot') or role in {'assistant', 'xinghuo'}: + logger.debug(f"[预设] 响应:{text}") + yield text + else: + logger.debug(f"[预设] 发送:{text}") + item = None + async for item in self.ask(text): ... 
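The streaming loop in the new `XinghuoAdapter.ask` above receives `data:` lines whose payload is base64 text that may arrive without padding, so the adapter restores the padding before decoding and concatenating the chunks. A self-contained sketch of just that decoding step follows; the sample chunks are fabricated for illustration, and the `'zw'` sentinel and double-newline cleanup from the diff are omitted.

```python
import base64


def decode_chunk(line: str) -> str:
    """Strip the `data:` prefix, restore base64 padding, and decode to UTF-8."""
    encoded = line[len("data:"):]
    missing_padding = len(encoded) % 4
    if missing_padding:
        encoded += "=" * (4 - missing_padding)
    return base64.b64decode(encoded).decode("utf-8")


# Hypothetical chunks standing in for a streamed response.
chunks = [
    "data:" + base64.b64encode("你好,".encode()).decode().rstrip("="),
    "data:" + base64.b64encode("这是一段流式回复".encode()).decode().rstrip("="),
]

full_response = "".join(decode_chunk(chunk) for chunk in chunks)
print(full_response)
```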
+ if item: + logger.debug(f"[预设] Chatbot 回应:{item}") + + def __check_response(self, resp): + if int(resp['code']) != 0: + raise Exception(resp['msg']) \ No newline at end of file diff --git a/config.py b/config.py index 7d7dd464..d6aac5fc 100644 --- a/config.py +++ b/config.py @@ -56,6 +56,7 @@ class HttpService(BaseModel): debug: bool = False """是否开启debug,错误时展示日志""" + class WecomBot(BaseModel): host: str = "0.0.0.0" """企业微信回调地址,需要能够被公网访问,0.0.0.0则不限制访问地址""" @@ -73,7 +74,7 @@ class WecomBot(BaseModel): """企业微信应用 API 令牌 的 Token""" encoding_aes_key: str """企业微信应用 API 令牌 的 EncodingAESKey""" - + class OpenAIGPT3Params(BaseModel): temperature: float = 0.5 @@ -219,11 +220,25 @@ class YiyanCookiePath(BaseModel): """可选的代理地址,留空则检测系统代理""" +class XinghuoCookiePath(BaseModel): + ssoSessionId: str + """星火 Cookie 中的 ssoSessionId 字段""" + fd: Optional[str] = "" + """星火请求中的 fd 字段""" + GtToken: Optional[str] = "" + """星火请求中的 GtToken 字段""" + proxy: Optional[str] = None + """可选的代理地址,留空则检测系统代理""" + class YiyanAuths(BaseModel): accounts: List[YiyanCookiePath] = [] """文心一言的账号列表""" +class XinghuoAuths(BaseModel): + accounts: List[XinghuoCookiePath] = [] + """讯飞星火大模型的账号列表""" + class ChatGLMAPI(BaseModel): api_endpoint: str """自定义 ChatGLM API 的接入点""" @@ -520,6 +535,7 @@ class Config(BaseModel): chatglm: ChatGLMAuths = ChatGLMAuths() poe: PoeAuths = PoeAuths() slack: SlackAuths = SlackAuths() + xinghuo: XinghuoAuths = XinghuoAuths() # === Response Settings === text_to_image: TextToImage = TextToImage() diff --git a/constants.py b/constants.py index 9d6f6b2c..868b2e84 100644 --- a/constants.py +++ b/constants.py @@ -28,6 +28,7 @@ class LlmName(Enum): Bard = "bard" YiYan = "yiyan" ChatGLM = "chatglm-api" + XunfeiXinghuo = "xinghuo" class BotPlatform(Enum): diff --git a/conversation.py b/conversation.py index f412268b..515c5508 100644 --- a/conversation.py +++ b/conversation.py @@ -18,6 +18,7 @@ from adapter.claude.slack import ClaudeInSlackAdapter from adapter.google.bard import BardAdapter from adapter.ms.bing import BingAdapter +from adapter.xunfei.xinghuo import XinghuoAdapter from drawing import DrawingAPI, SDWebUI as SDDrawing, OpenAI as OpenAIDrawing from adapter.quora.poe import PoeBot, PoeAdapter from adapter.thudm.chatglm_6b import ChatGLM6BAdapter @@ -107,6 +108,8 @@ def __init__(self, _type: str, session_id: str): self.adapter = ChatGLM6BAdapter(self.session_id) elif _type == LlmName.SlackClaude.value: self.adapter = ClaudeInSlackAdapter(self.session_id) + elif _type == LlmName.XunfeiXinghuo.value: + self.adapter = XinghuoAdapter(self.session_id) else: raise BotTypeNotFoundException(_type) self.type = _type diff --git a/manager/bot.py b/manager/bot.py index b57740dc..433b75bd 100644 --- a/manager/bot.py +++ b/manager/bot.py @@ -23,7 +23,7 @@ import utils.network as network from chatbot.chatgpt import ChatGPTBrowserChatbot from config import OpenAIAuthBase, OpenAIAPIKey, Config, BingCookiePath, BardCookiePath, YiyanCookiePath, ChatGLMAPI, \ - PoeCookieAuth, SlackAuths, SlackAppAccessToken + PoeCookieAuth, SlackAppAccessToken, XinghuoCookiePath from exceptions import NoAvailableBotException, APIKeyNoFundsError @@ -37,6 +37,7 @@ class BotManager: "bing-cookie": [], "bard-cookie": [], "yiyan-cookie": [], + "xinghuo-cookie": [], "slack-accesstoken": [], } """Bot list""" @@ -62,6 +63,9 @@ class BotManager: slack: List[SlackAppAccessToken] """Slack Account Infos""" + xinghuo: List[XinghuoCookiePath] + """Xinghuo Account Infos""" + roundrobin: Dict[str, itertools.cycle] = {} def __init__(self, config: Config) -> 
None: @@ -73,6 +77,7 @@ def __init__(self, config: Config) -> None: self.yiyan = config.yiyan.accounts if config.yiyan else [] self.chatglm = config.chatglm.accounts if config.chatglm else [] self.slack = config.slack.accounts if config.slack else [] + self.xinghuo = config.xinghuo.accounts if config.xinghuo else [] try: os.mkdir('data') logger.warning( @@ -89,6 +94,7 @@ async def login(self): "bing-cookie": [], "bard-cookie": [], "yiyan-cookie": [], + "xinghuo-cookie": [], "chatglm-api": [], "slack-accesstoken": [], } @@ -101,6 +107,8 @@ async def login(self): self.login_bard() if len(self.slack) > 0: self.login_slack() + if len(self.xinghuo) > 0: + self.login_xinghuo() if len(self.openai) > 0: # 考虑到有人会写错全局配置 for account in self.config.openai.accounts: @@ -164,6 +172,8 @@ async def login(self): self.config.response.default_ai = 'yiyan' elif len(self.bots['chatglm-api']) > 0: self.config.response.default_ai = 'chatglm-api' + elif len(self.bots['xinghuo-cookie']) > 0: + self.config.response.default_ai = 'xinghuo' elif len(self.bots['slack-accesstoken']) > 0: self.config.response.default_ai = 'slack-claude' else: @@ -237,6 +247,22 @@ def login_slack(self): logger.error("所有 Claude (Slack) 账号均解析失败!") logger.success(f"成功解析 {len(self.bots['slack-accesstoken'])}/{len(self.slack)} 个 Claude (Slack) 账号!") + def login_xinghuo(self): + try: + for i, account in enumerate(self.xinghuo): + logger.info("正在解析第 {i} 个 讯飞星火 账号", i=i + 1) + if proxy := self.__check_proxy(account.proxy): + account.proxy = proxy + self.bots["xinghuo-cookie"].append(account) + logger.success("解析成功!", i=i + 1) + except Exception as e: + logger.error("解析失败:") + logger.exception(e) + if len(self.bots["xinghuo-cookie"]) < 1: + logger.error("所有 讯飞星火 账号均解析失败!") + logger.success(f"成功解析 {len(self.bots['xinghuo-cookie'])}/{len(self.xinghuo)} 个 讯飞星火 账号!") + + def login_poe(self): from adapter.quora.poe import PoeClientWrapper try: @@ -501,4 +527,6 @@ def bots_info(self): bot_info += f"* {LlmName.PoeNeevaAI.value} : POE NeevaAI 模型\n" if len(self.bots['slack-accesstoken']) > 0: bot_info += f"* {LlmName.SlackClaude.value} : Slack Claude 模型\n" + if len(self.bots['xinghuo-cookie']) > 0: + bot_info += f"* {LlmName.XunfeiXinghuo.value} : 星火大模型\n" return bot_info From 9205c52389d578039532cbbf11564b5fdb49734e Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sat, 10 Jun 2023 23:54:50 +0800 Subject: [PATCH 21/68] feat: allow bing to use preset --- adapter/ms/bing.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/adapter/ms/bing.py b/adapter/ms/bing.py index 0c20882e..5db0b9c6 100644 --- a/adapter/ms/bing.py +++ b/adapter/ms/bing.py @@ -156,4 +156,13 @@ async def __download_image(self, url) -> GraiaImage: return GraiaImage(data_bytes=await resp.read()) async def preset_ask(self, role: str, text: str): - yield None # Bing 不使用预设功能 + if role.endswith('bot') or role in {'assistant', 'bing'}: + logger.debug(f"[预设] 响应:{text}") + yield text + else: + logger.debug(f"[预设] 发送:{text}") + item = None + async for item in self.ask(text): ... 
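The manager changes above register each parsed 讯飞星火 account under the `xinghuo-cookie` key, and the adapter later fetches one with `botManager.pick('xinghuo-cookie')`. The manager keeps a `roundrobin: Dict[str, itertools.cycle]` field, which suggests accounts are handed out in rotation; the sketch below shows that pattern under that assumption, with simplified names and error handling.

```python
import itertools
from typing import Any, Dict, List


class AccountPool:
    """Hands out accounts for a given backend key in round-robin order."""

    def __init__(self) -> None:
        self.bots: Dict[str, List[Any]] = {"xinghuo-cookie": []}
        self.roundrobin: Dict[str, itertools.cycle] = {}

    def register(self, key: str, account: Any) -> None:
        self.bots.setdefault(key, []).append(account)

    def pick(self, key: str) -> Any:
        if not self.bots.get(key):
            raise RuntimeError(f"No available account for {key}")
        if key not in self.roundrobin:
            self.roundrobin[key] = itertools.cycle(self.bots[key])
        return next(self.roundrobin[key])


pool = AccountPool()
pool.register("xinghuo-cookie", {"ssoSessionId": "first"})
pool.register("xinghuo-cookie", {"ssoSessionId": "second"})
print(pool.pick("xinghuo-cookie")["ssoSessionId"])  # first
print(pool.pick("xinghuo-cookie")["ssoSessionId"])  # second
```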
+ if item: + logger.debug(f"[预设] Chatbot 回应:{item}") + From 4b537e3b1daee9d5c3802e3b1d589cad03e74faf Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sun, 11 Jun 2023 00:07:57 +0800 Subject: [PATCH 22/68] feat: check exp time before checking conversation --- manager/bot.py | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/manager/bot.py b/manager/bot.py index 433b75bd..92322a76 100644 --- a/manager/bot.py +++ b/manager/bot.py @@ -1,3 +1,4 @@ +import datetime import hashlib import itertools import os @@ -5,6 +6,9 @@ from typing import List, Dict from urllib.parse import urlparse +import base64 +import json +import time import httpx import openai import regex @@ -424,18 +428,43 @@ async def __login_V1(self, account: OpenAIAuthBase) -> ChatGPTBrowserChatbot: if cached_account.get('model'): # Ready for backward-compatibility & forward-compatibility config['model'] = cached_account.get('model') + def get_access_token(): + return bot.session.headers.get('Authorization').removeprefix('Bearer ') + # 我承认这部分代码有点蠢 async def __V1_check_auth() -> bool: try: + access_token = get_access_token() + _, payload, _ = access_token.split(".") + + # Decode the payload using base64 decoding + payload_data = base64.urlsafe_b64decode(payload + "=" * ((4 - len(payload) % 4) % 4)) + + # Parse the JSON string to get the payload as a dictionary + payload_dict = json.loads(payload_data) + + # Check the "exp" key in the payload dictionary to get the expiration time + exp_time = payload_dict["exp"] + email = payload_dict["https://api.openai.com/profile"]['email'] + + # Convert the expiration time to a Unix timestamp + exp_timestamp = int(exp_time) + + # Compare the current time (also in Unix timestamp format) to the expiration time to check if the token has expired + current_timestamp = int(time.time()) + if current_timestamp >= exp_timestamp: + logger.error(f"[ChatGPT-Web] - {email} 的 access_token 已过期") + return False + else: + remaining_seconds = exp_timestamp - current_timestamp + remaining_days = remaining_seconds // (24 * 60 * 60) + logger.info(f"[ChatGPT-Web] - {email} 的 access_token 还有 {remaining_days} 天过期") await bot.get_conversations(0, 1) return True except (V1Error, KeyError) as e: logger.error(e) return False - def get_access_token(): - return bot.session.headers.get('Authorization').removeprefix('Bearer ') - if cached_account.get('access_token'): logger.info("尝试使用 access_token 登录中...") config['access_token'] = cached_account.get('access_token') @@ -461,7 +490,7 @@ def get_access_token(): config.pop('session_token', None) config['email'] = cached_account.get('email') config['password'] = cached_account.get('password') - async with httpx.AsyncClient(proxies=config.get('proxy', None)) as client: + async with httpx.AsyncClient(proxies=config.get('proxy', None), timeout=60, trust_env=True) as client: resp = await client.post( url=f"{V1.BASE_URL}login", json={ From 81d48740feaaf6012d02ec086909b2943fbf5ad6 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sun, 11 Jun 2023 00:10:01 +0800 Subject: [PATCH 23/68] fix(xinghuo): add geeError notice --- adapter/xunfei/xinghuo.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/adapter/xunfei/xinghuo.py b/adapter/xunfei/xinghuo.py index 14d09709..4a4c0121 100644 --- a/adapter/xunfei/xinghuo.py +++ b/adapter/xunfei/xinghuo.py @@ -89,6 +89,9 @@ async def ask(self, prompt) -> Generator[str, None, None]: continue if line == 'data:': break + if line 
== 'data:[geeError]': + yield "错误:出现验证码,请到星火网页端发送一次消息再试。" + break encoded_data = line[len("data:"):] missing_padding = len(encoded_data) % 4 if missing_padding != 0: From 558f08ed3d48ee7b59dd88c78de4217efd5f7ce0 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sun, 11 Jun 2023 00:35:01 +0800 Subject: [PATCH 24/68] fix: update message of request too many --- config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.py b/config.py index d6aac5fc..18ae7f88 100644 --- a/config.py +++ b/config.py @@ -384,7 +384,7 @@ class Response(BaseModel): error_session_authenciate_failed: str = "身份验证失败!无法登录至 ChatGPT 服务器,请检查账号信息是否正确!\n{exc}" """发生网络错误时发送的消息,请注意可以插入 {exc} 作为异常占位符""" - error_request_too_many: str = "糟糕!当前收到的请求太多了,我需要一段时间冷静冷静。你可以选择“重置会话”,或者过一会儿再来找我!\n预计恢复时间:{exc}\n" + error_request_too_many: str = "糟糕!当前 ChatGPT 接入点收到的请求太多了,我需要一段时间冷静冷静。请过一会儿再来找我!\n预计恢复时间:{exc}(Code: 429)\n" error_request_concurrent_error: str = "当前有其他人正在和我进行聊天,请稍后再给我发消息吧!" From 7c51626275b39bcd5f31a047372c1be12b373488 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sun, 11 Jun 2023 00:48:29 +0800 Subject: [PATCH 25/68] chore(deps): bump OpenAIAuth to 1.0.2 --- requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index a19c264b..d4734144 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,11 +15,11 @@ pygments~=2.15.1 imgkit~=1.2.3 qrcode~=7.4.2 openai~=0.27.7 -EdgeGPT==0.8.1 +EdgeGPT==0.8.2 aiohttp~=3.8.4 -OpenAIAuth~=0.3.6 +OpenAIAuth~=1.0.2 urllib3~=1.26.15 -BingImageCreator~=0.4.1 +BingImageCreator~=0.4.2 requests~=2.31.0 uuid~=1.30 From da8a470f2ac757d56db5214d0ab17587d4e59708 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sun, 11 Jun 2023 14:24:22 +0800 Subject: [PATCH 26/68] fix: remove unused logs --- adapter/chatgpt/web.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adapter/chatgpt/web.py b/adapter/chatgpt/web.py index fc50b9b9..35a3fb86 100644 --- a/adapter/chatgpt/web.py +++ b/adapter/chatgpt/web.py @@ -96,7 +96,7 @@ async def ask(self, prompt: str) -> Generator[str, None, None]: # 确保是当前的会话,才更新 parent_id if self.conversation_id == resp["conversation_id"]: self.parent_id = resp["parent_id"] - logger.debug("resp = " + str(resp)) + yield resp["message"] if last_response: logger.debug(f"[ChatGPT-Web] {last_response['conversation_id']} - {last_response['message']}") From 5b4b6ab0ab6b5044313e04d2de363e5ae296cf89 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sun, 11 Jun 2023 14:24:39 +0800 Subject: [PATCH 27/68] fix(bing): force locale --- adapter/ms/bing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/adapter/ms/bing.py b/adapter/ms/bing.py index 5db0b9c6..073ac958 100644 --- a/adapter/ms/bing.py +++ b/adapter/ms/bing.py @@ -62,7 +62,8 @@ async def ask(self, prompt: str) -> Generator[str, None, None]: try: async for final, response in self.bot.ask_stream(prompt=prompt, conversation_style=self.conversation_style, - wss_link=config.bing.wss_link): + wss_link=config.bing.wss_link, + locale="zh-cn"): if not response: continue From b9c2b8a3ec91c7bec7b92b02bc36e74712944de1 Mon Sep 17 00:00:00 2001 From: lss233 <8984680+lss233@users.noreply.github.com> Date: Sun, 11 Jun 2023 14:24:58 +0800 Subject: [PATCH 28/68] fix(xinghuo): add default GtToken --- config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/config.py b/config.py index 18ae7f88..60369dbc 100644 --- a/config.py +++ b/config.py @@ -225,7 +225,7 @@ class XinghuoCookiePath(BaseModel): """星火 Cookie 中的 ssoSessionId 字段""" fd: Optional[str] = "" """星火请求中的 fd 字段""" - GtToken: Optional[str] = "" + GtToken: Optional[str] = "R0VFAAYyNDAzOTU0YzM5Y2M0ZTRlNDY2MTE2MDA4ZGZlYjZjMGQzNGMyMGY0YjQ1NTA1NDg3OWQ0ZWJlOTk0NzQxNGI1MWUzM2IzZDUyZTEyMGM3MWYxNjlmNWY2YmYwMWMxNDI2YzIxOTlmZjMzYTI5YmY3YjQ1M2RjZGQwZWNjMDdiYjMzMmY4OTE2OTRhYTk1OWIyZWVlNzFjNmI5ZWFmY2MxNDFkNjk2MWYzYWQ3ZDAyYjZkM2U0YTllYWZlOTM0Njc4NmMyZmQ4NTRiYWViMTI2NjhlZmFhMWRiNmRmMDc5MzQxN2EyYzMzZDhiN2M4NzJjMzQ3YTYwNDFiMGZkZjkxN2Q2OTRlOWFiZWMwN2U0ZTg3Y2UwM2UxNDlmODBjMzA0MmE4NTAyNzhiNjU0MTU3ZjBlMmMzN2UxMTQ0MjA3ZWE0MDIzZTMyNDRiMjJmMjcwYjE5NGZiMWJhMmFlNGQ4YzkxMWNmZmQ0OGQzYzBlYmQxMTk1ZjE5MDJmMTVjNWUyMDI3ZmNmMDI0ODIxYWJiMWZhNzc3MTExOTBiZmZhMWRhYmRlYzVhYTkwMGRlMjU2YjFhNGQ4ZGYwYzQ0ZjI4MGJiNzcyNGIyOTlkYjU0ZGMyYjllY2U1NjNlYjQzZWE5MzhkMmQ3NTFjMTVkMGY0NDNkYjdhNzdlMmQ4NzM1NTQ3NDI0ZDBjNzRmMTA0NzY4NmI2M2UwZWRiMDM0ZjNhODc1NGZkYjgxMDBlNDA0MmZlZDYzZmFlYmYyNTExMTI5NTIyOTg0ZDMzN2UxYTBhN2NiZWZlZGMxOTVjOWQ2MGVhOTMyY2E5M2VhYmZkODI1YjBiMzU0ZDViYzUzMmM5YzI5NjA2ZWU3MmFmNGYwNGRkNTlhNDEzYzJiZmYyODllZjBkNWJlNWU5ZjZkZWVlMjk4MDUyMTU2OTQwNzE3ZDQ5M2NlM2E4YmIwN2YyZjE4MzgzZmEwNjQxNGZlYmFlNzdmN2QwNTZlYTQ3NDEwMmNlZjU1YmZhNjNjMDM2MmI5OTU2NjBkZjg4YzFjYzA2MmY0NjU2OTE0ZGIwMWE3ODQxNjA2YjdlZWE3ZDJjZTM4NjE5YTcwYjg0MmVkZTBmM2Y1MzI3ZGI2YmU5M2ZjYTNiMzg4OTJkOGQ3NWI4Y2M4YjQ3NjBkNDExZmQ3ZmFlNGIxY2YwMGE5ZDk2MmM2ZDYzMWE1YmRjNmYzMmU0Y2U5MDYwOGNiMDMzMTlkZGE2ZDlkMGU4OGUwMzUwMDkwZTQ5MGRhMmY5ODU1MGU4ZmQ1ODc3NmQ0Yjg5MDM1Y2FiNTg3MjMyMGMwOTJmOTUyODkwYmQ3YjIwYTMzODI5Y2MwY2VlZTE0MWY5N2FiN2IzYmJjNDg3MWM0M2E3ZTViYWNjZWZiZjg4MjM1ZDRiNWMzMjBjM2IxNGM2ZWE2NWVkZjc0OWI0ZDNlNzZjOWYyMTkwZDM0ZTVkYTZkNjM1NjFmZWNmMWYyODIxMTMyNjIyOGFjMWU0MTA2NjY1OWQ4Y2JlZTRmMjIwYzI2NjNmNzYxYzBhZGEyY2VkZjkyNDkzZWExNzFhN2NhZThiNTMxNDNmNzEzM2RhY2UyOWNmYjQ4ZTk5YzE2YjcyM2ZmZTJjZDk5MjU0NGM5OWNhOTFlMDRlMWNiNTQ5ZjU4MGQxY2I4YWU5MWU0MDlmZDZmYjhjNGYzYTRmODA2ZWFiZjRlMDI3OWJmOTM4NmQwN2I5MTBmYzlkYzNjMGM2ODIzYjg4OWFjNWZkZjBhYWNjYzNhYmU0MDRmMTg3Y2Q0MGNmMjcyNWFmY2VkYzAzYmVjZGY2MmMzNWRkNzQ5MGExYjQ1MDdlNTczNDI1OTliYTJhMjNmM2FmNDg1NGM3ODZkYzBiZWIzYTllMGEwYWUyMTllNmZhNzYyN2YyNTI5ZDc3YzQ3MGY1YzIxNzI1NzhhM2EwYzM3NzM0NTM4MTlhYjE3ODJiNmRmOGM1NTI2YjQzZjUzNTZlNDVhM2Q5MDc4N2IwZGNkZTdmYmYzM2ZkMWQ2NGY2NjdmOWYzNDIzZjJkMmU2NzgyMTY5ZWM3MTE1Y2E3MDdlYWRhOGJmNzI0OTJmMGM3Y2QxNjJjMDI4NmFjOThmNDhmOWEyYWQzZDAwYzg5YmViYzA3NTA4ZjYwYzE1OGVmYjk5ZjBkOGY4MzQ1ODI5Yzg4Yzc0YTA3OGQyZjU5NTFjNmQzNTc1N2QyNjI0NWVjNTk0Y2JkMzc2YmVhMGNiZmEzMWYwZTA5MGRhYzhlYzNlYjQ0ZGIxN2M4MWE5NWY4MTE4MDAwNDJkMjQ2MmMzMjk2ODU5Yjg3ZjRhZmI1MDYxM2MxY2FiYTZkZDI0ODdiZDQ3MmVmNzBjMzFkN2YwNjZmZTMxOThiYzFhOWFlZjIwZTQzY2FlNDBkMDkxZWEzMmNiYTBhNDM0YmQ2ZDU2NDQ3YTU4YTNjODZjYTk0NjQ3MGNiZjM4ZjM3ZjU2YTZkZmQ4MDY0OWEyZGU3MzllN2EyZWE3M2RlNDE5NDljNmI4ODU2YmE5ZTM4Njc2YmRhNzA1MWE5MjlmMWU1YTczZjEwYTg2ZjgwNDJjZDQxZTMwYjVjMTA1ODYzNzlhMGY3NmRlOWExODZiZmU2N2Y5NzZhOTY3MTg0ZjNkYmFhYWU0YjdmNmFlMjM5MTlkNDljNDNiODc4MzRjMjA0MzY4YThkOGEyYzRkNjc3MzhkMTU0NmFiNTVjMWE0YTQ0Y2M3MzE5OGM4Y2YzOTAxZGI0ZGY1MzFmNGY5NTI4MDE5MjZjN2I2MDg1YjQzODI0YmFiMTQ3NTIxZTYwNWQzYzhmZjljYjNmOTRlNzg3MDJiYzc1MzE4NTRhN2M3ZDE2OWQyMzcyYjUzMDBhNGQzNzhhYWNjOTk3ZDM1ZTZjODYwZGQwMWNlYTMwZjU1YTFlMjQxMTMxMTQwZjQwMWJmZGJkNWU3NzA4OWE5YzljNDIzY2E2ODk3OGE2ODMwYWEzYTlkZGJiZmMyYTE3NGZhOTc4NmI3ZTYyYmIzNTZlNjRiMzBiYzI4ZDMyYTVjMDMxYzgxZjZlOGEyMGMwNWFlNjJlYWM2ZWExNDY5OTFiZjk1Yzc4NzQzMjMwYTIyNzk1MWRlMzI4NjFjYjU5ZGQ3N2QxOWQ5MTMxNDgwYmY2ZTgyYTkwNzgwMTBlYjAzMzIzYjcxNGY0NzM5NDNmY2MwNTM3ODJmOTIwMGFkNzlmNzZiNjkxNDdmZGQwOTdhZTUwMTk1YjE4M2Q2YWM5NjVmN2NkNDNh
MGI3MTEwOTNkZTM5NGM3OTYwNjNlNTBhMDAyNzNkOTE2MzQzODY2MzFkZThkMzViYTUxNmI4MTIyZWZjNzE5MTU0OTQ2NTIyYzc0YjhmNTY2OTMwZDM3YmIwZjJkM2Q4ODgyZGQwZTU0YTcyODM1NmYyZDk2ZWVlNzZiYmZlYjI1YTFjM2ZhNTg5OGY5OTM0YTc4NTBjYzRlNjY4NjE5YWMzOTg2MmE5NDhjMDVhMTc0MzE0MjIwOGFhMjk5OGY2ZmIwMmZlZWI2YTk0M2Q1NzcyN2JhZWU4ZmY5NGFmZjgzZGVjMTUyZmYxOWVkYmM1Y2RiZDkzYzBiNDc1OTEzMjFhYTY4MjI1MDA4ODhmYWJhMzAzNjdlZmRjYmJjNzhjYzE5MWI1MDViNTlmMjBhY2RiYTYzMzQyYzE1YTI2M2NiOGE1NDQ3NzQ4ODU3YWYxMzllMDJlMzY0ODlkNjRlNTRiMTc5YTgwOGRmMWU5YTk1ODY2YzE2YTYzM2EyZmUyYjA2MzM4OTI5YTc4MmRlMGFkZDgwZDZiYWU3Y2M1ZjljMWEzYzA5MGU4MTVlNjc2MGJjMzA0ZWU3ZmY1MDM5OGRiNDc0YTJkNWMzYWVhNTMxZjc0ZDU3NGNhZGNhZTIzZmZiZjcyY2FhNmU5YTNjNjFhYzNiMDJjNDdjYzQzZGJhYjA2NTgwNTkyZmE5YjMyNGMxMGJhMGRjNjgzZWIyYzRiNDg4NzFiMjk2YmIxNDBhMWUyZWRlOTE0NmY3MThkZTE4ZWU0M2QwZTk4NWY3NWQ1YWYyYjlkNjU5ODM5YzQwZWFiMzg2" """星火请求中的 GtToken 字段""" proxy: Optional[str] = None """可选的代理地址,留空则检测系统代理""" From e8458d5c78de0c8217f500093cc906744f6c18c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 02:01:16 +0000 Subject: [PATCH 29/68] chore(deps): update aiocqhttp requirement from ~=1.4.3 to ~=1.4.4 Updates the requirements on [aiocqhttp](https://github.com/nonebot/aiocqhttp) to permit the latest version. - [Release notes](https://github.com/nonebot/aiocqhttp/releases) - [Changelog](https://github.com/nonebot/aiocqhttp/blob/master/docs/changelog.md) - [Commits](https://github.com/nonebot/aiocqhttp/compare/v1.4.3...v1.4.4) --- updated-dependencies: - dependency-name: aiocqhttp dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d4734144..5cc10645 100644 --- a/requirements.txt +++ b/requirements.txt @@ -25,7 +25,7 @@ requests~=2.31.0 uuid~=1.30 python-telegram-bot==20.3 -aiocqhttp~=1.4.3 +aiocqhttp~=1.4.4 tls-client python-dateutil~=2.8.2 discord.py From 83a6cd790762d6d0e13502d242a96658c6bfdc86 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 02:01:02 +0000 Subject: [PATCH 30/68] chore(deps): update pydantic requirement from ~=1.10.7 to ~=1.10.9 Updates the requirements on [pydantic](https://github.com/pydantic/pydantic) to permit the latest version. - [Release notes](https://github.com/pydantic/pydantic/releases) - [Changelog](https://github.com/pydantic/pydantic/blob/main/HISTORY.md) - [Commits](https://github.com/pydantic/pydantic/compare/v1.10.7...v1.10.9) --- updated-dependencies: - dependency-name: pydantic dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5cc10645..6d946f43 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ tinydb~=4.7.1 loguru~=0.7.0 asyncio~=3.4.3 -pydantic~=1.10.7 +pydantic~=1.10.9 markdown~=3.4.3 python-markdown-math~=0.8 From 863c01c7328dccdfa095b5e2078a7f1d792ebe21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 02:01:12 +0000 Subject: [PATCH 31/68] chore(deps): update regex requirement from ~=2023.3.23 to ~=2023.6.3 Updates the requirements on [regex](https://github.com/mrabarnett/mrab-regex) to permit the latest version. 
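The pydantic bump just above keeps the project on 1.10.x, and the account entries added throughout this series (such as `XinghuoCookiePath`) are plain `BaseModel`s validated from the TOML config. A reduced sketch using the v1 API that this pin implies; the field set is trimmed and the long default `GtToken` from PATCH 28/68 is replaced with an empty string for readability.

```python
from typing import Optional

from pydantic import BaseModel


class XinghuoCookiePath(BaseModel):
    ssoSessionId: str
    """星火 Cookie 中的 ssoSessionId 字段"""
    fd: Optional[str] = ""
    GtToken: Optional[str] = ""  # the real default is a long literal, shortened here
    sid: Optional[str] = ""
    proxy: Optional[str] = None


# parse_obj is the pydantic v1 entry point (model_validate in v2).
account = XinghuoCookiePath.parse_obj(
    {"ssoSessionId": "abc123", "proxy": "http://127.0.0.1:7890"}
)
print(account.GtToken == "")  # optional fields fall back to their defaults
```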
- [Changelog](https://github.com/mrabarnett/mrab-regex/blob/hg/changelog.txt) - [Commits](https://github.com/mrabarnett/mrab-regex/compare/2023.3.23...2023.6.3) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6d946f43..8312c0be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,7 +32,7 @@ discord.py azure-cognitiveservices-speech poe-api~=0.4.6 -regex~=2023.3.23 +regex~=2023.6.3 httpx Quart==0.17.0 From 217b46306f4c5f6fdc50663a55433a54cd87d169 Mon Sep 17 00:00:00 2001 From: canxin121 <69547456+canxin121@users.noreply.github.com> Date: Mon, 12 Jun 2023 18:14:36 +0800 Subject: [PATCH 32/68] update --- adapter/xunfei/xinghuo.py | 6 ++++-- config.py | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/adapter/xunfei/xinghuo.py b/adapter/xunfei/xinghuo.py index 4a4c0121..3bdf6e4a 100644 --- a/adapter/xunfei/xinghuo.py +++ b/adapter/xunfei/xinghuo.py @@ -75,13 +75,15 @@ async def ask(self, prompt) -> Generator[str, None, None]: full_response = '' async with self.client.stream( "POST", - url="https://xinghuo.xfyun.cn/iflygpt/u/chat_message/chat", + url="https://xinghuo.xfyun.cn/iflygpt-chat/u/chat_message/chat", data={ 'fd': self.account.fd, 'chatId': self.conversation_id, 'text': prompt, 'GtToken': self.account.GtToken, - 'clientType': '1' + 'sid': self.account.sid, + 'clientType': '1', + 'isBot':'0' }, ) as req: async for line in req.aiter_lines(): diff --git a/config.py b/config.py index 60369dbc..ad8743e9 100644 --- a/config.py +++ b/config.py @@ -227,6 +227,8 @@ class XinghuoCookiePath(BaseModel): """星火请求中的 fd 字段""" GtToken: Optional[str] = 
"R0VFAAYyNDAzOTU0YzM5Y2M0ZTRlNDY2MTE2MDA4ZGZlYjZjMGQzNGMyMGY0YjQ1NTA1NDg3OWQ0ZWJlOTk0NzQxNGI1MWUzM2IzZDUyZTEyMGM3MWYxNjlmNWY2YmYwMWMxNDI2YzIxOTlmZjMzYTI5YmY3YjQ1M2RjZGQwZWNjMDdiYjMzMmY4OTE2OTRhYTk1OWIyZWVlNzFjNmI5ZWFmY2MxNDFkNjk2MWYzYWQ3ZDAyYjZkM2U0YTllYWZlOTM0Njc4NmMyZmQ4NTRiYWViMTI2NjhlZmFhMWRiNmRmMDc5MzQxN2EyYzMzZDhiN2M4NzJjMzQ3YTYwNDFiMGZkZjkxN2Q2OTRlOWFiZWMwN2U0ZTg3Y2UwM2UxNDlmODBjMzA0MmE4NTAyNzhiNjU0MTU3ZjBlMmMzN2UxMTQ0MjA3ZWE0MDIzZTMyNDRiMjJmMjcwYjE5NGZiMWJhMmFlNGQ4YzkxMWNmZmQ0OGQzYzBlYmQxMTk1ZjE5MDJmMTVjNWUyMDI3ZmNmMDI0ODIxYWJiMWZhNzc3MTExOTBiZmZhMWRhYmRlYzVhYTkwMGRlMjU2YjFhNGQ4ZGYwYzQ0ZjI4MGJiNzcyNGIyOTlkYjU0ZGMyYjllY2U1NjNlYjQzZWE5MzhkMmQ3NTFjMTVkMGY0NDNkYjdhNzdlMmQ4NzM1NTQ3NDI0ZDBjNzRmMTA0NzY4NmI2M2UwZWRiMDM0ZjNhODc1NGZkYjgxMDBlNDA0MmZlZDYzZmFlYmYyNTExMTI5NTIyOTg0ZDMzN2UxYTBhN2NiZWZlZGMxOTVjOWQ2MGVhOTMyY2E5M2VhYmZkODI1YjBiMzU0ZDViYzUzMmM5YzI5NjA2ZWU3MmFmNGYwNGRkNTlhNDEzYzJiZmYyODllZjBkNWJlNWU5ZjZkZWVlMjk4MDUyMTU2OTQwNzE3ZDQ5M2NlM2E4YmIwN2YyZjE4MzgzZmEwNjQxNGZlYmFlNzdmN2QwNTZlYTQ3NDEwMmNlZjU1YmZhNjNjMDM2MmI5OTU2NjBkZjg4YzFjYzA2MmY0NjU2OTE0ZGIwMWE3ODQxNjA2YjdlZWE3ZDJjZTM4NjE5YTcwYjg0MmVkZTBmM2Y1MzI3ZGI2YmU5M2ZjYTNiMzg4OTJkOGQ3NWI4Y2M4YjQ3NjBkNDExZmQ3ZmFlNGIxY2YwMGE5ZDk2MmM2ZDYzMWE1YmRjNmYzMmU0Y2U5MDYwOGNiMDMzMTlkZGE2ZDlkMGU4OGUwMzUwMDkwZTQ5MGRhMmY5ODU1MGU4ZmQ1ODc3NmQ0Yjg5MDM1Y2FiNTg3MjMyMGMwOTJmOTUyODkwYmQ3YjIwYTMzODI5Y2MwY2VlZTE0MWY5N2FiN2IzYmJjNDg3MWM0M2E3ZTViYWNjZWZiZjg4MjM1ZDRiNWMzMjBjM2IxNGM2ZWE2NWVkZjc0OWI0ZDNlNzZjOWYyMTkwZDM0ZTVkYTZkNjM1NjFmZWNmMWYyODIxMTMyNjIyOGFjMWU0MTA2NjY1OWQ4Y2JlZTRmMjIwYzI2NjNmNzYxYzBhZGEyY2VkZjkyNDkzZWExNzFhN2NhZThiNTMxNDNmNzEzM2RhY2UyOWNmYjQ4ZTk5YzE2YjcyM2ZmZTJjZDk5MjU0NGM5OWNhOTFlMDRlMWNiNTQ5ZjU4MGQxY2I4YWU5MWU0MDlmZDZmYjhjNGYzYTRmODA2ZWFiZjRlMDI3OWJmOTM4NmQwN2I5MTBmYzlkYzNjMGM2ODIzYjg4OWFjNWZkZjBhYWNjYzNhYmU0MDRmMTg3Y2Q0MGNmMjcyNWFmY2VkYzAzYmVjZGY2MmMzNWRkNzQ5MGExYjQ1MDdlNTczNDI1OTliYTJhMjNmM2FmNDg1NGM3ODZkYzBiZWIzYTllMGEwYWUyMTllNmZhNzYyN2YyNTI5ZDc3YzQ3MGY1YzIxNzI1NzhhM2EwYzM3NzM0NTM4MTlhYjE3ODJiNmRmOGM1NTI2YjQzZjUzNTZlNDVhM2Q5MDc4N2IwZGNkZTdmYmYzM2ZkMWQ2NGY2NjdmOWYzNDIzZjJkMmU2NzgyMTY5ZWM3MTE1Y2E3MDdlYWRhOGJmNzI0OTJmMGM3Y2QxNjJjMDI4NmFjOThmNDhmOWEyYWQzZDAwYzg5YmViYzA3NTA4ZjYwYzE1OGVmYjk5ZjBkOGY4MzQ1ODI5Yzg4Yzc0YTA3OGQyZjU5NTFjNmQzNTc1N2QyNjI0NWVjNTk0Y2JkMzc2YmVhMGNiZmEzMWYwZTA5MGRhYzhlYzNlYjQ0ZGIxN2M4MWE5NWY4MTE4MDAwNDJkMjQ2MmMzMjk2ODU5Yjg3ZjRhZmI1MDYxM2MxY2FiYTZkZDI0ODdiZDQ3MmVmNzBjMzFkN2YwNjZmZTMxOThiYzFhOWFlZjIwZTQzY2FlNDBkMDkxZWEzMmNiYTBhNDM0YmQ2ZDU2NDQ3YTU4YTNjODZjYTk0NjQ3MGNiZjM4ZjM3ZjU2YTZkZmQ4MDY0OWEyZGU3MzllN2EyZWE3M2RlNDE5NDljNmI4ODU2YmE5ZTM4Njc2YmRhNzA1MWE5MjlmMWU1YTczZjEwYTg2ZjgwNDJjZDQxZTMwYjVjMTA1ODYzNzlhMGY3NmRlOWExODZiZmU2N2Y5NzZhOTY3MTg0ZjNkYmFhYWU0YjdmNmFlMjM5MTlkNDljNDNiODc4MzRjMjA0MzY4YThkOGEyYzRkNjc3MzhkMTU0NmFiNTVjMWE0YTQ0Y2M3MzE5OGM4Y2YzOTAxZGI0ZGY1MzFmNGY5NTI4MDE5MjZjN2I2MDg1YjQzODI0YmFiMTQ3NTIxZTYwNWQzYzhmZjljYjNmOTRlNzg3MDJiYzc1MzE4NTRhN2M3ZDE2OWQyMzcyYjUzMDBhNGQzNzhhYWNjOTk3ZDM1ZTZjODYwZGQwMWNlYTMwZjU1YTFlMjQxMTMxMTQwZjQwMWJmZGJkNWU3NzA4OWE5YzljNDIzY2E2ODk3OGE2ODMwYWEzYTlkZGJiZmMyYTE3NGZhOTc4NmI3ZTYyYmIzNTZlNjRiMzBiYzI4ZDMyYTVjMDMxYzgxZjZlOGEyMGMwNWFlNjJlYWM2ZWExNDY5OTFiZjk1Yzc4NzQzMjMwYTIyNzk1MWRlMzI4NjFjYjU5ZGQ3N2QxOWQ5MTMxNDgwYmY2ZTgyYTkwNzgwMTBlYjAzMzIzYjcxNGY0NzM5NDNmY2MwNTM3ODJmOTIwMGFkNzlmNzZiNjkxNDdmZGQwOTdhZTUwMTk1YjE4M2Q2YWM5NjVmN2NkNDNhMGI3MTEwOTNkZTM5NGM3OTYwNjNlNTBhMDAyNzNkOTE2MzQzODY2MzFkZThkMzViYTUxNmI4MTIyZWZjNzE5MTU0OTQ2NTIyYzc0YjhmNTY2OTMwZDM3YmIwZjJkM2Q4ODgyZGQwZTU0YTcyODM1NmYyZDk2ZWVlNzZiYmZlYjI1YTFjM2ZhNTg5OGY5OTM0YTc4NTBjYzRlNjY4NjE5YWMzOTg2MmE5NDhjMDVhMTc0MzE0MjIwOGFhMjk5OGY2ZmIwMmZlZWI2YTk0M2Q1Nz
cyN2JhZWU4ZmY5NGFmZjgzZGVjMTUyZmYxOWVkYmM1Y2RiZDkzYzBiNDc1OTEzMjFhYTY4MjI1MDA4ODhmYWJhMzAzNjdlZmRjYmJjNzhjYzE5MWI1MDViNTlmMjBhY2RiYTYzMzQyYzE1YTI2M2NiOGE1NDQ3NzQ4ODU3YWYxMzllMDJlMzY0ODlkNjRlNTRiMTc5YTgwOGRmMWU5YTk1ODY2YzE2YTYzM2EyZmUyYjA2MzM4OTI5YTc4MmRlMGFkZDgwZDZiYWU3Y2M1ZjljMWEzYzA5MGU4MTVlNjc2MGJjMzA0ZWU3ZmY1MDM5OGRiNDc0YTJkNWMzYWVhNTMxZjc0ZDU3NGNhZGNhZTIzZmZiZjcyY2FhNmU5YTNjNjFhYzNiMDJjNDdjYzQzZGJhYjA2NTgwNTkyZmE5YjMyNGMxMGJhMGRjNjgzZWIyYzRiNDg4NzFiMjk2YmIxNDBhMWUyZWRlOTE0NmY3MThkZTE4ZWU0M2QwZTk4NWY3NWQ1YWYyYjlkNjU5ODM5YzQwZWFiMzg2" """星火请求中的 GtToken 字段""" + sid: Optional[str] = "" + """星火请求中的 sid 字段""" proxy: Optional[str] = None """可选的代理地址,留空则检测系统代理""" From 342e3033b58c458e89a3f95a2dc1041563740e99 Mon Sep 17 00:00:00 2001 From: Jackie Liu Date: Tue, 13 Jun 2023 20:24:07 -0700 Subject: [PATCH 33/68] Add new 3.5 models released by OpenAI today [3.5-turbo-061, 3.5-turbo-16k, 3.5-turbo-16k-0613] Add new models released by OpenAI today: - "gpt-3.5-turbo-0613" -"gpt-3.5-turbo-16k" - "gpt-3.5-turbo-16k-0613" --- adapter/chatgpt/api.py | 3 +++ config.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index da18931d..657c835e 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -43,6 +43,9 @@ def __init__(self, session_id: str = "unknown"): self.supported_models = [ "gpt-3.5-turbo", "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-16k-0613", "gpt-4", "gpt-4-0314", "gpt-4-32k", diff --git a/config.py b/config.py index ad8743e9..674175d9 100644 --- a/config.py +++ b/config.py @@ -357,6 +357,9 @@ class Trigger(BaseModel): allowed_models: List[str] = [ "gpt-3.5-turbo", "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-16k-0613", "text-davinci-002-render-sha", "text-davinci-002-render-paid" ] From e220a1419f27fd9ebec17cc01623b598d6ee9af5 Mon Sep 17 00:00:00 2001 From: Jackie Liu Date: Tue, 13 Jun 2023 20:31:28 -0700 Subject: [PATCH 34/68] Added new GPT4 models as well --- adapter/chatgpt/api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index 657c835e..6c10b3a4 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -50,6 +50,8 @@ def __init__(self, session_id: str = "unknown"): "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314", + "gpt-4-0613", + "gpt-4-32k-0613", ] async def switch_model(self, model_name): From 907270bdc2d1afc02bbf6321dd328276491d00d9 Mon Sep 17 00:00:00 2001 From: Jackie Liu Date: Tue, 13 Jun 2023 20:34:41 -0700 Subject: [PATCH 35/68] Remove unwanted changes --- config.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/config.py b/config.py index 674175d9..ad8743e9 100644 --- a/config.py +++ b/config.py @@ -357,9 +357,6 @@ class Trigger(BaseModel): allowed_models: List[str] = [ "gpt-3.5-turbo", "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-16k-0613", "text-davinci-002-render-sha", "text-davinci-002-render-paid" ] From 28e42fcad267723d705cf6d038317a54a8425fb7 Mon Sep 17 00:00:00 2001 From: Jackie Liu Date: Fri, 16 Jun 2023 02:22:20 -0700 Subject: [PATCH 36/68] Upgrade revChatGPT version to support new OpenAI models https://github.com/acheong08/ChatGPT/pull/1425 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8312c0be..eff141a7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ graia-ariadne==0.11.5 graiax-silkcoder -revChatGPT==6.1.4 
+revChatGPT==6.3.2 toml~=0.10.2 Pillow>=9.3.0 tinydb~=4.7.1 From 4c7c1030c5477736bf590c79985769bf6e2904f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 02:02:55 +0000 Subject: [PATCH 37/68] chore(deps): bump revchatgpt from 6.3.2 to 6.3.3 Bumps [revchatgpt](https://github.com/acheong08/ChatGPT) from 6.3.2 to 6.3.3. - [Release notes](https://github.com/acheong08/ChatGPT/releases) - [Commits](https://github.com/acheong08/ChatGPT/compare/6.3.2...6.3.3) --- updated-dependencies: - dependency-name: revchatgpt dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index eff141a7..b7b4fb6a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ graia-ariadne==0.11.5 graiax-silkcoder -revChatGPT==6.3.2 +revChatGPT==6.3.3 toml~=0.10.2 Pillow>=9.3.0 tinydb~=4.7.1 From 4c05f7cb918a181d1b8d668d357c989e19b46478 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 02:02:44 +0000 Subject: [PATCH 38/68] chore(deps): update tinydb requirement from ~=4.7.1 to ~=4.8.0 Updates the requirements on [tinydb](https://github.com/msiemens/tinydb) to permit the latest version. - [Release notes](https://github.com/msiemens/tinydb/releases) - [Changelog](https://github.com/msiemens/tinydb/blob/master/docs/changelog.rst) - [Commits](https://github.com/msiemens/tinydb/compare/v4.7.1...v4.8.0) --- updated-dependencies: - dependency-name: tinydb dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b7b4fb6a..55c763dd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ graiax-silkcoder revChatGPT==6.3.3 toml~=0.10.2 Pillow>=9.3.0 -tinydb~=4.7.1 +tinydb~=4.8.0 loguru~=0.7.0 asyncio~=3.4.3 From e6e9f59d284bf85cd368f6c706abd10b521960c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 02:02:37 +0000 Subject: [PATCH 39/68] chore(deps): update openai requirement from ~=0.27.7 to ~=0.27.8 Updates the requirements on [openai](https://github.com/openai/openai-python) to permit the latest version. - [Release notes](https://github.com/openai/openai-python/releases) - [Commits](https://github.com/openai/openai-python/compare/v0.27.7...v0.27.8) --- updated-dependencies: - dependency-name: openai dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 55c763dd..ab4a0f6f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ python-markdown-math~=0.8 pygments~=2.15.1 imgkit~=1.2.3 qrcode~=7.4.2 -openai~=0.27.7 +openai~=0.27.8 EdgeGPT==0.8.2 aiohttp~=3.8.4 OpenAIAuth~=1.0.2 From 71e96472cd96c6aab47d0b4e8eb9ed082f77ad5e Mon Sep 17 00:00:00 2001 From: "sourcery-ai[bot]" <58596630+sourcery-ai[bot]@users.noreply.github.com> Date: Sun, 25 Jun 2023 11:08:35 +0800 Subject: [PATCH 40/68] Fixes baiducloud bug and partial code formatting (Sourcery refactored) (#984) * Fixes baiducloud bug and partial code formatting * 'Refactored by Sourcery' --------- Co-authored-by: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Co-authored-by: Sourcery AI <> --- adapter/google/bard.py | 9 ++- adapter/ms/bing.py | 2 +- bot.py | 11 ++- manager/bot.py | 149 ++++++++++++++++++++------------------ middlewares/baiducloud.py | 18 +++-- utils/network.py | 16 ++-- utils/text_to_img.py | 2 +- 7 files changed, 110 insertions(+), 97 deletions(-) diff --git a/adapter/google/bard.py b/adapter/google/bard.py index bef8f95f..4d5d4355 100644 --- a/adapter/google/bard.py +++ b/adapter/google/bard.py @@ -2,13 +2,14 @@ from typing import Generator from adapter.botservice import BotAdapter +import json +from urllib.parse import quote +from exceptions import BotOperationNotSupportedException from config import BardCookiePath from constants import botManager -from exceptions import BotOperationNotSupportedException from loguru import logger -import json import httpx -from urllib.parse import quote + hashu = lambda word: ctypes.c_uint64(hash(word)).value @@ -77,7 +78,7 @@ async def ask(self, prompt: str) -> Generator[str, None, None]: data = json.loads(json.loads(lines)[0][2]) result = data[0][0] self.bard_session_id = data[1][0] - self.r = data[1][1] # 用于下一次请求, 这个位置是固定的 + self.r = data[1][1] # 用于下一次请求, 这个位置是固定的 # self.rc = data[4][1][0] for check in data: if not check: diff --git a/adapter/ms/bing.py b/adapter/ms/bing.py index 073ac958..cafa5b53 100644 --- a/adapter/ms/bing.py +++ b/adapter/ms/bing.py @@ -3,6 +3,7 @@ from typing import Generator, Union, List import aiohttp +import re import asyncio from PIL import Image @@ -15,7 +16,6 @@ from drawing import DrawingAPI from exceptions import BotOperationNotSupportedException from loguru import logger -import re from ImageGen import ImageGenAsync from graia.ariadne.message.element import Image as GraiaImage diff --git a/bot.py b/bot.py index 56234574..d2ff3f0e 100644 --- a/bot.py +++ b/bot.py @@ -1,7 +1,5 @@ import os import sys - -sys.path.append(os.getcwd()) import creart from asyncio import AbstractEventLoop import asyncio @@ -10,6 +8,8 @@ from constants import config, botManager from utils.edge_tts import load_edge_tts_voices +sys.path.append(os.getcwd()) + hook() loop = creart.create(AbstractEventLoop) @@ -21,27 +21,33 @@ if config.mirai: logger.info("检测到 mirai 配置,将启动 mirai 模式……") from platforms.ariadne_bot import start_task + bots.append(loop.create_task(start_task())) if config.onebot: logger.info("检测到 Onebot 配置,将启动 Onebot 模式……") from platforms.onebot_bot import start_task + bots.append(loop.create_task(start_task())) if config.telegram: logger.info("检测到 telegram 配置,将启动 telegram bot 模式……") from platforms.telegram_bot import start_task + bots.append(loop.create_task(start_task())) if config.discord: logger.info("检测到 discord 配置,将启动 discord bot 
模式……") from platforms.discord_bot import start_task + bots.append(loop.create_task(start_task())) if config.http: logger.info("检测到 http 配置,将启动 http service 模式……") from platforms.http_service import start_task + bots.append(loop.create_task(start_task())) if config.wecom: logger.info("检测到 Wecom 配置,将启动 Wecom Bot 模式……") from platforms.wecom_bot import start_task + bots.append(loop.create_task(start_task())) try: logger.info("[Edge TTS] 读取 Edge TTS 可用音色列表……") @@ -53,4 +59,3 @@ loop.run_until_complete(asyncio.gather(*bots)) loop.run_forever() - diff --git a/manager/bot.py b/manager/bot.py index 92322a76..578ef844 100644 --- a/manager/bot.py +++ b/manager/bot.py @@ -1,4 +1,4 @@ -import datetime +import asyncio import hashlib import itertools import os @@ -90,6 +90,39 @@ def __init__(self, config: Config) -> None: pass self.cache_db = TinyDB('data/login_caches.json') + async def handle_openai(self): + # 考虑到有人会写错全局配置 + for account in self.config.openai.accounts: + account = account.dict() + if 'browserless_endpoint' in account: + logger.warning("警告: browserless_endpoint 配置位置有误,正在将其调整为全局配置") + self.config.openai.browserless_endpoint = account['browserless_endpoint'] + if 'api_endpoint' in account: + logger.warning("警告: api_endpoint 配置位置有误,正在将其调整为全局配置") + self.config.openai.api_endpoint = account['api_endpoint'] + + # 应用 browserless_endpoint 配置 + if self.config.openai.browserless_endpoint: + V1.BASE_URL = self.config.openai.browserless_endpoint or V1.BASE_URL + logger.info(f"当前的 browserless_endpoint 为:{V1.BASE_URL}") + + # 历史遗留问题 1 + if V1.BASE_URL == 'https://bypass.duti.tech/api/': + logger.error("检测到你还在使用旧的 browserless_endpoint,已为您切换。") + V1.BASE_URL = "https://bypass.churchless.tech/api/" + # 历史遗留问题 2 + if not V1.BASE_URL.endswith("api/"): + logger.warning( + f"提示:你可能要将 browserless_endpoint 修改为 \"{self.config.openai.browserless_endpoint}api/\"") + + # 应用 api_endpoint 配置 + if self.config.openai.api_endpoint: + openai.api_base = self.config.openai.api_endpoint or openai.api_base + if openai.api_base.endswith("/"): + openai.api_base.removesuffix("/") + logger.info(f"当前的 api_endpoint 为:{openai.api_base}") + await self.login_openai() + async def login(self): self.bots = { "chatgpt-web": [], @@ -102,86 +135,58 @@ async def login(self): "chatglm-api": [], "slack-accesstoken": [], } + self.__setup_system_proxy() - if len(self.bing) > 0: - self.login_bing() - if len(self.poe) > 0: - self.login_poe() - if len(self.bard) > 0: - self.login_bard() - if len(self.slack) > 0: - self.login_slack() - if len(self.xinghuo) > 0: - self.login_xinghuo() - if len(self.openai) > 0: - # 考虑到有人会写错全局配置 - for account in self.config.openai.accounts: - account = account.dict() - if 'browserless_endpoint' in account: - logger.warning("警告: browserless_endpoint 配置位置有误,正在将其调整为全局配置") - self.config.openai.browserless_endpoint = account['browserless_endpoint'] - if 'api_endpoint' in account: - logger.warning("警告: api_endpoint 配置位置有误,正在将其调整为全局配置") - self.config.openai.api_endpoint = account['api_endpoint'] - - # 应用 browserless_endpoint 配置 - if self.config.openai.browserless_endpoint: - V1.BASE_URL = self.config.openai.browserless_endpoint or V1.BASE_URL - logger.info(f"当前的 browserless_endpoint 为:{V1.BASE_URL}") - - # 历史遗留问题 1 - if V1.BASE_URL == 'https://bypass.duti.tech/api/': - logger.error("检测到你还在使用旧的 browserless_endpoint,已为您切换。") - V1.BASE_URL = "https://bypass.churchless.tech/api/" - # 历史遗留问题 2 - if not V1.BASE_URL.endswith("api/"): - logger.warning( - f"提示:你可能要将 browserless_endpoint 修改为 
\"{self.config.openai.browserless_endpoint}api/\"") - - # 应用 api_endpoint 配置 - if self.config.openai.api_endpoint: - openai.api_base = self.config.openai.api_endpoint or openai.api_base - if openai.api_base.endswith("/"): - openai.api_base.removesuffix("/") - logger.info(f"当前的 api_endpoint 为:{openai.api_base}") - - await self.login_openai() - if len(self.yiyan) > 0: - self.login_yiyan() - if len(self.chatglm) > 0: - self.login_chatglm() + + login_funcs = { + 'bing': self.login_bing, + 'poe': self.login_poe, + 'bard': self.login_bard, + 'slack': self.login_slack, + 'xinghuo': self.login_xinghuo, + 'openai': self.handle_openai, + 'yiyan': self.login_yiyan, + 'chatglm': self.login_chatglm + } + + for key, login_func in login_funcs.items(): + if hasattr(self, key) and len(getattr(self, key)) > 0: + if asyncio.iscoroutinefunction(login_func): + await login_func() + else: + login_func() + count = sum(len(v) for v in self.bots.values()) + if count < 1: logger.error("没有登录成功的账号,程序无法启动!") exit(-2) else: - # 输出登录状况 for k, v in self.bots.items(): logger.info(f"AI 类型:{k} - 可用账号: {len(v)} 个") - # 自动推测默认 AI + if not self.config.response.default_ai: - if len(self.bots['poe-web']) > 0: - self.config.response.default_ai = 'poe-chatgpt' - elif len(self.bots['slack-accesstoken']) > 0: - self.config.response.default_ai = 'slack-claude' - elif len(self.bots['chatgpt-web']) > 0: - self.config.response.default_ai = 'chatgpt-web' - elif len(self.bots['openai-api']) > 0: - self.config.response.default_ai = 'chatgpt-api' - elif len(self.bots['bing-cookie']) > 0: - self.config.response.default_ai = 'bing' - elif len(self.bots['bard-cookie']) > 0: - self.config.response.default_ai = 'bard' - elif len(self.bots['yiyan-cookie']) > 0: - self.config.response.default_ai = 'yiyan' - elif len(self.bots['chatglm-api']) > 0: - self.config.response.default_ai = 'chatglm-api' - elif len(self.bots['xinghuo-cookie']) > 0: - self.config.response.default_ai = 'xinghuo' - elif len(self.bots['slack-accesstoken']) > 0: - self.config.response.default_ai = 'slack-claude' - else: - self.config.response.default_ai = 'chatgpt-web' + # 自动推测默认 AI + default_ai_mappings = { + "poe-web": "poe-chatgpt", + "slack-accesstoken": "slack-claude", + "chatgpt-web": "chatgpt-web", + "openai-api": "chatgpt-api", + "bing-cookie": "bing", + "bard-cookie": "bard", + "yiyan-cookie": "yiyan", + "chatglm-api": "chatglm-api", + "xinghuo-cookie": "xinghuo", + } + + self.config.response.default_ai = next( + ( + default_ai + for key, default_ai in default_ai_mappings.items() + if len(self.bots[key]) > 0 + ), + 'chatgpt-web', + ) def reset_bot(self, bot): from adapter.quora.poe import PoeClientWrapper diff --git a/middlewares/baiducloud.py b/middlewares/baiducloud.py index e87f235c..3e404450 100644 --- a/middlewares/baiducloud.py +++ b/middlewares/baiducloud.py @@ -110,15 +110,17 @@ async def handle_respond(self, session_id: str, prompt: str, rendered: str, resp conclusion = f"{config.baiducloud.prompt_message}\n原因:{msg}" return await action(session_id, prompt, conclusion, respond) - except aiohttp.ClientError as e: - logger.error(f"HTTP error occurred: {e}") - - await respond("[百度云文本审核] 判定出错\n以下是原消息:") + except Exception as e: + respond_message = "[百度云文本审核] 判定出错\n以下是原消息:" + if isinstance(e, aiohttp.ClientError): + error_message = f"[百度云文本审核] HTTP错误: {e}" + elif isinstance(e, json.JSONDecodeError): + error_message = f"[百度云文本审核] JSON解码错误: {e}" + else: + error_message = f"[百度云文本审核] 其他错误:{e}" + logger.error(error_message) + await respond(respond_message) should_pass = 
True - except json.JSONDecodeError as e: - logger.error(f"[百度云文本审核] JSON decode error occurred: {e}") - except StopIteration as e: - logger.error(f"[百度云文本审核] StopIteration exception occurred: {e}") if should_pass: return await action(session_id, prompt, rendered, respond) diff --git a/utils/network.py b/utils/network.py index 99735b59..45133a5d 100644 --- a/utils/network.py +++ b/utils/network.py @@ -3,14 +3,14 @@ def is_open(ip, port): """Check if a host and port is open""" - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(5) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(5) try: # True if open, False if not - is_open = s.connect_ex((ip, int(port))) == 0 - if is_open: - s.shutdown(socket.SHUT_RDWR) + is_port_open = sock.connect_ex((ip, int(port))) == 0 + if is_port_open: + sock.shutdown(socket.SHUT_RDWR) except Exception: - is_open = False - s.close() - return is_open + is_port_open = False + sock.close() + return is_port_open diff --git a/utils/text_to_img.py b/utils/text_to_img.py index 2f061e1f..fa2adc8c 100644 --- a/utils/text_to_img.py +++ b/utils/text_to_img.py @@ -10,6 +10,7 @@ from typing import Optional import aiohttp +import unicodedata import asyncio import imgkit from pydantic import BaseModel @@ -20,7 +21,6 @@ import markdown import qrcode -import unicodedata from PIL import Image from PIL import ImageDraw, ImageFont from charset_normalizer import from_bytes From 51abcb86cb21b193cfbd619c92d3dafb81f38e16 Mon Sep 17 00:00:00 2001 From: Huoyuuu <86390123+Huoyuuu@users.noreply.github.com> Date: Wed, 5 Jul 2023 16:27:46 +0800 Subject: [PATCH 41/68] =?UTF-8?q?fix:=20=E6=9B=B4=E6=96=B0=20Bard=20?= =?UTF-8?q?=E8=84=9A=E6=9C=AC=E4=BB=A5=E6=AD=A3=E7=A1=AE=E8=8E=B7=E5=8F=96?= =?UTF-8?q?=E6=95=B0=E6=8D=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bard 返回数据的组织方式发生了变化,脚本在读取数据时出现问题。原先代码 `result = data[0][0]` 引发了 `TypeError` 错误。经过实际分析,已将代码修改为 `result = data[4][0][1][0]`。 修改后机器人能够正常运行。 --- adapter/google/bard.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adapter/google/bard.py b/adapter/google/bard.py index 4d5d4355..841f32be 100644 --- a/adapter/google/bard.py +++ b/adapter/google/bard.py @@ -76,7 +76,7 @@ async def ask(self, prompt: str) -> Generator[str, None, None]: for lines in res: if "wrb.fr" in lines: data = json.loads(json.loads(lines)[0][2]) - result = data[0][0] + result = data[4][0][1][0] self.bard_session_id = data[1][0] self.r = data[1][1] # 用于下一次请求, 这个位置是固定的 # self.rc = data[4][1][0] From fb325825cb4189ad7cc1a3a193fea6ee809c32a4 Mon Sep 17 00:00:00 2001 From: Elijah Tan Date: Mon, 3 Jul 2023 20:55:48 +0800 Subject: [PATCH 42/68] =?UTF-8?q?FIX=EF=BC=9Apoe=20payload=20error?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ab4a0f6f..865a2c73 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ tls-client python-dateutil~=2.8.2 discord.py azure-cognitiveservices-speech -poe-api~=0.4.6 +poe-api~=0.4.7 regex~=2023.6.3 httpx From 11081c4d6089b4b3ead624a3fc74c8c4c24daa89 Mon Sep 17 00:00:00 2001 From: Elijah Tan Date: Tue, 4 Jul 2023 14:11:33 +0800 Subject: [PATCH 43/68] fix: Error Download too many times --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 865a2c73..2fb0fa8d 100644 --- 
a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ tls-client python-dateutil~=2.8.2 discord.py azure-cognitiveservices-speech -poe-api~=0.4.7 +poe-api~=0.4.8 regex~=2023.6.3 httpx From ec4dbeea4ec97c1747efc3f8206b2aacf62df896 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Tue, 11 Jul 2023 10:09:02 +0800 Subject: [PATCH 44/68] =?UTF-8?q?=E9=87=8D=E6=9E=84=E6=B5=81=E5=BC=8FAPI?= =?UTF-8?q?=E8=AF=B7=E6=B1=82=EF=BC=8C=E5=8A=A0=E5=85=A5=E4=BC=9A=E8=AF=9D?= =?UTF-8?q?=E5=8E=8B=E7=BC=A9=E5=8A=9F=E8=83=BD=20(#1033)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Refactoring api (#1001) * Use asynchrony to implement openai api access itself * Update api.py * Update api.py * Update api.py * Update api.py * Update api.py * Added api address verification * Update requirements.txt * fix api bug (Sourcery refactored) (#1007) * Use asynchrony to implement openai api access itself * Update api.py * Update api.py * Update api.py * Update api.py * Update api.py * Added api address verification * fix api bug * Update requirements.txt * 'Refactored by Sourcery' --------- Co-authored-by: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Co-authored-by: Sourcery AI <> * 重构流式API请求,加入会话压缩功能 (Sourcery refactored) (#1032) * Use asynchrony to implement openai api access itself * Update api.py * Update api.py * Update api.py * Update api.py * Update api.py * Added api address verification * fix api bug * Update requirements.txt * 新增压缩会话功能 * 'Refactored by Sourcery' --------- Co-authored-by: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Co-authored-by: Sourcery AI <> --------- Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com> --- adapter/chatgpt/api.py | 279 +++++++++++++++++++++++++++++++++-------- config.py | 11 +- manager/bot.py | 17 ++- requirements.txt | 12 +- universal.py | 6 +- utils/text_to_img.py | 24 ++-- 6 files changed, 275 insertions(+), 74 deletions(-) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index 6c10b3a4..254233bc 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -1,45 +1,128 @@ -import ctypes -import os -from typing import Generator -import openai + +import json +import time +from typing import AsyncGenerator + +import aiohttp +import async_timeout +import tiktoken + from loguru import logger -from revChatGPT.V3 import Chatbot as OpenAIChatbot from adapter.botservice import BotAdapter from config import OpenAIAPIKey from constants import botManager, config +import tiktoken + + +DEFAULT_ENGINE: str = "gpt-3.5-turbo" + + +class OpenAIChatbot: + def __init__(self, api_info: OpenAIAPIKey): + self.api_key = api_info.api_key + self.proxy = api_info.proxy + self.presence_penalty = config.openai.gpt_params.presence_penalty + self.frequency_penalty = config.openai.gpt_params.frequency_penalty + self.top_p = config.openai.gpt_params.top_p + self.temperature = config.openai.gpt_params.temperature + self.max_tokens = config.openai.gpt_params.max_tokens + self.engine = api_info.model or DEFAULT_ENGINE + self.timeout = config.response.max_timeout + self.conversation: dict[str, list[dict]] = { + "default": [ + { + "role": "system", + "content": "You are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent date:[current date]", + }, + ], + } -hashu = lambda word: ctypes.c_uint64(hash(word)).value + async def rollback(self, session_id: str = "default", n: int = 1) -> None: + try: + if 
session_id not in self.conversation: + raise ValueError(f"会话 ID {session_id} 不存在。") + + if n > len(self.conversation[session_id]): + raise ValueError(f"回滚次数 {n} 超过了会话 {session_id} 的消息数量。") + + for _ in range(n): + self.conversation[session_id].pop() + + except ValueError as ve: + logger.error(ve) + raise + except Exception as e: + logger.error(f"未知错误: {e}") + raise + + def add_to_conversation( + self, + message: str, + role: str, + session_id: str = "default", + ) -> None: + self.conversation[session_id].append({"role": role, "content": message}) + + # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb + def count_tokens(self, session_id: str = "default", model: str = DEFAULT_ENGINE): + """Return the number of tokens used by a list of messages.""" + try: + encoding = tiktoken.encoding_for_model(model) + except KeyError: + encoding = tiktoken.get_encoding("cl100k_base") + + if model in { + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + "gpt-4-0314", + "gpt-4-32k-0314", + "gpt-4-0613", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-4", + "gpt-4-32k" + }: + tokens_per_message = 3 + tokens_per_name = 1 + elif model == "gpt-3.5-turbo-0301": + tokens_per_message = 4 # every message follows {role/name}\n{content}\n + tokens_per_name = -1 # if there's a name, the role is omitted + else: + logger.warning("未找到相应模型计算方法,使用默认方法进行计算") + tokens_per_message = 3 + tokens_per_name = 1 + + num_tokens = 0 + for message in self.conversation[session_id]: + num_tokens += tokens_per_message + for key, value in message.items(): + num_tokens += len(encoding.encode(value)) + if key == "name": + num_tokens += tokens_per_name + num_tokens += 3 # every reply is primed with assistant + return num_tokens + + def get_max_tokens(self, session_id: str, model: str) -> int: + """Get max tokens""" + return self.max_tokens - self.count_tokens(session_id, model) class ChatGPTAPIAdapter(BotAdapter): api_info: OpenAIAPIKey = None """API Key""" - bot: OpenAIChatbot = None - """实例""" - - hashed_user_id: str - def __init__(self, session_id: str = "unknown"): + self.latest_role = None self.__conversation_keep_from = 0 self.session_id = session_id - self.hashed_user_id = "user-" + hashu("session_id").to_bytes(8, "big").hex() self.api_info = botManager.pick('openai-api') - self.bot = OpenAIChatbot( - api_key=self.api_info.api_key, - proxy=self.api_info.proxy, - presence_penalty=config.openai.gpt3_params.presence_penalty, - frequency_penalty=config.openai.gpt3_params.frequency_penalty, - top_p=config.openai.gpt3_params.top_p, - temperature=config.openai.gpt3_params.temperature, - max_tokens=config.openai.gpt3_params.max_tokens, - ) + self.bot = OpenAIChatbot(self.api_info) self.conversation_id = None self.parent_id = None super().__init__() self.bot.conversation[self.session_id] = [] - self.current_model = self.api_info.model or "gpt-3.5-turbo" + self.current_model = self.bot.engine self.supported_models = [ "gpt-3.5-turbo", "gpt-3.5-turbo-0301", @@ -54,6 +137,20 @@ def __init__(self, session_id: str = "unknown"): "gpt-4-32k-0613", ] + def manage_conversation(self, session_id: str, prompt: str): + if session_id not in self.bot.conversation: + self.bot.conversation[session_id] = [ + {"role": "system", "content": prompt} + ] + self.__conversation_keep_from = 1 + + while self.bot.max_tokens - self.bot.count_tokens(session_id) < config.openai.gpt_params.min_tokens and \ + len(self.bot.conversation[session_id]) > self.__conversation_keep_from: + 
self.bot.conversation[session_id].pop(self.__conversation_keep_from) + logger.debug( + f"清理 token,历史记录遗忘后使用 token 数:{str(self.bot.count_tokens(session_id))}" + ) + async def switch_model(self, model_name): self.current_model = model_name self.bot.engine = self.current_model @@ -61,7 +158,7 @@ async def switch_model(self, model_name): async def rollback(self): if len(self.bot.conversation[self.session_id]) <= 0: return False - self.bot.rollback(convo_id=self.session_id, n=2) + await self.bot.rollback(self.session_id, n=2) return True async def on_reset(self): @@ -69,39 +166,121 @@ async def on_reset(self): self.bot.api_key = self.api_info.api_key self.bot.proxy = self.api_info.proxy self.bot.conversation[self.session_id] = [] + self.bot.engine = self.api_info.model self.__conversation_keep_from = 0 - async def ask(self, prompt: str) -> Generator[str, None, None]: + async def request_with_stream(self, session_id: str = None, messages: list = None) -> AsyncGenerator[str, None]: self.api_info = botManager.pick('openai-api') - self.bot.api_key = self.api_info.api_key - self.bot.proxy = self.api_info.proxy - self.bot.session.proxies.update( - { - "http": self.bot.proxy, - "https": self.bot.proxy, - }, - ) + api_key = self.api_info.api_key + proxy = self.api_info.proxy + api_endpoint = config.openai.api_endpoint or "https://api.openai.com/v1" - if self.session_id not in self.bot.conversation: - self.bot.conversation[self.session_id] = [ - {"role": "system", "content": self.bot.system_prompt} - ] - self.__conversation_keep_from = 1 + if not messages: + messages = self.bot.conversation[session_id] - while self.bot.max_tokens - self.bot.get_token_count(self.session_id) < config.openai.gpt3_params.min_tokens and \ - len(self.bot.conversation[self.session_id]) > self.__conversation_keep_from: - self.bot.conversation[self.session_id].pop(self.__conversation_keep_from) - logger.debug( - f"清理 token,历史记录遗忘后使用 token 数:{str(self.bot.get_token_count(self.session_id))}" - ) + headers = { + 'Content-Type': 'application/json', + 'Authorization': f'Bearer {api_key}' + } + data = { + 'model': self.bot.engine, + 'messages': messages, + 'stream': True, + 'temperature': self.bot.temperature, + 'top_p': self.bot.top_p, + 'presence_penalty': self.bot.presence_penalty, + 'frequency_penalty': self.bot.frequency_penalty, + "user": 'user', + 'max_tokens': self.bot.get_max_tokens(self.session_id, self.bot.engine), + } + async with aiohttp.ClientSession() as session: + with async_timeout.timeout(self.bot.timeout): + async with session.post(f'{api_endpoint}/chat/completions', headers=headers, data=json.dumps(data), proxy=proxy) as resp: + if resp.status != 200: + response_text = await resp.text() + raise Exception( + f"{resp.status} {resp.reason} {response_text}", + ) + + response_role: str = '' + completion_text: str = '' + + async for line in resp.content: + line = line.decode('utf-8').strip() + if not line.startswith("data: "): + continue + line = line[len("data: "):] + if line == "[DONE]": + break + if not line: + continue + try: + event = json.loads(line) + except json.JSONDecodeError: + raise Exception(f"JSON解码错误: {line}") from None + if 'error' in event: + raise Exception(f"响应错误: {event['error']}") + if 'choices' in event and len(event['choices']) > 0 and 'delta' in event['choices'][0]: + delta = event['choices'][0]['delta'] + if 'role' in delta: + response_role = delta['role'] + if 'content' in delta: + event_text = delta['content'] + completion_text += event_text + self.latest_role = response_role + yield 
completion_text + self.bot.add_to_conversation(completion_text, response_role, session_id) + + async def compressed_session(self, session_id: str): + if session_id not in self.bot.conversation or not self.bot.conversation[session_id]: + logger.debug(f"不存在该会话,不进行压缩: {session_id}") + return + + if config.openai.gpt_params.compressed_session and self.bot.count_tokens( + session_id) > config.openai.gpt_params.compressed_tokens: + logger.debug('开始进行会话压缩') + + filtered_data = [entry for entry in self.bot.conversation[session_id] if entry['role'] != 'system'] + self.bot.conversation[session_id] = [entry for entry in self.bot.conversation[session_id] if + entry['role'] not in ['assistant', 'user']] + + filtered_data.append(({"role": "system", + "content": "Summarize the discussion briefly in 200 words or less to use as a prompt for future context."})) + + async for text in self.request_with_stream(session_id=session_id, messages=filtered_data): + pass + + token_count = self.bot.count_tokens(self.session_id, self.bot.engine) + logger.debug(f"压缩会话后使用 token 数:{token_count}") + + async def ask(self, prompt: str) -> AsyncGenerator[str, None]: + """Send a message to api and return the response with stream.""" + + self.manage_conversation(self.session_id, prompt) + + await self.compressed_session(self.session_id) + + event_time = None + + try: + + logger.debug(f"[尝试使用ChatGPT-API:{self.bot.engine}] 请求:{prompt}") + self.bot.add_to_conversation(prompt, "user", session_id=self.session_id) + start_time = time.time() + + async for completion_text in self.request_with_stream(session_id=self.session_id): + yield completion_text + + token_count = self.bot.count_tokens(self.session_id, self.bot.engine) + logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{completion_text}") + logger.debug(f"[ChatGPT-API:{self.bot.engine}] 使用 token 数:{token_count}") + event_time = time.time() - start_time + if event_time is not None: + logger.debug(f"[ChatGPT-API:{self.bot.engine}] 接收到全部消息花费了{event_time:.2f}秒") - os.environ['API_URL'] = f'{openai.api_base}/chat/completions' - full_response = '' - async for resp in self.bot.ask_stream_async(prompt=prompt, role=self.hashed_user_id, convo_id=self.session_id): - full_response += resp - yield full_response - logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{full_response}") - logger.debug(f"使用 token 数:{str(self.bot.get_token_count(self.session_id))}") + except Exception as e: + logger.error(f"[ChatGPT-API:{self.bot.engine}] 请求失败:\n{e}") + yield f"发生错误: \n{e}" async def preset_ask(self, role: str, text: str): if role.endswith('bot') or role in {'assistant', 'chatgpt'}: diff --git a/config.py b/config.py index ad8743e9..c428707c 100644 --- a/config.py +++ b/config.py @@ -76,13 +76,15 @@ class WecomBot(BaseModel): """企业微信应用 API 令牌 的 EncodingAESKey""" -class OpenAIGPT3Params(BaseModel): +class OpenAIParams(BaseModel): temperature: float = 0.5 max_tokens: int = 4000 top_p: float = 1.0 presence_penalty: float = 0.0 frequency_penalty: float = 0.0 min_tokens: int = 1000 + compressed_session: bool = False + compressed_tokens: int = 1000 class OpenAIAuths(BaseModel): @@ -91,7 +93,7 @@ class OpenAIAuths(BaseModel): api_endpoint: Optional[str] = None """自定义 OpenAI API 的接入点""" - gpt3_params: OpenAIGPT3Params = OpenAIGPT3Params() + gpt_params: OpenAIParams = OpenAIParams() accounts: List[Union[OpenAIEmailAuth, OpenAISessionTokenAuth, OpenAIAccessTokenAuth, OpenAIAPIKey]] = [] @@ -225,13 +227,15 @@ class XinghuoCookiePath(BaseModel): """星火 Cookie 中的 ssoSessionId 字段""" fd: Optional[str] = "" """星火请求中的 
fd 字段""" - GtToken: Optional[str] = "R0VFAAYyNDAzOTU0YzM5Y2M0ZTRlNDY2MTE2MDA4ZGZlYjZjMGQzNGMyMGY0YjQ1NTA1NDg3OWQ0ZWJlOTk0NzQxNGI1MWUzM2IzZDUyZTEyMGM3MWYxNjlmNWY2YmYwMWMxNDI2YzIxOTlmZjMzYTI5YmY3YjQ1M2RjZGQwZWNjMDdiYjMzMmY4OTE2OTRhYTk1OWIyZWVlNzFjNmI5ZWFmY2MxNDFkNjk2MWYzYWQ3ZDAyYjZkM2U0YTllYWZlOTM0Njc4NmMyZmQ4NTRiYWViMTI2NjhlZmFhMWRiNmRmMDc5MzQxN2EyYzMzZDhiN2M4NzJjMzQ3YTYwNDFiMGZkZjkxN2Q2OTRlOWFiZWMwN2U0ZTg3Y2UwM2UxNDlmODBjMzA0MmE4NTAyNzhiNjU0MTU3ZjBlMmMzN2UxMTQ0MjA3ZWE0MDIzZTMyNDRiMjJmMjcwYjE5NGZiMWJhMmFlNGQ4YzkxMWNmZmQ0OGQzYzBlYmQxMTk1ZjE5MDJmMTVjNWUyMDI3ZmNmMDI0ODIxYWJiMWZhNzc3MTExOTBiZmZhMWRhYmRlYzVhYTkwMGRlMjU2YjFhNGQ4ZGYwYzQ0ZjI4MGJiNzcyNGIyOTlkYjU0ZGMyYjllY2U1NjNlYjQzZWE5MzhkMmQ3NTFjMTVkMGY0NDNkYjdhNzdlMmQ4NzM1NTQ3NDI0ZDBjNzRmMTA0NzY4NmI2M2UwZWRiMDM0ZjNhODc1NGZkYjgxMDBlNDA0MmZlZDYzZmFlYmYyNTExMTI5NTIyOTg0ZDMzN2UxYTBhN2NiZWZlZGMxOTVjOWQ2MGVhOTMyY2E5M2VhYmZkODI1YjBiMzU0ZDViYzUzMmM5YzI5NjA2ZWU3MmFmNGYwNGRkNTlhNDEzYzJiZmYyODllZjBkNWJlNWU5ZjZkZWVlMjk4MDUyMTU2OTQwNzE3ZDQ5M2NlM2E4YmIwN2YyZjE4MzgzZmEwNjQxNGZlYmFlNzdmN2QwNTZlYTQ3NDEwMmNlZjU1YmZhNjNjMDM2MmI5OTU2NjBkZjg4YzFjYzA2MmY0NjU2OTE0ZGIwMWE3ODQxNjA2YjdlZWE3ZDJjZTM4NjE5YTcwYjg0MmVkZTBmM2Y1MzI3ZGI2YmU5M2ZjYTNiMzg4OTJkOGQ3NWI4Y2M4YjQ3NjBkNDExZmQ3ZmFlNGIxY2YwMGE5ZDk2MmM2ZDYzMWE1YmRjNmYzMmU0Y2U5MDYwOGNiMDMzMTlkZGE2ZDlkMGU4OGUwMzUwMDkwZTQ5MGRhMmY5ODU1MGU4ZmQ1ODc3NmQ0Yjg5MDM1Y2FiNTg3MjMyMGMwOTJmOTUyODkwYmQ3YjIwYTMzODI5Y2MwY2VlZTE0MWY5N2FiN2IzYmJjNDg3MWM0M2E3ZTViYWNjZWZiZjg4MjM1ZDRiNWMzMjBjM2IxNGM2ZWE2NWVkZjc0OWI0ZDNlNzZjOWYyMTkwZDM0ZTVkYTZkNjM1NjFmZWNmMWYyODIxMTMyNjIyOGFjMWU0MTA2NjY1OWQ4Y2JlZTRmMjIwYzI2NjNmNzYxYzBhZGEyY2VkZjkyNDkzZWExNzFhN2NhZThiNTMxNDNmNzEzM2RhY2UyOWNmYjQ4ZTk5YzE2YjcyM2ZmZTJjZDk5MjU0NGM5OWNhOTFlMDRlMWNiNTQ5ZjU4MGQxY2I4YWU5MWU0MDlmZDZmYjhjNGYzYTRmODA2ZWFiZjRlMDI3OWJmOTM4NmQwN2I5MTBmYzlkYzNjMGM2ODIzYjg4OWFjNWZkZjBhYWNjYzNhYmU0MDRmMTg3Y2Q0MGNmMjcyNWFmY2VkYzAzYmVjZGY2MmMzNWRkNzQ5MGExYjQ1MDdlNTczNDI1OTliYTJhMjNmM2FmNDg1NGM3ODZkYzBiZWIzYTllMGEwYWUyMTllNmZhNzYyN2YyNTI5ZDc3YzQ3MGY1YzIxNzI1NzhhM2EwYzM3NzM0NTM4MTlhYjE3ODJiNmRmOGM1NTI2YjQzZjUzNTZlNDVhM2Q5MDc4N2IwZGNkZTdmYmYzM2ZkMWQ2NGY2NjdmOWYzNDIzZjJkMmU2NzgyMTY5ZWM3MTE1Y2E3MDdlYWRhOGJmNzI0OTJmMGM3Y2QxNjJjMDI4NmFjOThmNDhmOWEyYWQzZDAwYzg5YmViYzA3NTA4ZjYwYzE1OGVmYjk5ZjBkOGY4MzQ1ODI5Yzg4Yzc0YTA3OGQyZjU5NTFjNmQzNTc1N2QyNjI0NWVjNTk0Y2JkMzc2YmVhMGNiZmEzMWYwZTA5MGRhYzhlYzNlYjQ0ZGIxN2M4MWE5NWY4MTE4MDAwNDJkMjQ2MmMzMjk2ODU5Yjg3ZjRhZmI1MDYxM2MxY2FiYTZkZDI0ODdiZDQ3MmVmNzBjMzFkN2YwNjZmZTMxOThiYzFhOWFlZjIwZTQzY2FlNDBkMDkxZWEzMmNiYTBhNDM0YmQ2ZDU2NDQ3YTU4YTNjODZjYTk0NjQ3MGNiZjM4ZjM3ZjU2YTZkZmQ4MDY0OWEyZGU3MzllN2EyZWE3M2RlNDE5NDljNmI4ODU2YmE5ZTM4Njc2YmRhNzA1MWE5MjlmMWU1YTczZjEwYTg2ZjgwNDJjZDQxZTMwYjVjMTA1ODYzNzlhMGY3NmRlOWExODZiZmU2N2Y5NzZhOTY3MTg0ZjNkYmFhYWU0YjdmNmFlMjM5MTlkNDljNDNiODc4MzRjMjA0MzY4YThkOGEyYzRkNjc3MzhkMTU0NmFiNTVjMWE0YTQ0Y2M3MzE5OGM4Y2YzOTAxZGI0ZGY1MzFmNGY5NTI4MDE5MjZjN2I2MDg1YjQzODI0YmFiMTQ3NTIxZTYwNWQzYzhmZjljYjNmOTRlNzg3MDJiYzc1MzE4NTRhN2M3ZDE2OWQyMzcyYjUzMDBhNGQzNzhhYWNjOTk3ZDM1ZTZjODYwZGQwMWNlYTMwZjU1YTFlMjQxMTMxMTQwZjQwMWJmZGJkNWU3NzA4OWE5YzljNDIzY2E2ODk3OGE2ODMwYWEzYTlkZGJiZmMyYTE3NGZhOTc4NmI3ZTYyYmIzNTZlNjRiMzBiYzI4ZDMyYTVjMDMxYzgxZjZlOGEyMGMwNWFlNjJlYWM2ZWExNDY5OTFiZjk1Yzc4NzQzMjMwYTIyNzk1MWRlMzI4NjFjYjU5ZGQ3N2QxOWQ5MTMxNDgwYmY2ZTgyYTkwNzgwMTBlYjAzMzIzYjcxNGY0NzM5NDNmY2MwNTM3ODJmOTIwMGFkNzlmNzZiNjkxNDdmZGQwOTdhZTUwMTk1YjE4M2Q2YWM5NjVmN2NkNDNhMGI3MTEwOTNkZTM5NGM3OTYwNjNlNTBhMDAyNzNkOTE2MzQzODY2MzFkZThkMzViYTUxNmI4MTIyZWZjNzE5MTU0OTQ2NTIyYzc0YjhmNTY2OTMwZDM3YmIwZjJkM2Q4ODgyZGQwZTU0YTcyODM1NmYyZDk2ZWVlNzZiYmZlYjI1YTFjM2ZhNTg5OGY5OTM0YTc4NTBjYzRlNjY4NjE5YWMzOTg2MmE5NDhjMDVhMTc0MzE0Mj
IwOGFhMjk5OGY2ZmIwMmZlZWI2YTk0M2Q1NzcyN2JhZWU4ZmY5NGFmZjgzZGVjMTUyZmYxOWVkYmM1Y2RiZDkzYzBiNDc1OTEzMjFhYTY4MjI1MDA4ODhmYWJhMzAzNjdlZmRjYmJjNzhjYzE5MWI1MDViNTlmMjBhY2RiYTYzMzQyYzE1YTI2M2NiOGE1NDQ3NzQ4ODU3YWYxMzllMDJlMzY0ODlkNjRlNTRiMTc5YTgwOGRmMWU5YTk1ODY2YzE2YTYzM2EyZmUyYjA2MzM4OTI5YTc4MmRlMGFkZDgwZDZiYWU3Y2M1ZjljMWEzYzA5MGU4MTVlNjc2MGJjMzA0ZWU3ZmY1MDM5OGRiNDc0YTJkNWMzYWVhNTMxZjc0ZDU3NGNhZGNhZTIzZmZiZjcyY2FhNmU5YTNjNjFhYzNiMDJjNDdjYzQzZGJhYjA2NTgwNTkyZmE5YjMyNGMxMGJhMGRjNjgzZWIyYzRiNDg4NzFiMjk2YmIxNDBhMWUyZWRlOTE0NmY3MThkZTE4ZWU0M2QwZTk4NWY3NWQ1YWYyYjlkNjU5ODM5YzQwZWFiMzg2" + GtToken: Optional[ + str] = "R0VFAAYyNDAzOTU0YzM5Y2M0ZTRlNDY2MTE2MDA4ZGZlYjZjMGQzNGMyMGY0YjQ1NTA1NDg3OWQ0ZWJlOTk0NzQxNGI1MWUzM2IzZDUyZTEyMGM3MWYxNjlmNWY2YmYwMWMxNDI2YzIxOTlmZjMzYTI5YmY3YjQ1M2RjZGQwZWNjMDdiYjMzMmY4OTE2OTRhYTk1OWIyZWVlNzFjNmI5ZWFmY2MxNDFkNjk2MWYzYWQ3ZDAyYjZkM2U0YTllYWZlOTM0Njc4NmMyZmQ4NTRiYWViMTI2NjhlZmFhMWRiNmRmMDc5MzQxN2EyYzMzZDhiN2M4NzJjMzQ3YTYwNDFiMGZkZjkxN2Q2OTRlOWFiZWMwN2U0ZTg3Y2UwM2UxNDlmODBjMzA0MmE4NTAyNzhiNjU0MTU3ZjBlMmMzN2UxMTQ0MjA3ZWE0MDIzZTMyNDRiMjJmMjcwYjE5NGZiMWJhMmFlNGQ4YzkxMWNmZmQ0OGQzYzBlYmQxMTk1ZjE5MDJmMTVjNWUyMDI3ZmNmMDI0ODIxYWJiMWZhNzc3MTExOTBiZmZhMWRhYmRlYzVhYTkwMGRlMjU2YjFhNGQ4ZGYwYzQ0ZjI4MGJiNzcyNGIyOTlkYjU0ZGMyYjllY2U1NjNlYjQzZWE5MzhkMmQ3NTFjMTVkMGY0NDNkYjdhNzdlMmQ4NzM1NTQ3NDI0ZDBjNzRmMTA0NzY4NmI2M2UwZWRiMDM0ZjNhODc1NGZkYjgxMDBlNDA0MmZlZDYzZmFlYmYyNTExMTI5NTIyOTg0ZDMzN2UxYTBhN2NiZWZlZGMxOTVjOWQ2MGVhOTMyY2E5M2VhYmZkODI1YjBiMzU0ZDViYzUzMmM5YzI5NjA2ZWU3MmFmNGYwNGRkNTlhNDEzYzJiZmYyODllZjBkNWJlNWU5ZjZkZWVlMjk4MDUyMTU2OTQwNzE3ZDQ5M2NlM2E4YmIwN2YyZjE4MzgzZmEwNjQxNGZlYmFlNzdmN2QwNTZlYTQ3NDEwMmNlZjU1YmZhNjNjMDM2MmI5OTU2NjBkZjg4YzFjYzA2MmY0NjU2OTE0ZGIwMWE3ODQxNjA2YjdlZWE3ZDJjZTM4NjE5YTcwYjg0MmVkZTBmM2Y1MzI3ZGI2YmU5M2ZjYTNiMzg4OTJkOGQ3NWI4Y2M4YjQ3NjBkNDExZmQ3ZmFlNGIxY2YwMGE5ZDk2MmM2ZDYzMWE1YmRjNmYzMmU0Y2U5MDYwOGNiMDMzMTlkZGE2ZDlkMGU4OGUwMzUwMDkwZTQ5MGRhMmY5ODU1MGU4ZmQ1ODc3NmQ0Yjg5MDM1Y2FiNTg3MjMyMGMwOTJmOTUyODkwYmQ3YjIwYTMzODI5Y2MwY2VlZTE0MWY5N2FiN2IzYmJjNDg3MWM0M2E3ZTViYWNjZWZiZjg4MjM1ZDRiNWMzMjBjM2IxNGM2ZWE2NWVkZjc0OWI0ZDNlNzZjOWYyMTkwZDM0ZTVkYTZkNjM1NjFmZWNmMWYyODIxMTMyNjIyOGFjMWU0MTA2NjY1OWQ4Y2JlZTRmMjIwYzI2NjNmNzYxYzBhZGEyY2VkZjkyNDkzZWExNzFhN2NhZThiNTMxNDNmNzEzM2RhY2UyOWNmYjQ4ZTk5YzE2YjcyM2ZmZTJjZDk5MjU0NGM5OWNhOTFlMDRlMWNiNTQ5ZjU4MGQxY2I4YWU5MWU0MDlmZDZmYjhjNGYzYTRmODA2ZWFiZjRlMDI3OWJmOTM4NmQwN2I5MTBmYzlkYzNjMGM2ODIzYjg4OWFjNWZkZjBhYWNjYzNhYmU0MDRmMTg3Y2Q0MGNmMjcyNWFmY2VkYzAzYmVjZGY2MmMzNWRkNzQ5MGExYjQ1MDdlNTczNDI1OTliYTJhMjNmM2FmNDg1NGM3ODZkYzBiZWIzYTllMGEwYWUyMTllNmZhNzYyN2YyNTI5ZDc3YzQ3MGY1YzIxNzI1NzhhM2EwYzM3NzM0NTM4MTlhYjE3ODJiNmRmOGM1NTI2YjQzZjUzNTZlNDVhM2Q5MDc4N2IwZGNkZTdmYmYzM2ZkMWQ2NGY2NjdmOWYzNDIzZjJkMmU2NzgyMTY5ZWM3MTE1Y2E3MDdlYWRhOGJmNzI0OTJmMGM3Y2QxNjJjMDI4NmFjOThmNDhmOWEyYWQzZDAwYzg5YmViYzA3NTA4ZjYwYzE1OGVmYjk5ZjBkOGY4MzQ1ODI5Yzg4Yzc0YTA3OGQyZjU5NTFjNmQzNTc1N2QyNjI0NWVjNTk0Y2JkMzc2YmVhMGNiZmEzMWYwZTA5MGRhYzhlYzNlYjQ0ZGIxN2M4MWE5NWY4MTE4MDAwNDJkMjQ2MmMzMjk2ODU5Yjg3ZjRhZmI1MDYxM2MxY2FiYTZkZDI0ODdiZDQ3MmVmNzBjMzFkN2YwNjZmZTMxOThiYzFhOWFlZjIwZTQzY2FlNDBkMDkxZWEzMmNiYTBhNDM0YmQ2ZDU2NDQ3YTU4YTNjODZjYTk0NjQ3MGNiZjM4ZjM3ZjU2YTZkZmQ4MDY0OWEyZGU3MzllN2EyZWE3M2RlNDE5NDljNmI4ODU2YmE5ZTM4Njc2YmRhNzA1MWE5MjlmMWU1YTczZjEwYTg2ZjgwNDJjZDQxZTMwYjVjMTA1ODYzNzlhMGY3NmRlOWExODZiZmU2N2Y5NzZhOTY3MTg0ZjNkYmFhYWU0YjdmNmFlMjM5MTlkNDljNDNiODc4MzRjMjA0MzY4YThkOGEyYzRkNjc3MzhkMTU0NmFiNTVjMWE0YTQ0Y2M3MzE5OGM4Y2YzOTAxZGI0ZGY1MzFmNGY5NTI4MDE5MjZjN2I2MDg1YjQzODI0YmFiMTQ3NTIxZTYwNWQzYzhmZjljYjNmOTRlNzg3MDJiYzc1MzE4NTRhN2M3ZDE2OWQyMzcyYjUzMDBhNGQzNzhhYWNjOTk3ZDM1ZTZjODYwZGQwMWNlYTMwZjU1YTFlMjQxMTMxMTQwZjQw
MWJmZGJkNWU3NzA4OWE5YzljNDIzY2E2ODk3OGE2ODMwYWEzYTlkZGJiZmMyYTE3NGZhOTc4NmI3ZTYyYmIzNTZlNjRiMzBiYzI4ZDMyYTVjMDMxYzgxZjZlOGEyMGMwNWFlNjJlYWM2ZWExNDY5OTFiZjk1Yzc4NzQzMjMwYTIyNzk1MWRlMzI4NjFjYjU5ZGQ3N2QxOWQ5MTMxNDgwYmY2ZTgyYTkwNzgwMTBlYjAzMzIzYjcxNGY0NzM5NDNmY2MwNTM3ODJmOTIwMGFkNzlmNzZiNjkxNDdmZGQwOTdhZTUwMTk1YjE4M2Q2YWM5NjVmN2NkNDNhMGI3MTEwOTNkZTM5NGM3OTYwNjNlNTBhMDAyNzNkOTE2MzQzODY2MzFkZThkMzViYTUxNmI4MTIyZWZjNzE5MTU0OTQ2NTIyYzc0YjhmNTY2OTMwZDM3YmIwZjJkM2Q4ODgyZGQwZTU0YTcyODM1NmYyZDk2ZWVlNzZiYmZlYjI1YTFjM2ZhNTg5OGY5OTM0YTc4NTBjYzRlNjY4NjE5YWMzOTg2MmE5NDhjMDVhMTc0MzE0MjIwOGFhMjk5OGY2ZmIwMmZlZWI2YTk0M2Q1NzcyN2JhZWU4ZmY5NGFmZjgzZGVjMTUyZmYxOWVkYmM1Y2RiZDkzYzBiNDc1OTEzMjFhYTY4MjI1MDA4ODhmYWJhMzAzNjdlZmRjYmJjNzhjYzE5MWI1MDViNTlmMjBhY2RiYTYzMzQyYzE1YTI2M2NiOGE1NDQ3NzQ4ODU3YWYxMzllMDJlMzY0ODlkNjRlNTRiMTc5YTgwOGRmMWU5YTk1ODY2YzE2YTYzM2EyZmUyYjA2MzM4OTI5YTc4MmRlMGFkZDgwZDZiYWU3Y2M1ZjljMWEzYzA5MGU4MTVlNjc2MGJjMzA0ZWU3ZmY1MDM5OGRiNDc0YTJkNWMzYWVhNTMxZjc0ZDU3NGNhZGNhZTIzZmZiZjcyY2FhNmU5YTNjNjFhYzNiMDJjNDdjYzQzZGJhYjA2NTgwNTkyZmE5YjMyNGMxMGJhMGRjNjgzZWIyYzRiNDg4NzFiMjk2YmIxNDBhMWUyZWRlOTE0NmY3MThkZTE4ZWU0M2QwZTk4NWY3NWQ1YWYyYjlkNjU5ODM5YzQwZWFiMzg2" """星火请求中的 GtToken 字段""" sid: Optional[str] = "" """星火请求中的 sid 字段""" proxy: Optional[str] = None """可选的代理地址,留空则检测系统代理""" + class YiyanAuths(BaseModel): accounts: List[YiyanCookiePath] = [] """文心一言的账号列表""" @@ -241,6 +245,7 @@ class XinghuoAuths(BaseModel): accounts: List[XinghuoCookiePath] = [] """讯飞星火大模型的账号列表""" + class ChatGLMAPI(BaseModel): api_endpoint: str """自定义 ChatGLM API 的接入点""" diff --git a/manager/bot.py b/manager/bot.py index 578ef844..795a4db4 100644 --- a/manager/bot.py +++ b/manager/bot.py @@ -5,7 +5,7 @@ import urllib.request from typing import List, Dict from urllib.parse import urlparse - +import re import base64 import json import time @@ -120,8 +120,14 @@ async def handle_openai(self): openai.api_base = self.config.openai.api_endpoint or openai.api_base if openai.api_base.endswith("/"): openai.api_base.removesuffix("/") - logger.info(f"当前的 api_endpoint 为:{openai.api_base}") - await self.login_openai() + + pattern = r'^https://[^/]+/v1$' + if match := re.match(pattern, openai.api_base): + logger.info(f"当前的 api_endpoint 为:{openai.api_base}") + await self.login_openai() + else: + logger.error("API反代地址填写错误,正确格式应为 'https://<网址>/v1'") + raise ValueError("API反代地址填写错误,正确格式应为 'https://<网址>/v1'") async def login(self): self.bots = { @@ -271,7 +277,6 @@ def login_xinghuo(self): logger.error("所有 讯飞星火 账号均解析失败!") logger.success(f"成功解析 {len(self.bots['xinghuo-cookie'])}/{len(self.xinghuo)} 个 讯飞星火 账号!") - def login_poe(self): from adapter.quora.poe import PoeClientWrapper try: @@ -349,7 +354,9 @@ async def login_openai(self): # sourcery skip: raise-specific-error counter = counter + 1 except httpx.HTTPStatusError as e: logger.error("登录失败! 可能是账号密码错误,或者 Endpoint 不支持 该登录方式。{exc}", exc=e) - except (ConnectTimeout, RequestException, SSLError, urllib3.exceptions.MaxRetryError, ClientConnectorError) as e: + except ( + ConnectTimeout, RequestException, SSLError, urllib3.exceptions.MaxRetryError, + ClientConnectorError) as e: logger.error("登录失败! 连接 OpenAI 服务器失败,请更换代理节点重试!{exc}", exc=e) except APIKeyNoFundsError: logger.error("登录失败! 
API 账号余额不足,无法继续使用。") diff --git a/requirements.txt b/requirements.txt index 2fb0fa8d..190f1697 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,9 @@ graia-ariadne==0.11.5 graiax-silkcoder -revChatGPT==6.3.3 +revChatGPT~=6.5.0 toml~=0.10.2 Pillow>=9.3.0 -tinydb~=4.8.0 +tinydb~=4.7.1 loguru~=0.7.0 asyncio~=3.4.3 @@ -17,7 +17,7 @@ qrcode~=7.4.2 openai~=0.27.8 EdgeGPT==0.8.2 aiohttp~=3.8.4 -OpenAIAuth~=1.0.2 +OpenAIAuth urllib3~=1.26.15 BingImageCreator~=0.4.2 @@ -33,9 +33,13 @@ azure-cognitiveservices-speech poe-api~=0.4.8 regex~=2023.6.3 -httpx +httpx~=0.24.1 Quart==0.17.0 edge-tts wechatpy~=2.0.0a26 pydub~=0.25.1 + +creart~=0.2.2 +tiktoken~=0.4.0 +httpcore~=0.17.2 diff --git a/universal.py b/universal.py index 1c93f46e..db855383 100644 --- a/universal.py +++ b/universal.py @@ -85,7 +85,7 @@ async def respond(msg: str): return ret # TTS Converting if conversation_context.conversation_voice and isinstance(msg, MessageChain): - if request_from == BotPlatform.Onebot or request_from == BotPlatform.AriadneBot: + if request_from in [BotPlatform.Onebot, BotPlatform.AriadneBot]: voice_type = VoiceType.Silk elif request_from == BotPlatform.HttpService: voice_type = VoiceType.Mp3 @@ -201,8 +201,10 @@ async def request(_session_id, prompt: str, conversation_context, _respond): await conversation_context.switch_model(model_name) await respond(f"已切换至 {model_name} 模型,让我们聊天吧!") else: + logger.warning(f"模型 {model_name} 不在支持列表中,下次将尝试使用此模型创建对话。") + await conversation_context.switch_model(model_name) await respond( - f"当前的 AI 不支持切换至 {model_name} 模型,目前仅支持:{conversation_context.supported_models}!") + f"模型 {model_name} 不在支持列表中,下次将尝试使用此模型创建对话,目前AI仅支持:{conversation_context.supported_models}!") return # 加载预设 diff --git a/utils/text_to_img.py b/utils/text_to_img.py index fa2adc8c..e76ca38e 100644 --- a/utils/text_to_img.py +++ b/utils/text_to_img.py @@ -333,16 +333,19 @@ async def text_to_image(text): with StringIO(html) as input_file: ok = False try: - # 调用imgkit将html转为图片 - ok = await asyncio.get_event_loop().run_in_executor(None, imgkit.from_file, input_file, - temp_jpg_filename, { - "enable-local-file-access": "", - "allow": asset_folder, - "width": config.text_to_image.width, # 图片宽度 - }, None, None, None, imgkit_config) - # 调用PIL将图片读取为 JPEG,RGB 格式 - image = Image.open(temp_jpg_filename, formats=['PNG']).convert('RGB') - ok = True + if config.text_to_image.wkhtmltoimage: + # 调用imgkit将html转为图片 + ok = await asyncio.get_event_loop().run_in_executor(None, imgkit.from_file, input_file, + temp_jpg_filename, { + "enable-local-file-access": "", + "allow": asset_folder, + "width": config.text_to_image.width, # 图片宽度 + }, None, None, None, imgkit_config) + # 调用PIL将图片读取为 JPEG,RGB 格式 + image = Image.open(temp_jpg_filename, formats=['PNG']).convert('RGB') + ok = True + else: + ok = False except Exception as e: logger.exception(e) finally: @@ -357,6 +360,7 @@ async def text_to_image(text): return image + async def to_image(text) -> GraiaImage: img = await text_to_image(text=str(text)) b = BytesIO() From 71730dc740ac2dec4ecd2dd89520ba033a6c5b10 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Tue, 11 Jul 2023 10:18:11 +0800 Subject: [PATCH 45/68] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E7=A8=8B=E5=BA=8F?= =?UTF-8?q?=E5=BC=80=E5=90=AFhttp=E6=A8=A1=E5=BC=8F=E5=85=B3=E4=B8=8D?= =?UTF-8?q?=E6=8E=89=E7=9A=84=E9=97=AE=E9=A2=98=20(#1040)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- bot.py | 3 +-- utils/exithooks.py | 10 ++++++---- 2 
files changed, 7 insertions(+), 6 deletions(-) diff --git a/bot.py b/bot.py index d2ff3f0e..8cbff67f 100644 --- a/bot.py +++ b/bot.py @@ -10,8 +10,6 @@ sys.path.append(os.getcwd()) -hook() - loop = creart.create(AbstractEventLoop) loop.run_until_complete(botManager.login()) @@ -57,5 +55,6 @@ logger.exception(e) logger.error("[Edge TTS] 读取失败!") +hook() loop.run_until_complete(asyncio.gather(*bots)) loop.run_forever() diff --git a/utils/exithooks.py b/utils/exithooks.py index 08c78678..5400d412 100644 --- a/utils/exithooks.py +++ b/utils/exithooks.py @@ -1,10 +1,9 @@ import atexit import sys - +import os from loguru import logger import signal -import sys - +from constants import config class ExitHooks(object): def __init__(self): @@ -44,7 +43,10 @@ def foo(): def exit_gracefully(signal, frame): - print("Received signal {}, exiting...".format(signal)) + if config.http: + logger.warning("检测到HTTP配置,将强制关闭程序……") + os._exit(0) + logger.warning("程序即将退出...".format(signal)) sys.exit(0) From 62b3cff809c86e4bc5c7898223f3bdae8c3542e8 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Wed, 12 Jul 2023 01:38:45 +0800 Subject: [PATCH 46/68] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E9=87=8D=E6=9E=84API?= =?UTF-8?q?=E9=94=99=E8=AF=AF=20(#1042)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 修复程序开启http模式关不掉的问题 * 修复部分api错误 修复了加载预设导致engine重置为None的问题 --- adapter/chatgpt/api.py | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index 254233bc..5343568a 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -1,11 +1,9 @@ - import json import time from typing import AsyncGenerator import aiohttp import async_timeout -import tiktoken from loguru import logger @@ -14,7 +12,6 @@ from constants import botManager, config import tiktoken - DEFAULT_ENGINE: str = "gpt-3.5-turbo" @@ -67,6 +64,8 @@ def add_to_conversation( # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb def count_tokens(self, session_id: str = "default", model: str = DEFAULT_ENGINE): """Return the number of tokens used by a list of messages.""" + if model is None: + model = DEFAULT_ENGINE try: encoding = tiktoken.encoding_for_model(model) except KeyError: @@ -195,7 +194,8 @@ async def request_with_stream(self, session_id: str = None, messages: list = Non } async with aiohttp.ClientSession() as session: with async_timeout.timeout(self.bot.timeout): - async with session.post(f'{api_endpoint}/chat/completions', headers=headers, data=json.dumps(data), proxy=proxy) as resp: + async with session.post(f'{api_endpoint}/chat/completions', headers=headers, data=json.dumps(data), + proxy=proxy) as resp: if resp.status != 200: response_text = await resp.text() raise Exception( @@ -206,18 +206,22 @@ async def request_with_stream(self, session_id: str = None, messages: list = Non completion_text: str = '' async for line in resp.content: - line = line.decode('utf-8').strip() - if not line.startswith("data: "): - continue - line = line[len("data: "):] - if line == "[DONE]": - break - if not line: - continue try: + line = line.decode('utf-8').strip() + if not line.startswith("data: "): + continue + line = line[len("data: "):] + if line == "[DONE]": + break + if not line: + continue event = json.loads(line) except json.JSONDecodeError: raise Exception(f"JSON解码错误: {line}") from None + except Exception as e: + logger.error(f"未知错误: 
{e}\n响应内容: {resp.content}") + logger.error("请将该段日记提交到项目issue中,以便修复该问题。") + raise Exception(f"未知错误: {e}") from None if 'error' in event: raise Exception(f"响应错误: {event['error']}") if 'choices' in event and len(event['choices']) > 0 and 'delta' in event['choices'][0]: @@ -236,8 +240,7 @@ async def compressed_session(self, session_id: str): logger.debug(f"不存在该会话,不进行压缩: {session_id}") return - if config.openai.gpt_params.compressed_session and self.bot.count_tokens( - session_id) > config.openai.gpt_params.compressed_tokens: + if self.bot.count_tokens(session_id) > config.openai.gpt_params.compressed_tokens: logger.debug('开始进行会话压缩') filtered_data = [entry for entry in self.bot.conversation[session_id] if entry['role'] != 'system'] @@ -258,12 +261,14 @@ async def ask(self, prompt: str) -> AsyncGenerator[str, None]: self.manage_conversation(self.session_id, prompt) - await self.compressed_session(self.session_id) + if config.openai.gpt_params.compressed_session: + await self.compressed_session(self.session_id) event_time = None try: - + if self.bot.engine not in self.supported_models: + logger.warning(f"当前模型非官方支持的模型,请注意控制台输出,当前使用的模型为 {self.bot.engine}") logger.debug(f"[尝试使用ChatGPT-API:{self.bot.engine}] 请求:{prompt}") self.bot.add_to_conversation(prompt, "user", session_id=self.session_id) start_time = time.time() @@ -283,6 +288,7 @@ async def ask(self, prompt: str) -> AsyncGenerator[str, None]: yield f"发生错误: \n{e}" async def preset_ask(self, role: str, text: str): + self.bot.engine = self.current_model if role.endswith('bot') or role in {'assistant', 'chatgpt'}: logger.debug(f"[预设] 响应:{text}") yield text From 4124979c605f39be6006efaf44e6a43cbe65de9a Mon Sep 17 00:00:00 2001 From: Jerry Chen <1261449269@qq.com> Date: Wed, 12 Jul 2023 01:47:44 +0800 Subject: [PATCH 47/68] =?UTF-8?q?=E6=9B=B4=E6=96=B0edgeGPT=E7=9A=84?= =?UTF-8?q?=E6=96=B9=E5=BC=8F=E5=92=8C=E4=BE=9D=E8=B5=96=E7=89=88=E6=9C=AC?= =?UTF-8?q?=20(#1041)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- adapter/ms/bing.py | 4 ++-- conversation.py | 2 +- requirements.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/adapter/ms/bing.py b/adapter/ms/bing.py index cafa5b53..eb8a4c5c 100644 --- a/adapter/ms/bing.py +++ b/adapter/ms/bing.py @@ -9,14 +9,14 @@ from constants import config from adapter.botservice import BotAdapter -from EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle, NotAllowedToAccess +from EdgeGPT.EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle, NotAllowedToAccess from contextlib import suppress from constants import botManager from drawing import DrawingAPI from exceptions import BotOperationNotSupportedException from loguru import logger -from ImageGen import ImageGenAsync +from EdgeGPT.ImageGen import ImageGenAsync from graia.ariadne.message.element import Image as GraiaImage image_pattern = r"!\[.*\]\((.*)\)" diff --git a/conversation.py b/conversation.py index 515c5508..2f1bdebb 100644 --- a/conversation.py +++ b/conversation.py @@ -5,7 +5,7 @@ from typing import List, Dict, Optional import httpx -from EdgeGPT import ConversationStyle +from EdgeGPT.EdgeGPT import ConversationStyle from graia.amnesia.message import MessageChain from graia.ariadne.message.element import Image as GraiaImage, Element from loguru import logger diff --git a/requirements.txt b/requirements.txt index 190f1697..13922dba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ pygments~=2.15.1 imgkit~=1.2.3 qrcode~=7.4.2 openai~=0.27.8 
-EdgeGPT==0.8.2 +EdgeGPT==0.12.1 aiohttp~=3.8.4 OpenAIAuth urllib3~=1.26.15 From 63f7111eb8598cbc6b99a37caf98c1eac3072a27 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Wed, 12 Jul 2023 12:30:29 +0800 Subject: [PATCH 48/68] =?UTF-8?q?Revert=20"=E6=9B=B4=E6=96=B0edgeGPT?= =?UTF-8?q?=E7=9A=84=E6=96=B9=E5=BC=8F=E5=92=8C=E4=BE=9D=E8=B5=96=E7=89=88?= =?UTF-8?q?=E6=9C=AC=20(#1041)"=20(#1044)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 4124979c605f39be6006efaf44e6a43cbe65de9a. --- adapter/ms/bing.py | 4 ++-- conversation.py | 2 +- requirements.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/adapter/ms/bing.py b/adapter/ms/bing.py index eb8a4c5c..cafa5b53 100644 --- a/adapter/ms/bing.py +++ b/adapter/ms/bing.py @@ -9,14 +9,14 @@ from constants import config from adapter.botservice import BotAdapter -from EdgeGPT.EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle, NotAllowedToAccess +from EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle, NotAllowedToAccess from contextlib import suppress from constants import botManager from drawing import DrawingAPI from exceptions import BotOperationNotSupportedException from loguru import logger -from EdgeGPT.ImageGen import ImageGenAsync +from ImageGen import ImageGenAsync from graia.ariadne.message.element import Image as GraiaImage image_pattern = r"!\[.*\]\((.*)\)" diff --git a/conversation.py b/conversation.py index 2f1bdebb..515c5508 100644 --- a/conversation.py +++ b/conversation.py @@ -5,7 +5,7 @@ from typing import List, Dict, Optional import httpx -from EdgeGPT.EdgeGPT import ConversationStyle +from EdgeGPT import ConversationStyle from graia.amnesia.message import MessageChain from graia.ariadne.message.element import Image as GraiaImage, Element from loguru import logger diff --git a/requirements.txt b/requirements.txt index 13922dba..190f1697 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ pygments~=2.15.1 imgkit~=1.2.3 qrcode~=7.4.2 openai~=0.27.8 -EdgeGPT==0.12.1 +EdgeGPT==0.8.2 aiohttp~=3.8.4 OpenAIAuth urllib3~=1.26.15 From 101e406af0dc59e0e25d28db62f2edffc94e92ff Mon Sep 17 00:00:00 2001 From: "sourcery-ai[bot]" <58596630+sourcery-ai[bot]@users.noreply.github.com> Date: Wed, 12 Jul 2023 23:43:33 +0800 Subject: [PATCH 49/68] =?UTF-8?q?=E4=B8=8D=E5=86=8D=E5=BC=BA=E5=88=B6?= =?UTF-8?q?=E6=A3=80=E6=9F=A5API=EF=BC=8C=E6=96=B0=E5=A2=9E=E9=9D=9E?= =?UTF-8?q?=E6=B5=81API=E8=AF=B7=E6=B1=82=20(Sourcery=20refactored)=20(#10?= =?UTF-8?q?48)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 修复程序开启http模式关不掉的问题 * 修复部分api错误 修复了加载预设导致engine重置为None的问题 * 不再强制检查API,新增非流API请求 * 格式化文件 * 'Refactored by Sourcery' --------- Co-authored-by: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Co-authored-by: Sourcery AI <> --- adapter/chatgpt/api.py | 112 ++++++++++++++++++++++++----------------- config.py | 1 + manager/bot.py | 11 ++-- 3 files changed, 72 insertions(+), 52 deletions(-) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index 5343568a..b4c2b462 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -1,16 +1,14 @@ import json import time -from typing import AsyncGenerator - import aiohttp import async_timeout - +import tiktoken from loguru import logger +from typing import AsyncGenerator from adapter.botservice import BotAdapter from config import OpenAIAPIKey from constants import botManager, config 
-import tiktoken DEFAULT_ENGINE: str = "gpt-3.5-turbo" @@ -53,12 +51,7 @@ async def rollback(self, session_id: str = "default", n: int = 1) -> None: logger.error(f"未知错误: {e}") raise - def add_to_conversation( - self, - message: str, - role: str, - session_id: str = "default", - ) -> None: + def add_to_conversation(self, message: str, role: str, session_id: str = "default") -> None: self.conversation[session_id].append({"role": role, "content": message}) # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb @@ -71,26 +64,8 @@ def count_tokens(self, session_id: str = "default", model: str = DEFAULT_ENGINE) except KeyError: encoding = tiktoken.get_encoding("cl100k_base") - if model in { - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k-0613", - "gpt-4-0314", - "gpt-4-32k-0314", - "gpt-4-0613", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-4", - "gpt-4-32k" - }: - tokens_per_message = 3 - tokens_per_name = 1 - elif model == "gpt-3.5-turbo-0301": - tokens_per_message = 4 # every message follows {role/name}\n{content}\n - tokens_per_name = -1 # if there's a name, the role is omitted - else: - logger.warning("未找到相应模型计算方法,使用默认方法进行计算") - tokens_per_message = 3 - tokens_per_name = 1 + tokens_per_message = 4 + tokens_per_name = 1 num_tokens = 0 for message in self.conversation[session_id]: @@ -168,15 +143,7 @@ async def on_reset(self): self.bot.engine = self.api_info.model self.__conversation_keep_from = 0 - async def request_with_stream(self, session_id: str = None, messages: list = None) -> AsyncGenerator[str, None]: - self.api_info = botManager.pick('openai-api') - api_key = self.api_info.api_key - proxy = self.api_info.proxy - api_endpoint = config.openai.api_endpoint or "https://api.openai.com/v1" - - if not messages: - messages = self.bot.conversation[session_id] - + def construct_data(self, messages: list = None, api_key: str = None, stream: bool = True): headers = { 'Content-Type': 'application/json', 'Authorization': f'Bearer {api_key}' @@ -184,7 +151,7 @@ async def request_with_stream(self, session_id: str = None, messages: list = Non data = { 'model': self.bot.engine, 'messages': messages, - 'stream': True, + 'stream': stream, 'temperature': self.bot.temperature, 'top_p': self.bot.top_p, 'presence_penalty': self.bot.presence_penalty, @@ -192,16 +159,64 @@ async def request_with_stream(self, session_id: str = None, messages: list = Non "user": 'user', 'max_tokens': self.bot.get_max_tokens(self.session_id, self.bot.engine), } + return headers, data + + def _prepare_request(self, session_id: str = None, messages: list = None, stream: bool = False): + self.api_info = botManager.pick('openai-api') + api_key = self.api_info.api_key + proxy = self.api_info.proxy + api_endpoint = config.openai.api_endpoint or "https://api.openai.com/v1" + + if not messages: + messages = self.bot.conversation[session_id] + + headers, data = self.construct_data(messages, api_key, stream) + + return api_key, proxy, api_endpoint, headers, data + + async def _process_response(self, resp, session_id: str = None): + + result = await resp.json() + + total_tokens = result.get('usage', {}).get('total_tokens', None) + logger.debug(f"[ChatGPT-API:{self.bot.engine}] 使用 token 数:{total_tokens}") + if total_tokens is None: + raise Exception("Response does not contain 'total_tokens'") + + content = result.get('choices', [{}])[0].get('message', {}).get('content', None) + logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{content}") + if content is None: + raise Exception("Response does 
not contain 'content'") + + response_role = result.get('choices', [{}])[0].get('message', {}).get('role', None) + if response_role is None: + raise Exception("Response does not contain 'role'") + + self.bot.add_to_conversation(content, response_role, session_id) + + return content + + async def request(self, session_id: str = None, messages: list = None) -> str: + api_key, proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=False) + async with aiohttp.ClientSession() as session: with async_timeout.timeout(self.bot.timeout): - async with session.post(f'{api_endpoint}/chat/completions', headers=headers, data=json.dumps(data), - proxy=proxy) as resp: + async with session.post(f'{api_endpoint}/chat/completions', headers=headers, + data=json.dumps(data)) as resp: if resp.status != 200: response_text = await resp.text() raise Exception( f"{resp.status} {resp.reason} {response_text}", ) + return await self._process_response(resp, session_id) + + async def request_with_stream(self, session_id: str = None, messages: list = None) -> AsyncGenerator[str, None]: + api_key, proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=True) + async with aiohttp.ClientSession() as session: + with async_timeout.timeout(self.bot.timeout): + async with session.post(f'{api_endpoint}/chat/completions', headers=headers, data=json.dumps(data), + proxy=proxy) as resp: response_role: str = '' completion_text: str = '' @@ -273,12 +288,15 @@ async def ask(self, prompt: str) -> AsyncGenerator[str, None]: self.bot.add_to_conversation(prompt, "user", session_id=self.session_id) start_time = time.time() - async for completion_text in self.request_with_stream(session_id=self.session_id): - yield completion_text + if config.openai.gpt_params.stream: + async for completion_text in self.request_with_stream(session_id=self.session_id): + yield completion_text - token_count = self.bot.count_tokens(self.session_id, self.bot.engine) - logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{completion_text}") - logger.debug(f"[ChatGPT-API:{self.bot.engine}] 使用 token 数:{token_count}") + token_count = self.bot.count_tokens(self.session_id, self.bot.engine) + logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{completion_text}") + logger.debug(f"[ChatGPT-API:{self.bot.engine}] 使用 token 数:{token_count}") + else: + yield await self.request(session_id=self.session_id) event_time = time.time() - start_time if event_time is not None: logger.debug(f"[ChatGPT-API:{self.bot.engine}] 接收到全部消息花费了{event_time:.2f}秒") diff --git a/config.py b/config.py index c428707c..ef77bbee 100644 --- a/config.py +++ b/config.py @@ -85,6 +85,7 @@ class OpenAIParams(BaseModel): min_tokens: int = 1000 compressed_session: bool = False compressed_tokens: int = 1000 + stream: bool = True class OpenAIAuths(BaseModel): diff --git a/manager/bot.py b/manager/bot.py index 795a4db4..f04154dd 100644 --- a/manager/bot.py +++ b/manager/bot.py @@ -120,14 +120,15 @@ async def handle_openai(self): openai.api_base = self.config.openai.api_endpoint or openai.api_base if openai.api_base.endswith("/"): openai.api_base.removesuffix("/") + logger.info(f"当前的 api_endpoint 为:{openai.api_base}") pattern = r'^https://[^/]+/v1$' - if match := re.match(pattern, openai.api_base): - logger.info(f"当前的 api_endpoint 为:{openai.api_base}") - await self.login_openai() - else: + + if not re.match(pattern, openai.api_base): logger.error("API反代地址填写错误,正确格式应为 'https://<网址>/v1'") - raise ValueError("API反代地址填写错误,正确格式应为 'https://<网址>/v1'") + + 
await self.login_openai() + async def login(self): self.bots = { From 2f9da1d06bdfa93569f5a715a67d8ed745866e16 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Wed, 12 Jul 2023 23:52:02 +0800 Subject: [PATCH 50/68] Create quickstart-windows-dev-gocqhttp.yml --- .../quickstart-windows-dev-gocqhttp.yml | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 .github/workflows/quickstart-windows-dev-gocqhttp.yml diff --git a/.github/workflows/quickstart-windows-dev-gocqhttp.yml b/.github/workflows/quickstart-windows-dev-gocqhttp.yml new file mode 100644 index 00000000..894f1b7d --- /dev/null +++ b/.github/workflows/quickstart-windows-dev-gocqhttp.yml @@ -0,0 +1,79 @@ +name: Windows Quickstart Dev (go-cqhttp) + +on: + workflow_dispatch: + push: + branches: + - 'browser-version-dev' + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + build: + name: Quickstart (GO-CQHTTP) + runs-on: Windows-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Generate files + run: | + mkdir C:/generated_files + mkdir C:/tmp_files + echo "Creating folders..." + cd C:/generated_files + echo "Downloading go-cqhttp..." + mkdir go-cqhttp + mkdir chatgpt + mkdir ffmpeg + Invoke-WebRequest -URI https://github.com/Mrs4s/go-cqhttp/releases/download/v1.0.1/go-cqhttp_windows_amd64.exe -OutFile C:/generated_files/go-cqhttp/go-cqhttp.exe + + cp -r D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\* C:\generated_files\chatgpt\ + + echo "Downloading ffmpeg ..." + Invoke-WebRequest https://www.gyan.dev/ffmpeg/builds/packages/ffmpeg-6.0-full_build.7z -OutFile C:/tmp_files/ffmpeg.7z + 7z x C:/tmp_files/ffmpeg.zip -r -oC:/generated_files/ffmpeg + + echo "Downloading Python3.11 ..." + Invoke-WebRequest https://www.python.org/ftp/python/3.11.2/python-3.11.2-embed-amd64.zip -OutFile C:/tmp_files/python.zip + 7z x C:/tmp_files/python.zip -r -oC:/generated_files/python3.11 + + echo "Downloading get-pip.py ..." + Invoke-WebRequest -URI https://bootstrap.pypa.io/get-pip.py -OutFile C:/generated_files/python3.11/get-pip.py + echo "import site" >> C:/generated_files/python3.11/python311._pth + + echo "Moving files..." + mv D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\.github\quickstarts\windows\ C:/generated_files/files/ + + echo "Replacing..." + cp C:/generated_files/files/go-cqhttp/scripts/初始化.cmd C:/generated_files/ + + Invoke-WebRequest -URI https://github.com/lss233/awesome-chatgpt-qq-presets/archive/refs/heads/master.zip -OutFile C:/tmp_files/presets.zip + 7z x C:/tmp_files/presets.zip -oC:/tmp_files/ + Copy-Item C:\tmp_files\awesome-chatgpt-qq-presets-master\* -Destination C:\generated_files\chatgpt\presets\ -Recurse + + Invoke-WebRequest -URI https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox-0.12.6-1.mxe-cross-win64.7z -O C:/tmp_files/wkhtmltox.7z + + echo "Downloading vc_redist.exe..." + Invoke-WebRequest -URI https://aka.ms/vs/17/release/vc_redist.x64.exe -O "C:\generated_files\【语音功能依赖】vc_redist.x64.exe" + + echo "Setting up wkhtmltox" + 7z x C:/tmp_files/wkhtmltox.7z -oC:/tmp_files/ + cp C:/tmp_files/wkhtmltox/bin/wkhtmltoimage.exe C:\generated_files\chatgpt\ + + echo "Downloading packages..." + cd C:/generated_files/chatgpt + ..\python3.11\python.exe C:/generated_files/python3.11/get-pip.py + ..\python3.11\python.exe -m pip install -r requirements.txt + + echo "Packing..." 
+ cd C:/generated_files + 7z a quickstart-windows-dev-go-cqhttp-amd64.zip C:\generated_files\* + - name: Archive production artifacts + uses: actions/upload-artifact@v3 + with: + name: quickstart-windows-dev-go-cqhttp-amd64.zip + path: | + C:\generated_files\quickstart-windows-dev-go-cqhttp-amd64.zip From 9f8a4fd2f8d2b9f88a7abeb73fb40d2d98d9c533 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Sun, 16 Jul 2023 21:58:09 +0800 Subject: [PATCH 51/68] =?UTF-8?q?=E4=BC=98=E5=8C=96=E9=83=A8=E5=88=86?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=20(#1058)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 修复程序开启http模式关不掉的问题 * 修复部分api错误 修复了加载预设导致engine重置为None的问题 * 不再强制检查API,新增非流API请求 * 格式化文件 * 优化部分代码 --- adapter/chatgpt/api.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index b4c2b462..40aa2b2c 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -140,7 +140,7 @@ async def on_reset(self): self.bot.api_key = self.api_info.api_key self.bot.proxy = self.api_info.proxy self.bot.conversation[self.session_id] = [] - self.bot.engine = self.api_info.model + self.bot.engine = self.current_model self.__conversation_keep_from = 0 def construct_data(self, messages: list = None, api_key: str = None, stream: bool = True): @@ -172,7 +172,7 @@ def _prepare_request(self, session_id: str = None, messages: list = None, stream headers, data = self.construct_data(messages, api_key, stream) - return api_key, proxy, api_endpoint, headers, data + return proxy, api_endpoint, headers, data async def _process_response(self, resp, session_id: str = None): @@ -197,21 +197,23 @@ async def _process_response(self, resp, session_id: str = None): return content async def request(self, session_id: str = None, messages: list = None) -> str: - api_key, proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=False) + proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=False) async with aiohttp.ClientSession() as session: with async_timeout.timeout(self.bot.timeout): async with session.post(f'{api_endpoint}/chat/completions', headers=headers, - data=json.dumps(data)) as resp: + data=json.dumps(data), proxy=proxy) as resp: if resp.status != 200: response_text = await resp.text() raise Exception( f"{resp.status} {resp.reason} {response_text}", ) - return await self._process_response(resp, session_id) + content = await self._process_response(resp, session_id) + + return content async def request_with_stream(self, session_id: str = None, messages: list = None) -> AsyncGenerator[str, None]: - api_key, proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=True) + proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=True) async with aiohttp.ClientSession() as session: with async_timeout.timeout(self.bot.timeout): @@ -296,7 +298,9 @@ async def ask(self, prompt: str) -> AsyncGenerator[str, None]: logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{completion_text}") logger.debug(f"[ChatGPT-API:{self.bot.engine}] 使用 token 数:{token_count}") else: - yield await self.request(session_id=self.session_id) + completion_text = await self.request(session_id=self.session_id) + yield completion_text + event_time = time.time() - start_time if event_time is not None: logger.debug(f"[ChatGPT-API:{self.bot.engine}] 接收到全部消息花费了{event_time:.2f}秒") From 
f4de8e3955960ee73734f495170fcff67d71d255 Mon Sep 17 00:00:00 2001 From: Jerry Chen <1261449269@qq.com> Date: Sun, 16 Jul 2023 21:58:47 +0800 Subject: [PATCH 52/68] =?UTF-8?q?edgeGPT=E6=9B=B4=E6=96=B0=20(#1051)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 更新edgeGPT的方式和依赖版本 * 更新适配新的EDGEGPT的流式传输 --- adapter/ms/bing.py | 8 +++++--- config.py | 2 +- conversation.py | 2 +- requirements.txt | 2 +- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/adapter/ms/bing.py b/adapter/ms/bing.py index cafa5b53..5cd39d40 100644 --- a/adapter/ms/bing.py +++ b/adapter/ms/bing.py @@ -9,14 +9,14 @@ from constants import config from adapter.botservice import BotAdapter -from EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle, NotAllowedToAccess +from EdgeGPT.EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle, NotAllowedToAccess from contextlib import suppress from constants import botManager from drawing import DrawingAPI from exceptions import BotOperationNotSupportedException from loguru import logger -from ImageGen import ImageGenAsync +from EdgeGPT.ImageGen import ImageGenAsync from graia.ariadne.message.element import Image as GraiaImage image_pattern = r"!\[.*\]\((.*)\)" @@ -95,7 +95,9 @@ async def ask(self, prompt: str) -> Generator[str, None, None]: return else: # 生成中的消息 - parsed_content = re.sub(r"\[\^\d+\^\]", "", response) + parsed_content = re.sub(r"Searching the web for:(.*)\n", "", response) + parsed_content = re.sub(r"```json(.*)```", "", parsed_content,flags=re.DOTALL) + parsed_content = re.sub(r"Generating answers for you...", "", parsed_content) if config.bing.show_references: parsed_content = re.sub(r"\[(\d+)\]: ", r"\1: ", parsed_content) else: diff --git a/config.py b/config.py index ef77bbee..fcd461b7 100644 --- a/config.py +++ b/config.py @@ -203,7 +203,7 @@ class BingAuths(BaseModel): """Bing 的会话创建接入点""" accounts: List[BingCookiePath] = [] """Bing 的账号列表""" - max_messages: int = 20 + max_messages: int = 30 """Bing 的最大消息数,仅展示用""" diff --git a/conversation.py b/conversation.py index 515c5508..2f1bdebb 100644 --- a/conversation.py +++ b/conversation.py @@ -5,7 +5,7 @@ from typing import List, Dict, Optional import httpx -from EdgeGPT import ConversationStyle +from EdgeGPT.EdgeGPT import ConversationStyle from graia.amnesia.message import MessageChain from graia.ariadne.message.element import Image as GraiaImage, Element from loguru import logger diff --git a/requirements.txt b/requirements.txt index 190f1697..13922dba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ pygments~=2.15.1 imgkit~=1.2.3 qrcode~=7.4.2 openai~=0.27.8 -EdgeGPT==0.8.2 +EdgeGPT==0.12.1 aiohttp~=3.8.4 OpenAIAuth urllib3~=1.26.15 From 21f179e9019d321604ce9e7355ff03488479af35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 16 Jul 2023 22:00:03 +0800 Subject: [PATCH 53/68] chore(deps): update pydantic requirement from ~=1.10.9 to ~=2.0.2 (#1036) Updates the requirements on [pydantic](https://github.com/pydantic/pydantic) to permit the latest version. - [Release notes](https://github.com/pydantic/pydantic/releases) - [Changelog](https://github.com/pydantic/pydantic/blob/main/HISTORY.md) - [Commits](https://github.com/pydantic/pydantic/compare/v1.10.9...v2.0.2) --- updated-dependencies: - dependency-name: pydantic dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 13922dba..8b125634 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ tinydb~=4.7.1 loguru~=0.7.0 asyncio~=3.4.3 -pydantic~=1.10.9 +pydantic~=2.0.2 markdown~=3.4.3 python-markdown-math~=0.8 From 0c614435ef69207bd65fc7b06569c45e30233e3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 16 Jul 2023 22:00:17 +0800 Subject: [PATCH 54/68] chore(deps): update urllib3 requirement from ~=1.26.15 to ~=2.0.3 (#948) Updates the requirements on [urllib3](https://github.com/urllib3/urllib3) to permit the latest version. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.15...2.0.3) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8b125634..5a417617 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,7 @@ openai~=0.27.8 EdgeGPT==0.12.1 aiohttp~=3.8.4 OpenAIAuth -urllib3~=1.26.15 +urllib3~=2.0.3 BingImageCreator~=0.4.2 requests~=2.31.0 From 0fb82e13b728168306bdb465858d33d31cd31486 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Mon, 17 Jul 2023 23:50:48 +0800 Subject: [PATCH 55/68] Update api.py --- adapter/chatgpt/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index 40aa2b2c..acb0187a 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -308,6 +308,7 @@ async def ask(self, prompt: str) -> AsyncGenerator[str, None]: except Exception as e: logger.error(f"[ChatGPT-API:{self.bot.engine}] 请求失败:\n{e}") yield f"发生错误: \n{e}" + raise async def preset_ask(self, role: str, text: str): self.bot.engine = self.current_model From 2be18376142081b1871e3ca9c689c21cb0e76e24 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Mon, 17 Jul 2023 23:53:05 +0800 Subject: [PATCH 56/68] Update requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5a417617..99f15657 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ tinydb~=4.7.1 loguru~=0.7.0 asyncio~=3.4.3 -pydantic~=2.0.2 +pydantic markdown~=3.4.3 python-markdown-math~=0.8 From 8ec1fa89598c6b5b2bd3fdb1a7fff3138c36d7ba Mon Sep 17 00:00:00 2001 From: "sourcery-ai[bot]" <58596630+sourcery-ai[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 00:16:32 +0800 Subject: [PATCH 57/68] =?UTF-8?q?=E4=BF=AE=E6=94=B9API=E5=93=8D=E5=BA=94?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=20(Sourcery=20refactored)=20(#1066)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update api.py * 'Refactored by Sourcery' --------- Co-authored-by: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Co-authored-by: Sourcery AI <> --- adapter/chatgpt/api.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git 
a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index acb0187a..57325c81 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -202,15 +202,13 @@ async def request(self, session_id: str = None, messages: list = None) -> str: async with aiohttp.ClientSession() as session: with async_timeout.timeout(self.bot.timeout): async with session.post(f'{api_endpoint}/chat/completions', headers=headers, - data=json.dumps(data), proxy=proxy) as resp: + data=json.dumps(data), proxy=proxy) as resp: if resp.status != 200: response_text = await resp.text() raise Exception( f"{resp.status} {resp.reason} {response_text}", ) - content = await self._process_response(resp, session_id) - - return content + return await self._process_response(resp, session_id) async def request_with_stream(self, session_id: str = None, messages: list = None) -> AsyncGenerator[str, None]: proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=True) @@ -249,7 +247,7 @@ async def request_with_stream(self, session_id: str = None, messages: list = Non event_text = delta['content'] completion_text += event_text self.latest_role = response_role - yield completion_text + yield event_text self.bot.add_to_conversation(completion_text, response_role, session_id) async def compressed_session(self, session_id: str): @@ -290,17 +288,18 @@ async def ask(self, prompt: str) -> AsyncGenerator[str, None]: self.bot.add_to_conversation(prompt, "user", session_id=self.session_id) start_time = time.time() + full_response = '' + if config.openai.gpt_params.stream: - async for completion_text in self.request_with_stream(session_id=self.session_id): - yield completion_text + async for resp in self.request_with_stream(session_id=self.session_id): + full_response += resp + yield full_response token_count = self.bot.count_tokens(self.session_id, self.bot.engine) - logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{completion_text}") + logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{full_response}") logger.debug(f"[ChatGPT-API:{self.bot.engine}] 使用 token 数:{token_count}") else: - completion_text = await self.request(session_id=self.session_id) - yield completion_text - + yield await self.request(session_id=self.session_id) event_time = time.time() - start_time if event_time is not None: logger.debug(f"[ChatGPT-API:{self.bot.engine}] 接收到全部消息花费了{event_time:.2f}秒") From 8b752dc37cd1dd89dd221c5ee1178558ed878897 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Jul 2023 16:05:15 +0800 Subject: [PATCH 58/68] chore(deps): bump revchatgpt from 6.3.3 to 6.8.1 (#1038) Bumps [revchatgpt](https://github.com/acheong08/ChatGPT) from 6.3.3 to 6.8.1. - [Release notes](https://github.com/acheong08/ChatGPT/releases) - [Commits](https://github.com/acheong08/ChatGPT/compare/6.3.3...6.8.1) --- updated-dependencies: - dependency-name: revchatgpt dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 99f15657..1033e0f6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ graia-ariadne==0.11.5 graiax-silkcoder -revChatGPT~=6.5.0 +revChatGPT~=6.8.1 toml~=0.10.2 Pillow>=9.3.0 tinydb~=4.7.1 From 3b993e66000561f4e8635b3be0cfaffeacc9f3f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Jul 2023 16:05:48 +0800 Subject: [PATCH 59/68] chore(deps): update poe-api requirement from ~=0.4.8 to ~=0.4.10 (#1061) Updates the requirements on [poe-api](https://github.com/ading2210/poe-api) to permit the latest version. - [Commits](https://github.com/ading2210/poe-api/commits) --- updated-dependencies: - dependency-name: poe-api dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1033e0f6..ae26c4d3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ tls-client python-dateutil~=2.8.2 discord.py azure-cognitiveservices-speech -poe-api~=0.4.8 +poe-api~=0.4.10 regex~=2023.6.3 httpx~=0.24.1 From 7a7bc9fe238a005400dc2adc47c5fa69242cec44 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Fri, 21 Jul 2023 16:20:53 +0800 Subject: [PATCH 60/68] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=BC=82=E5=B8=B8?= =?UTF-8?q?=E8=AF=B7=E6=B1=82=E6=8F=90=E7=A4=BA=20(#1077)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- adapter/chatgpt/api.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index 57325c81..5e508612 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -217,6 +217,12 @@ async def request_with_stream(self, session_id: str = None, messages: list = Non with async_timeout.timeout(self.bot.timeout): async with session.post(f'{api_endpoint}/chat/completions', headers=headers, data=json.dumps(data), proxy=proxy) as resp: + if resp.status != 200: + response_text = await resp.text() + raise Exception( + f"{resp.status} {resp.reason} {response_text}", + ) + response_role: str = '' completion_text: str = '' From ef33e8c96b670b3dc8e584a0c1493e3ecc4d8efe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Jul 2023 16:21:08 +0800 Subject: [PATCH 61/68] chore(deps): update poe-api requirement from ~=0.4.10 to ~=0.4.11 (#1076) Updates the requirements on [poe-api](https://github.com/ading2210/poe-api) to permit the latest version. - [Commits](https://github.com/ading2210/poe-api/commits) --- updated-dependencies: - dependency-name: poe-api dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ae26c4d3..2040452b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ tls-client python-dateutil~=2.8.2 discord.py azure-cognitiveservices-speech -poe-api~=0.4.10 +poe-api~=0.4.11 regex~=2023.6.3 httpx~=0.24.1 From 413aca52b4f03ac69fa2ad7c688cd7ae6c2200cf Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Fri, 21 Jul 2023 16:46:57 +0800 Subject: [PATCH 62/68] Update requirements.txt --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 2040452b..874b5e47 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ graia-ariadne==0.11.5 graiax-silkcoder -revChatGPT~=6.8.1 +revChatGPT~=6.8.6 toml~=0.10.2 Pillow>=9.3.0 tinydb~=4.7.1 @@ -17,7 +17,7 @@ qrcode~=7.4.2 openai~=0.27.8 EdgeGPT==0.12.1 aiohttp~=3.8.4 -OpenAIAuth +OpenAIAuth>=2.0.0 urllib3~=2.0.3 BingImageCreator~=0.4.2 From 22004d7bd8d30eb155f1c069688a3864cb9c52c5 Mon Sep 17 00:00:00 2001 From: Maolaohei Date: Sun, 23 Jul 2023 16:08:00 +0800 Subject: [PATCH 63/68] Update bot.py fix ModuleNotFoundError: No module named 'utils' --- bot.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bot.py b/bot.py index 8cbff67f..e047c4e7 100644 --- a/bot.py +++ b/bot.py @@ -1,6 +1,7 @@ import os import sys import creart +sys.path.append(os.getcwd()) from asyncio import AbstractEventLoop import asyncio from utils.exithooks import hook @@ -8,8 +9,6 @@ from constants import config, botManager from utils.edge_tts import load_edge_tts_voices -sys.path.append(os.getcwd()) - loop = creart.create(AbstractEventLoop) loop.run_until_complete(botManager.login()) From ab737f89d68f6aeb26f4ef014dfd0d1f6bbab4c9 Mon Sep 17 00:00:00 2001 From: Maolaohei Date: Sun, 23 Jul 2023 16:31:14 +0800 Subject: [PATCH 64/68] Update api.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 更新api模型清单以支持第三方网站提供的额外模型 --- adapter/chatgpt/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index 5e508612..ad3c87e5 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -109,6 +109,7 @@ def __init__(self, session_id: str = "unknown"): "gpt-4-32k-0314", "gpt-4-0613", "gpt-4-32k-0613", + "claude-2-web", ] def manage_conversation(self, session_id: str, prompt: str): From 0b22e39e62bc82aff974facc924889953decb46d Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Thu, 27 Jul 2023 08:58:56 +0800 Subject: [PATCH 65/68] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=AF=B9=E7=A7=81?= =?UTF-8?q?=E6=9C=89=E5=8C=96=E9=83=A8=E7=BD=B2=E7=9A=84=E6=94=AF=E6=8C=81?= =?UTF-8?q?=EF=BC=8C=E7=A7=BB=E9=99=A4=E4=B8=8D=E5=BF=85=E8=A6=81=E7=9A=84?= =?UTF-8?q?=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- adapter/chatgpt/api.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index ad3c87e5..dae02995 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -71,9 +71,10 @@ def count_tokens(self, session_id: str = "default", model: str = DEFAULT_ENGINE) for message in self.conversation[session_id]: num_tokens += 
tokens_per_message for key, value in message.items(): - num_tokens += len(encoding.encode(value)) - if key == "name": - num_tokens += tokens_per_name + if value is not None: + num_tokens += len(encoding.encode(value)) + if key == "name": + num_tokens += tokens_per_name num_tokens += 3 # every reply is primed with assistant return num_tokens @@ -109,7 +110,6 @@ def __init__(self, session_id: str = "unknown"): "gpt-4-32k-0314", "gpt-4-0613", "gpt-4-32k-0613", - "claude-2-web", ] def manage_conversation(self, session_id: str, prompt: str): @@ -252,9 +252,10 @@ async def request_with_stream(self, session_id: str = None, messages: list = Non response_role = delta['role'] if 'content' in delta: event_text = delta['content'] - completion_text += event_text - self.latest_role = response_role - yield event_text + if event_text is not None: + completion_text += event_text + self.latest_role = response_role + yield event_text self.bot.add_to_conversation(completion_text, response_role, session_id) async def compressed_session(self, session_id: str): From 176a76620bb4ad833acf995cfb8a83e8cc9c0ca9 Mon Sep 17 00:00:00 2001 From: Matt Gideon <117586514+Haibersut@users.noreply.github.com> Date: Fri, 28 Jul 2023 13:57:45 +0800 Subject: [PATCH 66/68] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E7=A7=81=E6=9C=89?= =?UTF-8?q?=E5=8C=96=E6=A8=A1=E5=9E=8B=E5=AF=BC=E8=87=B4=E7=9A=84=E4=B8=8A?= =?UTF-8?q?=E4=B8=8B=E6=96=87=E5=A4=B1=E6=95=88?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- adapter/chatgpt/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py index dae02995..8f18f625 100644 --- a/adapter/chatgpt/api.py +++ b/adapter/chatgpt/api.py @@ -249,7 +249,8 @@ async def request_with_stream(self, session_id: str = None, messages: list = Non if 'choices' in event and len(event['choices']) > 0 and 'delta' in event['choices'][0]: delta = event['choices'][0]['delta'] if 'role' in delta: - response_role = delta['role'] + if delta['role'] is not None: + response_role = delta['role'] if 'content' in delta: event_text = delta['content'] if event_text is not None: From 7240afbe2e09cb0d70018ce468a7b4e20511d8f9 Mon Sep 17 00:00:00 2001 From: Qin Ruizhe <23500397+qrzbing@users.noreply.github.com> Date: Mon, 31 Jul 2023 08:51:10 +0800 Subject: [PATCH 67/68] fix: old poe-api cause AttributeError (#1097) --- Dockerfile | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 19da2f0a..9008f25d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ ENV DEBIAN_FRONTEND=noninteractive COPY ./fonts/sarasa-mono-sc-regular.ttf /usr/share/fonts/ RUN apt-get update && \ - apt install --no-install-recommends xvfb binutils qtbase5-dev wkhtmltopdf ffmpeg -yq && \ + apt install --no-install-recommends xvfb binutils build-essential qtbase5-dev wkhtmltopdf ffmpeg -yq && \ (strip --remove-section=.note.ABI-tag /usr/lib/x86_64-linux-gnu/libQt5Core.so.5 || true) && \ apt-get remove --purge -yq binutils && \ apt-get clean && \ diff --git a/requirements.txt b/requirements.txt index 874b5e47..bd744107 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ tls-client python-dateutil~=2.8.2 discord.py azure-cognitiveservices-speech -poe-api~=0.4.11 +poe-api~=0.4.17 regex~=2023.6.3 httpx~=0.24.1 From 8288978893e68444ec23f2aeb9a2c0d646c09dde Mon Sep 17 00:00:00 2001 From: Qin Ruizhe <23500397+qrzbing@users.noreply.github.com> Date: Mon, 31 Jul 2023 11:14:51 +0800 
Subject: [PATCH 68/68] fix: remove binutils will also remove gcc (#1100) --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 9008f25d..3c2d4555 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,6 @@ COPY ./fonts/sarasa-mono-sc-regular.ttf /usr/share/fonts/ RUN apt-get update && \ apt install --no-install-recommends xvfb binutils build-essential qtbase5-dev wkhtmltopdf ffmpeg -yq && \ (strip --remove-section=.note.ABI-tag /usr/lib/x86_64-linux-gnu/libQt5Core.so.5 || true) && \ - apt-get remove --purge -yq binutils && \ apt-get clean && \ apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false && \ rm -rf /var/lib/apt/lists/* @@ -18,6 +17,8 @@ WORKDIR /app COPY requirements.txt /app RUN pip install --no-cache-dir -r requirements.txt && pip cache purge +RUN apt-get remove --purge -yq binutils + COPY . /app CMD ["/bin/bash", "/app/docker/start.sh"]
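
For reference, patches 49, 51, 57, 60, 65 and 66 above all touch the same request path in adapter/chatgpt/api.py: a single payload builder shared by a streaming and a non-streaming call, selected by config.openai.gpt_params.stream, with non-200 responses raised as exceptions and null role/content deltas skipped. The following is a minimal, self-contained sketch of that flow only, not the adapter's actual code. The helper names (build_payload, request_once, request_stream), the environment-variable wiring and the fixed temperature are assumptions made for the sketch; the real adapter additionally manages per-session conversation history, tiktoken-based token counting and proxy support, all of which are omitted here.

import asyncio
import json
import os
from typing import AsyncGenerator, Dict, List

import aiohttp

# Assumed wiring for the sketch; the real adapter resolves these via
# botManager.pick('openai-api') and config.openai.api_endpoint.
API_ENDPOINT = os.environ.get("OPENAI_API_ENDPOINT", "https://api.openai.com/v1")
API_KEY = os.environ.get("OPENAI_API_KEY", "")


def build_payload(messages: List[Dict[str, str]], model: str, stream: bool) -> dict:
    # One payload for both paths; only the stream flag differs.
    return {"model": model, "messages": messages, "stream": stream, "temperature": 0.5}


async def request_once(messages: List[Dict[str, str]], model: str = "gpt-3.5-turbo") -> str:
    # Non-streaming path: single POST, raise on non-200, read choices[0].message.content.
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {API_KEY}"}
    async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=600)) as session:
        async with session.post(f"{API_ENDPOINT}/chat/completions", headers=headers,
                                data=json.dumps(build_payload(messages, model, stream=False))) as resp:
            if resp.status != 200:
                body = await resp.text()
                raise Exception(f"{resp.status} {resp.reason} {body}")
            result = await resp.json()
            return result["choices"][0]["message"]["content"]


async def request_stream(messages: List[Dict[str, str]],
                         model: str = "gpt-3.5-turbo") -> AsyncGenerator[str, None]:
    # Streaming path: parse SSE "data:" lines, stop at [DONE], skip null deltas.
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {API_KEY}"}
    async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=600)) as session:
        async with session.post(f"{API_ENDPOINT}/chat/completions", headers=headers,
                                data=json.dumps(build_payload(messages, model, stream=True))) as resp:
            if resp.status != 200:
                body = await resp.text()
                raise Exception(f"{resp.status} {resp.reason} {body}")
            async for raw_line in resp.content:
                line = raw_line.decode("utf-8").strip()
                if not line.startswith("data:"):
                    continue
                chunk = line[len("data:"):].strip()
                if chunk == "[DONE]":
                    break
                delta = json.loads(chunk).get("choices", [{}])[0].get("delta", {})
                text = delta.get("content")
                if text is not None:  # some OpenAI-compatible backends emit null content deltas
                    yield text


async def main() -> None:
    messages = [{"role": "user", "content": "Hello"}]
    # A config flag such as config.openai.gpt_params.stream would pick between the two paths.
    async for piece in request_stream(messages):
        print(piece, end="", flush=True)


if __name__ == "__main__":
    asyncio.run(main())

Keeping one payload builder with a stream switch is what lets the non-streaming request added in patch 49 reuse everything except the response handling, which is the part the later patches keep adjusting.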