From 11e76ff8376d4220642ed1dc4613b79db96c194d Mon Sep 17 00:00:00 2001
From: Showdown76py
Date: Tue, 16 Jan 2024 21:37:43 +0100
Subject: [PATCH] feat: v2

---
 .gitignore                     |  2 ++
 copeai_backend/conversation.py |  4 +++-
 main.py                        | 35 +++++++++++++++++++++++++++--------
 3 files changed, 32 insertions(+), 9 deletions(-)

diff --git a/.gitignore b/.gitignore
index 1b77d31..c5862b8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+tokens.db
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/copeai_backend/conversation.py b/copeai_backend/conversation.py
index c058bef..608b5d0 100644
--- a/copeai_backend/conversation.py
+++ b/copeai_backend/conversation.py
@@ -76,6 +76,8 @@ def text_to_tokens(string_or_messages: str | list[str | dict | list] | Conversat
     messages = []
     if isinstance(string_or_messages, str):
         messages = [{"role": "user", "content": string_or_messages}]
+    elif isinstance(string_or_messages, Conversation):
+        messages = string_or_messages.messages
     else:
         messages = string_or_messages
 
@@ -99,4 +101,4 @@ def text_to_tokens(string_or_messages: str | list[str | dict | list] | Conversat
             num_tokens += text_to_tokens(message["content"])
 
     num_tokens += 2  # every reply is primed with assistant
-    return num_tokens
\ No newline at end of file
+    return num_tokens
diff --git a/main.py b/main.py
index 484d4cd..2d11352 100644
--- a/main.py
+++ b/main.py
@@ -116,8 +116,8 @@ async def on_message(message: discord.Message):
         total_tokens = copeai_backend.conversation.text_to_tokens(cached_conversations[message.author])
 
         cached_conversations[message.author].add_message(
-            role=copeai_backend.conversation.Role.user,
-            content=message.content
+            role=copeai_backend.conversation.Role.USER,
+            message=message.content
         )
 
         await message.channel.typing()
@@ -132,12 +132,31 @@ async def on_message(message: discord.Message):
         )
 
         typing.remove(message.channel)
-        response = req['choices'][0]['message']['content']
-        prompt_used_tokens = req['usage']['prompt_tokens']
-        completion_used_tokens = req['usage']['completion_tokens']
-        r=await message.reply(response, allowed_mentions=discord.AllowedMentions.none())
-        c.execute('INSERT INTO message_history VALUES (?, ?, ?, ?, ?, ?)', (message.id, message.author.id, message.content, prompt_used_tokens, 'user', int(message.created_at.timestamp())))
-        c.execute('INSERT INTO message_history VALUES (?, ?, ?, ?, ?, ?)', (r.id, message.author.id, response, completion_used_tokens, 'assistant', int(time.time())))
+        last_generation = 0
+        MSG = await message.reply('** **', view=views.GenerationState.GenerationStateView(views.GenerationState.GenerationState.GENERATING))
+        all_generated = []
+        async for response in req:
+            print(response.text)
+            if isinstance(response, copeai_backend.ConversationResponse):
+                response = ''.join(response.text)
+            else:
+                all_generated.append(response.text)
+
+            if last_generation < time.time():
+                compiled = ''.join(all_generated)
+                last_generation = time.time() + 1.5
+                if len(compiled) > 2000:
+                    await MSG.edit(content=None, embed=discord.Embed(description=compiled, color=0xfce75d))
+                else:
+                    await MSG.edit(content=compiled)
+
+        if len(response) > 2000:
+            await MSG.edit(content=None, embed=discord.Embed(description=response, color=0xfce75d), view=views.GenerationState.GenerationStateView(views.GenerationState.GenerationState.FINISHED))
+        else:
+            await MSG.edit(content=response, view=views.GenerationState.GenerationStateView(views.GenerationState.GenerationState.FINISHED))
+
+        c.execute('INSERT INTO message_history VALUES (?, ?, ?, ?, ?, ?)', (message.id, message.author.id, message.content, copeai_backend.conversation.text_to_tokens(message.content), 'user', int(message.created_at.timestamp())))
+        c.execute('INSERT INTO message_history VALUES (?, ?, ?, ?, ?, ?)', (MSG.id, message.author.id, response, copeai_backend.conversation.text_to_tokens(response), 'assistant', int(time.time())))
         db.commit()
     except Exception as e:
         traceback.print_exc()
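
Note on the streaming loop added to main.py: the new code accumulates response chunks as they arrive and edits the Discord reply at most once every 1.5 seconds, switching to an embed once the text exceeds Discord's 2000-character content limit. Below is a minimal sketch of that throttling pattern in isolation; it is not part of the patch, and the names `chunks`, `push_preview`, `interval`, and `limit` are hypothetical stand-ins for the bot's own stream and message-editing call.

    import time

    async def stream_preview(chunks, push_preview, interval: float = 1.5, limit: int = 2000):
        """Accumulate streamed text chunks; push a preview at most once per `interval` seconds."""
        buffer = []
        next_edit = 0.0  # timestamp before which no edit is made
        async for chunk in chunks:          # `chunks` is any async iterator yielding str
            buffer.append(chunk)
            if time.time() >= next_edit:
                next_edit = time.time() + interval
                # Real callers would switch to an embed past the `limit` instead of truncating.
                await push_preview(''.join(buffer)[:limit])
        # Push the final accumulated text once the stream ends.
        await push_preview(''.join(buffer)[:limit])

In the patch itself this logic is inlined in on_message, with MSG.edit() playing the role of push_preview and the GenerationStateView switching from GENERATING to FINISHED after the loop.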