# 2024-01-07 18:20:09 +00:00
# fmt: off
# 2024-01-07 19:19:11 +00:00
from typing import AsyncGenerator
# 2024-01-07 18:20:09 +00:00
from copeai_backend import generate , models , conversation
import asyncio
async def main() -> None:
    """Run a two-turn streamed conversation against the CopeAI backend.

    Demonstrates: setting a base prompt, creating a conversation object,
    consuming a streamed response chunk by chunk, and sending a follow-up
    user message for a second streamed turn.
    """
    # Add a base prompt, if you wish to.
    conversation.BASE_PROMPT = (
        "You are CopeAI. You are kind, and useful. "
        "Answer to questions properly and make sure that it is really useful."
    )

    # Create a conversation object that stores the message history.
    conv = generate.Conversation(
        add_base_prompt=True,  # Prepend BASE_PROMPT to the conversation (default: True).
                               # The base prompt is empty by default; set it yourself.
        storage={},            # Arbitrary per-conversation storage, if you need it.
    )

    # Generate a response. This is a streamed request: the async generator
    # yields GeneratingResponseChunk objects while generating, then a final
    # ConversationResponse at the end. The call itself is non-blocking.
    response = generate.process_text_streaming(
        conversation=conv,   # The conversation object.
        model=models.GPT_3,  # The model to use. Add your own models to the MODELS dict in models.py.
        new_message="Hello, how are you?",  # The message to send.
        # additional_args={}  # Extra arguments for the API; these differ per API.
    )  # type: ignore

    async for chunk in response:
        if isinstance(chunk, conversation.GeneratingResponseChunk):
            print(chunk.text, end="")
        else:
            print('\nConversation ended!')
            # The final iteration yields the ConversationResponse; keep its text.
            _response = chunk.text

    # The assistant's message is automatically added to the conversation object.
    # Add a new user message for the next turn.
    conv.add_message(
        role=generate.Role.USER,  # Enum: Role.USER, Role.ASSISTANT, or Role.SYSTEM.
        message="I am fine, thanks!",
    )

    # Generate another streamed response.
    # NOTE(review): process_text_streaming returns an async generator (it is
    # consumed with `async for` below), not a ConversationResponse — the
    # original `generate.ConversationResponse` annotation was wrong and is dropped.
    response = generate.process_text_streaming(
        conversation=conv,
        model=models.GPT_3,
        new_message="...",
    )
    async for chunk in response:
        if isinstance(chunk, conversation.GeneratingResponseChunk):
            print(chunk.text, end="")
        else:
            print('\nConversation ended!')
# 2024-01-07 18:20:09 +00:00
# Only start the event loop when executed as a script, so importing this
# module for reference does not fire off API calls.
if __name__ == "__main__":
    asyncio.run(main())