copeai-ai-backend/examples/basic-generation.py

# fmt: off
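"""Basic generation example: set a base prompt, create a Conversation, send a
message to a model, and continue the exchange with a follow-up user message."""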
from copeai_backend import generate, models, conversation
import asyncio
async def main():
    # Add a base prompt, if you wish to.
    conversation.BASE_PROMPT = "You are CopeAI. You are kind and helpful. Answer questions properly and make sure your answers are genuinely useful."
    # Create a conversation object that will store the message history.
    conv = generate.Conversation(
        add_base_prompt=True,  # Add the base prompt to the conversation. This is True by default.
                               # However, the base prompt is empty by default; you must set it yourself.
        storage={}             # If you need to store some data with the conversation.
    )
    # Generate a response. This is a non-streamed request, so it returns a ConversationResponse
    # object and does not return until the full response is available.
    response: generate.ConversationResponse = await generate.simple_process_text(
        conversation=conv,                  # The conversation object.
        model=models.GPT_3,                 # The model to use. Add your own models to the MODELS dict in models.py.
        new_message="Hello, how are you?",  # The message to send.
        # additional_args={}                # Additional arguments sent to the API; these differ for each API.
    )
    # Print the response.
    print(response.text)

    # The assistant's message is automatically appended to the conversation object.
    # Add a new user message.
    conv.add_message(
        role=generate.Role.USER,      # The role of the message. This is an enum: generate.Role.USER, generate.Role.ASSISTANT, or generate.Role.SYSTEM.
        message="I am fine, thanks!"  # The message text.
    )
    # Generate a response.
    response: generate.ConversationResponse = await generate.simple_process_text(
        conversation=conv,
        model=models.GPT_3,
        new_message="...",
    )
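    # Print the follow-up response, same as above.
    print(response.text)
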
asyncio.run(main())