from typing import List, Tuple

# SYSTEM_MESSAGE and client are expected to be defined elsewhere in this module.


def generate_response(
    user_input: str,
    history: List[Tuple[str, str]],
    max_tokens: int,
    temperature: float,
    top_p: float,
) -> str:
    """
    Generates a response from the AI model.

    Args:
        user_input: The user's input message.
        history: A list of tuples containing the conversation history
            (user input, AI response).
        max_tokens: The maximum number of tokens in the generated response.
        temperature: Controls the randomness of the generated response.
        top_p: Controls the nucleus sampling probability.

    Returns:
        str: The generated response from the AI model.
    """
    try:
        # Build the chat transcript: system prompt first, then the history
        # flattened into alternating user/assistant turns, then the new input.
        # sum(history, ()) concatenates the (user, assistant) tuples into one
        # flat tuple, so even indices are user turns and odd indices are replies.
        messages = [{"role": "system", "content": SYSTEM_MESSAGE}]
        messages.extend(
            {"role": "user" if i % 2 == 0 else "assistant", "content": val}
            for i, val in enumerate(sum(history, ()))
        )
        messages.append({"role": "user", "content": user_input})

        # Stream the completion, accumulating the delta tokens as they arrive.
        response = ""
        for msg in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            if msg and 'choices' in msg and msg['choices']:
                token = msg['choices'][0].get('delta', {}).get('content', '')
                if token:
                    response += token
            else:
                print("Warning: Unexpected response format or empty 'choices'.")
                break

        return response or "Sorry, I couldn't generate a response. Please try again."

    except Exception as e:
        print(f"An error occurred: {e}")
        return "Error: An unexpected error occurred while processing your request."
|
|
|