import os

import gradio as gr
from openai import OpenAI
| |
|
# SECURITY: the API key was previously hard-coded here. A secret committed to
# source control is leaked — the old key must be rotated. Read it from the
# environment instead; fail fast with a clear message if it is missing.
api_key = os.environ.get("PPLX_API_KEY")
if not api_key:
    raise RuntimeError("Set the PPLX_API_KEY environment variable to your Perplexity API key.")

# Perplexity exposes an OpenAI-compatible endpoint, so the stock OpenAI client
# works once pointed at their base URL.
client = OpenAI(api_key=api_key, base_url="https://api.perplexity.ai")
| |
|
def predict(message, history):
    """Stream a chat completion for *message*, with prior turns as context.

    Parameters
    ----------
    message : str
        The user's newest message.
    history : list
        Gradio ChatInterface history — assumed to be the classic
        ``[[user_msg, assistant_msg], ...]`` pair format (TODO confirm
        against the installed Gradio version, which may use message dicts).

    Yields
    ------
    str
        The assistant reply so far, re-yielded as each streamed chunk
        arrives so Gradio can update the chat bubble incrementally.
    """
    # BUG FIX: the original accepted `history` but never used it, so every
    # turn was stateless. Replay prior turns in OpenAI message format.
    history_openai_format = []
    for user_msg, assistant_msg in history:
        history_openai_format.append({"role": "user", "content": user_msg})
        if assistant_msg is not None:
            history_openai_format.append(
                {"role": "assistant", "content": assistant_msg}
            )
    history_openai_format.append({"role": "user", "content": message})

    response = client.chat.completions.create(
        model='sonar-small-chat',
        messages=history_openai_format,
        temperature=1.0,
        stream=True,
    )

    # Accumulate streamed deltas; some chunks carry no content (None) and
    # are skipped.
    partial_message = ""
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            partial_message = partial_message + chunk.choices[0].delta.content
            yield partial_message
| | |
| | gr.ChatInterface(predict).launch() |