# NOTE: the lines below were Hugging Face Space page residue captured by the
# scrape (status "Paused", file size 1,291 Bytes, commit hashes, and the
# editor's line-number gutter). They are kept as a comment so the file parses.
# Spaces: Paused / File size: 1,291 Bytes
# f4e867d eb270d3 df07e03 f4e867d 0e18334 eb270d3 719e38c eb270d3 df07e03
import json
import os

import gradio as gr
import openai
import uvicorn
from fastapi import FastAPI, Request
# Read the API key from the environment instead of hard-coding it in source;
# falls back to the original empty string so behavior is unchanged when unset.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")

# FastAPI application exposing the POST /chat endpoint.
app = FastAPI()

# Running transcript of user/assistant turns, shared by the API endpoint and
# the Gradio UI. NOTE(review): module-global state — every client shares one
# conversation and the list grows without bound.
conversation_history = []
@app.post("/chat")
async def chat(request: Request):
    """Forward the posted message (plus the running transcript) to the OpenAI
    chat API and return the model's reply as plain text.

    The user turn and the model's reply are both appended to the module-level
    ``conversation_history`` so later calls keep context.
    NOTE(review): the history is global, so all clients share one conversation.
    """
    global conversation_history

    # Parsed JSON body; re-serialized to a string because the chat API expects
    # message content to be text, not a JSON object.
    message = await request.json()
    message_json = json.dumps(message)
    conversation_history.append({"role": "user", "content": message_json})

    # Work on a copy so the system prompt below is not persisted into history.
    messages = conversation_history.copy()
    messages.insert(0, {
        "role": "system",
        "content": "You are a story writer. What ever the prompt you always write a story",
    })

    # NOTE(review): openai.ChatCompletion.create is a blocking call inside an
    # async endpoint — it stalls the event loop for the duration of the request.
    completion = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
    )
    reply = completion.choices[0].message.content

    # Persist the assistant turn so the next call sees it.
    conversation_history.append({"role": "assistant", "content": reply})
    return reply
def gradio_chat(message, history=None):
    """Gradio-facing chat handler: send *message* to the OpenAI chat API using
    the shared transcript and system prompt, and return the reply string.

    Bug fix: the original called the async FastAPI endpoint ``chat(message)``
    with a raw string — ``chat`` expects a ``Request``, and the un-awaited call
    returned a coroutine object, not a reply. This version performs the same
    OpenAI call directly and synchronously.

    ``history`` is accepted (and ignored) because ``gr.ChatInterface`` invokes
    its fn as ``fn(message, history)``; the default keeps plain
    ``gradio_chat(message)`` calls working. The module-level
    ``conversation_history`` is used as the source of context instead.
    """
    conversation_history.append({"role": "user", "content": message})

    # Same prompt assembly as the /chat endpoint: copy the transcript and
    # prepend the system instruction without persisting it.
    messages = conversation_history.copy()
    messages.insert(0, {
        "role": "system",
        "content": "You are a story writer. What ever the prompt you always write a story",
    })

    completion = openai.ChatCompletion.create(
        model="gpt-4",
        messages=messages,
    )
    reply = completion.choices[0].message.content

    conversation_history.append({"role": "assistant", "content": reply})
    return reply
# Build the Gradio UI. NOTE(review): the original launched it at import time
# inside a ``with gr.ChatInterface(...)`` context; ``launch()`` blocks, so the
# uvicorn call below was never reached — and both servers targeted port 7860.
chatbot = gr.ChatInterface(fn=gradio_chat)

# Run both servers only when executed as a script, never on import.
if __name__ == "__main__":
    # Start Gradio without blocking so the FastAPI app can also run; the two
    # servers need distinct ports. FastAPI keeps 7860 (matching the original
    # uvicorn call); Gradio moves to 7861.
    chatbot.launch(server_name="0.0.0.0", server_port=7861, prevent_thread_lock=True)
    uvicorn.run(app, host="0.0.0.0", port=7860)