Spaces:
Sleeping
Sleeping
File size: 1,737 Bytes
ef9aa44 47fa0b9 57c48ac ef9aa44 47fa0b9 2032ccc ef9aa44 3aca7d6 79ba995 4a5f64b 3aca7d6 e5ed350 3aca7d6 79ba995 3aca7d6 47fa0b9 3aca7d6 79ba995 47fa0b9 ef9aa44 47fa0b9 ef9aa44 e5ed350 b236d39 e5ed350 47fa0b9 ef9aa44 47fa0b9 79ba995 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 |
import os
import time

import gradio as gr
import openai
from cachetools import cached, LRUCache, TTLCache
# Set your OpenAI API key here
openai.api_key = "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" # Replace with your actual API key
cache_size = 1000
expire = 3600 # Cache expiration time in seconds (1 hour)
cache = LRUCache(maxsize=cache_size)
def generate_response(message):
if message in cache:
if cache.get(message).__dict__["_expire"] > time.time(): # Check expiration
return cache[message]
else:
del cache[message] # Remove expired entry
try:
response = openai.Completion.create(
engine="text-davinci-003", # Adjust model as needed
prompt=f"Question: {message}",
max_tokens=150,
n=1,
stop=None, # No explicit stop sequences for chat-like interactions
temperature=0.7,
)
cache[message] = response.choices[0].text.strip()
cache.expire(message, expire) # Set expiration time
return response.choices[0].text.strip()
except Exception as e:
return f"Error: {str(e)}"
# Create the Gradio interface
iface = gr.Interface(
fn=generate_response,
inputs=gr.Textbox(placeholder="Type your message here...", label="Message"),
outputs="text",
title="Real-Time Chatbot with GPT-3.5",
description="Ask any question or start a conversation. Powered by OpenAI GPT-3.5-turbo.",
theme="dark", # Optional: Use a dark theme for a more modern look
)
# Launch the interface
iface.launch()
import time # Import time module for expiration check |