# (Hugging Face Space page residue — original status header: "Spaces: Sleeping")
import os
import time

import gradio as gr
import openai
from cachetools import cached, LRUCache, TTLCache
# --- Configuration ----------------------------------------------------------
# SECURITY: the API key must come from the environment, never from source.
# The key previously hard-coded here was committed to this file and must be
# treated as compromised — revoke it in the OpenAI dashboard.
openai.api_key = os.environ.get("OPENAI_API_KEY")

cache_size = 1000  # maximum number of cached prompt/response pairs
expire = 3600      # cache entry lifetime in seconds (1 hour)

# LRU eviction by size; entry age is checked manually against `expire`
# when an entry is read back (LRUCache itself has no TTL support).
cache = LRUCache(maxsize=cache_size)
def generate_response(message):
    """Return a completion for *message*, serving repeats from the in-memory cache.

    Cache entries are stored as ``(text, created_at)`` tuples so that answers
    older than ``expire`` seconds are discarded and regenerated.  (The previous
    implementation stored bare strings and then read a nonexistent ``_expire``
    attribute off them, and called ``cache.expire()``, which LRUCache does not
    have — both raised on every hit/store.)

    Args:
        message: The user's prompt text.

    Returns:
        The model's reply as a string, or an ``"Error: ..."`` string if the
        API call fails (surfaced to the UI rather than crashing the app).
    """
    entry = cache.get(message)
    if entry is not None:
        text, created_at = entry
        if time.time() - created_at < expire:
            return text
        del cache[message]  # stale entry: drop it and regenerate below

    try:
        # NOTE(review): text-davinci-003 / the Completion endpoint is
        # deprecated in current OpenAI APIs — confirm the installed `openai`
        # package version still supports it, or migrate to chat completions.
        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=f"Question: {message}",
            max_tokens=150,
            n=1,
            stop=None,  # no explicit stop sequences for chat-like interactions
            temperature=0.7,
        )
        text = response.choices[0].text.strip()
        cache[message] = (text, time.time())  # timestamp enables manual expiry
        return text
    except Exception as e:
        # Best-effort UI: report the failure as the chatbot's reply.
        return f"Error: {str(e)}"
# Create the Gradio interface: a single textbox in, plain text out, wired
# directly to generate_response.
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(placeholder="Type your message here...", label="Message"),
    outputs="text",
    # NOTE(review): title/description advertise GPT-3.5-turbo, but the
    # handler calls the text-davinci-003 Completion endpoint — align these.
    title="Real-Time Chatbot with GPT-3.5",
    description="Ask any question or start a conversation. Powered by OpenAI GPT-3.5-turbo.",
    # NOTE(review): "dark" is presumably not a valid built-in theme name in
    # current Gradio releases (themes are gr.themes.* objects) — verify
    # against the installed gradio version.
    theme="dark",  # Optional: Use a dark theme for a more modern look
)
# Launch the interface. launch() blocks until the server is shut down.
iface.launch()
import time  # NOTE(review): imported after it is used (expiry check above) and after the blocking launch() — move to the top of the file