akazmi committed on
Commit
3aca7d6
·
verified ·
1 Parent(s): 2032ccc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -2
app.py CHANGED
@@ -1,11 +1,19 @@
import os

import gradio as gr
import openai

# SECURITY: the original line embedded a live-looking OpenAI secret key
# ("sk-proj-...") directly in source control. Any key published this way is
# compromised and must be revoked/rotated in the OpenAI dashboard.
# Load the key from the environment instead of hardcoding it.
openai.api_key = os.environ.get("OPENAI_API_KEY")
8
  def generate_response(message):
 
 
 
9
  try:
10
  response = openai.Completion.create(
11
  engine="text-davinci-003", # Adjust model as needed
@@ -15,7 +23,7 @@ def generate_response(message):
15
  stop=None, # No explicit stop sequences for chat-like interactions
16
  temperature=0.7,
17
  )
18
- # Return the content of the response
19
  return response.choices[0].text.strip()
20
  except Exception as e:
21
  return f"Error: {str(e)}"
 
import os

import gradio as gr
import openai
import cachetools

# SECURITY: the original line committed a live-looking OpenAI secret key
# ("sk-proj-...") to source control. That key is now public and must be
# revoked/rotated; read the replacement from the environment instead.
openai.api_key = os.environ.get("OPENAI_API_KEY")

# Cache settings
cache_size = 1000  # maximum number of cached prompts (LRU-style eviction)
ttl = 3600  # cache entry lifetime in seconds (1 hour)

# BUG FIX: cachetools.LRUCache has no `ttl` parameter — passing one raises
# TypeError the moment this module is imported. TTLCache is the cachetools
# class that combines a bounded size with per-entry time-based expiry.
cache = cachetools.TTLCache(maxsize=cache_size, ttl=ttl)
13
  def generate_response(message):
14
+ if message in cache:
15
+ return cache[message]
16
+
17
  try:
18
  response = openai.Completion.create(
19
  engine="text-davinci-003", # Adjust model as needed
 
23
  stop=None, # No explicit stop sequences for chat-like interactions
24
  temperature=0.7,
25
  )
26
+ cache[message] = response.choices[0].text.strip()
27
  return response.choices[0].text.strip()
28
  except Exception as e:
29
  return f"Error: {str(e)}"