txh17 commited on
Commit
1b8a3d5
·
verified ·
1 Parent(s): 38be0b0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -1
app.py CHANGED
@@ -3,4 +3,38 @@ import os
3
  from openai import OpenAI
4
 
5
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
6
- openai_client = OpenAI(api_key=OPENAI_API_KEY)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from openai import OpenAI
# Read the API key from the environment so the secret never lives in the code.
# NOTE(review): if OPENAI_API_KEY is unset this resolves to None and requests
# will fail with an auth error at call time — confirm that is the desired
# failure mode for this Space.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai_client = OpenAI(api_key=OPENAI_API_KEY)
def generate_response(prompt, temperature, top_p, max_tokens, repetition_penalty,
                      model="gpt-3.5-turbo"):
    """Send *prompt* to the OpenAI chat API and return the reply text.

    Args:
        prompt: User message, forwarded as a single-turn chat.
        temperature: Sampling temperature passed through to the API.
        top_p: Nucleus-sampling cutoff passed through to the API.
        max_tokens: Maximum number of tokens to generate.
        repetition_penalty: Mapped onto the API's ``presence_penalty``
            parameter (the OpenAI chat API exposes no literal
            "repetition penalty" knob).
        model: Chat model name; defaults to ``"gpt-3.5-turbo"`` so existing
            callers are unaffected, but the backend is now configurable.

    Returns:
        The assistant reply with surrounding whitespace stripped, or an
        ``"OpenAI API Error: ..."`` string when the request fails.
    """
    try:
        response = openai_client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            # Closest available equivalent of a repetition penalty in the
            # OpenAI chat completions API.
            presence_penalty=repetition_penalty,
            stream=False,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Broad on purpose: any API failure is surfaced as text in the UI
        # instead of crashing the Gradio worker.
        return f"OpenAI API Error: {str(e)}"
# Gradio front end: the input widgets map positionally onto
# generate_response(prompt, temperature, top_p, max_tokens, repetition_penalty).
# Requires `import gradio as gr` at the top of the file.
iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Prompt", lines=6, placeholder="Ask something..."),
        gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
        gr.Slider(minimum=32, maximum=2048, value=512, step=32, label="Max New Tokens"),
        gr.Slider(minimum=1.0, maximum=2.0, value=1.1, step=0.1, label="Repetition Penalty"),
    ],
    outputs="text",
    # The backend calls OpenAI chat completions (gpt-3.5-turbo by default),
    # so the title no longer advertises DeepSeek.
    title="🧠 LLM Chat with Parameter Tuning",
    theme=gr.themes.Soft(),
)

iface.launch()