longdiyao committed on
Commit
b9ae76a
·
verified ·
1 Parent(s): 8d56688

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -0
app.py CHANGED
@@ -4,3 +4,34 @@ from openai import OpenAI
4
 
5
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
6
  openai_client = OpenAI(api_key=OPENAI_API_KEY)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
# Read the API key from the environment; os.getenv returns None when unset,
# so a misconfigured deployment fails on the first request, not at import.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# One shared client instance is reused for every chat request.
openai_client = OpenAI(api_key=OPENAI_API_KEY)
7
+
8
def generate_response(prompt, model_provider, temperature, top_p, max_tokens, repetition_penalty):
    """Send *prompt* to the OpenAI chat API and return the model's reply.

    Args:
        prompt: User message, forwarded verbatim as a single chat turn.
        model_provider: Accepted for interface compatibility but currently
            unused — the model is hard-coded to "gpt-3.5-turbo".
        temperature: Sampling temperature forwarded to the API.
        top_p: Nucleus-sampling cutoff forwarded to the API.
        max_tokens: Upper bound on generated tokens. Coerced to int because
            Gradio sliders deliver floats and the API requires an integer.
        repetition_penalty: Mapped onto OpenAI's ``presence_penalty``
            (NOTE(review): related but not identical knobs — confirm intent).

    Returns:
        The stripped completion text, or an error string on failure, so the
        caller (the Gradio UI) always receives something displayable.
    """
    try:
        response = openai_client.chat.completions.create(
            model="gpt-3.5-turbo",  # hard-coded; model_provider is ignored for now
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            top_p=top_p,
            # Sliders yield floats (e.g. 512.0); the API expects an integer count.
            max_tokens=int(max_tokens),
            presence_penalty=repetition_penalty,
            stream=False,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:  # surface the failure in the UI instead of crashing
        return f"OpenAI API Error: {str(e)}"
22
# Gradio front-end. gr.Interface passes its inputs to `fn` positionally, so
# the input list must line up one-to-one with generate_response's parameters.
# The original list had only 5 inputs for 6 parameters (no `model_provider`),
# which made every submit raise a TypeError — the Dropdown below fills that slot.
iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Prompt", lines=6, placeholder="Ask something..."),
        # Second positional input -> `model_provider` (was missing).
        gr.Dropdown(choices=["openai"], value="openai", label="Model Provider"),
        gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
        gr.Slider(minimum=32, maximum=2048, value=512, step=32, label="Max New Tokens"),
        gr.Slider(minimum=1.0, maximum=2.0, value=1.1, step=0.1, label="Repetition Penalty"),
    ],
    outputs="text",
    title="🧠 DeepSeek LLM Chat with Parameter Tuning",
    theme=gr.themes.Soft(),
)

iface.launch()