txh17 commited on
Commit
6a7bb33
·
verified ·
1 Parent(s): 3e65375

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -39
app.py CHANGED
@@ -9,47 +9,43 @@ openai_client = OpenAI(api_key=OPENAI_API_KEY)
9
  DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
10
  deepseek_base_url = "https://api.deepseek.com" # assuming DeepSeek uses a REST API, you can adjust as needed
11
 
12
- def generate_response(model_provider, prompt, temperature, top_p, max_tokens, repetition_penalty):
13
  try:
14
- if model_provider == "OpenAI":
15
- response = openai_client.chat.completions.create(
16
- model="gpt-3.5-turbo", # or another model of your choice
17
- messages=[{"role": "user", "content": prompt}],
18
- temperature=temperature,
19
- top_p=top_p,
20
- max_tokens=max_tokens,
21
- presence_penalty=repetition_penalty,
22
- stream=False
23
- )
24
- return response.choices[0].message.content.strip()
25
-
26
- elif model_provider == "DeepSeek":
27
- headers = {
28
- "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
29
- "Content-Type": "application/json"
30
- }
31
- payload = {
32
- "model": "deepseek-chat", # 使用 DeepSeek 的 "deepseek-chat" 模型
33
- "prompt": prompt,
34
- "temperature": temperature,
35
- "top_p": top_p,
36
- "max_tokens": max_tokens,
37
- "repetition_penalty": repetition_penalty
38
- }
39
- # Assuming DeepSeek has a /generate endpoint, adjust URL and payload if needed
40
- response = requests.post(f"{deepseek_base_url}/generate", headers=headers, json=payload)
41
-
42
- if response.status_code == 200:
43
- return response.json().get("generated_text", "No response text found.")
44
- else:
45
- return f"DeepSeek API Error: {response.status_code}, {response.text}"
46
-
47
- else:
48
- return "Invalid model provider selected."
49
-
50
  except Exception as e:
51
- return f"Error: {str(e)}"
52
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  iface = gr.Interface(
54
  fn=generate_response,
55
  inputs=[
 
9
  DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
10
  deepseek_base_url = "https://api.deepseek.com" # assuming DeepSeek uses a REST API, you can adjust as needed
11
 
12
+ def generate_response(model_provider, prompt, temperature, top_p, max_tokens, repetition_penalty):
13
  try:
14
+ response = deepseek_client.chat.completions.create(
15
+ model="deepseek-chat", #or "deepseek-reasoner" for R1 model
16
+ messages=[{"role": "user", "content": prompt}],
17
+ temperature=temperature,
18
+ top_p=top_p,
19
+ max_tokens=max_tokens,
20
+ presence_penalty=repetition_penalty,
21
+ stream=False
22
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  except Exception as e:
24
+ return f"DeepSeek API Error: {str(e)}"
25
+ elif model_provider == "OpenAI":
26
+ try:
27
+ response = openai_client.chat.completions.create(
28
+ model="gpt-3.5-turbo", # or another model of your choice
29
+ messages=[{"role": "user", "content": prompt}],
30
+ temperature=temperature,
31
+ top_p=top_p,
32
+ max_tokens=max_tokens,
33
+ presence_penalty=repetition_penalty,
34
+ stream=False
35
+ )
36
+ return response.choices[0].message.content.strip()
37
+ except Exception as e:
38
+ return f"OpenAI API Error: {str(e)}"
39
+ else:
40
+ return "Invalid model provider selected."
41
+ with gr.Blocks() as demo:
42
+ gr.Markdown("# LLM Chat Interface")
43
+ with gr.Row():
44
+ model_provider = gr.Dropdown(
45
+ choices=["DeepSeek", "OpenAI"],
46
+ value="DeepSeek",
47
+ label="Select Model Provider"
48
+ prompt = gr.Textbox(label="Enter your prompt", lines=4, placeholder="Type your message here..")
49
  iface = gr.Interface(
50
  fn=generate_response,
51
  inputs=[