txh17 committed on
Commit
d705551
·
verified ·
1 Parent(s): c13d4c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -15
app.py CHANGED
@@ -1,33 +1,58 @@
1
  import gradio as gr
2
  import os
3
  from openai import OpenAI
 
4
 
5
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
6
  openai_client = OpenAI(api_key=OPENAI_API_KEY)
7
 
8
  DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
9
- deepseek_client = OpenAI(api_key=DEEPSEEK_API_KEY, base_url="https://api.deepseek.com")
10
 
11
-
12
def generate_response(prompt, temperature, top_p, max_tokens, repetition_penalty):
    """Generate a single-turn chat completion from the OpenAI API.

    Parameters
    ----------
    prompt : str
        User message, sent as the sole entry of the chat ``messages`` list.
    temperature : float
        Sampling temperature forwarded to the API.
    top_p : float
        Nucleus-sampling cutoff forwarded to the API.
    max_tokens : int
        Upper bound on the number of generated tokens.
    repetition_penalty : float
        Forwarded as OpenAI's ``presence_penalty`` — the closest available
        analog; the OpenAI API has no parameter literally named
        ``repetition_penalty``.

    Returns
    -------
    str
        The model's reply with surrounding whitespace stripped, or an
        ``"OpenAI API Error: ..."`` message when the request fails.
    """
    try:
        response = openai_client.chat.completions.create(
            model="gpt-3.5-turbo",  # or another model of your choice
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            presence_penalty=repetition_penalty,
            stream=False,
        )
        # message.content can be None (e.g. refusals / tool-call replies);
        # guard before calling .strip() so we don't raise AttributeError here.
        return (response.choices[0].message.content or "").strip()
    except Exception as e:
        # Return the failure as text so the Gradio UI stays up.
        return f"OpenAI API Error: {str(e)}"
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  iface = gr.Interface(
29
  fn=generate_response,
30
  inputs=[
 
31
  gr.Textbox(label="Prompt", lines=6, placeholder="Ask something..."),
32
  gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature"),
33
  gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
@@ -41,3 +66,4 @@ iface = gr.Interface(
41
 
42
  iface.launch()
43
 
 
 
1
  import gradio as gr
2
  import os
3
  from openai import OpenAI
4
+ import requests
5
 
6
  OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
7
  openai_client = OpenAI(api_key=OPENAI_API_KEY)
8
 
9
  DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
10
+ deepseek_base_url = "https://api.deepseek.com" # assuming DeepSeek uses a REST API, you can adjust as needed
11
 
12
def generate_response(model_provider, prompt, temperature, top_p, max_tokens, repetition_penalty):
    """Route *prompt* to the selected LLM provider and return the reply text.

    Parameters
    ----------
    model_provider : str
        Either ``"OpenAI"`` or ``"DeepSeek"`` (the Gradio dropdown values).
    prompt : str
        User message, sent as a single-turn chat.
    temperature : float
        Sampling temperature forwarded to the API.
    top_p : float
        Nucleus-sampling cutoff forwarded to the API.
    max_tokens : int
        Upper bound on the number of generated tokens.
    repetition_penalty : float
        Mapped to the provider's closest equivalent (``presence_penalty``
        for OpenAI, ``frequency_penalty`` for DeepSeek) — neither API has a
        parameter literally named ``repetition_penalty``.

    Returns
    -------
    str
        The model's reply with surrounding whitespace stripped, or a
        human-readable error message when the request fails or the
        provider is unrecognized.
    """
    try:
        if model_provider == "OpenAI":
            response = openai_client.chat.completions.create(
                model="gpt-3.5-turbo",  # or another model of your choice
                messages=[{"role": "user", "content": prompt}],
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
                presence_penalty=repetition_penalty,
                stream=False,
            )
            # content can be None (e.g. refusals); guard before .strip().
            return (response.choices[0].message.content or "").strip()

        elif model_provider == "DeepSeek":
            # DeepSeek's REST API is OpenAI-compatible: the endpoint is
            # POST {base}/chat/completions (there is no /generate), the body
            # uses a chat-style "messages" list, and the reply text lives in
            # choices[0].message.content (not "generated_text").
            headers = {
                "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
                "Content-Type": "application/json",
            }
            payload = {
                "model": "deepseek-chat",
                "messages": [{"role": "user", "content": prompt}],
                "temperature": temperature,
                "top_p": top_p,
                "max_tokens": max_tokens,
                # DeepSeek accepts frequency_penalty, its closest
                # OpenAI-compatible analog of a repetition penalty.
                "frequency_penalty": repetition_penalty,
                "stream": False,
            }
            response = requests.post(
                f"{deepseek_base_url}/chat/completions",
                headers=headers,
                json=payload,
                timeout=60,  # don't hang the Gradio UI on a stalled request
            )
            if response.status_code == 200:
                data = response.json()
                return (data["choices"][0]["message"]["content"] or "").strip()
            return f"DeepSeek API Error: {response.status_code}, {response.text}"

        else:
            return "Invalid model provider selected."

    except Exception as e:
        # Surface any failure as text so the interface keeps running.
        return f"Error: {str(e)}"
51
 
52
  iface = gr.Interface(
53
  fn=generate_response,
54
  inputs=[
55
+ gr.Dropdown(choices=["DeepSeek", "OpenAI"], value="DeepSeek", label="Model Provider"),
56
  gr.Textbox(label="Prompt", lines=6, placeholder="Ask something..."),
57
  gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature"),
58
  gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-p"),
 
66
 
67
  iface.launch()
68
 
69
+