Niklauseik committed
Commit 5bdc091
1 Parent(s): 0123138

add openai

Files changed (2)
  1. app.py +14 -16
  2. requirements.txt +1 -0
app.py CHANGED
@@ -15,7 +15,7 @@ MODELS = {
     "LLaMA2_70B": "meta-llama/Llama-2-70b-hf",
     "ChatGLM3_6B": "THUDM/chatglm-6b",
     "InternLM_7B": "internlm/internlm-7b",
-    "Falcon_7B": "tiiuae/falcon-7b",
+    "Falcon_7B": "tiiuae/falcon-7b"
     # Add other Hugging Face models here
 }

@@ -27,35 +27,33 @@ def load_pipeline(task, model):
     model_name = MODELS[model]
     return pipeline(task, model=model_name)

-# Function to predict using Hugging Face models
+# Function to predict using Hugging Face models and OpenAI models
 def predict(task, model, text):
-    if model.startswith("ChatGPT"):
-        response = openai.Completion.create(
-            engine="text-davinci-003",
-            prompt=text,
-            max_tokens=50
+    if model in ["ChatGPT", "GPT-4"]:
+        response = openai.ChatCompletion.create(
+            model="gpt-4" if model == "GPT-4" else "gpt-3.5-turbo",
+            messages=[{"role": "user", "content": text}]
         )
-        results = [{"label": response.choices[0].text.strip()}]
+        results = [{"label": response.choices[0].message['content'].strip()}]
     else:
         selected_pipeline = load_pipeline(task, model)
         results = selected_pipeline(text)
     return results

-# Function to benchmark Hugging Face models
+# Function to benchmark Hugging Face models and OpenAI models
 def benchmark(task, model, file):
     data = pd.read_csv(file.name)
     texts = data['query'].tolist()
     true_labels = data['answer'].tolist()

-    if model.startswith("ChatGPT"):
+    if model in ["ChatGPT", "GPT-4"]:
         predictions = []
         for text in texts:
-            response = openai.Completion.create(
-                engine="text-davinci-003",
-                prompt=text,
-                max_tokens=50
+            response = openai.ChatCompletion.create(
+                model="gpt-4" if model == "GPT-4" else "gpt-3.5-turbo",
+                messages=[{"role": "user", "content": text}]
             )
-            predictions.append(response.choices[0].text.strip())
+            predictions.append(response.choices[0].message['content'].strip())
     else:
         selected_pipeline = load_pipeline(task, model)
         predictions = [selected_pipeline(text)[0]['label'] for text in texts]
@@ -74,7 +72,7 @@ def benchmark(task, model, file):
 with gr.Blocks() as demo:
     with gr.Row():
         task_input = gr.Dropdown(TASKS, label="Task")
-        model_input = gr.Dropdown(list(MODELS.keys()) + ["ChatGPT"], label="Model")
+        model_input = gr.Dropdown(list(MODELS.keys()) + ["ChatGPT", "GPT-4"], label="Model")

     with gr.Tab("Predict"):
         with gr.Row():
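For context, the new ChatGPT/GPT-4 branch calls the ChatCompletion interface of the legacy (pre-1.0) openai Python SDK. A minimal standalone sketch of the same call, assuming openai<1.0 is installed and the API key is supplied via the OPENAI_API_KEY environment variable (key handling is not shown anywhere in this commit):

# Sketch of the call added in predict()/benchmark(); assumes openai<1.0
# and that OPENAI_API_KEY is set in the environment (not part of this commit).
import os
import openai

openai.api_key = os.environ["OPENAI_API_KEY"]

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",  # "gpt-4" when the GPT-4 option is selected
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message["content"].strip())

As in app.py, the reply text is read from response.choices[0].message['content']; both branches of predict() then return a list of {"label": ...} dicts so the Gradio outputs can be handled the same way.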
requirements.txt CHANGED
@@ -4,3 +4,4 @@ pandas
 scikit-learn
 gradio
 torch
+openai
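Note on the new dependency: requirements.txt adds openai without a version pin, while app.py uses the legacy openai.ChatCompletion interface, which was removed in the 1.0 release of the SDK. Pinning (for example openai==0.28) keeps the interface used in this commit; otherwise, a rough sketch of the equivalent call under the 1.x client would be:

# Hypothetical port to the openai>=1.0 client; not part of this commit.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content.strip())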