Niklauseik committed on
Commit f2f0fac · 1 Parent(s): 3c47333
Files changed (1)
  1. app.py +7 -11
app.py CHANGED
@@ -2,7 +2,6 @@ import gradio as gr
 from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
 import pandas as pd
 from sklearn.metrics import accuracy_score, precision_recall_fscore_support
-import torch
 
 # Define the available models and tasks
 TASKS = ["sentiment-analysis", "ner", "text-classification"]
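Note on the first hunk: the only change is dropping the unused `import torch`. A minimal sketch of why that is safe, assuming nothing else in app.py touches torch directly — `pipeline` imports and drives the backend on its own (the checkpoint id below is illustrative, not necessarily an entry in MODELS):

from transformers import pipeline

# pipeline() loads the torch backend internally; no top-level torch import needed
clf = pipeline("sentiment-analysis",
               model="distilbert-base-uncased-finetuned-sst-2-english")
print(clf("Markets rallied after the announcement."))
# -> [{'label': 'POSITIVE', 'score': 0.99...}]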
@@ -14,23 +13,20 @@ MODELS = {
     # Add other models here
 }
 
-def load_pipeline(task, model_name):
-    model = MODELS.get(model_name)
-    if not model:
-        raise ValueError(f"Model {model_name} is not available.")
+def load_pipeline(task, model):
     return pipeline(task, model=model)
 
-def predict(task, model_name, text):
-    selected_pipeline = load_pipeline(task, model_name)
+def predict(task, model, text):
+    selected_pipeline = load_pipeline(task, model)
     results = selected_pipeline(text)
     return results
 
-def benchmark(task, model_name, file):
+def benchmark(task, model, file):
     data = pd.read_csv(file.name)
     texts = data['text'].tolist()
     true_labels = data['label'].tolist()
 
-    selected_pipeline = load_pipeline(task, model_name)
+    selected_pipeline = load_pipeline(task, model)
     predictions = [selected_pipeline(text)[0]['label'] for text in texts]
 
     accuracy = accuracy_score(true_labels, predictions)
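Note on the second hunk: `load_pipeline` now forwards the caller's model identifier straight to `pipeline()`, dropping the `MODELS.get` lookup and the `ValueError` guard, so an unknown id now surfaces as a load error from transformers rather than an early, explicit failure. `benchmark` keeps its expectations: a CSV with `text` and `label` columns whose label strings match the model's output labels exactly. A hedged sketch of exercising the same logic end to end (the file name, texts, and the `average="weighted"` choice are assumptions for illustration; the diff truncates before the full metrics call):

import pandas as pd
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers import pipeline

# Hypothetical fixture: two rows with the column names benchmark() reads
pd.DataFrame({
    "text": ["Great quarter for the company.", "Shares plunged on the news."],
    "label": ["POSITIVE", "NEGATIVE"],  # must match the model's label strings
}).to_csv("sample.csv", index=False)

clf = pipeline("sentiment-analysis")  # default checkpoint, illustrative
data = pd.read_csv("sample.csv")
preds = [clf(t)[0]["label"] for t in data["text"]]

accuracy = accuracy_score(data["label"], preds)
precision, recall, f1, _ = precision_recall_fscore_support(
    data["label"], preds, average="weighted", zero_division=0
)
print({"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1})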
@@ -62,5 +58,5 @@ with gr.Blocks() as demo:
     benchmark_button = gr.Button("Benchmark")
     benchmark_output = gr.JSON(label="Benchmark Output")
     benchmark_button.click(benchmark, inputs=[task_input, model_input, file_input], outputs=benchmark_output)
-
-demo.launch()
+
+demo.launch()
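Note on the third hunk: as rendered here, the change to the blank line and `demo.launch()` appears to be whitespace-only. The wiring itself is gradio's standard Blocks pattern, where `Button.click` binds a Python callback to input and output components. A minimal self-contained sketch of that pattern (component names shortened; the callback is a stand-in, not the app's `predict`):

import gradio as gr

def run(task, model, text):
    # stand-in callback; the real app calls predict(task, model, text)
    return {"task": task, "model": model, "text": text}

with gr.Blocks() as demo:
    task = gr.Dropdown(choices=["sentiment-analysis", "ner", "text-classification"], label="Task")
    model = gr.Textbox(label="Model")
    text = gr.Textbox(label="Text")
    out = gr.JSON(label="Output")
    gr.Button("Run").click(run, inputs=[task, model, text], outputs=out)

demo.launch()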
 
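For reference, the shapes flowing through the new `predict` path: the classification pipelines return a list of one dict per input, which `gr.JSON` renders directly, while the ner task returns one dict per detected entity keyed by `entity` rather than `label` — so the `[0]['label']` indexing in `benchmark` is only meaningful for the two classification tasks. Outputs below are abbreviated and illustrative:

from transformers import pipeline

clf = pipeline("sentiment-analysis")
print(clf("I loved it."))
# -> [{'label': 'POSITIVE', 'score': 0.9998}]

tagger = pipeline("ner")
print(tagger("Hugging Face is based in New York City."))
# -> [{'entity': 'I-ORG', 'word': 'Hu', ...}, {'entity': 'I-LOC', 'word': 'New', ...}]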