MENG21 committed on
Commit
ff47c9e
·
1 Parent(s): 9fdf227
Files changed (1) hide show
  1. app5_selectbox/evaluation_analysis.py +43 -42
app5_selectbox/evaluation_analysis.py CHANGED
@@ -1,18 +1,19 @@
1
- import gspread
2
  import pandas as pd
3
- from oauth2client.service_account import ServiceAccountCredentials
4
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
5
  import torch
6
  import re
7
  import streamlit as st
8
  import hydralit_components as hc
9
  from matplotlib import pyplot as plt
10
- import numpy as np
11
  from wordcloud import WordCloud
12
  import plotly.graph_objs as go
13
- import plotly.express as px
14
- import plotly.figure_factory as ff
15
  # from PIL import ImageFont
 
16
  # from app5_selectbox.langchain_llama_gpu import llm_chain
17
  from app5_selectbox.g4f_prompt import g4f_prompt
18
  # from app5_selectbox.llama2_prompt import llama_prompt
@@ -61,55 +62,55 @@ def clean_text(text_list):
61
  return cleaned_samples
62
 
63
 
64
- # local model
65
- def classify_sentiments(text_samples, tokenizer, model):
66
- instructor_comments = []
67
- predicted_sentiments = []
68
- predicted_sentiments_scores = []
69
 
70
- # Iterate through the text samples and classify the sentiment
71
- for idx, text_sample in enumerate(text_samples):
72
- # Tokenize the text sample
73
- inputs = tokenizer(text_sample, return_tensors="pt")
74
 
75
- # Perform sentiment classification
76
- outputs = model(**inputs)
77
 
78
- # Get the predicted sentiment (positive/negative)
79
- predicted_class = torch.argmax(outputs.logits, dim=1).item()
80
 
81
- # Get the probabilities for each class
82
- probabilities = torch.softmax(outputs.logits, dim=1).tolist()[0]
83
 
84
- # Store results
85
- instructor_comments.append(text_sample)
86
- predicted_sentiments.append("positive" if predicted_class == 1 else "negative")
87
- predicted_sentiments_scores.append({"positive": probabilities[1]*100, "negative": probabilities[0]*100})
88
- return instructor_comments, predicted_sentiments, predicted_sentiments_scores
89
 
90
 
91
- # # inference
92
- # def classify_sentiments(text_samples, model):
93
- # instructor_comments = []
94
- # predicted_sentiments = []
95
- # predicted_sentiments_scores = []
96
 
97
- # # text = ["i love this", "nice one!", "happy!"]
98
- # selected_model = model
99
- # results = [analyze_sintement(t, selected_model) for t in text_samples]
100
 
101
 
102
 
103
- # for idx, result in enumerate(results):
104
- # # st.text(result[0])
105
- # # predicted_class, probabilities = analyze_sintement(text_sample, model)
106
- # # Store results
107
- # instructor_comments.append(text_samples[idx])
108
- # predicted_sentiments.append("positive" if result[0] == "LABEL_1" else "negative")
109
- # predicted_sentiments_scores.append({"positive": result[1]*100, "negative": 100-(result[1]*100)})
110
 
111
- # # st.write(instructor_comments)
112
- # return instructor_comments, predicted_sentiments, predicted_sentiments_scores
113
 
114
 
115
 
 
1
+ # import gspread
2
  import pandas as pd
3
+ # from oauth2client.service_account import ServiceAccountCredentials
4
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
5
  import torch
6
  import re
7
  import streamlit as st
8
  import hydralit_components as hc
9
  from matplotlib import pyplot as plt
10
+ # import numpy as np
11
  from wordcloud import WordCloud
12
  import plotly.graph_objs as go
13
+ # import plotly.express as px
14
+ # import plotly.figure_factory as ff
15
  # from PIL import ImageFont
16
+
17
  # from app5_selectbox.langchain_llama_gpu import llm_chain
18
  from app5_selectbox.g4f_prompt import g4f_prompt
19
  # from app5_selectbox.llama2_prompt import llama_prompt
 
62
  return cleaned_samples
63
 
64
 
65
+ # # local model
66
+ # def classify_sentiments(text_samples, tokenizer, model):
67
+ # instructor_comments = []
68
+ # predicted_sentiments = []
69
+ # predicted_sentiments_scores = []
70
 
71
+ # # Iterate through the text samples and classify the sentiment
72
+ # for idx, text_sample in enumerate(text_samples):
73
+ # # Tokenize the text sample
74
+ # inputs = tokenizer(text_sample, return_tensors="pt")
75
 
76
+ # # Perform sentiment classification
77
+ # outputs = model(**inputs)
78
 
79
+ # # Get the predicted sentiment (positive/negative)
80
+ # predicted_class = torch.argmax(outputs.logits, dim=1).item()
81
 
82
+ # # Get the probabilities for each class
83
+ # probabilities = torch.softmax(outputs.logits, dim=1).tolist()[0]
84
 
85
+ # # Store results
86
+ # instructor_comments.append(text_sample)
87
+ # predicted_sentiments.append("positive" if predicted_class == 1 else "negative")
88
+ # predicted_sentiments_scores.append({"positive": probabilities[1]*100, "negative": probabilities[0]*100})
89
+ # return instructor_comments, predicted_sentiments, predicted_sentiments_scores
90
 
91
 
92
# inference
def classify_sentiments(text_samples, model):
    """Run sentiment inference over *text_samples* with the selected model.

    Returns three parallel lists:
      - the original comments, in input order
      - predicted labels ("positive" if the model emits "LABEL_1", else "negative")
      - per-class percentage scores as {"positive": p, "negative": 100 - p}
    """
    instructor_comments = []
    predicted_sentiments = []
    predicted_sentiments_scores = []

    # Run the external analyzer once per sample, preserving order.
    predictions = [analyze_sintement(sample, model) for sample in text_samples]

    for sample, result in zip(text_samples, predictions):
        instructor_comments.append(sample)

        label = "positive" if result[0] == "LABEL_1" else "negative"
        predicted_sentiments.append(label)

        positive_pct = result[1] * 100
        # NOTE(review): the raw confidence is always mapped to "positive"
        # regardless of the predicted label — confirm this matches
        # analyze_sintement's output convention (it may return the confidence
        # of the *predicted* class, not of LABEL_1).
        predicted_sentiments_scores.append(
            {"positive": positive_pct, "negative": 100 - positive_pct}
        )

    return instructor_comments, predicted_sentiments, predicted_sentiments_scores
114
 
115
 
116