Spaces:
Paused
Paused
- HF_inference.py +2 -2
- HF_pipeline.py +19 -0
- __pycache__/HF_inference.cpython-39.pyc +0 -0
- app5_selectbox/evaluation_analysis.py +3 -1
HF_inference.py
CHANGED
|
@@ -69,7 +69,7 @@ headers = {"Authorization": SECRET_TOKEN}
|
|
| 69 |
MAX_RETRIES = 3
|
| 70 |
RETRY_INTERVAL = 1 # in seconds
|
| 71 |
|
| 72 |
-
@st.cache_resource(experimental_allow_widgets=True, show_spinner=False)
|
| 73 |
def query(payload, selected_model):
|
| 74 |
# st.write(selected_model)
|
| 75 |
|
|
@@ -81,7 +81,7 @@ def query(payload, selected_model):
|
|
| 81 |
if response.status_code == 200:
|
| 82 |
return response.json()
|
| 83 |
else:
|
| 84 |
-
st.info("
|
| 85 |
time.sleep(RETRY_INTERVAL)
|
| 86 |
|
| 87 |
return None
|
|
|
|
| 69 |
MAX_RETRIES = 3
|
| 70 |
RETRY_INTERVAL = 1 # in seconds
|
| 71 |
|
| 72 |
+
# @st.cache_resource(experimental_allow_widgets=True, show_spinner=False)
|
| 73 |
def query(payload, selected_model):
|
| 74 |
# st.write(selected_model)
|
| 75 |
|
|
|
|
| 81 |
if response.status_code == 200:
|
| 82 |
return response.json()
|
| 83 |
else:
|
| 84 |
+
st.info("loading..")
|
| 85 |
time.sleep(RETRY_INTERVAL)
|
| 86 |
|
| 87 |
return None
|
HF_pipeline.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from transformers import pipeline
import streamlit as st

# Hugging Face Hub model IDs, keyed by the display names shown in the app's
# model selectbox (must match the `models` list in evaluation_analysis.py).
MODEL_URLS = {
    "DISTILIBERT MODEL": "MENG21/stud-fac-eval-distilbert-base-uncased",
    "BERT-LARGE MODEL": "MENG21/stud-fac-eval-bert-large-uncased",
    "BERT-BASE MODEL": "MENG21/stud-fac-eval-bert-base-uncased"
}


# NOTE(review): `experimental_allow_widgets` is deprecated in recent Streamlit
# releases — confirm the deployed Streamlit version before upgrading.
@st.cache_resource(experimental_allow_widgets=True, show_spinner=False)
def analyze_sintement(text, selected_model):
    """Classify `text` with a locally-run Hugging Face text-classification pipeline.

    Replaces the remote Inference-API call in HF_inference.py by downloading and
    running the model in-process. Results are cached by Streamlit per
    (text, selected_model) pair via @st.cache_resource.

    Args:
        text: The evaluation comment to classify.
        selected_model: One of the MODEL_URLS display-name keys
            (e.g. "BERT-BASE MODEL"). Raises KeyError for unknown names.

    Returns:
        The pipeline output: a list of {"label": ..., "score": ...} dicts.

    Note: the name keeps the original "sintement" spelling because callers
    (evaluation_analysis.py) import it under this exact name.
    """
    classifier = pipeline("text-classification", model=MODEL_URLS[selected_model])
    return classifier(text)
|
__pycache__/HF_inference.cpython-39.pyc
CHANGED
|
Binary files a/__pycache__/HF_inference.cpython-39.pyc and b/__pycache__/HF_inference.cpython-39.pyc differ
|
|
|
app5_selectbox/evaluation_analysis.py
CHANGED
|
@@ -19,7 +19,9 @@ from app5_selectbox.g4f_prompt import g4f_prompt
|
|
| 19 |
# from app5_selectbox.llama2_prompt import llama_prompt
|
| 20 |
from app5_selectbox.naive_bayes_cl import nb_clf
|
| 21 |
|
| 22 |
-
from HF_inference import analyze_sintement
|
|
|
|
|
|
|
| 23 |
|
| 24 |
models = ['BERT-BASE MODEL', 'BERT-LARGE MODEL', 'DISTILIBERT MODEL', 'NAIVE BAYES MODEL']
|
| 25 |
|
|
|
|
| 19 |
# from app5_selectbox.llama2_prompt import llama_prompt
|
| 20 |
from app5_selectbox.naive_bayes_cl import nb_clf
|
| 21 |
|
| 22 |
+
# from HF_inference import analyze_sintement
|
| 23 |
+
|
| 24 |
+
from HF_pipeline import analyze_sintement
|
| 25 |
|
| 26 |
models = ['BERT-BASE MODEL', 'BERT-LARGE MODEL', 'DISTILIBERT MODEL', 'NAIVE BAYES MODEL']
|
| 27 |
|