Spaces: status Sleeping
| import gradio as gr | |
| from llm_classification import get_answer | |
| from inference_demo import ( | |
| predict_randomforest_2f, predict_xgboost_2f, predict_lightgbm_2f, | |
| predict_svm_2f, predict_decisiontree_2f, predict_naivebayes_2f, | |
| predict_logisticregression_2f, | |
| predict_randomforest_6f, predict_xgboost_6f, predict_lightgbm_6f, | |
| predict_svm_6f, predict_decisiontree_6f, predict_naivebayes_6f, | |
| predict_logisticregression_6f, | |
| ) | |
# Dispatch table: (model display name, feature-set version) -> predictor.
# Built programmatically so the two feature versions stay in lockstep and
# a model cannot be added to one version but forgotten in the other.
_MODEL_ORDER = (
    "Random Forest",
    "XGBoost",
    "LightGBM",
    "SVM",
    "Decision Tree",
    "Naive Bayes",
    "Logistic Regression",
)

PREDICT_FUNCS = {}
for _version, _predictors in (
    ("2-feature", (predict_randomforest_2f, predict_xgboost_2f,
                   predict_lightgbm_2f, predict_svm_2f,
                   predict_decisiontree_2f, predict_naivebayes_2f,
                   predict_logisticregression_2f)),
    ("6-feature", (predict_randomforest_6f, predict_xgboost_6f,
                   predict_lightgbm_6f, predict_svm_6f,
                   predict_decisiontree_6f, predict_naivebayes_6f,
                   predict_logisticregression_6f)),
):
    for _model, _predict in zip(_MODEL_ORDER, _predictors):
        PREDICT_FUNCS[(_model, _version)] = _predict
# Classical-model dropdown entries; the leading glyphs are part of the
# option value and are stripped verbatim in infer(), so keep them exact.
_MODEL_CHOICES = [
    "๐ณ Random Forest",
    "โก XGBoost",
    "๐ก LightGBM",
    "๐ SVM",
    "๐ฒ Decision Tree",
    "๐ Naive Bayes",
    "๐งฎ Logistic Regression",
]

# Full classifier dropdown: LLM first, classical models, then the ensemble.
CLASSIFIERS = ["๐ฎ Gemini", *_MODEL_CHOICES, "๐ค Ensemble"]

# Feature-set sizes supported by the classical models.
FEATURE_VERSIONS = ["2-feature", "6-feature"]
# Preamble shared by both explanation texts (was duplicated inline).
_LANGUAGE_NOTE = (
    "### Supported Language\n"
    "Only **English** sentences are supported.\n\n"
)

# Markdown shown below the demo, keyed by feature-set version.
FEATURE_EXPLANATIONS = {
    "2-feature": _LANGUAGE_NOTE + (
        "### 2-feature version\n"
        "This version uses only 2 frequency-based features:\n"
        " * x1 = Total frequency of words in the Positive class\n"
        " * x2 = Total frequency of words in the Negative class"
    ),
    "6-feature": _LANGUAGE_NOTE + (
        "### 6-feature version\n"
        "This version uses 6 features:\n"
        " * x1 = Total frequency of words in the Positive class\n"
        " * x2 = Total frequency of words in the Negative class\n"
        " * x3 = 1 if the word 'no' appears, else 0\n"
        " * x4 = Count of 1st and 2nd person pronouns\n"
        " * x5 = 1 if the tweet contains '!' else 0\n"
        " * x6 = log(word count)"
    ),
}
def explain_features(version: str) -> str:
    """Return the markdown explanation for the given feature version.

    Parameters
    ----------
    version : "2-feature" or "6-feature" (a FEATURE_EXPLANATIONS key).

    Returns the explanation markdown, or "" for an unknown version.
    Fix: the original indexed the dict directly, so a stale or
    unexpected dropdown value raised KeyError inside the UI callback.
    """
    return FEATURE_EXPLANATIONS.get(version, "")
# Plain model names, in ensemble voting order (match PREDICT_FUNCS keys).
_ENSEMBLE_MODELS = (
    "Random Forest", "XGBoost", "LightGBM", "SVM",
    "Decision Tree", "Naive Bayes", "Logistic Regression",
)

# Decorative glyph prefixes present in the dropdown labels but absent
# from the PREDICT_FUNCS keys; stripped before the dispatch lookup.
_BADGES = ("๐ณ ", "โก ", "๐ก ", "๐ ", "๐ฒ ", "๐ ", "๐งฎ ")


def _label(y):
    """Map a binary prediction (1 = positive) to a gr.Label payload."""
    return {"Positive ๐": 1.0} if y == 1 else {"Negative ๐": 1.0}


def _ensemble(version: str, text: str):
    """Majority-vote across all classical models for *version*.

    Returns (label_dict, detail_markdown) with per-model votes and
    percentages; ties are reported explicitly.
    """
    votes_detail, votes = [], []
    for m in _ENSEMBLE_MODELS:
        func = PREDICT_FUNCS.get((m, version))
        if func:
            y = func(text)
            votes.append(y)
            votes_detail.append(f"- **{m}**: {'Positive ๐' if y == 1 else 'Negative ๐'}")
    if not votes:
        return {"No models available": 1.0}, ""
    # Predictions are 0/1, so the sum is the positive-vote count.
    pos, total = sum(votes), len(votes)
    neg = total - pos
    pos_pct = 100 * pos / total
    neg_pct = 100 * neg / total
    if pos > neg:
        label = {"Positive ๐": 1.0}
        final = "### Final Ensemble Result: **Positive ๐**"
    elif neg > pos:
        label = {"Negative ๐": 1.0}
        final = "### Final Ensemble Result: **Negative ๐**"
    else:
        label = {"Tie ๐ค": 1.0}
        final = "### Final Ensemble Result: **Tie ๐ค**"
    detail_md = (
        f"{final}\n\n"
        f"**Votes:** {pos} positive ({pos_pct:.1f}%) | {neg} negative ({neg_pct:.1f}%) out of {total} models.\n\n"
        f"**Individual model decisions:**\n" + "\n".join(votes_detail)
    )
    return label, detail_md


def infer(clf: str, version: str, text: str):
    """Classify *text* with the chosen classifier.

    Parameters
    ----------
    clf : dropdown label (may carry a decorative glyph prefix).
    version : "2-feature" or "6-feature"; ignored by the Gemini path.
    text : sentence to classify (English only).

    Returns (label_dict, detail_markdown) for the gr.Label / gr.Markdown
    outputs; detail_markdown is "" except for the ensemble path.
    """
    # Fix: also guard against None — the original called text.strip()
    # unconditionally and raised AttributeError on a None input.
    if not text or not text.strip():
        return {"โ ๏ธ Please enter a sentence": 1.0}, ""
    if clf == "๐ฎ Gemini":
        return _label(get_answer(text)), ""
    if clf == "๐ค Ensemble":
        return _ensemble(version, text)
    # Strip the decorative glyph to recover the PREDICT_FUNCS key.
    base_name = clf
    for badge in _BADGES:
        base_name = base_name.replace(badge, "")
    func = PREDICT_FUNCS.get((base_name, version))
    if func is None:
        return {"Model not found": 1.0}, ""
    return _label(func(text)), ""
# UI wiring. Layout is declarative and order-dependent: components are
# rendered in the order they are created inside the Blocks context.
with gr.Blocks(
    title="Sentiment Classifier Demo",
    # CSS class applied to the detail pane so long ensemble reports scroll.
    css=".big-markdown {font-size: 1.2rem; min-height: 300px; overflow:auto;}"
) as demo:
    gr.Markdown("## Sentiment Classifier Demo")
    with gr.Row():
        # Dropdown values must match the strings infer() dispatches on.
        clf = gr.Dropdown(choices=CLASSIFIERS, value="๐ฎ Gemini", label="Classifier (or Ensemble)")
        version = gr.Dropdown(choices=FEATURE_VERSIONS, value="2-feature", label="Feature Version (not used for gemini)")
    txt = gr.Textbox(label="Input sentence (English only)", placeholder="Type a sentenceโฆ")
    btn = gr.Button("Classify")
    out_label = gr.Label(label="Main Result")
    out_detail = gr.Markdown(elem_classes="big-markdown")
    # Feature explanation; seeded with the default version's text and
    # refreshed whenever the version dropdown changes.
    explanation_box = gr.Markdown(FEATURE_EXPLANATIONS["2-feature"])
    version.change(fn=explain_features, inputs=version, outputs=explanation_box)
    # Classify button feeds all three inputs to infer() -> (label, detail).
    btn.click(fn=infer, inputs=[clf, version, txt], outputs=[out_label, out_detail])
    gr.Markdown(
        "**Note:** This demo supports **English** sentences only. "
        "Choose '๐ค Ensemble' to see the combined decision from all classifiers, "
        "or choose '๐ฎ Gemini' to use the Gemini LLM-based classifier."
    )

# Script entry point: launch the Gradio server when run directly.
if __name__ == "__main__":
    demo.launch()