File size: 4,267 Bytes
0c66a04
 
 
 
 
 
 
 
42e4a73
0c66a04
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42e4a73
 
 
 
 
 
 
0c66a04
 
 
 
 
 
 
 
 
 
 
80bce7e
a6df862
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0c66a04
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42e4a73
 
 
 
 
 
 
 
 
 
0c66a04
 
 
 
 
 
 
 
 
 
 
42e4a73
 
0c66a04
 
 
 
 
 
 
 
 
 
42e4a73
0c66a04
 
 
 
 
 
 
 
 
 
 
 
 
42e4a73
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
import gradio as gr
from transformers import pipeline

# Hugging Face pipeline device: -1 = CPU (a CUDA device index would select a GPU).
DEVICE = -1
# Lazily-populated cache mapping UI task name -> constructed pipeline (filled by get_model).
models = {}

def get_model(task_name):
    """Return the cached pipeline for *task_name*, constructing it on first use.

    Raises KeyError when *task_name* is not one of the supported tasks
    (same behavior as the dropdown choices falling through).
    """
    # One spec per supported UI task: the pipeline task string plus any
    # extra keyword arguments the pipeline needs.
    specs = {
        "Chatbot": {
            "task": "text-generation",
            "model": "microsoft/DialoGPT-small",
        },
        "Sentiment Analysis": {
            "task": "sentiment-analysis",
            "model": "distilbert-base-uncased-finetuned-sst-2-english",
        },
        "NER": {
            "task": "token-classification",
            "model": "dslim/bert-base-NER",
            "aggregation_strategy": "simple",
        },
        "Summarization": {
            "task": "summarization",
            "model": "sshleifer/distilbart-cnn-12-6",
        },
        "Translation (EN→FR)": {
            "task": "translation_en_to_fr",
            "model": "Helsinki-NLP/opus-mt-en-fr",
        },
        "Fill Mask": {
            "task": "fill-mask",
            "model": "bert-base-uncased",
        },
    }

    if task_name not in models:
        # KeyError here for an unknown task, exactly like the original's
        # fall-through lookup on the models dict.
        kwargs = dict(specs[task_name])
        task = kwargs.pop("task")
        models[task_name] = pipeline(task, device=DEVICE, **kwargs)

    return models[task_name]


def run_task(task, user_input, chat_history):
    """Dispatch *user_input* to the pipeline selected by *task*.

    Returns a ``(text_output, chat_history)`` pair. Non-chat tasks put their
    result in ``text_output`` and pass ``chat_history`` through unchanged;
    the Chatbot task appends a user/assistant message pair and returns an
    empty string to clear the input textbox.
    """
    # Reject empty / whitespace-only input before touching any model.
    if not user_input.strip():
        return "Please enter some text.", chat_history

    model = get_model(task)

    if task == "Chatbot":
        response = model(
            user_input,
            max_new_tokens=100,
            do_sample=True,
            temperature=0.7
        )

        # text-generation echoes the prompt; slice it off to keep the reply.
        generated_text = response[0]["generated_text"]
        bot_reply = generated_text[len(user_input):].strip()

        # Build a fresh list rather than mutating the Gradio state in place.
        chat_history = chat_history + [
            {"role": "user", "content": user_input},
            {"role": "assistant", "content": bot_reply}
        ]

        return "", chat_history

    elif task == "Sentiment Analysis":
        sentiment = model(user_input)[0]
        result = f"Label: {sentiment['label']}\nConfidence: {sentiment['score']:.2f}"
        return result, chat_history

    elif task == "Summarization":
        summary = model(
            user_input,
            max_length=120,
            min_length=40,
            do_sample=False
        )[0]["summary_text"]
        return summary, chat_history

    elif task == "NER":
        entities = model(user_input)
        if not entities:
            return "No entities found.", chat_history
        formatted = "\n".join(
            f"{e['word']} ({e['entity_group']}) - {e['score']:.2f}"
            for e in entities
        )
        return formatted, chat_history

    elif task == "Translation (EN→FR)":
        translation = model(user_input)[0]["translation_text"]
        return translation, chat_history

    elif task == "Fill Mask":
        # BUG FIX: the old guard required "<mask>" (the RoBERTa-style token),
        # but the configured model is bert-base-uncased whose mask token is
        # "[MASK]" — so every input the guard accepted was rejected by the
        # pipeline. Ask the tokenizer for its real mask token instead.
        mask_token = model.tokenizer.mask_token
        if mask_token not in user_input:
            return f"Please include the token {mask_token} in your sentence.", chat_history
        predictions = model(user_input)
        formatted = "\n".join(
            f"{p['token_str']} (score: {p['score']:.4f})"
            for p in predictions
        )
        return formatted, chat_history


with gr.Blocks(title="NLP Application") as demo:

    gr.Markdown("# NLP Application")

    # Task selector: its value picks the branch inside run_task / get_model.
    task_dropdown = gr.Dropdown(
        choices=[
            "Chatbot",
            "Sentiment Analysis",
            "NER",
            "Summarization",
            "Translation (EN→FR)",
            "Fill Mask"
        ],
        label="Select NLP Task"
    )

    user_input = gr.Textbox(
        lines=5,
        placeholder="Enter text here...",
        label="Input Text"
    )

    # Plain-text result for the non-chat tasks.
    output_box = gr.Textbox(label="Output")

    # BUG FIX: run_task builds history as {"role": ..., "content": ...}
    # dicts, so the Chatbot component must use the "messages" format — the
    # default tuple format rejects dict entries.
    chatbot = gr.Chatbot(label="Conversation", type="messages")

    # Per-session chat history fed into run_task.
    # NOTE(review): state is an input but never listed in outputs, so it
    # stays [] across turns and the chatbot loses context; fixing that needs
    # run_task to return a third value and outputs=[..., state] — confirm
    # intended behavior before changing.
    state = gr.State([])

    run_button = gr.Button("Run")

    run_button.click(
        fn=run_task,
        inputs=[task_dropdown, user_input, state],
        outputs=[output_box, chatbot]
    )


if __name__ == "__main__":
    # Start the Gradio development server when executed as a script.
    demo.launch()