# app_gradio.py
import gradio as gr
from deep_translator import GoogleTranslator
from langdetect import detect
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

MODEL_DIR = "./fine_tuned_model"

def load_model():
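    """Load the fine-tuned GPT-2 tokenizer and model from MODEL_DIR, moving the model to GPU if available."""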
    tokenizer = GPT2Tokenizer.from_pretrained(MODEL_DIR, local_files_only=True)
    model = GPT2LMHeadModel.from_pretrained(MODEL_DIR, local_files_only=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = tokenizer.pad_token_id
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device).eval()
    return tokenizer, model, device

tokenizer, model, device = load_model()

def to_en(text):
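    """Detect the input language and translate the text to English; returns (english_text, detected_lang)."""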
    try:
        lang = detect(text)
    except Exception:
        lang = "en"
    if lang == "en":
        return text, "en"
    translated_text = GoogleTranslator(source=lang, target="en").translate(text)
    # Handle potential None return from translator
    return translated_text if translated_text is not None else text, lang

def from_en(text, tgt):
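    """Translate English text back to the target language (no-op when the target is English)."""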
    if tgt == "en":
        return text
    translated_text = GoogleTranslator(source="en", target=tgt).translate(text)
    # Handle potential None return from translator
    return translated_text if translated_text is not None else text

def generate(prompt, max_new_tokens=120, temperature=0.8):
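    """Sample a continuation from the fine-tuned model using top-k / top-p (nucleus) sampling."""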
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        out = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=temperature,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )
    return tokenizer.decode(out[0], skip_special_tokens=True)

def post_process_generated_text(text, prompt):
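    """Strip the echoed prompt from the model output and collapse consecutive duplicate words."""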
    # Strip the prompt from the start of the output if the model echoed it;
    # startswith is safer than replace(), which would also delete any later
    # verbatim occurrence of the prompt inside the generated text
    if text.startswith(prompt):
        text = text[len(prompt):]
    cleaned_text = text.strip()

    # Remove consecutive repeated words - improved
    words = cleaned_text.split()
    if not words:
        return ""
    cleaned_words = [words[0]]
    for i in range(1, len(words)):
        if words[i].lower() != words[i-1].lower(): # Case-insensitive comparison
            cleaned_words.append(words[i])
    return " ".join(cleaned_words)


def recommend_course(t):
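    """Match keywords in the (already-translated) English input against a small catalog of canned course suggestions."""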
    t = t.lower()
    if "python" in t: return "🐍 Python Programming – Beginner to Advanced"
    if "data science" in t: return "πŸ“Š Data Science Master Program"
    if "ai" in t or "machine learning" in t or "ml" in t: return "πŸ€– AI & Machine Learning with Real Projects"
    if "web" in t or "full stack" in t or "javascript" in t or "react" in t: return "🌐 Full Stack Web Development"
    if "java" in t: return "β˜• Java Programming Essentials"
    return None

def chat(user_input, history):
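    """Gradio handler: translate to English, answer via keyword match or generation, then translate back."""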
    en, lang = to_en(user_input)
    course = recommend_course(en)
    if course:
        en_resp = f"I recommend you check out: {course}"
    else:
        # Shape the prompt to encourage structured output based on request keywords
        prompt = f"User: {en}\nAssistant:"
        if any(keyword in en.lower() for keyword in ["what is", "tell me about"]):
            prompt = f"User: {en}\nAssistant: Here is information about {en.lower().replace('what is', '').replace('tell me about', '').strip()}:\n"
        elif "recommend" in en.lower():
            prompt = f"User: {en}\nAssistant: Based on your request, here is a recommendation:\n"

        en_resp = generate(prompt)

        # Strip the echoed prompt and collapse repeated words
        # (post_process_generated_text already removes the prompt, so no
        # separate startswith check is needed here)
        en_resp = post_process_generated_text(en_resp, prompt)

    final = from_en(en_resp, lang)
    history = history + [(user_input, final)]
    return history, history

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🌐 Multilingual GPT-2 Chatbot")
    chatbot = gr.Chatbot(height=420)
    msg = gr.Textbox(label="Your Message", placeholder="Type here...")
    clear = gr.Button("πŸ—‘οΈ Clear")
    state = gr.State([])
    msg.submit(chat, [msg, state], [chatbot, state])
    clear.click(lambda: ([], []), None, [chatbot, state], queue=False)

# Run as a script with `python app_gradio.py`; from a notebook you can instead
# call demo.launch() directly in a cell.
if __name__ == "__main__":
    demo.launch()
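
# A minimal sketch of exercising the pipeline without the UI (hypothetical input;
# requires ./fine_tuned_model locally and network access for translation):
#     history, _ = chat("hola, recomiéndame un curso de python", [])
#     print(history[-1][1])  # course suggestion, translated back to Spanish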