import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import warnings
warnings.filterwarnings("ignore")

# Initialize model
model_name = "sheikh/Sheikh-F1"
device = "cuda" if torch.cuda.is_available() else "cpu"

print("Loading F-1 model...")

try:
    tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")
    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
    
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    
    print("F-1 model loaded successfully!")
    
    def generate_code(prompt, language="English", max_length=200):
        try:
            # Add language prefix
            if language == "Bengali":
                prefix = "বাংলা স্ক্রিপ্ট: "
            elif language == "Banglish":
                prefix = "বাংলিশ: "
            else:
                prefix = "English: "
            
            full_prompt = prefix + prompt
            
            # Generate response
            inputs = tokenizer(full_prompt, return_tensors="pt", padding=True, truncation=True)
            input_ids = inputs["input_ids"].to(device)
            attention_mask = inputs["attention_mask"].to(device)
            
            with torch.no_grad():
                outputs = model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=int(max_length),  # slider values can arrive as floats
                    temperature=0.7,
                    do_sample=True,
                    pad_token_id=tokenizer.eos_token_id,
                    eos_token_id=tokenizer.eos_token_id,
                    repetition_penalty=1.1,
                    top_p=0.9,
                    top_k=50
                )
            
            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
            
            if response.startswith(full_prompt):
                response = response[len(full_prompt):].strip()
            
            return response
            
        except Exception as e:
            return f"Error: {str(e)}"

    # Create interface
    with gr.Blocks(title="F-1 Demo", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🤖 F-1: Multilingual Coding Assistant\n## মাল্টিলিঙ্গুয়াল কোডিং সহায়ক\nCreated by Likhon Sheikh 🇧🇩")
        
        with gr.Row():
            language = gr.Dropdown(["English", "Bengali", "Banglish"], value="English", label="Language")
            max_length = gr.Slider(50, 500, 200, label="Response Length")
        
        prompt = gr.Textbox(label="Enter coding request", lines=3)
        output = gr.Textbox(label="Generated Code", lines=10)
        btn = gr.Button("Generate", variant="primary")
        
        btn.click(
            fn=generate_code,
            inputs=[prompt, language, max_length],
            outputs=[output]
        )
        
        gr.Markdown("""
        ### Features:
        - ✅ Multilingual support (English, Bengali, Banglish)
        - ✅ Code generation in Python
        - ✅ Designed for Bangladeshi developers
        
        **Made with ❤️ by Likhon Sheikh**
        """)
    
except Exception as e:
    print(f"F-1 model loading error: {e}")

    # Fallback interface, built only when the model fails to load so it does
    # not overwrite the working Blocks demo defined above
    demo = gr.Interface(
        fn=lambda x: "Model loading failed. Please check the base model microsoft/DialoGPT-medium.",
        inputs=gr.Textbox(label="Enter coding request"),
        outputs=gr.Textbox(label="Generated Code"),
        title="F-1 Demo (Fallback)"
    )

if __name__ == "__main__":
    demo.queue().launch()
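
# A minimal client-side sketch (assumptions: the demo is running locally on
# Gradio's default port and the click handler is exposed under the function
# name "/generate_code"; neither is guaranteed by this file):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   result = client.predict(
#       "Write a Python function to reverse a string",  # prompt
#       "English",                                       # language
#       200,                                             # max_length
#       api_name="/generate_code",
#   )
#   print(result)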