File size: 797 Bytes
7728085
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load model and tokenizer
# NOTE: from_pretrained downloads weights on first run (network + disk side effect).
# trust_remote_code=True executes model code shipped in the Hub repo — acceptable
# for this demo, but only use it with repos you trust.
model_id = "Qwen/Qwen3-0.6B"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

# Create pipeline
# No device/dtype specified, so the pipeline runs on CPU with default settings
# unless transformers picks otherwise — TODO confirm the intended target device.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Chat function
def chat(prompt):
    """Generate a model continuation for *prompt*.

    Args:
        prompt: User-supplied text from the Gradio textbox.

    Returns:
        Only the newly generated text. The default pipeline behavior
        prepends the prompt to the output, which made the chatbot echo
        the user's input; ``return_full_text=False`` strips it.
    """
    # Guard clause: avoid calling the model on an empty/whitespace prompt.
    if not prompt or not prompt.strip():
        return ""
    output = generator(
        prompt,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
        # Return just the generated continuation, not prompt + continuation.
        return_full_text=False,
    )
    return output[0]["generated_text"]

# Gradio UI
# Builds a single-turn text-in/text-out interface around chat() and starts the
# local web server. launch() blocks, so this must stay the last statement.
gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=3, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Qwen3-0.6B Chatbot",
    description="A simple demo using Qwen3-0.6B from Hugging Face"
).launch()