|
|
| import gradio as gr |
| from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
|
|
| |
# --- Model setup -------------------------------------------------------------
# Falcon-RW-1B: a small, fully open causal LM — light enough to demo on CPU.
model_id = "tiiuae/falcon-rw-1b"

# Load the tokenizer and weights explicitly (rather than letting the pipeline
# resolve the id itself) so both objects are available at module level.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Shared text-generation pipeline used by the chat handler below.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
|
|
def chat_with_cael(prompt):
    """Generate Cael's reply to *prompt* using the Falcon-RW-1B pipeline.

    Args:
        prompt: The user's input text from the Gradio textbox.

    Returns:
        The generated text (the pipeline's ``generated_text`` field, which
        includes the original prompt followed by the model's continuation).
    """
    # BUG FIX: the original used max_length=200, which caps prompt + output
    # *combined* — a long prompt left little or no room for a reply, and a
    # prompt near 200 tokens could abort generation outright.
    # max_new_tokens bounds only the generated continuation.
    outputs = generator(
        prompt,
        max_new_tokens=200,
        do_sample=True,   # sample instead of greedy decoding for varied replies
        top_k=50,         # restrict sampling to the 50 most likely tokens
    )
    return outputs[0]["generated_text"]
|
|
# --- UI ----------------------------------------------------------------------
# A two-line textbox feeding the chat handler; output is plain text.
prompt_box = gr.Textbox(lines=2, placeholder="Talk to Cael...")

iface = gr.Interface(
    fn=chat_with_cael,
    inputs=prompt_box,
    outputs="text",
    title="Cael: Your AI Companion",
    description="This is a simple early version of Cael using Falcon-RW-1B. Fully free and open!",
)

# Start the local Gradio server (blocks until interrupted).
iface.launch()
|
|