# Hugging Face Spaces page chrome captured along with the code:
# Spaces
# Sleeping
# Sleeping
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Use TinyLlama 1.1B Chat (completely free; small enough for CPU-only Spaces).
MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# Download (or load from cache) the tokenizer and model weights at startup.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Text-generation pipeline shared by the request handler below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
# AI assistant handler.
def ai_assistant(prompt):
    """Generate a reply for *prompt* using the shared text-generation pipeline.

    Returns only the newly generated text; the prompt is not echoed back.
    Returns an empty string for empty/whitespace-only input.
    """
    # Guard: don't run the model on empty input.
    if not prompt or not prompt.strip():
        return ""
    # max_new_tokens bounds the *generated* length. The previous max_length=200
    # counted prompt tokens as well, so a long prompt could leave little or no
    # room for the answer (or raise an error).
    # NOTE(review): TinyLlama-Chat is trained with a chat template; for best
    # results the prompt should be formatted via tokenizer.apply_chat_template
    # — confirm against the model card.
    response = pipe(
        prompt,
        max_new_tokens=200,
        do_sample=True,
        return_full_text=False,  # don't echo the prompt back to the user
    )
    return response[0]['generated_text']
# Gradio UI: one text box in, generated text out.
iface = gr.Interface(
    fn=ai_assistant,
    inputs="text",
    outputs="text",
    title="AI Asistent",
    description="Zeptej se na cokoli!",
)

# Launch the app only when this file is executed directly (Spaces runs it as
# the main module, so behavior there is unchanged).
if __name__ == "__main__":
    iface.launch()