# Hugging Face Space status header (non-code residue from the Space/notebook export): Sleeping
# Environment sanity checks and model setup.
import transformers
import torch

# In a plain script, bare expressions are silent no-ops (they only display
# in a notebook) — print them so the checks are actually visible.
print(transformers.__version__)
print(torch.cuda.is_available())

from transformers import pipeline

# Build the text-generation pipeline ONCE, directly with the target model.
# The original code first created pipeline("text-generation") with the
# default checkpoint and immediately discarded it, wasting a full model
# download and load.
generator = pipeline(
    "text-generation",
    model="Qwen/Qwen3-4B-Instruct-2507",
    torch_dtype="auto",   # let transformers pick the best dtype for the hardware
    device_map="auto",    # place the model on GPU automatically if available
)
# Smoke test 1: English prompt — ask the assistant for a short joke.
chat = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Tell me a short joke"},
]
result = generator(
    chat,
    max_new_tokens=100,      # cap the reply length
    do_sample=True,          # sample instead of greedy decoding
    temperature=0.7,
    return_full_text=False,  # return only the newly generated text
)
print(result[0]["generated_text"])
# Smoke test 2: Turkish prompt — check multilingual behavior.
chat = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Türkiyenin başkenti neresidir?"},
]
result = generator(
    chat,
    max_new_tokens=100,      # cap the reply length
    do_sample=True,          # sampled decoding, same settings as the first demo
    temperature=0.7,
    return_full_text=False,  # return only the newly generated text
)
print(result[0]["generated_text"])
| import gradio as gr | |
def generate_text(prompt):
    """Return the assistant's reply to *prompt*.

    Wraps the module-level `generator` pipeline with a fixed system
    message and the same sampling settings used in the demos above.
    """
    conversation = [
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": prompt},
    ]
    completions = generator(
        conversation,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,
    )
    # The pipeline returns a list of candidates; take the first one's text.
    return completions[0]["generated_text"]
# Minimal Gradio front end: one textbox in, one textbox out, wired to
# generate_text.
prompt_box = gr.Textbox(label="Give an input")
reply_box = gr.Textbox(label="Output")

demo = gr.Interface(
    fn=generate_text,
    inputs=prompt_box,
    outputs=reply_box,
    title="Text Generation",
)
demo.launch()