import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
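# Load tokenizer and weights once at startup; float16 halves memory use and
# device_map="auto" places the model on the available GPU(s).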
model_id = "deepseek-ai/deepseek-llm-7b-chat"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)


def generate_response(prompt, temperature):
    # deepseek-llm-7b-chat is a chat-tuned model, so wrap the raw prompt in
    # its chat template rather than tokenizing it directly.
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(
        input_ids,
        do_sample=True,
        temperature=temperature,
        top_p=0.9,
        max_new_tokens=512,  # explicit cap; the library default truncates replies
        pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)


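# Minimal Gradio front end: a prompt box and a temperature slider in,
# generated text out.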
demo = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Prompt", lines=6, placeholder="Ask something..."),
        gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature"),
    ],
    outputs="text",
)
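# launch() serves the UI locally (http://127.0.0.1:7860 by default);
# pass share=True for a temporary public link.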
demo.launch()