from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr
# Load model and tokenizer from Hugging Face directly
model_name = "deepseek-ai/deepseek-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
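# Note: device_map="auto" requires the `accelerate` package; it places the model
# on a GPU when one is available and falls back to CPU otherwise.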
# Define a simple chat function
def chat_function(prompt):
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate up to 200 new tokens beyond the prompt
    outputs = model.generate(**inputs, max_new_tokens=200)
    # Decode the full sequence (generate returns the prompt plus the continuation)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Build a simple Gradio interface
iface = gr.Interface(fn=chat_function, inputs="text", outputs="text")
iface.launch()
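# On a Hugging Face Space this file is executed automatically; to try it locally,
# run `python app.py` and open the URL Gradio prints (http://127.0.0.1:7860 by default).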