# qwen-coder / app.py
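"""Gradio Space serving Qwen2.5-Coder-14B-Instruct as a chat app.

The model is loaded in 4-bit via bitsandbytes, with the transformer layers on
the GPU and the lm_head offloaded to CPU to reduce GPU memory use.
"""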
import os

# Redirect the Hugging Face cache to /tmp to avoid the 50 GB persistent-storage limit.
# These must be set before transformers/huggingface_hub are imported, because the
# cache paths are resolved at import time.
os.environ['HF_HOME'] = '/tmp/hf_home'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/hf_cache'

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
model_name = "Qwen/Qwen2.5-Coder-14B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Custom device map: keep the transformer stack on the GPU and offload the output
# projection to CPU. For Qwen2-style models the top-level modules are "model" and "lm_head".
device_map = {
    "model": "cuda",   # transformer layers on GPU
    "lm_head": "cpu",  # output layer offloaded to CPU
}
# 4-bit quantization; FP32 CPU offload is required for the modules mapped to "cpu".
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    llm_int8_enable_fp32_cpu_offload=True,
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map=device_map,
    torch_dtype=torch.float16,  # reduce memory for non-quantized modules
    trust_remote_code=True,
)
def chat(message, history):
    # gr.ChatInterface expects the function to return just the assistant reply;
    # it manages the displayed history itself. Prior turns are not fed back to
    # the model here, matching the original single-turn prompt construction.
    messages = [{"role": "user", "content": message}]
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(text, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            do_sample=True,    # needed for temperature to take effect
            temperature=0.7,
        )
    # Decode only the newly generated tokens, skipping the prompt.
    response = tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
    return response
demo = gr.ChatInterface(chat)
if __name__ == "__main__":
demo.launch()