import os

# Redirect the Hugging Face cache to /tmp before importing transformers,
# so model downloads stay within the Space's 50GB storage limit
# (the cache paths are resolved when the library is imported).
os.environ['HF_HOME'] = '/tmp/hf_home'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/hf_cache'

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "Qwen/Qwen2.5-Coder-14B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Custom device map: keep the transformer stack on the GPU and offload the
# output head to the CPU (Qwen2-style checkpoints expose "model" and "lm_head"
# as their top-level modules)
device_map = {
    "model": 0,        # Embeddings + decoder layers on GPU 0
    "lm_head": "cpu",  # Output projection offloaded to CPU
}

# 4-bit quantization; CPU-offloaded modules are kept in fp32
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    llm_int8_enable_fp32_cpu_offload=True,  # Enable CPU offloading
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=quant_config,
    device_map=device_map,
    torch_dtype=torch.float16,              # Reduce memory with FP16
    trust_remote_code=True
)

def chat(message, history):
    # Rebuild the conversation from Gradio's (user, assistant) history pairs
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(text, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.7)
    # Decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
    # gr.ChatInterface expects the assistant reply as a plain string
    return response

demo = gr.ChatInterface(chat)
if __name__ == "__main__":
    demo.launch()