Fu01978 committed · verified
Commit 10ee8ca · 1 Parent(s): e77b699

Update app.py

Files changed (1):
  1. app.py +43 -29
app.py CHANGED
@@ -1,26 +1,25 @@
 import gradio as gr
-from llama_cpp import Llama
-import os
+from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+import torch
 
-# Download and load the GGUF model
-model_url = "https://huggingface.co/bartowski/Llama-3.2-3B-Instruct-GGUF/resolve/main/Llama-3.2-3B-Instruct-Q6_K_L.gguf?download=true"
-model_path = "model.gguf"
-
-# Download model if not already present
-if not os.path.exists(model_path):
-    print("Downloading model...")
-    import urllib.request
-    urllib.request.urlretrieve(model_url, model_path)
-    print("Model downloaded!")
+# Configure 4-bit quantization
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_compute_dtype=torch.float16,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4"
+)
 
-# Load the model
+# Load model and tokenizer
+model_name = "meta-llama/Llama-3.2-3B-Instruct"
+print("Loading tokenizer...")
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 print("Loading model...")
-llm = Llama(
-    model_path=model_path,
-    n_ctx=2048,
-    n_threads=4,
-    n_gpu_layers=0,
-    verbose=False
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    quantization_config=quantization_config,
+    device_map="auto",
+    low_cpu_mem_usage=True
 )
 print("Model loaded!")
 
@@ -43,22 +42,37 @@ def chat(message, history):
     # Add current message
     messages.append({"role": "user", "content": message})
 
-    # Generate response
-    response = llm.create_chat_completion(
-        messages=messages,
-        max_tokens=512,
-        temperature=0.7,
-        top_p=0.9,
+    # Apply chat template
+    prompt = tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True
     )
 
-    # Extract the assistant's response
-    return response["choices"][0]["message"]["content"]
+    # Tokenize
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+    # Generate response
+    with torch.no_grad():
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=512,
+            temperature=0.7,
+            top_p=0.9,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id
+        )
+
+    # Decode and extract only the new response
+    response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+
+    return response.strip()
 
 # Create Gradio interface
 demo = gr.ChatInterface(
     fn=chat,
-    title="Llama 3.2 3B Instruct Chatbot (GGUF)",
-    description="Chat with Llama 3.2 3B Instruct model running from GGUF format. Ask me anything!",
+    title="Llama 3.2 3B Instruct Chatbot",
+    description="Chat with Llama 3.2 3B Instruct model (4-bit quantized). Ask me anything!",
     examples=[
        "What is artificial intelligence?",
        "Write a short poem about coding",