WWMachine committed
Commit c86e03f · verified · 1 Parent(s): fcca145

Update app.py

Files changed (1): app.py +61 -69
app.py CHANGED
@@ -1,70 +1,62 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-
-def respond(
-    message,
-    history: list[dict[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
-):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="WWMachine/testmodel")
-
-    messages = [{"role": "system", "content": system_message}]
-
-    messages.extend(history)
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-chatbot = gr.ChatInterface(
-    respond,
-    type="messages",
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()
-
-
-if __name__ == "__main__":
-    demo.launch()
+from llama_cpp import Llama
+from huggingface_hub import hf_hub_download
+
+# --- Configuration ---
+# 1. Update with your model's repo ID and file name
+MODEL_REPO = "Your-HF-Username/your-model-repo"
+MODEL_FILE = "your-model-file-Q4_K_M.gguf"
+# Adjust context window and other params as needed
+CONTEXT_WINDOW = 4096
+MAX_NEW_TOKENS = 512
+TEMPERATURE = 0.7
+
+# --- Model Loading Function ---
+def load_llm():
+    """Downloads the GGUF model and initializes LlamaCPP."""
+    print("Downloading model...")
+    model_path = hf_hub_download(
+        repo_id=MODEL_REPO,
+        filename=MODEL_FILE
+    )
+
+    # Initialize the LLM with the downloaded model path
+    # n_ctx is the context window size
+    # n_threads=2 matches the two vCPUs available on a free CPU Space
+    llm = Llama(
+        model_path=model_path,
+        n_ctx=CONTEXT_WINDOW,
+        n_threads=2,
+        verbose=False  # Set to True for debugging
+    )
+    print("Model loaded successfully!")
+    return llm
+
+# Load the model only once when the Space starts
+llm = load_llm()
+
+# --- Inference Function ---
+def generate(prompt, history):
+    """Generates a response using the Llama model."""
+    # Use a basic prompt template (adjust for your model's specific format)
+    full_prompt = f"### Human: {prompt}\n### Assistant:"
+
+    output = llm(
+        prompt=full_prompt,
+        max_tokens=MAX_NEW_TOKENS,
+        temperature=TEMPERATURE,
+        stop=["### Human:"],  # Stop generation at the next user turn
+        echo=False
+    )
+
+    # Extract the text from the response object
+    response_text = output['choices'][0]['text'].strip()
+    return response_text
+
+# --- Gradio Interface ---
+# Use the ChatInterface for a quick, functional chat UI
+gr.ChatInterface(
+    generate,
+    title=f"Chat with {MODEL_FILE}",
+    description="A GGUF LLM hosted on a Hugging Face CPU Space using llama-cpp-python."
+).launch()
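
Note that the new generate() ignores the history argument Gradio passes in, so each turn is answered without conversational context. Below is a minimal history-aware sketch, assuming the ChatInterface is created with type="messages" (so history arrives as OpenAI-style {"role", "content"} dicts) and that the GGUF file carries a chat template that llama-cpp-python's create_chat_completion can apply; generate_with_history is a hypothetical name, not part of the commit.

def generate_with_history(prompt, history):
    """Hypothetical variant of generate() that keeps prior turns."""
    # Assumes history is a list of {"role", "content"} dicts (type="messages")
    messages = list(history)
    messages.append({"role": "user", "content": prompt})

    # create_chat_completion applies the model's chat template for us
    output = llm.create_chat_completion(
        messages=messages,
        max_tokens=MAX_NEW_TOKENS,
        temperature=TEMPERATURE,
    )
    return output["choices"][0]["message"]["content"].strip()

# Hooked up as: gr.ChatInterface(generate_with_history, type="messages").launch()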
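The removed version streamed tokens as they arrived, while the new generate() only returns the finished string. llama-cpp-python also supports streaming (stream=True makes the call yield completion chunks), and ChatInterface accepts a generator function, so a streaming drop-in could look like this sketch, reusing the same assumed "### Human:" prompt format; generate_stream is a hypothetical name.

def generate_stream(prompt, history):
    """Hypothetical streaming variant: yields the growing response."""
    full_prompt = f"### Human: {prompt}\n### Assistant:"
    response = ""
    for chunk in llm(
        prompt=full_prompt,
        max_tokens=MAX_NEW_TOKENS,
        temperature=TEMPERATURE,
        stop=["### Human:"],
        stream=True,  # yields chunks instead of one final dict
    ):
        response += chunk["choices"][0]["text"]
        yield response  # ChatInterface re-renders on each partial response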