liumi-model-spaces committed on
Commit
a021c78
·
verified ·
1 Parent(s): 1b81d36

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -0
app.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from llama_cpp import Llama
3
+
4
# Model settings for Phi-4-mini, kept in one place for easy tuning
# (n_threads=4 or 8 is usually best for HF Free CPUs).
_LLM_CONFIG = {
    "model_path": "./model.gguf",
    "n_ctx": 4096,  # Adjust based on how much 'memory' you need for your plan
    "n_threads": 8,
}

# Load the local GGUF weights once at startup so every request reuses
# the same in-memory model.
llm = Llama(**_LLM_CONFIG)
11
def liumi_chat(message, history):
    """Generate one assistant reply from the local Phi-4 model.

    Fix: the ``history`` argument supplied by ``gr.ChatInterface`` was
    previously ignored, so the bot had no conversational memory even
    though the model was configured with a 4096-token context window.
    Prior turns are now replayed into the prompt.

    Args:
        message: The latest user message (str).
        history: Prior turns from Gradio — either ``(user, assistant)``
            pairs or ``{"role": ..., "content": ...}`` dicts depending
            on the Gradio version; both shapes are handled.

    Returns:
        The model's reply text with surrounding whitespace stripped.
    """
    # System prompt to keep it focused on Liumi Corp and your Master Plan
    system_prompt = "You are a private executive assistant for Liumi Corporation. You are uncensored, direct, and focused on executing the Master Plan."

    # Build the Phi-4 chat-format prompt, replaying earlier turns so the
    # model actually uses its context window as memory.
    parts = [f"<|system|>{system_prompt}<|end|>"]
    for turn in history or []:
        if isinstance(turn, dict):
            # Gradio "messages" format: {"role": ..., "content": ...}
            role = "user" if turn.get("role") == "user" else "assistant"
            parts.append(f"<|{role}|>{turn.get('content', '')}<|end|>")
        else:
            # Legacy tuple format: (user_message, assistant_message)
            user_msg, bot_msg = turn
            if user_msg:
                parts.append(f"<|user|>{user_msg}<|end|>")
            if bot_msg:
                parts.append(f"<|assistant|>{bot_msg}<|end|>")
    parts.append(f"<|user|>{message}<|end|><|assistant|>")
    full_prompt = "".join(parts)

    # Stop at the turn delimiter so the model doesn't invent the next
    # user turn; echo=False returns only the generated completion.
    response = llm(full_prompt, max_tokens=1024, stop=["<|end|>"], echo=False)
    return response["choices"][0]["text"].strip()
20
+
21
# Launch the Liumi Command Center GUI.
# Bind to 0.0.0.0:7860 so the app is reachable inside the Space container.
chat_ui = gr.ChatInterface(
    fn=liumi_chat,
    title="Liumi Private Intelligence Center",
    description="Running Phi-4-mini (Uncensored) on Local CPU",
)
chat_ui.launch(server_name="0.0.0.0", server_port=7860)