spanofzero committed on
Commit
7ca962e
·
verified ·
1 Parent(s): 242ab7a

look blue

Browse files
Files changed (1) hide show
  1. app.py +24 -14
app.py CHANGED
import gradio as gr
from huggingface_hub import InferenceClient
import os

# Read the Hugging Face API token from the environment (the Space secret).
HF_TOKEN = os.getenv("HF_TOKEN")

# Remote Llama-3-8B-Instruct endpoint served via the HF Inference API.
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN)
def samaran_kernel_chat(message, history):
    """Stream a chat reply from the Llama-3 inference endpoint.

    Args:
        message: The user's latest message.
        history: Prior (user, assistant) pairs as kept by gr.ChatInterface.

    Yields:
        The assistant reply accumulated so far (Gradio streaming contract:
        each yield replaces the displayed text).
    """
    # The Samaran personality.
    system_message = "You are the Samaran Kernel. A privacy-focused AI collaborator. Be witty, insightful, and clear."

    # Convert Gradio's pair-based history into role-tagged chat messages.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, ai_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": ai_msg})
    messages.append({"role": "user", "content": message})

    # Stream the response, yielding the running text after each chunk.
    response = ""
    for message_chunk in client.chat_completion(
        messages,
        max_tokens=512,
        stream=True,
    ):
        token = message_chunk.choices[0].delta.content
        # delta.content can be None on role/terminal chunks — guard so
        # `response += token` cannot raise TypeError mid-stream.
        if token:
            response += token
        yield response
33
 
34
# Assemble the "T3-style" chat interface; the glass theme gives a sleek,
# modern look.
demo = gr.ChatInterface(
    fn=samaran_kernel_chat,
    title="Samaran Kernel",
    description="Privacy-First AI Interface. Your data stays on Hugging Face.",
    theme="glass",
)

# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
 
from huggingface_hub import InferenceClient
import os

# Pulling your HF Token secret. The Llama-3 model is gated, so an absent
# token produces an opaque 401 at request time — warn loudly up front.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    print("WARNING: HF_TOKEN is not set; requests to the gated Llama-3 model will likely fail.")

# Connecting to the brain (Llama-3-8B) via the HF Inference API.
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN)
def samaran_kernel_chat(message, history):
    """Stream a chat reply from the Llama-3 inference endpoint.

    Args:
        message: The user's latest message.
        history: Prior (user, assistant) pairs as kept by gr.ChatInterface.

    Yields:
        The assistant reply accumulated so far (Gradio streaming contract:
        each yield replaces the displayed text).
    """
    # The T3Sam3 personality.
    system_message = "You are the Samaran Kernel (T3Sam3). Provide deep, technical, yet witty insights. You are a blue-themed upgrade to the standard T3 Chat."

    # Convert Gradio's pair-based history into role-tagged chat messages.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, ai_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": ai_msg})
    messages.append({"role": "user", "content": message})

    # Stream the response, yielding the running text after each chunk.
    response = ""
    for message_chunk in client.chat_completion(
        messages,
        max_tokens=1024,
        stream=True,
    ):
        token = message_chunk.choices[0].delta.content
        # delta.content can be None on role/terminal chunks — guard so
        # `response += token` cannot raise TypeError mid-stream.
        if token:
            response += token
        yield response
30
 
# Custom CSS to match the 't3.chat' layout with blue hues.
custom_css = """
.gradio-container { background-color: #0b0f19 !important; }
footer {visibility: hidden}
#component-0 { border: none !important; }
.message.user { background-color: #1e293b !important; border-radius: 10px !important; }
.message.bot { background-color: #0f172a !important; border-radius: 10px !important; color: #60a5fa !important; }
button.primary { background: linear-gradient(90deg, #2563eb, #3b82f6) !important; border: none !important; }
"""

# The T3Sam3 interface.
# NOTE(review): clear_btn / undo_btn / retry_btn were removed from
# gr.ChatInterface in Gradio 5 — confirm the Space pins a Gradio 4.x
# version that still accepts these keyword arguments.
blue_theme = gr.themes.Soft(primary_hue="blue", secondary_hue="slate")
with gr.Blocks(css=custom_css, theme=blue_theme) as demo:
    gr.Markdown("# T3Sam3")
    gr.ChatInterface(
        fn=samaran_kernel_chat,
        description="Samaran Kernel Intelligence Layer - Blue Tier Edition",
        clear_btn=None,
        undo_btn=None,
        retry_btn=None,
    )

# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()