Spaces:
Runtime error
huhgain
app.py CHANGED
@@ -2,16 +2,15 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import os
 
-#
+# Pull the secret token
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-#
+# Initialize the AI Brain
 client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN)
 
 def samaran_kernel_chat(message, history):
     system_message = "You are T3Sam3, the Samaran Kernel. Provide deep, blue-tier logic. Be witty and technical."
 
-    # Stable version uses (message, history) as strings
     messages = [{"role": "system", "content": system_message}]
     for human, assistant in history:
         messages.append({"role": "user", "content": human})
@@ -19,7 +18,6 @@ def samaran_kernel_chat(message, history):
     messages.append({"role": "user", "content": message})
 
     response = ""
-    # Standard Chat Completion
     for message_chunk in client.chat_completion(
         messages,
         max_tokens=1024,
@@ -30,7 +28,7 @@ def samaran_kernel_chat(message, history):
         response += token
         yield response
 
-#
+# Blue T3Sam3 Styling
 custom_css = """
 body, .gradio-container { background-color: #0b0f19 !important; }
 footer {display: none !important}
@@ -38,7 +36,6 @@ footer {display: none !important}
 .message.bot { background-color: #0f172a !important; color: #60a5fa !important; }
 """
 
-# The Stable Interface Build
 with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="blue")) as demo:
     gr.Markdown("# T3Sam3")
     gr.ChatInterface(
@@ -49,8 +46,5 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="blue")) as demo
             "Are black holes real?",
            "How many Rs are in 'strawberry'?",
            "What is the meaning of life?"
-        ]
-
-
-if __name__ == "__main__":
-    demo.launch()
+        ],
+        cache_
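The hunks above elide the middle of the streaming call (everything between the max_tokens=1024 context line and the response += token context line), so neither the stream flag nor the way each chunk is unpacked is visible in this commit. Below is a minimal, self-contained sketch of how that section plausibly works with huggingface_hub's InferenceClient; stream=True, the chunk.choices[0].delta.content access, and the stream_reply helper name are assumptions based on the library's standard streaming API, not lines taken from the diff.

from huggingface_hub import InferenceClient
import os

client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=os.getenv("HF_TOKEN"))

def stream_reply(messages):
    # Yield the growing assistant reply one streamed token at a time,
    # mirroring the response/yield pattern shown in the diff.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=1024,
        stream=True,  # assumed: not shown in the diff, but needed for chunk-by-chunk iteration
    ):
        response += chunk.choices[0].delta.content or ""  # assumed standard delta access
        yield response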
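The new side of the last hunk cuts off mid-parameter at "cache_" and drops the launch guard that the old revision had, which is consistent with the Runtime error badge at the top of the page. Below is a hedged sketch of how the tail of app.py might be completed, assuming the truncated keyword is cache_examples (only the cache_ prefix is visible in the diff) and reusing the samaran_kernel_chat and custom_css definitions from earlier in the file.

import gradio as gr

with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="blue")) as demo:
    gr.Markdown("# T3Sam3")
    gr.ChatInterface(
        samaran_kernel_chat,  # streaming generator defined earlier in app.py
        examples=[
            "Are black holes real?",
            "How many Rs are in 'strawberry'?",
            "What is the meaning of life?"
        ],
        cache_examples=False,  # assumed name; the diff shows only "cache_"
    )

if __name__ == "__main__":
    demo.launch()  # present in the old revision, removed by this commit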