# Nithin-1 Serverless AGI — Gradio Space app.
# NOTE(review): removed web-page scraping residue that preceded the code
# (Spaces status lines, git blame hashes, a line-number gutter, and file-size
# metadata) — none of it was part of the Python source.
import gradio as gr
from huggingface_hub import InferenceClient
import os
# ==============================================================================
# π NITHIN-1: SERVERLESS AGI (Fixed & Updated)
# ==============================================================================
# 1. SETUP CLIENTS
# Hugging Face API token; None when the HF_TOKEN env var is unset.
token = os.environ.get("HF_TOKEN")

# TEXT BRAIN: Mistral-7B-Instruct-v0.3 — serverless model that supports the
# chat-completion ("Chat") format used by nithin_agi below.
text_client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.3", token=token)

# IMAGE EYES: Stable Diffusion XL base model for text-to-image generation.
image_client = InferenceClient(model="stabilityai/stable-diffusion-xl-base-1.0", token=token)
# 2. THE BRAIN LOGIC
# 2. THE BRAIN LOGIC
def nithin_agi(message, history):
    """Chat backend for gr.ChatInterface.

    Streams an LLM reply for ``message``. If the message looks like an image
    request, yields a single pointer to the 'Image Engine' tab instead.

    Args:
        message: Latest user message (str).
        history: Prior chat turns supplied by gr.ChatInterface (unused here,
            but required by the ChatInterface callback signature).

    Yields:
        Progressively longer partial responses (str) for streaming display.
    """
    # A. IMAGE DETECTION — route image-style requests to the other tab.
    triggers = ["draw", "generate", "image", "photo", "paint", "picture"]
    if any(word in message.lower() for word in triggers):
        # BUG FIX: this function is a generator (it uses `yield` below), so the
        # original `return "..."` silently discarded this message — a bare
        # `return value` in a generator becomes StopIteration.value and the UI
        # never sees it. Yield the message, then stop.
        yield "π¨ I am generating your image... Please check the 'Image Engine' tab!"
        return

    # B. TEXT GENERATION (Conversational Mode)
    system_prompt = "You are Nithin-1, an advanced AI created by Nithin. Be helpful, smart, and concise."
    # Chat-completion message format expected by chat-capable serverless models.
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": message},
    ]

    partial_message = ""
    try:
        # Stream the response token-by-token, yielding the accumulated text so
        # the chat UI updates live.
        stream = text_client.chat_completion(messages, max_tokens=512, stream=True)
        for chunk in stream:
            # Each streamed chunk carries the new text piece in choices[0].delta.
            if chunk.choices and chunk.choices[0].delta.content:
                partial_message += chunk.choices[0].delta.content
                yield partial_message
    except Exception as e:
        # Surface the failure in-chat rather than crashing the interface.
        yield f"β οΈ Brain Error: {str(e)}\n\n(Tip: Check if your HF_TOKEN is valid in Settings)"
# 3. THE IMAGE LOGIC
# 3. THE IMAGE LOGIC
def generate_image(prompt):
    """Render ``prompt`` with SDXL; return the image, or None on any failure."""
    # Guard clause: without an API token the request cannot succeed.
    if not token:
        return None
    # Append quality boosters to steer SDXL toward detailed output.
    styled_prompt = prompt + ", high quality, 8k, masterpiece"
    try:
        return image_client.text_to_image(styled_prompt)
    except Exception as e:
        # Best-effort: log and let the UI show an empty output slot.
        print(f"Error: {e}")
        return None
# ==============================================================================
# π¨ THE UI
# ==============================================================================
# Two-tab Gradio app: a streaming chat tab and a text-to-image tab.
with gr.Blocks(theme="soft") as demo:
    gr.Markdown("# β‘ NITHIN-1 AGI")
    gr.Markdown("### Autonomous Super Intelligence | 24/7 Online")
    with gr.Tab("π¬ Chat"):
        gr.ChatInterface(
            fn=nithin_agi,
            examples=["Who are you?", "Write Python code for a snake game", "Explain Black Holes"],
            cache_examples=False
        )
    with gr.Tab("π¨ Image Engine"):
        gr.Markdown("Enter a prompt to generate 8K Art.")
        with gr.Row():
            img_input = gr.Textbox(label="Prompt", placeholder="A futuristic cyberpunk city...")
            img_btn = gr.Button("Generate π", variant="primary")
        img_output = gr.Image(label="Output")
        img_btn.click(generate_image, inputs=img_input, outputs=img_output)

# Launch with queueing enabled (required for streaming generator responses).
# FIX: removed a stray trailing " |" (extraction residue) that made this line
# a SyntaxError in the original file.
demo.queue().launch()