# Hugging Face Space page header (scrape residue, not code):
# Spaces: Sleeping
import gradio as gr
from huggingface_hub import InferenceClient
import os

# ==============================================================================
# NITHIN-1: SERVERLESS AGI (Fixed & Updated)
# ==============================================================================

# 1. SETUP CLIENTS
# API token is read from the environment (configured in the Space settings).
token = os.getenv("HF_TOKEN")

# TEXT BRAIN: Mistral-7B-Instruct-v0.3 — a free model that supports the
# chat-completion message format used below.
text_client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.3",
    token=token,
)

# IMAGE EYES: Stable Diffusion XL for text-to-image generation.
image_client = InferenceClient(
    "stabilityai/stable-diffusion-xl-base-1.0",
    token=token,
)
| # 2. THE BRAIN LOGIC | |
def nithin_agi(message, history):
    """Chat handler for gr.ChatInterface; streams partial responses.

    Because this function contains ``yield`` it is a generator, so every code
    path must *yield* its output.  The original image-trigger branch used
    ``return "..."`` — in a generator that value is stashed on StopIteration
    and the consumer (Gradio) never displays it, so the user got no reply.

    Args:
        message: Latest user message.
        history: Prior chat turns supplied by gr.ChatInterface (unused).

    Yields:
        Progressively longer partial replies, for live streaming in the UI.
    """
    # A. IMAGE DETECTION — redirect drawing requests to the Image Engine tab.
    triggers = ("draw", "generate", "image", "photo", "paint", "picture")
    lowered = message.lower()  # hoisted: lower() once, not once per trigger
    if any(word in lowered for word in triggers):
        # BUG FIX: was `return <string>`, which a generator silently swallows.
        yield "π¨ I am generating your image... Please check the 'Image Engine' tab!"
        return

    # B. TEXT GENERATION (conversational mode)
    system_prompt = "You are Nithin-1, an advanced AI created by Nithin. Be helpful, smart, and concise."
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": message},
    ]

    partial_message = ""
    try:
        # Stream the chat completion; each chunk carries an incremental delta.
        stream = text_client.chat_completion(messages, max_tokens=512, stream=True)
        for chunk in stream:
            # Guard: some chunks (e.g. the finish chunk) carry no content.
            if chunk.choices and chunk.choices[0].delta.content:
                partial_message += chunk.choices[0].delta.content
                yield partial_message
    except Exception as e:
        # Network/auth/name errors surface as a visible chat reply, not a crash.
        yield f"β οΈ Brain Error: {str(e)}\n\n(Tip: Check if your HF_TOKEN is valid in Settings)"
| # 3. THE IMAGE LOGIC | |
def generate_image(prompt):
    """Render *prompt* via SDXL; return the image, or None on any failure."""
    # Without an API token the inference call cannot succeed — bail out early.
    if not token:
        return None
    # Quality boosters appended to every prompt, same suffix as before.
    styled_prompt = prompt + ", high quality, 8k, masterpiece"
    try:
        return image_client.text_to_image(styled_prompt)
    except Exception as e:
        # Best-effort: log and let the UI show an empty image slot.
        print(f"Error: {e}")
        return None
# ==============================================================================
# THE UI
# ==============================================================================
with gr.Blocks(theme="soft") as demo:
    gr.Markdown("# β‘ NITHIN-1 AGI")
    gr.Markdown("### Autonomous Super Intelligence | 24/7 Online")

    # Tab 1: streaming chat, backed by the nithin_agi generator above.
    with gr.Tab("π¬ Chat"):
        gr.ChatInterface(
            fn=nithin_agi,
            examples=["Who are you?", "Write Python code for a snake game", "Explain Black Holes"],
            cache_examples=False,
        )

    # Tab 2: prompt-to-image generation, backed by generate_image.
    with gr.Tab("π¨ Image Engine"):
        gr.Markdown("Enter a prompt to generate 8K Art.")
        with gr.Row():
            img_input = gr.Textbox(label="Prompt", placeholder="A futuristic cyberpunk city...")
            img_btn = gr.Button("Generate π", variant="primary")
        img_output = gr.Image(label="Output")
        img_btn.click(generate_image, inputs=img_input, outputs=img_output)

# Launch with a request queue so streamed generator output reaches the client.
demo.queue().launch()