import gradio as gr
from huggingface_hub import InferenceClient
import random

# 1. Multiple models for stability (if one fails, fall back to the other)
# Mistral 7B for chat (very stable on the free tier)
client_chat = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
# Stable Diffusion XL for images (fallback when FLUX is busy)
client_image = InferenceClient("stabilityai/stable-diffusion-xl-base-1.0")
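
# Note: InferenceClient picks up a Hugging Face token from the HF_TOKEN environment
# variable when one is set; a token can also be passed explicitly, e.g.
# InferenceClient("mistralai/Mistral-7B-Instruct-v0.3", token="hf_..."), which may
# help with rate limits on the free tier.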

def infinity_engine(message, history):
    user_msg = message.lower()

    # CASE 1: Image generation
    image_triggers = ["generate", "make", "create", "draw", "photo", "image", "banao"]
    if any(word in user_msg for word in image_triggers):
        yield "Infinity is painting... 🎨"
        try:
            # Random seed so a new image is generated each time
            seed = random.randint(0, 1000000)
            img = client_image.text_to_image(message, seed=seed)
            yield img
            return
        except Exception:
            yield "Infinity: The image server is busy, please try again in 10 seconds."
            return

    # CASE 2: Text chat
    system_prompt = "You are Infinity, a powerful AI by RockSky1. Be cool and smart."
    messages = [{"role": "system", "content": system_prompt}]
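    # Assumption: gr.ChatInterface (default tuple format) supplies history as a list of
    # (user_message, assistant_reply) pairs; the loop below replays them for the model.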
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    response = ""
    try:
        # Keep stream=False (the default) so the connection does not break mid-response
        result = client_chat.chat_completion(messages, max_tokens=500)
        response = result.choices[0].message.content
        yield response
    except Exception:
        yield "Infinity: The connection is a bit slow, please send your message again."

# UI Setup
with gr.Blocks(theme=gr.themes.Default()) as demo:
    gr.Markdown("# ♾️ INFINITY AI")
    gr.ChatInterface(fn=infinity_engine)

demo.launch()