import random
import re

import gradio as gr
from huggingface_hub import InferenceClient
|
|
| |
| |
| client_chat = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3") |
| |
| client_image = InferenceClient("stabilityai/stable-diffusion-xl-base-1.0") |
|
|
| def infinity_engine(message, history): |
| user_msg = message.lower() |
| |
| |
| image_triggers = ["generate", "make", "create", "draw", "photo", "image", "banao"] |
| if any(word in user_msg for word in image_triggers): |
| yield "Infinity is painting... 🎨" |
| try: |
| |
| seed = random.randint(0, 1000000) |
| img = client_image.text_to_image(message, seed=seed) |
| yield img |
| return |
| except Exception: |
| yield "Infinity: Image server busy hai, 10 second baad phir try karein." |
| return |
|
|
| |
| system_prompt = "You are Infinity, a powerful AI by RockSky1. Be cool and smart." |
| messages = [{"role": "system", "content": system_prompt}] |
| for val in history: |
| if val[0]: messages.append({"role": "user", "content": val[0]}) |
| if val[1]: messages.append({"role": "assistant", "content": val[1]}) |
| messages.append({"role": "user", "content": message}) |
|
|
| response = "" |
| try: |
| |
| result = client_chat.chat_completion(messages, max_tokens=500) |
| response = result.choices[0].message.content |
| yield response |
| except Exception: |
| yield "Infinity: Connection thoda slow hai, ek baar phir se message bhejo bhai." |
|
|
| |
| with gr.Blocks(theme=gr.themes.Default()) as demo: |
| gr.Markdown("# ♾️ INFINITY AI") |
| gr.ChatInterface(fn=infinity_engine) |
|
|
| demo.launch() |