"""Minimal OpenAI chat app with a Gradio UI and an optional CLI mode."""

import os

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
MODEL_NAME = os.getenv("MODEL_NAME", "gpt-4o-mini")

if not OPENAI_API_KEY:
    raise ValueError("OPENAI_API_KEY não encontrado. Configure no .env.")

client = OpenAI(api_key=OPENAI_API_KEY)


def chat(prompt: str) -> str:
    """Send *prompt* as a single user message and return the model's reply.

    Returns an empty string when the model produces no text content
    (e.g. a response carrying only tool calls).
    """
    resp = client.chat.completions.create(
        model=MODEL_NAME,
        messages=[{"role": "user", "content": prompt}],
    )
    # BUG FIX: the message is a pydantic model, not a dict — it has no .get(),
    # and getattr() evaluates its default eagerly, so the original
    # `getattr(msg, "content", msg.get("content"))` raised AttributeError on
    # every call. Read .content directly; it may be None, so coalesce to "".
    content = resp.choices[0].message.content
    return content or ""


def gradio_chat_fn(message: str, history) -> str:
    """ChatInterface wrapper: ignores *history* and uses the stateless backend."""
    return chat(message)


def build_ui() -> gr.Blocks:
    """Build the Gradio chat UI (ChatInterface subclasses gr.Blocks)."""
    return gr.ChatInterface(
        fn=gradio_chat_fn,
        title="🔥 FireStarter Chat",
        description="Minimal OpenAI chat powered by FireStarter.",
    )


def run_cli() -> None:
    """Interactive terminal loop; 'exit'/'quit', Ctrl-C or Ctrl-D terminates."""
    print("🔥 FireStarter Chat — modo CLI (digite 'exit' para sair)")
    while True:
        try:
            msg = input("\nVocê: ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\nSaindo.")
            break
        if msg.lower() in ("exit", "quit"):
            break
        if not msg:
            # Robustness: don't send empty prompts to the API.
            continue
        try:
            resp = chat(msg)
            print("\nAI:", resp)
        except Exception as e:
            # Top-level boundary: report the error but keep the REPL alive.
            print(f"\n[ERRO] {e}")


if __name__ == "__main__":
    mode = os.getenv("FS_MODE", "gradio").lower()
    if mode == "cli":
        run_cli()
    else:
        demo = build_ui()
        demo.launch(server_name="0.0.0.0", server_port=7860)