# app.py - SIMPLEST WORKING VERSION
"""Gradio Space that runs Foundation-Sec-8B in a fresh subprocess per request."""

import os
import subprocess
import sys
import tempfile
import time

import gradio as gr

print("🚀 Starting Foundation-Sec-8B Space...")

# Worker script executed in a separate interpreter for each request so the
# model's memory is released when generation finishes. The prompt is read
# from argv — NOT interpolated into the source — which prevents arbitrary
# code injection and breakage on prompts containing quotes/braces/newlines.
_WORKER_SCRIPT = """\
import sys
import torch
from transformers import pipeline

print("Loading model...")
model = pipeline(
    "text-generation",
    model="fdtn-ai/Foundation-Sec-8B",
    device=0,
    torch_dtype=torch.float16,
    trust_remote_code=True
)
result = model(sys.argv[1], max_new_tokens=200, temperature=0.7)
print(result[0]['generated_text'])
"""


def generate(prompt):
    """Generate text using transformers in a subprocess (avoids loading issues).

    Args:
        prompt: The user's question; passed to the worker as an argv element,
            never interpolated into Python source.

    Returns:
        The last line of the worker's stdout (skips the "Loading model..."
        progress line), or an error/timeout message as a plain string.
    """
    # Unique temp file per request — a fixed /tmp path would race between
    # concurrent Gradio requests.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write(_WORKER_SCRIPT)
        script_path = f.name

    try:
        # Run in subprocess; sys.executable guarantees the same interpreter
        # (and environment) as this app, and the list form means no shell.
        result = subprocess.run(
            [sys.executable, script_path, prompt],
            capture_output=True,
            text=True,
            timeout=180
        )
        if result.returncode == 0:
            # NOTE(review): multi-line generations are truncated to their
            # final line here — preserved from the original behavior.
            return result.stdout.strip().split('\n')[-1]  # Get last line
        else:
            return f"Error: {result.stderr}"
    except subprocess.TimeoutExpired:
        return "Timeout - Model taking too long to load"
    except Exception as e:
        return f"Exception: {str(e)}"
    finally:
        # Best-effort cleanup of the per-request worker script.
        try:
            os.unlink(script_path)
        except OSError:
            pass


# Create interface
iface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(label="Ask about cybersecurity:", lines=3),
    outputs=gr.Textbox(label="Response", lines=10),
    title="🔒 Foundation-Sec-8B",
    description="Cybersecurity AI Assistant",
    examples=[
        ["What is a firewall?"],
        ["Explain encryption:"],
        ["How to create strong passwords?"]
    ]
)

if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0")