# app.py - SIMPLEST WORKING VERSION
import os
import subprocess
import sys
import tempfile
import time

import gradio as gr
print("🚀 Starting Foundation-Sec-8B Space...")
def generate(prompt):
    """Generate text with Foundation-Sec-8B in a child process.

    The model is loaded in a subprocess so a load failure or OOM cannot
    take down the Gradio server. The prompt is handed to the child via
    ``argv`` — never interpolated into the generated source — so quotes,
    braces, or malicious input cannot break (or inject into) the script.

    Args:
        prompt: The user's question for the model.

    Returns:
        The model's generated text on success, otherwise a
        human-readable error string (stderr, timeout, or exception).
    """
    # Static worker script: the prompt arrives as sys.argv[1], so no
    # user-controlled text ever becomes code.
    worker_source = """
import sys
import torch
from transformers import pipeline

model = pipeline(
    "text-generation",
    model="fdtn-ai/Foundation-Sec-8B",
    device=0,
    torch_dtype=torch.float16,
    trust_remote_code=True,
)
result = model(sys.argv[1], max_new_tokens=200, temperature=0.7)
# Sentinel lets the parent split the answer from load-time chatter
# without dropping multi-line output.
print("===RESULT===")
print(result[0]["generated_text"])
"""
    # Unique temp file instead of a fixed /tmp path (avoids races
    # between concurrent requests).
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as script:
        script.write(worker_source)
        script_path = script.name
    try:
        proc = subprocess.run(
            # sys.executable, not a bare "python", so the child runs in
            # the same interpreter/venv as the server.
            [sys.executable, script_path, prompt],
            capture_output=True,
            text=True,
            timeout=180,
        )
        if proc.returncode == 0:
            # Everything after the sentinel is the answer (may span lines).
            _, _, answer = proc.stdout.partition("===RESULT===")
            return answer.strip()
        return f"Error: {proc.stderr}"
    except subprocess.TimeoutExpired:
        return "Timeout - Model taking too long to load"
    except Exception as e:  # surface unexpected failures in the UI
        return f"Exception: {str(e)}"
    finally:
        # Always remove the temp script, even on timeout/exception.
        try:
            os.unlink(script_path)
        except OSError:
            pass
# Wire up the Gradio UI: one prompt box in, one response box out.
_prompt_box = gr.Textbox(label="Ask about cybersecurity:", lines=3)
_response_box = gr.Textbox(label="Response", lines=10)
_example_prompts = [
    ["What is a firewall?"],
    ["Explain encryption:"],
    ["How to create strong passwords?"],
]

iface = gr.Interface(
    fn=generate,
    inputs=_prompt_box,
    outputs=_response_box,
    title="🔒 Foundation-Sec-8B",
    description="Cybersecurity AI Assistant",
    examples=_example_prompts,
)

if __name__ == "__main__":
    # Bind to all interfaces so the Space is reachable from outside the container.
    iface.launch(server_name="0.0.0.0")