# philosophy / app.py — Hugging Face Space entry point
# Model: mari-a-12a/opt-2.7b-philosophy (commit 036e77c)
import gradio as gr
from transformers import pipeline
# Build the text-generation pipeline once at import time so every
# request reuses the same loaded model.
MODEL_ID = "mari-a-12a/opt-2.7b-philosophy"  # Replace with your model

philo_pipe = pipeline(
    "text-generation",
    model=MODEL_ID,
    device="cpu",  # Switch to "cuda" if using GPU
)
def respond(message):
full_prompt = f"Instruction: {message}\nOutput:"
output = philo_pipe(full_prompt, max_length=100)[0]['generated_text']
return output.split("AI:")[1].strip()
# Wire the model into a simple single-textbox Gradio UI and serve it.
demo = gr.Interface(
    fn=respond,
    inputs=gr.Textbox(lines=2, placeholder="Ask a philosophical question..."),
    outputs="text",
    title="Philosophy Chatbot (Fine-tuned OPT-2.7b)",
)
demo.launch(debug=True)