# tarot/app.py
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the model and tokenizer
model_name = "barissglc/tinyllama-tarot-v1"
print(f"Loading model: {model_name}")
try:
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
device_map="auto" if torch.cuda.is_available() else None
)
print("Model loaded successfully!")
except Exception as e:
print(f"Error loading model: {e}")
tokenizer = None
model = None
def generate_tarot_response(card_name, orientation, question=""):
"""
Generate a tarot reading based on card name, orientation, and optional question
"""
if model is None or tokenizer is None:
return "Error: Model not loaded properly. Please try again later."
try:
# Format the input prompt
if question:
input_text = f"Card: {card_name}, orientation: {orientation}. Question: {question}. Explain in 3 short sentences."
else:
input_text = f"Card: {card_name}, orientation: {orientation}. Explain in 3 short sentences."
        # Tokenize the input and move tensors to whichever device the model is on
        inputs = tokenizer(input_text, return_tensors="pt")
        inputs = {k: v.to(model.device) for k, v in inputs.items()}
# Generate response
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=150,
temperature=0.7,
do_sample=True,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id
)
        # Decode only the newly generated tokens so the prompt is not echoed back
        prompt_length = inputs["input_ids"].shape[-1]
        response = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True).strip()
return response
except Exception as e:
return f"Error generating response: {str(e)}"
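# Quick local check (illustrative; only prints a reading if the model loaded):
#   print(generate_tarot_response("The Fool", "upright", "What should I focus on?"))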
def api_predict(card_name, orientation, question=""):
"""
API endpoint for tarot predictions
"""
result = generate_tarot_response(card_name, orientation, question)
return {
"card": card_name,
"orientation": orientation,
"question": question,
"reading": result
}
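# Example remote call (a sketch, assuming the app is deployed and the official
# client is installed via `pip install gradio_client`; "<space-or-url>" is a
# placeholder for the real address):
#
#   from gradio_client import Client
#   client = Client("<space-or-url>")
#   reading = client.predict("The Fool", "upright", "", api_name="/predict")
#   print(reading)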
# Create Gradio interface
def create_interface():
with gr.Blocks(title="Tarot Reading with AI", theme=gr.themes.Soft()) as demo:
gr.Markdown("# 🔮 AI Tarot Reading")
gr.Markdown("Get insights from tarot cards using AI. Enter a card name, orientation, and optional question.")
with gr.Row():
with gr.Column():
card_name = gr.Textbox(
label="Card Name",
placeholder="e.g., The Fool, The Magician, The Lovers",
value="The Fool"
)
orientation = gr.Dropdown(
choices=["upright", "reversed"],
label="Orientation",
value="upright"
)
question = gr.Textbox(
label="Question (Optional)",
placeholder="e.g., What should I focus on in my career?",
lines=2
)
generate_btn = gr.Button("🔮 Get Reading", variant="primary")
with gr.Column():
output = gr.Textbox(
label="Tarot Reading",
lines=8,
interactive=False
)
# Example cards
gr.Markdown("### Popular Tarot Cards:")
gr.Markdown("""
- **The Fool** - New beginnings, innocence, spontaneity
- **The Magician** - Manifestation, willpower, skill
- **The High Priestess** - Intuition, mystery, subconscious
- **The Empress** - Fertility, abundance, nature
- **The Emperor** - Authority, structure, control
- **The Lovers** - Love, relationships, choices
- **The Chariot** - Determination, willpower, victory
- **Strength** - Inner strength, courage, patience
- **The Hermit** - Soul-searching, introspection, guidance
- **Wheel of Fortune** - Change, cycles, destiny
""")
        # Event handler; api_name exposes this as a named endpoint that
        # remote callers can reach via the Gradio client at /predict
        generate_btn.click(
            fn=generate_tarot_response,
            inputs=[card_name, orientation, question],
            outputs=output,
            api_name="predict"
        )
        # Note: assigning api_predict as an attribute on the Blocks object does
        # not register an API route; the api_name on the click event above
        # exposes /predict instead, and api_predict stays importable for
        # direct Python callers.
return demo
# Create and launch the interface
if __name__ == "__main__":
demo = create_interface()
    # share=True requests a public gradio.live tunnel; hosted platforms such as
    # Hugging Face Spaces ignore it, since 0.0.0.0:7860 is already exposed there.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True
    )