import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import json
# Load the model and tokenizer
model_name = "barissglc/tinyllama-tarot-v1"
print(f"Loading model: {model_name}")
try:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None
    )
    print("Model loaded successfully!")
except Exception as e:
print(f"Error loading model: {e}")
tokenizer = None
model = None
def generate_tarot_response(card_name, orientation, question=""):
"""
Generate a tarot reading based on card name, orientation, and optional question
"""
if model is None or tokenizer is None:
return "Error: Model not loaded properly. Please try again later."
try:
# Format the input prompt
if question:
input_text = f"Card: {card_name}, orientation: {orientation}. Question: {question}. Explain in 3 short sentences."
else:
input_text = f"Card: {card_name}, orientation: {orientation}. Explain in 3 short sentences."
# Tokenize input
inputs = tokenizer(input_text, return_tensors="pt")
# Move to same device as model
if torch.cuda.is_available():
inputs = {k: v.cuda() for k, v in inputs.items()}
# Generate response
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=150,
temperature=0.7,
do_sample=True,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id
)
# Decode response
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
# Remove the input text from the response
if input_text in response:
response = response.replace(input_text, "").strip()
return response
except Exception as e:
return f"Error generating response: {str(e)}"
def api_predict(card_name, orientation, question=""):
"""
API endpoint for tarot predictions
"""
result = generate_tarot_response(card_name, orientation, question)
return {
"card": card_name,
"orientation": orientation,
"question": question,
"reading": result
}
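# Example: api_predict("The Fool", "upright") returns a plain dict, e.g.
#   {"card": "The Fool", "orientation": "upright", "question": "", "reading": "..."}
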
# Create Gradio interface
def create_interface():
    with gr.Blocks(title="Tarot Reading with AI", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🔮 AI Tarot Reading")
        gr.Markdown("Get insights from tarot cards using AI. Enter a card name, orientation, and optional question.")

        with gr.Row():
            with gr.Column():
                card_name = gr.Textbox(
                    label="Card Name",
                    placeholder="e.g., The Fool, The Magician, The Lovers",
                    value="The Fool"
                )
                orientation = gr.Dropdown(
                    choices=["upright", "reversed"],
                    label="Orientation",
                    value="upright"
                )
                question = gr.Textbox(
                    label="Question (Optional)",
                    placeholder="e.g., What should I focus on in my career?",
                    lines=2
                )
                generate_btn = gr.Button("🔮 Get Reading", variant="primary")

            with gr.Column():
                output = gr.Textbox(
                    label="Tarot Reading",
                    lines=8,
                    interactive=False
                )

        # Example cards
        gr.Markdown("### Popular Tarot Cards:")
        gr.Markdown("""
- **The Fool** - New beginnings, innocence, spontaneity
- **The Magician** - Manifestation, willpower, skill
- **The High Priestess** - Intuition, mystery, subconscious
- **The Empress** - Fertility, abundance, nature
- **The Emperor** - Authority, structure, control
- **The Lovers** - Love, relationships, choices
- **The Chariot** - Determination, willpower, victory
- **Strength** - Inner strength, courage, patience
- **The Hermit** - Soul-searching, introspection, guidance
- **Wheel of Fortune** - Change, cycles, destiny
        """)

        # Event handlers; api_name also registers this handler as a named API route
        generate_btn.click(
            fn=generate_tarot_response,
            inputs=[card_name, orientation, question],
            outputs=output,
            api_name="predict"
        )

        # Keep a reference to the JSON helper for programmatic callers
        demo.api_predict = api_predict

    return demo

# Create and launch the interface
if __name__ == "__main__":
    demo = create_interface()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=True
    )
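
# Minimal client-side sketch (not executed here): assuming the app above is running
# locally and the click handler is registered under api_name="predict", a reading
# can be requested programmatically with gradio_client:
#
#     from gradio_client import Client
#
#     client = Client("http://localhost:7860")
#     reading = client.predict("The Fool", "upright", "", api_name="/predict")
#     print(reading)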