"""JR Study Bible — a Gradio app for exegesis, Q&A, and sermon outlines.

Sends prompts to the Hugging Face Inference API (Llama-3.3-70B-Instruct)
and caches responses for ten minutes to avoid repeated identical calls.
"""

import os
import json
from functools import partial

import gradio as gr
import requests
from cachetools import TTLCache, cached
from cachetools.keys import hashkey

# Retrieve the API token from secrets; fail fast if it is missing.
api_token = os.getenv("API_TOKEN")
if not api_token:
    raise ValueError("API token not found. Make sure 'API_TOKEN' is set in the Secrets.")

# Use the token in your request headers.
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.3-70B-Instruct"
HEADERS = {"Authorization": f"Bearer {api_token}"}

# Seconds to wait for the inference API before giving up; without a timeout
# a stalled request would hang the UI indefinitely.
REQUEST_TIMEOUT = 60

# Cache to store API responses for 10 minutes.
# NOTE: the cache is shared by all three functions, so each one prefixes its
# keys (via hashkey) — otherwise the same input string typed into two
# different tabs would return the wrong cached answer.
cache = TTLCache(maxsize=100, ttl=600)


def _query_model(prompt, marker):
    """POST *prompt* to the inference API and return the text after *marker*.

    Returns a human-readable error string instead of raising, so the Gradio
    UI can display failures directly in the output textbox.
    """
    payload = {"inputs": prompt}
    try:
        response = requests.post(
            API_URL, headers=HEADERS, json=payload, timeout=REQUEST_TIMEOUT
        )
        response.raise_for_status()
        result = response.json()
        if isinstance(result, list) and len(result) > 0:
            generated_text = result[0].get("generated_text", "")
            # The model echoes the prompt; keep only what follows the marker.
            if marker in generated_text:
                generated_text = generated_text.split(marker, 1)[1].strip()
            return generated_text or "Error: No response from model."
        else:
            return "Error: Unexpected response format."
    except requests.exceptions.RequestException as e:
        return f"API Error: {e}"


@cached(cache, key=partial(hashkey, "exegesis"))
def generate_exegesis(passage):
    """Return a detailed exegesis of *passage*, or a user-facing message."""
    if not passage.strip():
        return "Please enter a Bible passage."
    prompt = f"""[INST] You are a professional Bible Scholar. Provide a detailed exegesis of the following biblical verse, including: The original Greek text and transliteration with word-by-word analysis and meanings, historical and cultural context, and theological significance for: {passage} [/INST] Exegesis:"""
    return _query_model(prompt, "Exegesis:")


@cached(cache, key=partial(hashkey, "answer"))
def answer_question(question):
    """Answer a Bible/Christianity question, or return a user-facing message."""
    if not question.strip():
        return "Please enter a question."
    prompt = f"""[INST] You are a knowledgeable theologian. 
Answer the following question about the Bible and Christianity: {question} [/INST] Answer:"""
    return _query_model(prompt, "Answer:")


@cached(cache, key=partial(hashkey, "sermon"))
def create_sermon(topic):
    """Create a sermon outline for *topic*, or return a user-facing message."""
    if not topic.strip():
        return "Please enter a sermon topic."
    prompt = f"""[INST] You are a skilled pastor. Create a sermon outline with linked Bible verses for the following topic: {topic} [/INST] Sermon Outline:"""
    return _query_model(prompt, "Sermon Outline:")


# Gradio interface with one tab per feature; each textbox re-queries on change.
with gr.Blocks(theme="huggingface") as demo:
    gr.Markdown("# JR Study Bible")
    gr.Markdown("A multifunctional Bible study group app that performs exegesis, answers questions, and helps create sermon outlines.")
    with gr.Tab("Exegesis"):
        passage_input = gr.Textbox(label="Enter Bible Passage", placeholder="e.g., John 3:16")
        exegesis_output = gr.Textbox(label="Exegesis Commentary")
        passage_input.change(generate_exegesis, inputs=passage_input, outputs=exegesis_output)
    with gr.Tab("Question Answering"):
        question_input = gr.Textbox(label="Ask a Question", placeholder="e.g., What is the meaning of life?")
        question_output = gr.Textbox(label="Answer")
        question_input.change(answer_question, inputs=question_input, outputs=question_output)
    with gr.Tab("Sermon Creation"):
        topic_input = gr.Textbox(label="Enter Sermon Topic", placeholder="e.g., Faith")
        sermon_output = gr.Textbox(label="Sermon Outline")
        topic_input.change(create_sermon, inputs=topic_input, outputs=sermon_output)

if __name__ == "__main__":
    demo.launch()