Upload 8 files

- .gitattributes +35 -35
- README.md +35 -35
- main.py +67 -69
- question_runner.py +3 -0
- requirements.txt +4 -4
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
(Old and new contents are identical; the 35 Git LFS rules were re-uploaded unchanged:)
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,35 +1,35 @@
(Old and new contents are identical; the README was re-uploaded unchanged:)
---
title: AI Classics Query Tool
emoji: 🏛️
colorFrom: indigo
colorTo: blue
sdk: gradio
sdk_version: 4.31.4
app_file: main.py
pinned: false
---

# Classical Language Query Assistant

This app uses modern AI models to answer grammatical and syntactic questions about Latin and Greek passages. It's designed for use in research and pedagogy, especially in classical language instruction.

## Features

- Supports Syntax and Morphology question sets
- Pulls questions live from shared Google Docs
- Uses Claude 3, GPT-3.5, and other fallback models via OpenRouter (see the sketch after this list)
- Automatically attributes which model answered each question
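The fallback behavior described above lives in `router_client.query_model`, which is not included in this upload. A minimal sketch of how a priority-ordered chain against OpenRouter's OpenAI-compatible chat endpoint could work — the function name matches the import in `question_runner.py`, but the request/response handling below is an assumption:

```python
# Hypothetical sketch of an OpenRouter fallback chain -- not the repo's router_client.py.
import os
import requests

OPENROUTER_URL = "https://openrouter.ai/api/v1/chat/completions"

def query_model(prompt: str, model_priority: list[str]) -> tuple[str, str]:
    """Try each model in priority order; return (answer, model_that_answered)."""
    headers = {"Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}"}
    last_error = "no models attempted"
    for model in model_priority:
        payload = {"model": model, "messages": [{"role": "user", "content": prompt}]}
        try:
            resp = requests.post(OPENROUTER_URL, json=payload, headers=headers, timeout=60)
            resp.raise_for_status()
            answer = resp.json()["choices"][0]["message"]["content"]
            return answer, model          # attribute the answer to the model that produced it
        except (requests.RequestException, KeyError) as exc:
            last_error = str(exc)         # fall through to the next model in the chain
    return f"Error: all models failed ({last_error})", "none"
```

Returning the model ID alongside the answer is what allows each answer to be attributed to the model that produced it.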

## How to Run

1. Clone or download this repo
2. Install dependencies and launch the app:

```bash
pip install -r requirements.txt
python main.py
```

The app will be served at the local Gradio URL printed in the terminal (on Hugging Face Spaces it launches automatically from `main.py`).

## Configuration

API keys, model priorities, and document URLs can be adjusted in `config.py`.
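`config.py` itself is not included in this upload. A minimal sketch of the values the application code imports (`MODEL_PRIORITY`, `SYNTAX_DOC_URL`, `MORPHOLOGY_DOC_URL`, plus an API key) might look like this — the names come from the imports in `main.py` and `question_runner.py`; every concrete value below is a placeholder assumption:

```python
# config.py -- hypothetical sketch; all values are placeholders, not the repo's real configuration.
import os

# OpenRouter credentials are read from the environment (e.g. a Space secret).
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")

# Models are tried in this order; the first one that responds wins.
MODEL_PRIORITY = [
    "anthropic/claude-3-haiku",
    "openai/gpt-3.5-turbo",
    "mistralai/mistral-7b-instruct",
]

# Publicly shared Google Docs holding the question sets.
SYNTAX_DOC_URL = "https://docs.google.com/document/d/<syntax-doc-id>/edit"
MORPHOLOGY_DOC_URL = "https://docs.google.com/document/d/<morphology-doc-id>/edit"
```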
main.py
CHANGED
@@ -1,69 +1,67 @@
Old version (the diff viewer rendered only these fragments of its 69 lines; the rest is not recoverable):
-# main.py
-import gradio as gr
-from gradio.themes import Soft
-from …
-from …
-if …
-app = build_app()
-app.launch()
New version:
# main.py
import gradio as gr
from gradio.themes import Soft

from question_runner import run_tool
from config import MODEL_PRIORITY, SYNTAX_DOC_URL, MORPHOLOGY_DOC_URL
from doc_utils import get_questions_from_doc

# --- ZERO-GPU ENTRYPOINT (must be top-level and referenced by Gradio) ---
def run_query(passage: str, doc_type: str):
    # Delegate to your existing business logic
    return run_tool(passage, doc_type)

# Estimate runtime based on # of questions
def estimate_runtime(passage, doc_type):
    if not passage or not doc_type:
        return ""
    doc_url = SYNTAX_DOC_URL if doc_type.lower() == "syntax" else MORPHOLOGY_DOC_URL
    questions = get_questions_from_doc(doc_url)
    if not questions or (isinstance(questions, list) and questions and str(questions[0]).startswith("Error")):
        return "Unable to load questions."
    est_seconds = round(len(questions) * 2.5, 1)
    return f"Estimated generation time: ~{est_seconds} seconds"

def build_app():
    with gr.Blocks(theme=Soft()) as demo:
        gr.Markdown("""
        ## **Classical Language Query Assistant**
        Submit a Latin or Greek passage and select the question type.
        Answers are generated using a rotating chain of hosted AI models via OpenRouter.
        """)

        with gr.Row():
            passage_input = gr.Textbox(label="Latin or Greek Passage", lines=4)
            question_type = gr.Radio(["Syntax", "Morphology"], label="Question Type")

        top_model = MODEL_PRIORITY[0]
        full_model_list = "\n".join(f"- `{m}`" for m in MODEL_PRIORITY)
        gr.Markdown(f"""
        **Currently prioritized model:** `{top_model}`
        **Model fallback chain (if needed):**
        {full_model_list}
        """)

        with gr.Row():
            output_text = gr.Textbox(label="Generated Answers", lines=25, interactive=False)
            output_file = gr.File(label="Download Answers (.txt)", interactive=False)

        estimated_time_box = gr.Textbox(label="Estimated Time", interactive=False)

        passage_input.change(fn=estimate_runtime, inputs=[passage_input, question_type], outputs=estimated_time_box)
        question_type.change(fn=estimate_runtime, inputs=[passage_input, question_type], outputs=estimated_time_box)

        submit_button = gr.Button("Generate Answers")

        # IMPORTANT: point Gradio at the decorated function
        submit_button.click(
            fn=run_query,
            inputs=[passage_input, question_type],
            outputs=[output_text, output_file, estimated_time_box],
        )
    return demo

if __name__ == "__main__":
    # On Spaces: no share=True, no webbrowser.open, no infinite loop
    app = build_app()
    app.launch()
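`doc_utils.py` is not part of this upload. Since `estimate_runtime` above calls `get_questions_from_doc` and checks whether its first element starts with `"Error"`, a minimal sketch consistent with that contract might look like the following — only the function name and the error convention come from `main.py`; the export-URL approach and every other implementation detail are assumptions:

```python
# doc_utils.py -- hypothetical sketch, not the repo's implementation.
# Assumes the Google Doc is shared publicly and that each non-empty line of its
# plain-text export is one question.
import re
import requests

def get_questions_from_doc(doc_url: str) -> list[str]:
    match = re.search(r"/document/d/([a-zA-Z0-9_-]+)", doc_url)
    if not match:
        return ["Error: could not parse a document ID from the URL."]
    export_url = f"https://docs.google.com/document/d/{match.group(1)}/export?format=txt"
    try:
        resp = requests.get(export_url, timeout=15)
        resp.raise_for_status()
    except requests.RequestException as exc:
        return [f"Error: could not fetch the document ({exc})."]
    return [line.strip() for line in resp.text.splitlines() if line.strip()]
```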
question_runner.py
CHANGED
@@ -4,7 +4,10 @@ import tempfile
 from router_client import query_model
 from doc_utils import get_questions_from_doc
 from config import SYNTAX_DOC_URL, MORPHOLOGY_DOC_URL
+import spaces
 
+# Apply decorator to the function that does the actual inference
+@spaces.GPU(duration=120)  # Adjust duration based on your needs (in seconds)
 def run_tool(passage, doc_type):
     if not passage.strip():
         return "Please enter a passage to analyze.", None, None
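For context, a minimal standalone sketch of the pattern this hunk applies (illustrative only; the function body and duration below are placeholder assumptions): `@spaces.GPU` marks the function whose call should trigger GPU allocation on a ZeroGPU Space, and the Gradio handler — like `run_query` in `main.py` — can be a thin top-level wrapper that delegates to it.

```python
# Standalone illustration of the @spaces.GPU pattern -- not this repo's code.
import spaces

@spaces.GPU(duration=30)  # seconds of ZeroGPU time requested per call
def do_inference(prompt: str) -> str:
    # GPU-dependent work (model loading, generation, ...) would live here.
    return f"processed: {prompt}"

def ui_entrypoint(prompt: str) -> str:
    # The Gradio event handler calls into the decorated function.
    return do_inference(prompt)
```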
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
(Old and new contents are identical; the dependency list was re-uploaded unchanged:)
gradio>=4.44.1
spaces
requests>=2.31.0
--extra-index-url https://download.pytorch.org/whl/cu113
torch