File size: 3,024 Bytes
7ff406f
a3d1c69
76e7c7e
a3d1c69
 
e39741f
a3d1c69
e39741f
 
a3d1c69
 
 
 
227533b
e39741f
66af289
227533b
 
 
 
 
e39741f
227533b
 
 
 
e39741f
a3d1c69
 
 
 
 
 
 
 
e39741f
227533b
a3d1c69
e39741f
a3d1c69
 
e39741f
a3d1c69
 
 
e373130
 
 
227533b
 
e373130
 
 
 
 
 
 
227533b
e373130
a3d1c69
227533b
e39741f
 
a3d1c69
e39741f
a3d1c69
 
e39741f
a3d1c69
7473b97
 
5e65f7a
e39741f
a3d1c69
e39741f
 
 
 
 
 
227533b
e373130
 
 
a3d1c69
e39741f
227533b
 
e39741f
227533b
e39741f
 
227533b
e39741f
 
 
 
 
a3d1c69
39774cc
227533b
e39741f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import os
import gradio as gr
import json
from huggingface_hub import InferenceClient

# Load the Hugging Face API token from the environment.
# NOTE(review): os.getenv returns None when unset — InferenceClient will then
# make unauthenticated calls, which may be rate-limited or rejected.
HF_TOKEN = os.getenv("HF_API_TOKEN")

# Inference client bound to the DeepSeek-V3 chat model; shared by all requests.
client = InferenceClient(
    model="deepseek-ai/DeepSeek-V3",
    token=HF_TOKEN
)

# Load the Quranic word entries (each with at least 'text' and 'english' keys,
# as used below) and the language list (each with 'name' and 'code' keys).
with open("words.json", encoding="utf-8") as f:
    word_list = json.load(f)

with open("language_list.json", encoding="utf-8") as f:
    language_list = json.load(f)

# Dropdown labels: "<arabic text> (<english gloss>)" and "<language name> (<code>)".
word_options = [f"{word['text']} ({word['english']})" for word in word_list]
language_options = [f"{lang['name']} ({lang['code']})" for lang in language_list]


def create_messages(word_entry, language_name):
    """Build the two-message chat payload for one word/language pair.

    Args:
        word_entry: dict with 'text' (Arabic word) and 'english' (gloss) keys.
        language_name: target language for the explanation.

    Returns:
        A list of [system, user] message dicts in OpenAI chat format.
    """
    system_message = {
        "role": "system",
        "content": "You are a helpful and friendly assistant that explains Quranic words in a simple way.",
    }

    user_prompt = f"""
Explain the Quranic word "{word_entry['text']}" (which means "{word_entry['english']}") in {language_name}.

Please include:
1. Translation in {language_name}
2. Root word and derivatives
3. Occurrences in the Qur'an (Surah & Verse)
4. Explanation of each occurrence using easy-to-understand {language_name}
"""
    user_message = {"role": "user", "content": user_prompt}

    return [system_message, user_message]
    
# Module-level response cache mapping (word_label, lang_label) -> final
# streamed text, so repeated requests for the same pair skip the API call.
response_cache = {}

def process(word_label, lang_label):
    """Stream an explanation for the selected word in the selected language.

    Generator used as a Gradio event handler: yields progressively longer
    strings so the textbox updates live. The final full text is cached in
    ``response_cache`` per (word_label, lang_label) pair.

    Args:
        word_label: dropdown label, "<arabic text> (<english gloss>)".
        lang_label: dropdown label, "<language name> (<code>)".
    """
    cache_key = (word_label, lang_label)

    # Serve a previously completed response without hitting the API.
    if cache_key in response_cache:
        yield response_cache[cache_key]
        return

    # Match the entry whose Arabic text appears in the dropdown label.
    selected_word = next((w for w in word_list if w['text'] in word_label), None)

    # BUGFIX: the label is "<name> (<code>)", so the human-readable language
    # name is the part BEFORE the parenthesis. The previous code used
    # split("(")[1], which produced e.g. "en)" instead of "English".
    language_name = lang_label.split("(")[0].strip() if "(" in lang_label else lang_label.strip()

    if not selected_word:
        yield "❌ Word not found."
        return

    messages = create_messages(selected_word, language_name)

    try:
        stream = client.chat.completions.create(
            messages=messages,
            temperature=0.7,
            top_p=0.9,
            max_tokens=1024,
            stream=True
        )

        output = ""
        for chunk in stream:
            # Some chunks (e.g. role-only or empty deltas) carry no content.
            if chunk.choices and chunk.choices[0].delta.content:
                output += chunk.choices[0].delta.content
                yield output

        # Cache only a fully streamed response (errors never reach here).
        response_cache[cache_key] = output

    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        yield f"❌ Error: {e}"


# Build the Gradio UI: two dropdowns, a trigger button, and a text output
# that receives the streamed explanation.
with gr.Blocks() as demo:
    gr.Markdown("## πŸ“– Quran Word Explorer (with DeepSeek-V3) β€” Streaming Enabled")

    with gr.Row():
        word_input = gr.Dropdown(choices=word_options, label="πŸ”€ Select Quranic Word")
        lang_input = gr.Dropdown(choices=language_options, label="🌐 Select Language")

    run_btn = gr.Button("πŸ” Get Explanation")
    output = gr.Textbox(label="πŸ“˜ Output", lines=20)

    # `process` is a generator, so Gradio streams each yielded string into
    # the textbox as it arrives.
    run_btn.click(fn=process, inputs=[word_input, lang_input], outputs=output)

# NOTE(review): launches at import time; fine for a hosted Space, but consider
# guarding with `if __name__ == "__main__":` if this module is ever imported.
demo.launch()