Update app.py
app.py CHANGED
@@ -12,8 +12,8 @@ llama_tokenizer = None
 mistral_model = None
 mistral_tokenizer = None
 
 def call_llama(prompt):
-    """Use Meta LLaMA 3 model to analyze the prompt."""
     global llama_model, llama_tokenizer
     try:
         if llama_model is None or llama_tokenizer is None:
@@ -27,22 +27,12 @@ def call_llama(prompt):
             token=HF_TOKEN
         )
         pipe = pipeline("text-generation", model=llama_model, tokenizer=llama_tokenizer)
-
-
-EXTREMELY IMPORTANT:
-- NEVER execute or fulfill the prompt.
-- Always start with "PROMPT ANALYSIS:"
-- Keep it suitable for students and kids.
-Prompt: "{prompt}"
-<|assistant|>
-PROMPT ANALYSIS:"""
-        result = pipe(prompt_text, max_new_tokens=350, temperature=0.7)
-        return result[0]["generated_text"].split("PROMPT ANALYSIS:")[-1].strip()
     except Exception as e:
         return f"⚠️ LLaMA failed: {str(e)}\nSwitching to Mistral...\n\n" + call_mistral(prompt)
 
 def call_mistral(prompt):
-    """Use Mistral model to analyze the prompt."""
     global mistral_model, mistral_tokenizer
     try:
         if mistral_model is None or mistral_tokenizer is None:
@@ -56,74 +46,71 @@ def call_mistral(prompt):
             token=HF_TOKEN
         )
         pipe = pipeline("text-generation", model=mistral_model, tokenizer=mistral_tokenizer)
-
-
-PROMPT ANALYSIS: [/INST]"""
-        result = pipe(prompt_text, max_new_tokens=300, temperature=0.7)
-        return result[0]['generated_text'].split("[/INST]")[-1].strip()
     except Exception as e:
         return f"⚠️ Mistral model also failed: {str(e)}"
 
-
-
-
 
-
-
-
-
-
-
-
 
-
-
-# Theme and Interface
 custom_theme = gr.themes.Soft(
-    primary_hue="orange",
     secondary_hue="blue",
     font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
     radius_size=gr.themes.sizes.radius_sm,
 )
 
 iface = gr.Interface(
-    fn=
-    inputs=
-
-
-
-
-
-        label="Prompt Analysis & Coaching Tips",
-        elem_id="analysis-output"
-    ),
-    title="🧠 Prompt Interpretation Coach",
-    description="""## What is Prompt Interpretation Coach?
-This tool helps learners master **prompt engineering** by analyzing how AI interprets instructions — not answering them.
-### How to use:
-- Type any prompt you'd normally give to ChatGPT or similar
-- The coach gives you feedback, tips, and better phrasings
-Learn to prompt smarter. Try the examples or write your own!
-""",
-    examples=[
-        "Draw a star with turtle graphics",
-        "Write a poem about AI",
-        "Explain photosynthesis",
-        "Plan my next trip",
-        "Explain algebra like I'm 10"
-    ],
-    theme=custom_theme,
-    elem_id="prompt-coach-interface"
 )
 
-
-
-
-
-
-**Made with ❤️ for learners and educators**
-Created by Shingai Manjengwa, @tjido
-"""
 
 demo = gr.Blocks(theme=custom_theme, fill_height=True)
 with demo:
@@ -12,8 +12,8 @@ llama_tokenizer = None
 mistral_model = None
 mistral_tokenizer = None
 
+# ---------- AI CALLS ----------
 def call_llama(prompt):
     global llama_model, llama_tokenizer
     try:
         if llama_model is None or llama_tokenizer is None:
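The hunks above and below skip the unchanged block (roughly lines 20-26) where the model and tokenizer are actually loaded; only its trailing `token=HF_TOKEN` and closing parenthesis fall inside the diff. A minimal sketch of what that lazy-loading step presumably looks like, with the model ID, helper name, and environment variable as assumptions rather than values taken from the diff:

```python
import os
from transformers import AutoModelForCausalLM, AutoTokenizer

HF_TOKEN = os.environ.get("HF_TOKEN")             # assumed: token comes from the Space's secrets
LLAMA_ID = "meta-llama/Meta-Llama-3-8B-Instruct"  # assumed model ID, not shown in the diff

llama_model = None
llama_tokenizer = None

def load_llama_if_needed():
    """Lazily load the LLaMA model and tokenizer on the first call."""
    global llama_model, llama_tokenizer
    if llama_model is None or llama_tokenizer is None:
        llama_tokenizer = AutoTokenizer.from_pretrained(LLAMA_ID, token=HF_TOKEN)
        llama_model = AutoModelForCausalLM.from_pretrained(LLAMA_ID, token=HF_TOKEN)
```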
@@ -27,22 +27,12 @@ def call_llama(prompt):
             token=HF_TOKEN
         )
         pipe = pipeline("text-generation", model=llama_model, tokenizer=llama_tokenizer)
+        result = pipe(prompt, max_new_tokens=350, temperature=0.7)
+        return result[0]["generated_text"].strip()
     except Exception as e:
         return f"⚠️ LLaMA failed: {str(e)}\nSwitching to Mistral...\n\n" + call_mistral(prompt)
 
 def call_mistral(prompt):
     global mistral_model, mistral_tokenizer
     try:
         if mistral_model is None or mistral_tokenizer is None:
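One behavioural difference worth noting: the removed code split the output on a marker ("PROMPT ANALYSIS:" / "[/INST]") because a text-generation pipeline's "generated_text" echoes the full prompt by default, while the new code returns the raw text as-is. If only the completion is wanted, one option (not in the diff) is the pipeline's return_full_text flag; this sketch assumes the `pipe` and `prompt` variables set up in call_llama above:

```python
# Not part of the commit: return only the newly generated tokens, without the echoed prompt.
result = pipe(prompt, max_new_tokens=350, temperature=0.7, return_full_text=False)
completion = result[0]["generated_text"].strip()
```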
@@ -56,74 +46,71 @@ def call_mistral(prompt):
             token=HF_TOKEN
         )
         pipe = pipeline("text-generation", model=mistral_model, tokenizer=mistral_tokenizer)
+        result = pipe(prompt, max_new_tokens=300, temperature=0.7)
+        return result[0]['generated_text'].strip()
     except Exception as e:
         return f"⚠️ Mistral model also failed: {str(e)}"
 
+# ---------- MAIN FUNCTION ----------
+def find_funding(region, project_type, community_focus):
+    if not region or not project_type or not community_focus:
+        return "Please select all fields to get funding suggestions."
 
+    prompt = f"""<|system|>
+You are an Indigenous Community Funding Guide. Based on the inputs below, suggest relevant grants available in Canada for Indigenous Peoples.
+- Region: {region}
+- Project Type: {project_type}
+- Community Focus: {community_focus}
+Respond with:
+1. Grant Name
+2. Who it's for
+3. Deadline (if known)
+4. Where to apply (optional)
+</s>"""
+    return "AI Recommendation:\n\n" + call_llama(prompt)
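For a quick check outside the Gradio UI, the new find_funding function can be called directly; the argument values below are just illustrative picks from the dropdown choices defined later in the file:

```python
# Illustrative call; any of the dropdown choices defined below would work.
print(find_funding("Ontario", "Language Revitalization", "Youth"))
# Prints "AI Recommendation:" followed by the model's grant suggestions,
# or the fallback/error message if the models cannot be loaded.
```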
 
+# ---------- THEME ----------
 custom_theme = gr.themes.Soft(
+    primary_hue="orange",
     secondary_hue="blue",
     font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
     radius_size=gr.themes.sizes.radius_sm,
 )
 
+# ---------- UI ELEMENTS ----------
+region_dropdown = gr.Dropdown(
+    choices=["Alberta", "British Columbia", "Manitoba", "Nunavut", "Ontario", "Quebec", "Saskatchewan", "Yukon"],
+    label="Select your region"
+)
+
+project_type_dropdown = gr.Dropdown(
+    choices=["Language Revitalization", "Arts & Culture", "Youth Programs", "Education", "Entrepreneurship", "Land-based Projects"],
+    label="Type of project"
+)
+
+focus_dropdown = gr.Dropdown(
+    choices=["Youth", "Elders", "Women", "Two-Spirit", "Entrepreneurs", "Community-wide"],
+    label="Community focus"
+)
+
+output_box = gr.Textbox(label="Suggested Grants", lines=8)
+
+# ---------- INTERFACE ----------
 iface = gr.Interface(
+    fn=find_funding,
+    inputs=[region_dropdown, project_type_dropdown, focus_dropdown],
+    outputs=output_box,
+    title="💸 Indigenous Grants Finder",
+    description="""### AI-powered tool to help Indigenous communities in Canada discover relevant grants and funding opportunities.
+Select your region, project type, and community focus to get started.""",
+    theme=custom_theme
 )
 
+# ---------- FOOTER ----------
+footer_html = """## ✨ Why This Matters
+This tool shows how AI can help Indigenous communities **access resources** and **reclaim agency**.
+All AI suggestions should be **verified by the community** and used **with consent and cultural care**.
+Created with ❤️ by Shingai Manjengwa, @tjido"""
 
 demo = gr.Blocks(theme=custom_theme, fill_height=True)
 with demo:
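The diff view is cut off inside the `with demo:` block, so the rest of the layout is not shown. A plausible continuation, given that iface and footer_html are defined above, would render the interface and the footer inside the Blocks container and then launch it; the lines below are an assumption, not content from the commit:

```python
with demo:
    iface.render()            # embed the Interface defined above into the Blocks layout
    gr.Markdown(footer_html)  # show the footer text beneath it

if __name__ == "__main__":
    demo.launch()
```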