# NOTE: a scraped Hugging Face Spaces status banner ("Spaces: Sleeping") stood
# here; it was page chrome from the export, not part of the program.
| import os | |
| from dotenv import load_dotenv | |
| import gradio as gr | |
| from daggr import FnNode, Graph | |
| from huggingface_hub import InferenceClient, get_token | |
# Load environment variables from a local .env file (no-op if the file is absent).
load_dotenv()

# Startup check: every agent below needs HF_TOKEN to reach the Inference API,
# so warn loudly at launch rather than failing on the first request.
token = os.environ.get("HF_TOKEN")
if token:
    # Fixed: this was an f-string with no placeholders (lint F541).
    print("✅ STARTUP: HF_TOKEN loaded successfully.")
else:
    print("❌ STARTUP: HF_TOKEN NOT FOUND in environment or .env file.")
# Helper function to query a model with an automatic fallback.
# NOTE: Switched to 7B models (Safe Free Tier) to avoid "Inference Providers" billing.
def query_model_with_fallback(prompt, primary_model_id="Qwen/Qwen2.5-7B-Instruct", fallback_model_id="mistralai/Mistral-7B-Instruct-v0.3"):
    """Send a single-turn chat prompt to the primary model, retrying on the fallback.

    Args:
        prompt: user message text for a one-shot chat completion.
        primary_model_id: model tried first.
        fallback_model_id: model tried if the primary raises.

    Returns:
        The assistant's reply text, or an "Error: ..." string (this function
        never raises — callers render the string directly in the UI).
    """
    # Resolve the token outside the request loop. In the original code a
    # failure inside get_token() left `final_token` unbound, so the fallback
    # branch died with a masked NameError instead of a useful message.
    try:
        final_token = os.environ.get("HF_TOKEN") or get_token()
    except Exception as e:
        return f"Error: could not resolve an HF token. {e}"
    if not final_token:
        return "Error: No HF_TOKEN found. Check .env file."

    messages = [{"role": "user", "content": prompt}]
    last_error = None
    # Single request path for both models — the original duplicated this
    # client/chat_completion block verbatim for primary and fallback.
    for model_id in (primary_model_id, fallback_model_id):
        try:
            client = InferenceClient(model_id, token=final_token)
            response = client.chat_completion(messages, max_tokens=800)
            return response.choices[0].message.content
        except Exception as e:
            print(f"WARNING: Model {model_id} failed: {e}")
            last_error = e
    return f"Error: All models failed. {last_error}"
| # --- 10 "CHANGE MAKING" AGENTS --- | |
| def query_agent_unsexy(ctx): | |
| p = f"""You are the 'Unsexy Question' expert. Strategy: Address unsexy topics (like parking) where low-hanging fruit exists. Make ideas palatable to all politics. Maintain strict message discipline. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_public(ctx): | |
| p = f"""You are the 'Public Character' expert. Strategy: Be present, public, and helpful. Offer small services (directions, advice, lending items) to unrelated people. Be a 'warm body' in public space. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_nucleation(ctx): | |
| p = f"""You are the 'Social Nucleation' expert. Strategy: Use 'unreasonable attentiveness' to create social 'nucleation sites'. create slightly uneven experiences or 'furrows' where people are forced to bond (like a chaotic event). | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_onion(ctx): | |
| p = f"""You are the 'Onion Merchant' expert. Strategy: Find a niche thing people really want and provide it honestly. Do an honest day's work. Be the middle ground between greedy capitalism and violent revolution. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_broker(ctx): | |
| p = f"""You are the 'Honest Broker' expert. Strategy: Enter a 'skeevy' or underserved industry (like immigration law, home repair, local news) and be the one honest, competent person there. Be a middle-class mensch. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_statistic(ctx): | |
| p = f"""You are the 'Statistic Improver' expert. Strategy: Find a dubious viral statistic (from older/bad studies) and do the work to improve its precision. Render a public service by fact-checking and refining data. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_hobbit(ctx): | |
| p = f"""You are the 'Hobbit' expert. Strategy: Practice 'hobbitian courage'. Don't be a martyr. Take small risks (deviate from a checklist, question a default) to improve a system without destroying your life. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_system(ctx): | |
| p = f"""You are the 'System Fixer' expert. Strategy: 'De-gum the gears' of bureaucracy. Find a sub-system that terrorizes people (like confusing fines or hospital bills) and make it work better. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_audience(ctx): | |
| p = f"""You are the 'Good Audience' expert. Strategy: Show up. Laugh at jokes. Support creators. Pluck diamonds from the rough and share them. Be the audience that great work requires. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
| def query_agent_acquaintance(ctx): | |
| p = f"""You are the 'Acquaintance' expert. Strategy: Don't try to be best friends. Just be a 'Good Acquaintance'. Know neighbors' names so you can notice when something is wrong (like a gas leak). Build weak ties. | |
| User's Community Context: "{ctx}" | |
| Task: Suggest ONE specific, actionable way this user can apply your strategy to improve their community.""" | |
| return query_model_with_fallback(p) | |
# --- NODES & GRAPH ---
# 1. Input Node
def pass_context(context_text):
    """Forward the user's context, substituting a default when it is blank.

    Also pops a gr.Info toast so the user sees the brainstorm has started.
    Returns the (possibly defaulted) context string unchanged otherwise.
    """
    # Whitespace-only input counts as empty; fall back to a generic context.
    if not context_text.strip():
        context_text = "I live in a typical town and want to make a difference."
    # Preview only the first 30 characters to keep the toast short.
    preview = context_text[:30]
    gr.Info(f"Brainstorming changes for: {preview}...")
    return context_text
# Input node: collects the user's community description; its hidden "ctx"
# output fans out to all ten agent nodes below.
input_node = FnNode(
    fn=pass_context,
    name="1. Context Definition",
    inputs={
        "context_text": gr.Textbox(
            label="My Community / Situation",
            value="I am a student living in a college town.",
            lines=2,
            placeholder="Describe your context..."
        )
    },
    # Hidden: the raw context is plumbing, not something to display.
    outputs={"ctx": gr.Textbox(visible=False)}
)
# 2. Parallel agent nodes.
# Each node reads 'ctx' from input_node and renders its suggestion in its own
# visible textbox, so the user can see the 10 distinct ideas side by side.
_AGENT_TABLE = [
    (query_agent_unsexy, "Unsexy Question", "1. Unsexy Question Idea"),
    (query_agent_public, "Public Character", "2. Public Character Idea"),
    (query_agent_nucleation, "Social Nucleation", "3. Social Nucleation Idea"),
    (query_agent_onion, "Onion Merchant", "4. Onion Merchant Idea"),
    (query_agent_broker, "Honest Broker", "5. Honest Broker Idea"),
    (query_agent_statistic, "Statistic Improver", "6. Statistic Improver Idea"),
    (query_agent_hobbit, "The Hobbit", "7. The Hobbit Idea"),
    (query_agent_system, "System Fixer", "8. System Fixer Idea"),
    (query_agent_audience, "Good Audience", "9. Good Audience Idea"),
    (query_agent_acquaintance, "Good Acquaintance", "10. Good Acquaintance Idea"),
]
agents = [
    FnNode(
        fn=agent_fn,
        name=node_name,
        inputs={"ctx": input_node.ctx},
        outputs={"r": gr.Textbox(label=box_label, lines=4, visible=True)},
    )
    for agent_fn, node_name, box_label in _AGENT_TABLE
]
# 3. Helper to aggregate
def aggregate_and_select(*responses):
    """Compile the ten expert responses into one Markdown document."""
    # responses arrives as a tuple of 10 strings, one per agent.
    gr.Info("All experts reported. Compiling list...")
    titles = [
        "Unsexy Question", "Public Character", "Social Nucleation", "Onion Merchant",
        "Honest Broker", "Statistic Improver", "The Hobbit", "System Fixer",
        "Good Audience", "Good Acquaintance"
    ]
    # One "### N. Title" section per response, separated by horizontal rules.
    sections = [
        f"### {idx + 1}. {titles[idx]}\n{text}\n\n---\n\n"
        for idx, text in enumerate(responses)
    ]
    return "# 10 Ideas for Change\n\n" + "".join(sections)
# 4. Aggregator node: consumes the output of ALL ten agent nodes.
# Build the input mapping r0 -> agents[0].r, ..., r9 -> agents[9].r.
aggregator_inputs = {f"r{i}": agent.r for i, agent in enumerate(agents)}
# daggr binds node inputs by matching the inputs-dict keys to the function's
# parameter names, so this wrapper must declare exactly ten parameters named
# r0..r9 — a bare *args signature would not bind.
def aggregator_wrapper(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9):
    # Forward the ten agent responses, in order, to the aggregator.
    return aggregate_and_select(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9)
output_node = FnNode(
    fn=aggregator_wrapper,
    name="3. Aggregator",
    inputs=aggregator_inputs,  # mapping r0 -> agent0.r, etc.
    outputs={"final_output": gr.Markdown(label="All 10 Community Ideas")}
)
# Assemble and launch the DAG: context input -> 10 parallel agents -> aggregator.
graph = Graph(
    name="Community Change Maker (Based on '10 Ways to Change the World')",
    nodes=[input_node] + agents + [output_node],
)
if __name__ == "__main__":
    graph.launch()