Spaces:
Sleeping
Sleeping
Sandra Sanchez committed on
Commit ·
ff5fe0d
1
Parent(s): 2b758fd
Implement tool to translate and adapt story to language and culture, work on gradio design to use rows. Rewrite titles
Browse files- README.md +65 -6
- app.py +37 -49
- mcp_server/server.py +22 -0
README.md
CHANGED
|
@@ -1,14 +1,73 @@
|
|
| 1 |
---
|
| 2 |
title: Comfortool
|
| 3 |
emoji: 🐨
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 6.0.1
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
-
|
| 11 |
-
|
|
|
|
| 12 |
---
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
title: Comfortool
|
| 3 |
emoji: 🐨
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: green
|
| 6 |
sdk: gradio
|
|
|
|
| 7 |
app_file: app.py
|
| 8 |
pinned: false
|
| 9 |
+
tags:
|
| 10 |
+
- building-mcp-track-consumer
|
| 11 |
+
- building-mcp-track-creative
|
| 12 |
---
|
| 13 |
|
| 14 |
+
# Comfortool
|
| 15 |
+
|
| 16 |
+
**Autism-friendly social stories with calming illustrations**
|
| 17 |
+
|
| 18 |
+
Comfortool is an MCP server and Gradio application designed to generate personalized social stories and calming illustrations for children with autism. The project leverages LLMs and generative AI to help children understand and navigate everyday scenarios in a supportive, accessible way.
|
| 19 |
+
|
| 20 |
+
## Features
|
| 21 |
+
|
| 22 |
+
- Select a scenario and generate a simple, autism-friendly social story.
|
| 23 |
+
- Generate a calming illustration for each story.
|
| 24 |
+
- MCP tool integration (e.g., echo tool for testing).
|
| 25 |
+
- Extensible architecture for future tools.
|
| 26 |
+
|
| 27 |
+
## Future Directions
|
| 28 |
+
|
| 29 |
+
Here are some ideas for future MCP tools and features to add more value:
|
| 30 |
+
|
| 31 |
+
- **Personalized adaptation by age and aesthetics:**
|
| 32 |
+
Adapt both the story and illustration to the child's age, interests, and preferred visual style.
|
| 33 |
+
|
| 34 |
+
- **Emotional feedback tool:**
|
| 35 |
+
Allow the child or adult to indicate how they feel about the story or situation, and adapt the content to provide personalized emotional support.
|
| 36 |
+
|
| 37 |
+
- **Visual summary tool:**
|
| 38 |
+
Generate a simplified version of the story as pictograms or comics for children with reading difficulties.
|
| 39 |
+
|
| 40 |
+
- **Q&A tool:**
|
| 41 |
+
Enable the child or adult to ask questions about the story or situation, and receive simple, supportive answers.
|
| 42 |
+
|
| 43 |
+
- **Personalized routines tool:**
|
| 44 |
+
Create stories and guides for daily routines (getting up, going to school, etc.) tailored to the child's needs.
|
| 45 |
+
|
| 46 |
+
- **Translation and cultural adaptation tool:**
|
| 47 |
+
Translate the story and adapt names, places, and customs to the child's language and culture.
|
| 48 |
+
|
| 49 |
+
- **Gamification tool:**
|
| 50 |
+
Add small challenges, rewards, or games related to the story to increase motivation and learning.
|
| 51 |
+
|
| 52 |
+
- **Progress tracking tool:**
|
| 53 |
+
Save stories, record reactions, and suggest new stories based on the child's progress.
|
| 54 |
+
|
| 55 |
+
## Track
|
| 56 |
+
|
| 57 |
+
This project participates in **Track 1: Building MCP**
|
| 58 |
+
Category: **Consumer MCP Servers**
|
| 59 |
+
Tag: `building-mcp-track-consumer`
|
| 60 |
+
|
| 61 |
+
## How to run locally
|
| 62 |
+
|
| 63 |
+
```bash
|
| 64 |
+
uv run app.py
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
## How to deploy
|
| 68 |
+
|
| 69 |
+
Push your code to Hugging Face Spaces and configure your secrets (e.g., `OPENAI_API_KEY`).
|
| 70 |
+
|
| 71 |
+
---
|
| 72 |
+
|
| 73 |
+
Check out the [Spaces configuration reference](https://huggingface.co/docs/hub/spaces-config-reference) for more details.
|
app.py
CHANGED
|
@@ -32,7 +32,7 @@ def load_scenarios():
|
|
| 32 |
return [p.stem for p in TEMPLATES_DIR.glob("*.json")]
|
| 33 |
|
| 34 |
|
| 35 |
-
def generate_story(scenario_name: str):
|
| 36 |
filepath = TEMPLATES_DIR / f"{scenario_name}.json"
|
| 37 |
if not filepath.exists():
|
| 38 |
return "Template not found."
|
|
@@ -70,30 +70,24 @@ def generate_image(scenario_name: str):
|
|
| 70 |
# Retorna la imagen como objeto PIL
|
| 71 |
image = Image.open(io.BytesIO(image_bytes))
|
| 72 |
return image
|
| 73 |
-
# # Save image to generated images directory
|
| 74 |
-
# timestamp = int(time.time())
|
| 75 |
-
# image_filename = f"{scenario_name}_{timestamp}.png"
|
| 76 |
-
# image_path = GENERATED_IMAGES_DIR / image_filename
|
| 77 |
-
# with open(image_path, "wb") as f:
|
| 78 |
-
# f.write(image_bytes)
|
| 79 |
-
# img_src = str(image_path)
|
| 80 |
-
# return img_src
|
| 81 |
|
| 82 |
|
| 83 |
def show_selected(scenario_name):
|
| 84 |
return f"You selected: {scenario_name}"
|
| 85 |
|
| 86 |
|
| 87 |
-
async def
|
| 88 |
server = await create_mcp_server()
|
| 89 |
-
# Simula contexto vacío
|
| 90 |
context = {}
|
| 91 |
-
result = await server.request_handlers["
|
| 92 |
-
return result["
|
| 93 |
|
| 94 |
-
def
|
| 95 |
-
|
| 96 |
-
|
|
|
|
|
|
|
|
|
|
| 97 |
|
| 98 |
|
| 99 |
def main():
|
|
@@ -102,50 +96,44 @@ def main():
|
|
| 102 |
print("Scenarios loaded:", scenarios)
|
| 103 |
|
| 104 |
with gr.Blocks() as demo:
|
| 105 |
-
gr.Markdown("# 🧸 Comfortool\n###
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
|
|
|
|
|
|
| 124 |
story = generate_story(scenario_name)
|
| 125 |
-
print("Story generated:", story)
|
|
|
|
|
|
|
|
|
|
| 126 |
image = generate_image(scenario_name)
|
| 127 |
-
return
|
| 128 |
-
|
| 129 |
-
def on_echo(text):
|
| 130 |
-
print("Calling echo tool with:", text)
|
| 131 |
-
return show_echo(text)
|
| 132 |
|
| 133 |
generate_btn.click(
|
| 134 |
fn=on_generate,
|
| 135 |
-
inputs=dropdown,
|
| 136 |
outputs=[story_out, image_out]
|
| 137 |
)
|
| 138 |
|
| 139 |
-
echo_btn.click(
|
| 140 |
-
fn=on_echo,
|
| 141 |
-
inputs=echo_input,
|
| 142 |
-
outputs=echo_output
|
| 143 |
-
)
|
| 144 |
-
|
| 145 |
print("Gradio app initialized.")
|
| 146 |
return demo
|
| 147 |
|
| 148 |
-
|
| 149 |
if __name__ == "__main__":
|
| 150 |
demo = main()
|
| 151 |
demo.launch(server_name="0.0.0.0", server_port=7860)
|
|
|
|
| 32 |
return [p.stem for p in TEMPLATES_DIR.glob("*.json")]
|
| 33 |
|
| 34 |
|
| 35 |
+
def generate_story(scenario_name: str, language="en", culture="default"):
|
| 36 |
filepath = TEMPLATES_DIR / f"{scenario_name}.json"
|
| 37 |
if not filepath.exists():
|
| 38 |
return "Template not found."
|
|
|
|
| 70 |
# Retorna la imagen como objeto PIL
|
| 71 |
image = Image.open(io.BytesIO(image_bytes))
|
| 72 |
return image
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
|
| 74 |
|
| 75 |
def show_selected(scenario_name):
    """Echo the chosen scenario back to the UI as a confirmation message."""
    message = f"You selected: {scenario_name}"
    return message
|
| 77 |
|
| 78 |
|
| 79 |
+
async def call_translate_and_adapt_tool(story, language, culture):
    """Invoke the MCP 'translate_and_adapt' tool and return the adapted story text."""
    server = await create_mcp_server()
    # Empty request context — the handler does not read it.
    ctx = {}
    handler = server.request_handlers["translate_and_adapt"]
    result = await handler(ctx, story, language, culture)
    return result["adapted_story"]
|
| 84 |
|
| 85 |
+
def show_translation(story, language, culture):
    """Synchronous wrapper: run the async translate-and-adapt tool to completion."""
    coro = call_translate_and_adapt_tool(story, language, culture)
    return asyncio.run(coro)
|
| 87 |
+
|
| 88 |
+
def format_story(story):
    """Wrap the story in an HTML div styled for large, clear reading (no markdown asterisks)."""
    css = "font-size:1.5em; line-height:1.6; font-weight:400"
    return f"<div style='{css}'>{story}</div>"
|
| 91 |
|
| 92 |
|
| 93 |
def main():
|
|
|
|
| 96 |
print("Scenarios loaded:", scenarios)
|
| 97 |
|
| 98 |
with gr.Blocks() as demo:
|
| 99 |
+
gr.Markdown("# 🧸 Comfortool\n### Social stories with comforting illustrations to support autistic kids with daily challenges")
|
| 100 |
+
|
| 101 |
+
# Inputs arriba: escenario, idioma y cultura
|
| 102 |
+
with gr.Row():
|
| 103 |
+
dropdown = gr.Dropdown(
|
| 104 |
+
choices=scenarios,
|
| 105 |
+
label="Select a challenging situation"
|
| 106 |
+
)
|
| 107 |
+
language_input = gr.Textbox(label="Translate to: (e.g., 'es', 'fr')", value="en")
|
| 108 |
+
culture_input = gr.Textbox(label="My community (e.g., 'Latino', 'Roma', 'Muslim')", value="default")
|
| 109 |
+
|
| 110 |
+
generate_btn = gr.Button("Help me cope!")
|
| 111 |
+
|
| 112 |
+
# Resultados en dos columnas
|
| 113 |
+
with gr.Row():
|
| 114 |
+
story_out = gr.HTML(label="Story")
|
| 115 |
+
image_out = gr.Image(label="Illustration")
|
| 116 |
+
|
| 117 |
+
def on_generate(scenario_name, language, culture):
    """Click handler: build the English story, adapt it to the requested
    language/culture via the MCP tool, render the illustration, and return
    (formatted HTML story, PIL image) for the two output components."""
    print("Generating story for:", scenario_name, language, culture)
    # Step 1: generate the base story in English.
    base_story = generate_story(scenario_name)
    print("Story generated (EN):", base_story)
    # Step 2: translate and culturally adapt it with the MCP tool.
    localized_story = show_translation(base_story, language, culture)
    print("Story adapted:", localized_story)
    # Step 3: produce the calming illustration.
    illustration = generate_image(scenario_name)
    return format_story(localized_story), illustration
|
|
|
|
|
|
|
|
|
|
|
|
|
| 127 |
|
| 128 |
generate_btn.click(
|
| 129 |
fn=on_generate,
|
| 130 |
+
inputs=[dropdown, language_input, culture_input],
|
| 131 |
outputs=[story_out, image_out]
|
| 132 |
)
|
| 133 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 134 |
print("Gradio app initialized.")
|
| 135 |
return demo
|
| 136 |
|
|
|
|
| 137 |
if __name__ == "__main__":
|
| 138 |
demo = main()
|
| 139 |
demo.launch(server_name="0.0.0.0", server_port=7860)
|
mcp_server/server.py
CHANGED
|
@@ -1,6 +1,8 @@
|
|
| 1 |
# mcp_server/server.py
|
| 2 |
from mcp.server import Server
|
| 3 |
from mcp.server.stdio import stdio_server
|
|
|
|
|
|
|
| 4 |
|
| 5 |
async def create_mcp_server():
|
| 6 |
server = Server(
|
|
@@ -20,6 +22,26 @@ async def create_mcp_server():
|
|
| 20 |
|
| 21 |
server.request_handlers["echo"] = echo
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
return server
|
| 24 |
|
| 25 |
|
|
|
|
| 1 |
# mcp_server/server.py
|
| 2 |
from mcp.server import Server
|
| 3 |
from mcp.server.stdio import stdio_server
|
| 4 |
+
import os
|
| 5 |
+
from openai import OpenAI
|
| 6 |
|
| 7 |
async def create_mcp_server():
|
| 8 |
server = Server(
|
|
|
|
| 22 |
|
| 23 |
server.request_handlers["echo"] = echo
|
| 24 |
|
| 25 |
+
# Tool: Translate and culturally adapt a story
|
| 26 |
+
async def translate_and_adapt(context, story, language="en", culture="default"):
    """MCP tool: translate *story* into *language* and adapt names, places,
    and customs to *culture*.

    Args:
        context: MCP request context (unused by this handler).
        story: The social story text to adapt.
        language: Target language code (e.g. "es", "fr"). Defaults to "en".
        culture: Target cultural community. Defaults to "default".

    Returns:
        dict with key "adapted_story" containing the adapted text.

    Raises:
        RuntimeError: if OPENAI_API_KEY is not set in the environment.
    """
    # Prompt can be tuned further as needed.
    prompt = (
        f"Translate the following social story to {language} and adapt names, places, and customs to {culture} culture. "
        "Return only the adapted story text.\n\n"
        f"Story:\n{story}"
    )
    openai_api_key = os.environ.get("OPENAI_API_KEY")
    if not openai_api_key:
        # Fail fast with a clear message instead of an opaque client error.
        raise RuntimeError("OPENAI_API_KEY environment variable is not set")
    client = OpenAI(api_key=openai_api_key)
    # NOTE(review): this blocking SDK call runs inside an async handler and
    # will stall the event loop for the duration of the request — consider
    # the async OpenAI client or asyncio.to_thread.
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}]
    )
    # content may be None (e.g. refusals); guard before stripping.
    adapted_story = (response.choices[0].message.content or "").strip()
    return {"adapted_story": adapted_story}
|
| 41 |
+
|
| 42 |
+
server.request_handlers["translate_and_adapt"] = translate_and_adapt
|
| 43 |
+
|
| 44 |
+
|
| 45 |
return server
|
| 46 |
|
| 47 |
|