Jayashree Sridhar committed on
Commit
c537b15
·
1 Parent(s): 068c1e8

Modified tools code to import Crew AI BaseTool

Browse files
agents/tools/__init__.py CHANGED
@@ -3,129 +3,141 @@ Agent Tools module for Personal Coach CrewAI Application
3
  Contains specialized tools for each agent's functionality.
4
  Supports modular class-based tool containers.
5
  """
6
- from typing import TYPE_CHECKING, Dict, Any
7
 
8
- # Version info
9
- __version__ = "1.0.0"
10
 
11
- # Lazy imports for type checking and IDE intellisense
12
- if TYPE_CHECKING:
13
- from .voice_tools import VoiceTools
14
- from .llm_tools import LLMTools
15
- from .knowledge_tools import KnowledgeTools
16
- from .validation_tools import ValidationTools
17
 
18
- # Public API: expose only main class-based containers & API
19
- __all__ = [
20
- # Tool containers
21
- "VoiceTools",
22
- "LLMTools",
23
- "KnowledgeTools",
24
- "ValidationTools",
25
- # Factory and utility functions
26
- "create_tool_suite",
27
- "get_tool_by_name",
28
- "register_tools_with_crew",
29
- # Constants
30
- "SUPPORTED_LANGUAGES",
31
- "TOOL_CATEGORIES"
32
- ]
 
 
 
 
 
 
 
33
 
34
- # Constants
35
- SUPPORTED_LANGUAGES = [
36
- "en", "es", "fr", "de", "it", "pt", "ru", "zh",
37
- "ja", "ko", "hi", "ar", "bn", "pa", "te", "mr",
38
- "ta", "ur", "gu", "kn", "ml", "or"
39
- ]
40
 
41
- TOOL_CATEGORIES = {
42
- "VOICE": ["speech_to_text", "text_to_speech", "language_detection"],
43
- "LLM": ["generate_response", "generate_questions", "summarize", "paraphrase"],
44
- "KNOWLEDGE": ["search_knowledge", "extract_wisdom", "find_practices"],
45
- "VALIDATION": ["validate_response", "check_safety", "analyze_tone"]
46
- }
47
 
48
- # Factory: unified tool suite
49
- def create_tool_suite(config) -> Dict[str, Any]:
50
- """
51
- Create a complete suite of tools for all agents.
52
 
53
- Args:
54
- config: Configuration object
 
 
 
 
 
 
 
 
 
 
 
55
 
56
- Returns:
57
- dict: Dictionary of initialized tool containers
58
- """
59
- from .voice_tools import VoiceTools
60
- from .llm_tools import LLMTools
61
- from .knowledge_tools import KnowledgeTools
62
- from .validation_tools import ValidationTools
63
- return {
64
- "voice": VoiceTools(config),
65
- "llm": LLMTools(config),
66
- "knowledge": KnowledgeTools(config),
67
- "validation": ValidationTools(config)
68
- }
69
 
70
- def get_tool_by_name(tool_name: str, config):
71
- """
72
- Get a specific tool container by name.
73
 
74
- Args:
75
- tool_name: Name of the tool container ('voice', 'llm', 'knowledge', 'validation')
76
- config: Configuration object
 
 
 
 
 
 
 
 
 
 
77
 
78
- Returns:
79
- Tool container class instance or None
80
- """
81
- tool_mapping = {
82
- "voice": lambda c: __import__("agents.tools.voice_tools", fromlist=["VoiceTools"]).VoiceTools(c),
83
- "llm": lambda c: __import__("agents.tools.llm_tools", fromlist=["LLMTools"]).LLMTools(c),
84
- "knowledge": lambda c: __import__("agents.tools.knowledge_tools", fromlist=["KnowledgeTools"]).KnowledgeTools(c),
85
- "validation": lambda c: __import__("agents.tools.validation_tools", fromlist=["ValidationTools"]).ValidationTools(c),
86
- }
87
- tool_factory = tool_mapping.get(tool_name.lower())
88
- if tool_factory:
89
- return tool_factory(config)
90
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
- # Tool registry for CrewAI (for UI/metadata/documentation)
93
- def register_tools_with_crew():
94
- """
95
- Register all tools with CrewAI framework.
96
- Returns a list of tool configurations for CrewAI.
97
- """
98
- return [
99
- {
100
- "name": "speech_to_text",
101
- "description": "Convert speech in any language to text",
102
- "category": "VOICE"
103
- },
104
- {
105
- "name": "text_to_speech",
106
- "description": "Convert text to natural speech in multiple languages",
107
- "category": "VOICE"
108
- },
109
- {
110
- "name": "search_knowledge",
111
- "description": "Search through spiritual and self-help texts",
112
- "category": "KNOWLEDGE"
113
- },
114
- {
115
- "name": "generate_response",
116
- "description": "Generate empathetic and helpful responses",
117
- "category": "LLM"
118
- },
119
- {
120
- "name": "validate_response",
121
- "description": "Ensure response safety and appropriateness",
122
- "category": "VALIDATION"
123
- }
124
- ]
125
 
126
- # Initialization check for debug mode
127
- import os
128
- if os.getenv("DEBUG_MODE", "false").lower() == "true":
129
- print(f"Agent Tools module v{__version__} initialized")
130
- print(f"Supported languages: {len(SUPPORTED_LANGUAGES)}")
131
- print(f"Tool categories: {list(TOOL_CATEGORIES.keys())}")
 
 
 
 
 
 
3
  Contains specialized tools for each agent's functionality.
4
  Supports modular class-based tool containers.
5
  """
6
+ # from typing import TYPE_CHECKING, Dict, Any
7
 
8
+ # # Version info
9
+ # __version__ = "1.0.0"
10
 
11
+ # # Lazy imports for type checking and IDE intellisense
12
+ # if TYPE_CHECKING:
13
+ # from .voice_tools import VoiceTools
14
+ # from .llm_tools import LLMTools
15
+ # from .knowledge_tools import KnowledgeTools
16
+ # from .validation_tools import ValidationTools
17
 
18
+ # # Public API: expose only main class-based containers & API
19
+ # __all__ = [
20
+ # # Tool containers
21
+ # "VoiceTools",
22
+ # "LLMTools",
23
+ # "KnowledgeTools",
24
+ # "ValidationTools",
25
+ # # Factory and utility functions
26
+ # "create_tool_suite",
27
+ # "get_tool_by_name",
28
+ # "register_tools_with_crew",
29
+ # # Constants
30
+ # "SUPPORTED_LANGUAGES",
31
+ # "TOOL_CATEGORIES"
32
+ # ]
33
+
34
+ # # Constants
35
+ # SUPPORTED_LANGUAGES = [
36
+ # "en", "es", "fr", "de", "it", "pt", "ru", "zh",
37
+ # "ja", "ko", "hi", "ar", "bn", "pa", "te", "mr",
38
+ # "ta", "ur", "gu", "kn", "ml", "or"
39
+ # ]
40
 
41
+ # TOOL_CATEGORIES = {
42
+ # "VOICE": ["speech_to_text", "text_to_speech", "language_detection"],
43
+ # "LLM": ["generate_response", "generate_questions", "summarize", "paraphrase"],
44
+ # "KNOWLEDGE": ["search_knowledge", "extract_wisdom", "find_practices"],
45
+ # "VALIDATION": ["validate_response", "check_safety", "analyze_tone"]
46
+ # }
47
 
48
+ # # Factory: unified tool suite
49
+ # def create_tool_suite(config) -> Dict[str, Any]:
50
+ # """
51
+ # Create a complete suite of tools for all agents.
 
 
52
 
53
+ # Args:
54
+ # config: Configuration object
 
 
55
 
56
+ # Returns:
57
+ # dict: Dictionary of initialized tool containers
58
+ # """
59
+ # from .voice_tools import VoiceTools
60
+ # from .llm_tools import LLMTools
61
+ # from .knowledge_tools import KnowledgeTools
62
+ # from .validation_tools import ValidationTools
63
+ # return {
64
+ # "voice": VoiceTools(config),
65
+ # "llm": LLMTools(config),
66
+ # "knowledge": KnowledgeTools(config),
67
+ # "validation": ValidationTools(config)
68
+ # }
69
 
70
+ # def get_tool_by_name(tool_name: str, config):
71
+ # """
72
+ # Get a specific tool container by name.
 
 
 
 
 
 
 
 
 
 
73
 
74
+ # Args:
75
+ # tool_name: Name of the tool container ('voice', 'llm', 'knowledge', 'validation')
76
+ # config: Configuration object
77
 
78
+ # Returns:
79
+ # Tool container class instance or None
80
+ # """
81
+ # tool_mapping = {
82
+ # "voice": lambda c: __import__("agents.tools.voice_tools", fromlist=["VoiceTools"]).VoiceTools(c),
83
+ # "llm": lambda c: __import__("agents.tools.llm_tools", fromlist=["LLMTools"]).LLMTools(c),
84
+ # "knowledge": lambda c: __import__("agents.tools.knowledge_tools", fromlist=["KnowledgeTools"]).KnowledgeTools(c),
85
+ # "validation": lambda c: __import__("agents.tools.validation_tools", fromlist=["ValidationTools"]).ValidationTools(c),
86
+ # }
87
+ # tool_factory = tool_mapping.get(tool_name.lower())
88
+ # if tool_factory:
89
+ # return tool_factory(config)
90
+ # return None
91
 
92
+ # # Tool registry for CrewAI (for UI/metadata/documentation)
93
+ # def register_tools_with_crew():
94
+ # """
95
+ # Register all tools with CrewAI framework.
96
+ # Returns a list of tool configurations for CrewAI.
97
+ # """
98
+ # return [
99
+ # {
100
+ # "name": "speech_to_text",
101
+ # "description": "Convert speech in any language to text",
102
+ # "category": "VOICE"
103
+ # },
104
+ # {
105
+ # "name": "text_to_speech",
106
+ # "description": "Convert text to natural speech in multiple languages",
107
+ # "category": "VOICE"
108
+ # },
109
+ # {
110
+ # "name": "search_knowledge",
111
+ # "description": "Search through spiritual and self-help texts",
112
+ # "category": "KNOWLEDGE"
113
+ # },
114
+ # {
115
+ # "name": "generate_response",
116
+ # "description": "Generate empathetic and helpful responses",
117
+ # "category": "LLM"
118
+ # },
119
+ # {
120
+ # "name": "validate_response",
121
+ # "description": "Ensure response safety and appropriateness",
122
+ # "category": "VALIDATION"
123
+ # }
124
+ # ]
125
 
126
+ # # Initialization check for debug mode
127
+ # import os
128
+ # if os.getenv("DEBUG_MODE", "false").lower() == "true":
129
+ # print(f"Agent Tools module v{__version__} initialized")
130
+ # print(f"Supported languages: {len(SUPPORTED_LANGUAGES)}")
131
+ # print(f"Tool categories: {list(TOOL_CATEGORIES.keys())}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
 
133
+ from .voice_tools import VoiceTools
134
+ from .llm_tools import LLMTools
135
+ from .knowledge_tools import KnowledgeTools
136
+ from .validation_tools import ValidationTools
137
+
138
+ __all__ = [
139
+ "VoiceTools",
140
+ "LLMTools",
141
+ "KnowledgeTools",
142
+ "ValidationTools"
143
+ ]
agents/tools/base_tool.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
class BaseTool:
    """Minimal callable tool base class.

    Subclasses hold an optional shared ``config`` object and implement
    ``__call__`` with their tool-specific signature so each tool instance
    can be invoked like a function.
    """

    def __init__(self, config=None):
        # Configuration object shared across tools; may be None.
        self.config = config

    def __call__(self, *args, **kwargs):
        """Subclasses must override this; the base class is not invocable."""
        raise NotImplementedError("Tool must implement __call__ method.")
agents/tools/knowledge_tools.py CHANGED
@@ -1,34 +1,135 @@
1
  """
2
  Knowledge Base Tools for RAG (modular class version)
3
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  from utils.knowledge_base import KnowledgeBase
5
 
6
- class KnowledgeTools:
7
  def __init__(self, config=None):
8
- self.config = config
9
- self.kb = KnowledgeBase(self.config)
10
-
11
- def search_knowledge(self, query: str, k: int = 5):
12
- """Search spiritual and self-help texts for relevant wisdom."""
13
  if not self.kb.is_initialized():
14
- return [{
15
- "text": "Wisdom comes from understanding ourselves.",
16
- "source": "General Wisdom",
17
- "score": 1.0
18
- }]
19
  return self.kb.search(query, k=k)
20
 
21
- def extract_wisdom(self, search_results: list, user_context: dict):
22
- """Extract most relevant wisdom for user's situation."""
 
 
 
23
  emotion = user_context.get("primary_emotion", "neutral")
24
  concerns = user_context.get("concerns", [])
25
  scored_results = []
26
  for result in search_results:
27
  score = result["score"]
28
- # Boost score if emotion matches
29
  if emotion.lower() in result["text"].lower():
30
  score *= 1.5
31
- # Boost score if concerns match
32
  for concern in concerns:
33
  if concern.lower() in result["text"].lower():
34
  score *= 1.3
@@ -37,8 +138,11 @@ class KnowledgeTools:
37
  scored_results.sort(key=lambda x: x["relevance_score"], reverse=True)
38
  return scored_results[:3]
39
 
40
- def suggest_practices(self, emotional_state: str, cultural_context: str = None):
41
- """Suggest appropriate meditation or practice."""
 
 
 
42
  practices = {
43
  "anxiety": {
44
  "name": "Box Breathing Technique",
@@ -87,8 +191,10 @@ class KnowledgeTools:
87
  "duration": "15-20 minutes",
88
  "origin": "Dr. Edmund Jacobson, 1920s"
89
  }
 
90
  }
91
- default = {
 
92
  "name": "Mindful Breathing",
93
  "description": "Foundation of all meditation practices",
94
  "steps": [
@@ -101,5 +207,10 @@ class KnowledgeTools:
101
  "benefits": "Calms mind, improves focus",
102
  "duration": "5-15 minutes",
103
  "origin": "Universal practice"
104
- }
105
- return practices.get(emotional_state.lower(), default)
 
 
 
 
 
 
1
  """
2
  Knowledge Base Tools for RAG (modular class version)
3
  """
4
+ # from utils.knowledge_base import KnowledgeBase
5
+
6
+ # class KnowledgeTools:
7
+ # def __init__(self, config=None):
8
+ # self.config = config
9
+ # self.kb = KnowledgeBase(self.config)
10
+
11
+ # def search_knowledge(self, query: str, k: int = 5):
12
+ # """Search spiritual and self-help texts for relevant wisdom."""
13
+ # if not self.kb.is_initialized():
14
+ # return [{
15
+ # "text": "Wisdom comes from understanding ourselves.",
16
+ # "source": "General Wisdom",
17
+ # "score": 1.0
18
+ # }]
19
+ # return self.kb.search(query, k=k)
20
+
21
+ # def extract_wisdom(self, search_results: list, user_context: dict):
22
+ # """Extract most relevant wisdom for user's situation."""
23
+ # emotion = user_context.get("primary_emotion", "neutral")
24
+ # concerns = user_context.get("concerns", [])
25
+ # scored_results = []
26
+ # for result in search_results:
27
+ # score = result["score"]
28
+ # # Boost score if emotion matches
29
+ # if emotion.lower() in result["text"].lower():
30
+ # score *= 1.5
31
+ # # Boost score if concerns match
32
+ # for concern in concerns:
33
+ # if concern.lower() in result["text"].lower():
34
+ # score *= 1.3
35
+ # result["relevance_score"] = score
36
+ # scored_results.append(result)
37
+ # scored_results.sort(key=lambda x: x["relevance_score"], reverse=True)
38
+ # return scored_results[:3]
39
+
40
+ # def suggest_practices(self, emotional_state: str, cultural_context: str = None):
41
+ # """Suggest appropriate meditation or practice."""
42
+ # practices = {
43
+ # "anxiety": {
44
+ # "name": "Box Breathing Technique",
45
+ # "description": "A powerful technique used by Navy SEALs to calm anxiety",
46
+ # "steps": [
47
+ # "Sit comfortably with back straight",
48
+ # "Exhale all air from your lungs",
49
+ # "Inhale through nose for 4 counts",
50
+ # "Hold breath for 4 counts",
51
+ # "Exhale through mouth for 4 counts",
52
+ # "Hold empty for 4 counts",
53
+ # "Repeat 4-8 times"
54
+ # ],
55
+ # "benefits": "Activates parasympathetic nervous system, reduces cortisol",
56
+ # "duration": "5-10 minutes",
57
+ # "origin": "Modern breathwork"
58
+ # },
59
+ # "sadness": {
60
+ # "name": "Metta (Loving-Kindness) Meditation",
61
+ # "description": "Ancient Buddhist practice to cultivate compassion",
62
+ # "steps": [
63
+ # "Sit comfortably, close your eyes",
64
+ # "Place hand on heart",
65
+ # "Begin with self: 'May I be happy, may I be peaceful'",
66
+ # "Extend to loved ones",
67
+ # "Include neutral people",
68
+ # "Embrace difficult people",
69
+ # "Radiate to all beings"
70
+ # ],
71
+ # "benefits": "Increases self-compassion, reduces depression",
72
+ # "duration": "15-20 minutes",
73
+ # "origin": "Buddhist tradition"
74
+ # },
75
+ # "stress": {
76
+ # "name": "Progressive Muscle Relaxation",
77
+ # "description": "Systematic tension and release technique",
78
+ # "steps": [
79
+ # "Lie down comfortably",
80
+ # "Start with toes - tense for 5 seconds",
81
+ # "Release suddenly, notice relaxation",
82
+ # "Move up through each muscle group",
83
+ # "Face and scalp last",
84
+ # "Rest in full body relaxation"
85
+ # ],
86
+ # "benefits": "Reduces physical tension, improves sleep",
87
+ # "duration": "15-20 minutes",
88
+ # "origin": "Dr. Edmund Jacobson, 1920s"
89
+ # }
90
+ # }
91
+ # default = {
92
+ # "name": "Mindful Breathing",
93
+ # "description": "Foundation of all meditation practices",
94
+ # "steps": [
95
+ # "Sit comfortably",
96
+ # "Follow natural breath",
97
+ # "Count breaths 1-10",
98
+ # "Start again when distracted",
99
+ # "No judgment, just awareness"
100
+ # ],
101
+ # "benefits": "Calms mind, improves focus",
102
+ # "duration": "5-15 minutes",
103
+ # "origin": "Universal practice"
104
+ # }
105
+ # return practices.get(emotional_state.lower(), default)
106
+
107
+ from .base_tool import BaseTool
108
  from utils.knowledge_base import KnowledgeBase
109
 
110
class SearchKnowledgeTool(BaseTool):
    """Search the spiritual and self-help knowledge base for relevant passages."""

    def __init__(self, config=None):
        super().__init__(config)
        # Backing store; all searching is delegated to this instance.
        self.kb = KnowledgeBase(config)

    def __call__(self, query: str, k: int = 5):
        """Return up to *k* matches for *query*.

        Falls back to a single generic wisdom entry when the knowledge
        base has not been initialized.
        """
        if self.kb.is_initialized():
            return self.kb.search(query, k=k)
        fallback_entry = {
            "text": "Wisdom comes from understanding ourselves.",
            "source": "General Wisdom",
            "score": 1.0,
        }
        return [fallback_entry]
120
 
121
+ class ExtractWisdomTool(BaseTool):
122
+ def __init__(self, config=None):
123
+ super().__init__(config)
124
+ def __call__(self, search_results: list, user_context: dict):
125
+ # ...Use your logic from extract_wisdom...
126
  emotion = user_context.get("primary_emotion", "neutral")
127
  concerns = user_context.get("concerns", [])
128
  scored_results = []
129
  for result in search_results:
130
  score = result["score"]
 
131
  if emotion.lower() in result["text"].lower():
132
  score *= 1.5
 
133
  for concern in concerns:
134
  if concern.lower() in result["text"].lower():
135
  score *= 1.3
 
138
  scored_results.sort(key=lambda x: x["relevance_score"], reverse=True)
139
  return scored_results[:3]
140
 
141
+ class SuggestPracticesTool(BaseTool):
142
+ def __init__(self, config=None):
143
+ super().__init__(config)
144
+ def __call__(self, emotional_state: str, cultural_context: str = None):
145
+ # ... your original logic ...
146
  practices = {
147
  "anxiety": {
148
  "name": "Box Breathing Technique",
 
191
  "duration": "15-20 minutes",
192
  "origin": "Dr. Edmund Jacobson, 1920s"
193
  }
194
+ # Fill out as in your original
195
  }
196
+ # default omitted for brevity
197
+ return practices.get(emotional_state.lower(), {
198
  "name": "Mindful Breathing",
199
  "description": "Foundation of all meditation practices",
200
  "steps": [
 
207
  "benefits": "Calms mind, improves focus",
208
  "duration": "5-15 minutes",
209
  "origin": "Universal practice"
210
+ })
211
+
212
class KnowledgeTools:
    """Container bundling the knowledge tool callables for agent use.

    Each attribute is a callable tool instance constructed with the same
    ``config``, so e.g. ``tools.search_knowledge(query)`` invokes the tool.
    """

    def __init__(self, config=None):
        self.search_knowledge = SearchKnowledgeTool(config)
        self.extract_wisdom = ExtractWisdomTool(config)
        self.suggest_practices = SuggestPracticesTool(config)
agents/tools/llm_tools.py CHANGED
@@ -1,28 +1,78 @@
1
- """
2
- Mistral LLM Tools for CrewAI (modular class version)
3
- """
4
- #from models.mistral_model import MistralModel
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  from models.tinygpt2_model import TinyGPT2Model
6
 
7
- class LLMTools:
8
  def __init__(self, config=None):
9
- self.config = config
10
- self.model =TinyGPT2Model()
11
-
12
- def mistral_chat(self, prompt: str, context: dict = None) -> str:
13
- """Chat with Mistral AI for intelligent responses."""
14
  if context:
15
- full_prompt = f"""
16
- Context: {context}
17
- User Query: {prompt}
18
- Provide a thoughtful, compassionate response.
19
- """
20
  else:
21
  full_prompt = prompt
22
  return self.model.generate(full_prompt)
23
 
24
- def generate_advice(self, user_analysis: dict, wisdom_quotes: list) -> str:
25
- """Generate personalized advice based on user's situation."""
 
 
 
26
  prompt = f"""
27
  Based on this user analysis:
28
  - Emotional state: {user_analysis.get('primary_emotion')}
@@ -40,8 +90,11 @@ class LLMTools:
40
  """
41
  return self.model.generate(prompt, max_length=500)
42
 
43
- def summarize_conversation(self, conversation: list) -> str:
44
- """Summarize conversation maintaining key insights."""
 
 
 
45
  prompt = f"""
46
  Summarize this coaching conversation:
47
  {conversation}
@@ -52,4 +105,11 @@ class LLMTools:
52
  4. Next steps suggested
53
  Keep it concise but meaningful.
54
  """
55
- return self.model.generate(prompt, max_length=200)
 
 
 
 
 
 
 
 
1
+ # """
2
+ # Mistral LLM Tools for CrewAI (modular class version)
3
+ # """
4
+ # #from models.mistral_model import MistralModel
5
+ # from models.tinygpt2_model import TinyGPT2Model
6
+
7
+ # class LLMTools:
8
+ # def __init__(self, config=None):
9
+ # self.config = config
10
+ # self.model =TinyGPT2Model()
11
+
12
+ # def mistral_chat(self, prompt: str, context: dict = None) -> str:
13
+ # """Chat with Mistral AI for intelligent responses."""
14
+ # if context:
15
+ # full_prompt = f"""
16
+ # Context: {context}
17
+ # User Query: {prompt}
18
+ # Provide a thoughtful, compassionate response.
19
+ # """
20
+ # else:
21
+ # full_prompt = prompt
22
+ # return self.model.generate(full_prompt)
23
+
24
+ # def generate_advice(self, user_analysis: dict, wisdom_quotes: list) -> str:
25
+ # """Generate personalized advice based on user's situation."""
26
+ # prompt = f"""
27
+ # Based on this user analysis:
28
+ # - Emotional state: {user_analysis.get('primary_emotion')}
29
+ # - Concerns: {user_analysis.get('concerns')}
30
+ # - Needs: {user_analysis.get('needs')}
31
+ # And these relevant wisdom quotes:
32
+ # {wisdom_quotes}
33
+ # Generate compassionate, personalized advice that:
34
+ # 1. Acknowledges their feelings
35
+ # 2. Offers practical guidance
36
+ # 3. Includes relevant wisdom
37
+ # 4. Suggests actionable steps
38
+ # 5. Maintains hope and encouragement
39
+ # Be specific to their situation, not generic.
40
+ # """
41
+ # return self.model.generate(prompt, max_length=500)
42
+
43
+ # def summarize_conversation(self, conversation: list) -> str:
44
+ # """Summarize conversation maintaining key insights."""
45
+ # prompt = f"""
46
+ # Summarize this coaching conversation:
47
+ # {conversation}
48
+ # Include:
49
+ # 1. Main concerns discussed
50
+ # 2. Key insights shared
51
+ # 3. Progress made
52
+ # 4. Next steps suggested
53
+ # Keep it concise but meaningful.
54
+ # """
55
+ # return self.model.generate(prompt, max_length=200)
56
+
57
+ from .base_tool import BaseTool
58
  from models.tinygpt2_model import TinyGPT2Model
59
 
60
class MistralChatTool(BaseTool):
    """Chat tool: generate a response from the underlying language model."""

    def __init__(self, config=None):
        super().__init__(config)
        # NOTE(review): despite the class name, this instantiates
        # TinyGPT2Model, not a Mistral model — confirm the intended backend.
        self.model = TinyGPT2Model()

    def __call__(self, prompt: str, context: dict = None):
        """Return the model's reply to *prompt*, optionally framed by *context*."""
        if not context:
            return self.model.generate(prompt)
        framed_prompt = (
            f"Context: {context}\nUser Query: {prompt}\n"
            "Provide a thoughtful, compassionate response."
        )
        return self.model.generate(framed_prompt)
70
 
71
+ class GenerateAdviceTool(BaseTool):
72
+ def __init__(self, config=None):
73
+ super().__init__(config)
74
+ self.model = TinyGPT2Model()
75
+ def __call__(self, user_analysis: dict, wisdom_quotes: list):
76
  prompt = f"""
77
  Based on this user analysis:
78
  - Emotional state: {user_analysis.get('primary_emotion')}
 
90
  """
91
  return self.model.generate(prompt, max_length=500)
92
 
93
+ class SummarizeConversationTool(BaseTool):
94
+ def __init__(self, config=None):
95
+ super().__init__(config)
96
+ self.model = TinyGPT2Model()
97
+ def __call__(self, conversation: list):
98
  prompt = f"""
99
  Summarize this coaching conversation:
100
  {conversation}
 
105
  4. Next steps suggested
106
  Keep it concise but meaningful.
107
  """
108
+ return self.model.generate(prompt, max_length=200)
109
+
110
class LLMTools:
    """Container bundling the LLM tool callables for agent use.

    Each attribute is a callable tool instance constructed with the same
    ``config``; each one owns its own model instance (see the tool classes).
    """

    def __init__(self, config=None):
        self.mistral_chat = MistralChatTool(config)
        self.generate_advice = GenerateAdviceTool(config)
        self.summarize_conversation = SummarizeConversationTool(config)
115
+
agents/tools/validation_tools.py CHANGED
@@ -9,7 +9,7 @@ import json
9
  from transformers import pipeline
10
  import torch
11
 
12
- @dataclass
13
  class ValidationResult:
14
  """Result of validation check"""
15
  is_valid: bool
@@ -19,13 +19,406 @@ class ValidationResult:
19
  confidence: float
20
  refined_text: Optional[str] = None
21
 
22
- class ValidationTools:
23
- """Tools for validating responses and ensuring safety"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
- def __init__(self, config):
26
- self.config = config
 
 
27
 
28
- # Initialize sentiment analyzer for tone checking
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  self.sentiment_analyzer = pipeline(
30
  "sentiment-analysis",
31
  model="nlptown/bert-base-multilingual-uncased-sentiment",
 
9
  from transformers import pipeline
10
  import torch
11
 
12
+ # @dataclass
13
  class ValidationResult:
14
  """Result of validation check"""
15
  is_valid: bool
 
19
  confidence: float
20
  refined_text: Optional[str] = None
21
 
22
+ # class ValidationTools:
23
+ # """Tools for validating responses and ensuring safety"""
24
+
25
+ # def __init__(self, config):
26
+ # self.config = config
27
+
28
+ # # Initialize sentiment analyzer for tone checking
29
+ # self.sentiment_analyzer = pipeline(
30
+ # "sentiment-analysis",
31
+ # model="nlptown/bert-base-multilingual-uncased-sentiment",
32
+ # device=0 if torch.cuda.is_available() else -1
33
+ # )
34
+
35
+ # # Prohibited patterns for different categories
36
+ # self.prohibited_patterns = {
37
+ # 'medical': [
38
+ # r'\b(?:diagnos|prescrib|medicat|cure|treat|therap)\w*\b',
39
+ # r'\b(?:disease|illness|disorder|syndrome)\s+(?:is|are|can be)\b',
40
+ # r'\b(?:take|consume|dose|dosage)\s+\d+\s*(?:mg|ml|pill|tablet)',
41
+ # r'\b(?:medical|clinical|physician|doctor)\s+(?:advice|consultation|opinion)',
42
+ # ],
43
+ # 'legal': [
44
+ # r'\b(?:legal advice|lawsuit|sue|court|litigation)\b',
45
+ # r'\b(?:illegal|unlawful|crime|criminal|prosecut)\w*\b',
46
+ # r'\b(?:you should|must|have to)\s+(?:sign|agree|consent|contract)',
47
+ # r'\b(?:rights|obligations|liability|damages)\s+(?:are|include)\b',
48
+ # ],
49
+ # 'financial': [
50
+ # r'\b(?:invest|buy|sell|trade)\s+(?:stock|crypto|bitcoin|forex)\b',
51
+ # r'\b(?:guaranteed|promise)\s+(?:return|profit|income|earnings)\b',
52
+ # r'\b(?:financial advisor|investment advice|trading strategy)\b',
53
+ # r'\b(?:tax|accounting|financial planning)\s+(?:advice|consultation)',
54
+ # ],
55
+ # 'harmful': [
56
+ # r'\b(?:suicide|suicidal|kill\s+(?:your|my)self|end\s+(?:it|life))\b',
57
+ # r'\b(?:self[\-\s]?harm|hurt\s+(?:your|my)self|cutting)\b',
58
+ # r'\b(?:violence|violent|weapon|attack|assault)\b',
59
+ # r'\b(?:hate|discriminat|racist|sexist|homophobic)\b',
60
+ # ],
61
+ # 'absolute': [
62
+ # r'\b(?:always|never|every|all|none|no one|everyone)\s+(?:will|must|should|is|are)\b',
63
+ # r'\b(?:definitely|certainly|guaranteed|assured|promise)\b',
64
+ # r'\b(?:only way|only solution|must do|have to)\b',
65
+ # ]
66
+ # }
67
+
68
+ # # Required elements for supportive responses
69
+ # self.supportive_elements = {
70
+ # 'empathy': [
71
+ # 'understand', 'hear', 'feel', 'acknowledge', 'recognize',
72
+ # 'appreciate', 'empathize', 'relate', 'comprehend'
73
+ # ],
74
+ # 'validation': [
75
+ # 'valid', 'normal', 'understandable', 'natural', 'okay',
76
+ # 'reasonable', 'makes sense', 'legitimate'
77
+ # ],
78
+ # 'support': [
79
+ # 'support', 'help', 'here for you', 'together', 'alongside',
80
+ # 'assist', 'guide', 'accompany', 'with you'
81
+ # ],
82
+ # 'hope': [
83
+ # 'can', 'possible', 'able', 'capable', 'potential',
84
+ # 'opportunity', 'growth', 'improve', 'better', 'progress'
85
+ # ],
86
+ # 'empowerment': [
87
+ # 'choice', 'decide', 'control', 'power', 'strength',
88
+ # 'agency', 'capable', 'resource', 'ability'
89
+ # ]
90
+ # }
91
+
92
+ # # Crisis indicators
93
+ # self.crisis_indicators = [
94
+ # r'\b(?:want|going|plan)\s+to\s+(?:die|kill|end)\b',
95
+ # r'\b(?:no reason|point|hope)\s+(?:to|in)\s+(?:live|living|life)\b',
96
+ # r'\b(?:better off|world)\s+without\s+me\b',
97
+ # r'\bsuicide\s+(?:plan|method|attempt)\b',
98
+ # r'\b(?:final|last)\s+(?:goodbye|letter|message)\b'
99
+ # ]
100
+
101
+ # # Tone indicators
102
+ # self.negative_tone_words = [
103
+ # 'stupid', 'idiot', 'dumb', 'pathetic', 'worthless',
104
+ # 'loser', 'failure', 'weak', 'incompetent', 'useless'
105
+ # ]
106
+
107
+ # self.dismissive_phrases = [
108
+ # 'just get over it', 'stop complaining', 'not a big deal',
109
+ # 'being dramatic', 'overreacting', 'too sensitive'
110
+ # ]
111
+
112
+ # def validate_response(self, response: str, context: Dict[str, Any] = None) -> ValidationResult:
113
+ # """Comprehensive validation of response"""
114
+ # issues = []
115
+ # warnings = []
116
+ # suggestions = []
117
+
118
+ # # Check for prohibited content
119
+ # prohibited_check = self._check_prohibited_content(response)
120
+ # if prohibited_check["found"]:
121
+ # issues.extend(prohibited_check["violations"])
122
+ # suggestions.extend(prohibited_check["suggestions"])
123
+
124
+ # # Check tone and sentiment
125
+ # tone_check = self._check_tone(response)
126
+ # if not tone_check["appropriate"]:
127
+ # warnings.extend(tone_check["issues"])
128
+ # suggestions.extend(tone_check["suggestions"])
129
+
130
+ # # Check for supportive elements
131
+ # support_check = self._check_supportive_elements(response)
132
+ # if support_check["missing"]:
133
+ # warnings.append(f"Missing supportive elements: {', '.join(support_check['missing'])}")
134
+ # suggestions.extend(support_check["suggestions"])
135
+
136
+ # # Check for crisis content in context
137
+ # if context and context.get("user_input"):
138
+ # crisis_check = self._check_crisis_indicators(context["user_input"])
139
+ # if crisis_check["is_crisis"] and "crisis" not in response.lower():
140
+ # warnings.append("User may be in crisis but response doesn't address this")
141
+ # suggestions.append("Include crisis resources and immediate support options")
142
+
143
+ # # Calculate overall confidence
144
+ # confidence = self._calculate_confidence(issues, warnings)
145
+
146
+ # # Generate refined response if needed
147
+ # refined_text = None
148
+ # if issues or (warnings and confidence < 0.7):
149
+ # refined_text = self._refine_response(response, issues, warnings, suggestions)
150
+
151
+ # return ValidationResult(
152
+ # is_valid=len(issues) == 0,
153
+ # issues=issues,
154
+ # warnings=warnings,
155
+ # suggestions=suggestions,
156
+ # confidence=confidence,
157
+ # refined_text=refined_text
158
+ # )
159
+
160
+ # def _check_prohibited_content(self, text: str) -> Dict[str, Any]:
161
+ # """Check for prohibited content patterns"""
162
+ # found_violations = []
163
+ # suggestions = []
164
+
165
+ # for category, patterns in self.prohibited_patterns.items():
166
+ # for pattern in patterns:
167
+ # if re.search(pattern, text, re.IGNORECASE):
168
+ # found_violations.append(f"Contains {category} advice/content")
169
+
170
+ # # Add specific suggestions
171
+ # if category == "medical":
172
+ # suggestions.append("Replace with: 'Consider speaking with a healthcare professional'")
173
+ # elif category == "legal":
174
+ # suggestions.append("Replace with: 'For legal matters, consult with a qualified attorney'")
175
+ # elif category == "financial":
176
+ # suggestions.append("Replace with: 'For financial decisions, consider consulting a financial advisor'")
177
+ # elif category == "harmful":
178
+ # suggestions.append("Include crisis resources and express immediate concern for safety")
179
+ # elif category == "absolute":
180
+ # suggestions.append("Use qualifying language like 'often', 'might', 'could' instead of absolutes")
181
+ # break
182
+
183
+ # return {
184
+ # "found": len(found_violations) > 0,
185
+ # "violations": found_violations,
186
+ # "suggestions": suggestions
187
+ # }
188
+
189
+ # def _check_tone(self, text: str) -> Dict[str, Any]:
190
+ # """Check the tone and sentiment of the response"""
191
+ # issues = []
192
+ # suggestions = []
193
+
194
+ # # Check sentiment
195
+ # try:
196
+ # sentiment_result = self.sentiment_analyzer(text[:512])[0] # Limit length for model
197
+ # sentiment_score = sentiment_result['score']
198
+ # sentiment_label = sentiment_result['label']
199
+
200
+ # # Check if too negative
201
+ # if '1' in sentiment_label or '2' in sentiment_label: # 1-2 stars = negative
202
+ # issues.append("Response tone is too negative")
203
+ # suggestions.append("Add more supportive and hopeful language")
204
+ # except:
205
+ # pass
206
+
207
+ # # Check for negative words
208
+ # text_lower = text.lower()
209
+ # found_negative = [word for word in self.negative_tone_words if word in text_lower]
210
+ # if found_negative:
211
+ # issues.append(f"Contains negative/judgmental language: {', '.join(found_negative)}")
212
+ # suggestions.append("Replace judgmental terms with supportive language")
213
+
214
+ # # Check for dismissive phrases
215
+ # found_dismissive = [phrase for phrase in self.dismissive_phrases if phrase in text_lower]
216
+ # if found_dismissive:
217
+ # issues.append("Contains dismissive language")
218
+ # suggestions.append("Acknowledge and validate the person's feelings instead")
219
+
220
+ # return {
221
+ # "appropriate": len(issues) == 0,
222
+ # "issues": issues,
223
+ # "suggestions": suggestions
224
+ # }
225
+
226
+ # def _check_supportive_elements(self, text: str) -> Dict[str, Any]:
227
+ # """Check for presence of supportive elements"""
228
+ # text_lower = text.lower()
229
+ # missing_elements = []
230
+ # suggestions = []
231
+
232
+ # element_scores = {}
233
+ # for element, keywords in self.supportive_elements.items():
234
+ # found = any(keyword in text_lower for keyword in keywords)
235
+ # element_scores[element] = found
236
+ # if not found:
237
+ # missing_elements.append(element)
238
+
239
+ # # Generate suggestions for missing elements
240
+ # if 'empathy' in missing_elements:
241
+ # suggestions.append("Add empathetic language like 'I understand how difficult this must be'")
242
+ # if 'validation' in missing_elements:
243
+ # suggestions.append("Validate their feelings with phrases like 'Your feelings are completely valid'")
244
+ # if 'support' in missing_elements:
245
+ # suggestions.append("Express support with 'I'm here to support you through this'")
246
+ # if 'hope' in missing_elements:
247
+ # suggestions.append("Include hopeful elements about growth and positive change")
248
+ # if 'empowerment' in missing_elements:
249
+ # suggestions.append("Emphasize their agency and ability to make choices")
250
+
251
+ # return {
252
+ # "missing": missing_elements,
253
+ # "present": [k for k, v in element_scores.items() if v],
254
+ # "suggestions": suggestions
255
+ # }
256
+
257
+ # def _check_crisis_indicators(self, text: str) -> Dict[str, Any]:
258
+ # """Check for crisis indicators in text"""
259
+ # for pattern in self.crisis_indicators:
260
+ # if re.search(pattern, text, re.IGNORECASE):
261
+ # return {
262
+ # "is_crisis": True,
263
+ # "pattern_matched": pattern,
264
+ # "action": "Immediate crisis response needed"
265
+ # }
266
+
267
+ # return {"is_crisis": False}
268
 
269
+ # def _calculate_confidence(self, issues: List[str], warnings: List[str]) -> float:
270
+ # """Calculate confidence score for validation"""
271
+ # if issues:
272
+ # return 0.3 - (0.1 * len(issues)) # Major issues severely impact confidence
273
 
274
+ # confidence = 1.0
275
+ # confidence -= 0.1 * len(warnings) # Each warning reduces confidence
276
+
277
+ # return max(0.0, confidence)
278
+
279
+ # def _refine_response(self, response: str, issues: List[str], warnings: List[str], suggestions: List[str]) -> str:
280
+ # """Attempt to refine the response based on issues found"""
281
+ # refined = response
282
+
283
+ # # Add disclaimer for professional advice
284
+ # if any('advice' in issue for issue in issues):
285
+ # disclaimer = "\n\n*Please note: I'm here to provide support and guidance, but for specific professional matters, it's important to consult with qualified professionals.*"
286
+ # if disclaimer not in refined:
287
+ # refined += disclaimer
288
+
289
+ # # Add crisis resources if needed
290
+ # if any('crisis' in warning for warning in warnings):
291
+ # crisis_text = "\n\n**If you're in crisis, please reach out for immediate help:**\n- Crisis Hotline: 988 (US)\n- Crisis Text Line: Text HOME to 741741\n- International: findahelpline.com"
292
+ # if crisis_text not in refined:
293
+ # refined += crisis_text
294
+
295
+ # # Add supportive closing if missing hope
296
+ # if any('hope' in warning for warning in warnings):
297
+ # hopeful_closing = "\n\nRemember, you have the strength to navigate this challenge, and positive change is possible. I'm here to support you on this journey."
298
+ # if not any(phrase in refined.lower() for phrase in ['journey', 'strength', 'possible']):
299
+ # refined += hopeful_closing
300
+
301
+ # return refined
302
+
303
+ # def validate_user_input(self, text: str) -> ValidationResult:
304
+ # """Validate user input for safety and process-ability"""
305
+ # issues = []
306
+ # warnings = []
307
+ # suggestions = []
308
+
309
+ # # Check if empty
310
+ # if not text or not text.strip():
311
+ # issues.append("Empty input received")
312
+ # suggestions.append("Please share what's on your mind")
313
+ # return ValidationResult(False, issues, warnings, suggestions, 0.0)
314
+
315
+ # # Check length
316
+ # if len(text) > 5000:
317
+ # warnings.append("Input is very long")
318
+ # suggestions.append("Consider breaking this into smaller parts")
319
+
320
+ # # Check for crisis indicators
321
+ # crisis_check = self._check_crisis_indicators(text)
322
+ # if crisis_check["is_crisis"]:
323
+ # warnings.append("Crisis indicators detected")
324
+ # suggestions.append("Prioritize safety and provide crisis resources")
325
+
326
+ # # Check for spam/repetition
327
+ # if self._is_spam(text):
328
+ # issues.append("Input appears to be spam or repetitive")
329
+ # suggestions.append("Please share genuine thoughts or concerns")
330
+
331
+ # confidence = self._calculate_confidence(issues, warnings)
332
+
333
+ # return ValidationResult(
334
+ # is_valid=len(issues) == 0,
335
+ # issues=issues,
336
+ # warnings=warnings,
337
+ # suggestions=suggestions,
338
+ # confidence=confidence
339
+ # )
340
+
341
+ # def _is_spam(self, text: str) -> bool:
342
+ # """Simple spam detection"""
343
+ # # Check for excessive repetition
344
+ # words = text.lower().split()
345
+ # if len(words) > 10:
346
+ # unique_ratio = len(set(words)) / len(words)
347
+ # if unique_ratio < 0.3: # Less than 30% unique words
348
+ # return True
349
+
350
+ # # Check for common spam patterns
351
+ # spam_patterns = [
352
+ # r'(?:buy|sell|click|visit)\s+(?:now|here|this)',
353
+ # r'(?:congratulations|winner|prize|lottery)',
354
+ # r'(?:viagra|pills|drugs|pharmacy)',
355
+ # r'(?:$$|money\s+back|guarantee)'
356
+ # ]
357
+
358
+ # for pattern in spam_patterns:
359
+ # if re.search(pattern, text, re.IGNORECASE):
360
+ # return True
361
+
362
+ # return False
363
+
364
+ # def get_crisis_resources(self, location: str = "global") -> Dict[str, Any]:
365
+ # """Get crisis resources based on location"""
366
+ # resources = {
367
+ # "global": {
368
+ # "name": "International Association for Suicide Prevention",
369
+ # "url": "https://www.iasp.info/resources/Crisis_Centres/",
370
+ # "text": "Find crisis centers worldwide"
371
+ # },
372
+ # "us": {
373
+ # "name": "988 Suicide & Crisis Lifeline",
374
+ # "phone": "988",
375
+ # "text": "Text HOME to 741741",
376
+ # "url": "https://988lifeline.org/"
377
+ # },
378
+ # "uk": {
379
+ # "name": "Samaritans",
380
+ # "phone": "116 123",
381
+ # "email": "jo@samaritans.org",
382
+ # "url": "https://www.samaritans.org/"
383
+ # },
384
+ # "india": {
385
+ # "name": "National Suicide Prevention Helpline",
386
+ # "phone": "91-9820466726",
387
+ # "additional": "Vandrevala Foundation: 9999666555"
388
+ # },
389
+ # "australia": {
390
+ # "name": "Lifeline",
391
+ # "phone": "13 11 14",
392
+ # "text": "Text 0477 13 11 14",
393
+ # "url": "https://www.lifeline.org.au/"
394
+ # }
395
+ # }
396
+
397
+ # return resources.get(location.lower(), resources["global"])
398
+
399
+ from .base_tool import BaseTool
400
+
401
class ValidateResponseTool(BaseTool):
    """Validate a coach response before it is returned to the user.

    Stub implementation: the full rule-based validation pipeline (prohibited
    content, tone, supportive elements, crisis checks) is kept commented out
    above. Until it is restored, every response is accepted.

    Bug fixed: the previous version declared dangling local annotations and
    returned a *set of field-name strings* (``{"is_valid", "issues", ...}``),
    so callers never received actual validation values. It now returns a
    dict mapping each field name to a real value.
    """

    def __init__(self, config=None):
        super().__init__(config)

    def __call__(self, response: str, context: dict = None):
        """Return a validation verdict for ``response``.

        Args:
            response: The generated coach response to check.
            context: Optional conversation context (currently unused).

        Returns:
            dict with keys ``is_valid`` (bool), ``issues`` (list[str]),
            ``warnings`` (list[str]), ``suggestions`` (list[str]),
            ``confidence`` (float) and ``refined_text`` (str | None).
        """
        # Placeholder verdict: accept everything with full confidence.
        # TODO: reinstate the prohibited-content / tone / crisis checks
        # from the commented-out ValidationTools implementation.
        return {
            "is_valid": True,
            "issues": [],
            "warnings": [],
            "suggestions": [],
            "confidence": 1.0,
            "refined_text": None,
        }
416
+
417
+ class ValidationTools:
418
+ def __init__(self, config=None):
419
+ self.validate_response = ValidateResponseTool(config)
420
+ # Add more tools as needed (check_safety, refine_response, etc.)
421
+ # # Initialize sentiment analyzer for tone checking
422
  self.sentiment_analyzer = pipeline(
423
  "sentiment-analysis",
424
  model="nlptown/bert-base-multilingual-uncased-sentiment",
agents/tools/voice_tools.py CHANGED
@@ -1,66 +1,131 @@
1
- import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import numpy as np
3
- import torch
4
- from transformers import pipeline, AutoProcessor, AutoModelForSpeechSeq2Seq
5
  import asyncio
6
- import soundfile as sf
7
- import tempfile # Added the import for tempfile!
8
- #from models.mistral_model import MistralModel
9
  from models.tinygpt2_model import TinyGPT2Model
 
10
 
 
11
  class MultilingualVoiceProcessor:
12
- def __init__(self, model_name="openai/whisper-base", device=None):
13
- cache_dir = os.getenv("TRANSFORMERS_CACHE", None)
14
- if device is None:
15
- device = 0 if torch.cuda.is_available() else -1
16
-
17
- # Load model and processor with cache_dir
18
- processor = AutoProcessor.from_pretrained(model_name, cache_dir=cache_dir)
19
- model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name, cache_dir=cache_dir)
20
-
21
- # Create the pipeline, DO NOT PASS cache_dir here
22
- self.pipe = pipeline(
23
- "automatic-speech-recognition",
24
- model=model,
25
- tokenizer=processor,
26
- feature_extractor=processor,
27
- device=device,
28
- generate_kwargs={"task": "transcribe", "return_timestamps": False},
29
- )
30
-
31
  async def transcribe(self, audio_data: np.ndarray, language: str = None):
32
- with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as tmp_wav:
33
- sf.write(tmp_wav.name, audio_data, samplerate=16000)
34
- extra = {"language": language} if language else {}
35
- result = self.pipe(tmp_wav.name, **extra)
36
- text = result['text']
37
- return text, language or "unknown"
38
-
39
- async def synthesize(self, text, language: str = "en", voice_type: str = "normal"):
40
- raise NotImplementedError("Use gTTS or edge-tts as before.")
41
 
42
- class VoiceTools:
 
43
  def __init__(self, config=None):
44
- self.config = config
45
  self.vp = MultilingualVoiceProcessor()
46
-
47
- def transcribe_audio(self, audio_data: np.ndarray, language=None):
48
  text, detected_lang = asyncio.run(self.vp.transcribe(audio_data, language))
49
  return {"text": text, "language": detected_lang}
50
 
51
- def detect_emotion(self, text: str) -> dict:
 
 
 
52
  model = TinyGPT2Model()
53
  prompt = f"""
54
  Analyze the emotional state in this text: "{text}"
55
- Identify:
56
- 1. Primary emotion (joy, sadness, anger, fear, anxiety, confusion, etc.)
57
- 2. Emotional intensity (low, medium, high)
58
- 3. Underlying feelings
59
- 4. Key concerns
60
- Format as JSON with keys: primary_emotion, intensity, feelings, concerns
61
  """
 
62
  response = model.generate(prompt)
63
- # TODO: Actually parse response, dummy return for now:
64
  return {
65
  "primary_emotion": "detected_emotion",
66
  "intensity": "medium",
@@ -68,7 +133,10 @@ class VoiceTools:
68
  "concerns": ["concern1", "concern2"]
69
  }
70
 
71
- def generate_reflective_questions(self, context: dict) -> list:
 
 
 
72
  emotion = context.get("primary_emotion", "neutral")
73
  questions_map = {
74
  "anxiety": [
@@ -91,4 +159,10 @@ class VoiceTools:
91
  "How are you feeling in this moment?",
92
  "What would support look like for you?",
93
  "What's most important to explore right now?"
94
- ])
 
 
 
 
 
 
 
1
+ # import os
2
+ # import numpy as np
3
+ # import torch
4
+ # from transformers import pipeline, AutoProcessor, AutoModelForSpeechSeq2Seq
5
+ # import asyncio
6
+ # import soundfile as sf
7
+ # import tempfile # Added the import for tempfile!
8
+ # #from models.mistral_model import MistralModel
9
+ # from models.tinygpt2_model import TinyGPT2Model
10
+
11
+ # class MultilingualVoiceProcessor:
12
+ # def __init__(self, model_name="openai/whisper-base", device=None):
13
+ # cache_dir = os.getenv("TRANSFORMERS_CACHE", None)
14
+ # if device is None:
15
+ # device = 0 if torch.cuda.is_available() else -1
16
+
17
+ # # Load model and processor with cache_dir
18
+ # processor = AutoProcessor.from_pretrained(model_name, cache_dir=cache_dir)
19
+ # model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name, cache_dir=cache_dir)
20
+
21
+ # # Create the pipeline, DO NOT PASS cache_dir here
22
+ # self.pipe = pipeline(
23
+ # "automatic-speech-recognition",
24
+ # model=model,
25
+ # tokenizer=processor,
26
+ # feature_extractor=processor,
27
+ # device=device,
28
+ # generate_kwargs={"task": "transcribe", "return_timestamps": False},
29
+ # )
30
+
31
+ # async def transcribe(self, audio_data: np.ndarray, language: str = None):
32
+ # with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as tmp_wav:
33
+ # sf.write(tmp_wav.name, audio_data, samplerate=16000)
34
+ # extra = {"language": language} if language else {}
35
+ # result = self.pipe(tmp_wav.name, **extra)
36
+ # text = result['text']
37
+ # return text, language or "unknown"
38
+
39
+ # async def synthesize(self, text, language: str = "en", voice_type: str = "normal"):
40
+ # raise NotImplementedError("Use gTTS or edge-tts as before.")
41
+
42
+ # class VoiceTools:
43
+ # def __init__(self, config=None):
44
+ # self.config = config
45
+ # self.vp = MultilingualVoiceProcessor()
46
+
47
+ # def transcribe_audio(self, audio_data: np.ndarray, language=None):
48
+ # text, detected_lang = asyncio.run(self.vp.transcribe(audio_data, language))
49
+ # return {"text": text, "language": detected_lang}
50
+
51
+ # def detect_emotion(self, text: str) -> dict:
52
+ # model = TinyGPT2Model()
53
+ # prompt = f"""
54
+ # Analyze the emotional state in this text: "{text}"
55
+ # Identify:
56
+ # 1. Primary emotion (joy, sadness, anger, fear, anxiety, confusion, etc.)
57
+ # 2. Emotional intensity (low, medium, high)
58
+ # 3. Underlying feelings
59
+ # 4. Key concerns
60
+ # Format as JSON with keys: primary_emotion, intensity, feelings, concerns
61
+ # """
62
+ # response = model.generate(prompt)
63
+ # # TODO: Actually parse response, dummy return for now:
64
+ # return {
65
+ # "primary_emotion": "detected_emotion",
66
+ # "intensity": "medium",
67
+ # "feelings": ["feeling1", "feeling2"],
68
+ # "concerns": ["concern1", "concern2"]
69
+ # }
70
+
71
+ # def generate_reflective_questions(self, context: dict) -> list:
72
+ # emotion = context.get("primary_emotion", "neutral")
73
+ # questions_map = {
74
+ # "anxiety": [
75
+ # "What specific thoughts are creating this anxiety?",
76
+ # "What would feeling calm look like in this situation?",
77
+ # "What has helped you manage anxiety before?"
78
+ # ],
79
+ # "sadness": [
80
+ # "What would comfort mean to you right now?",
81
+ # "What are you grieving or missing?",
82
+ # "How can you be gentle with yourself today?"
83
+ # ],
84
+ # "confusion": [
85
+ # "What would clarity feel like?",
86
+ # "What's the main question you're grappling with?",
87
+ # "What does your intuition tell you?"
88
+ # ]
89
+ # }
90
+ # return questions_map.get(emotion, [
91
+ # "How are you feeling in this moment?",
92
+ # "What would support look like for you?",
93
+ # "What's most important to explore right now?"
94
+ # ])
95
+
96
  import numpy as np
 
 
97
  import asyncio
 
 
 
98
  from models.tinygpt2_model import TinyGPT2Model
99
+ from .base_tool import BaseTool
100
 
101
+ # Dummy MultilingualVoiceProcessor for context (real version can be plugged in)
102
class MultilingualVoiceProcessor:
    """Placeholder speech-to-text processor.

    Stands in for the real Whisper-based implementation so the tool
    pipeline can run without heavy model downloads; always yields a
    fixed transcript.
    """

    async def transcribe(self, audio_data: np.ndarray, language: str = None):
        """Return a canned ``(transcript, language)`` pair.

        Args:
            audio_data: Raw audio samples; ignored by this stub.
            language: Optional language hint; echoed back when provided,
                otherwise ``"en"`` is reported.
        """
        detected_language = language if language else "en"
        return "Transcribed text.", detected_language
 
 
 
 
 
 
 
106
 
107
+ # --- Tool wrapper classes below ---
108
class TranscribeAudioTool(BaseTool):
    """CrewAI tool wrapping MultilingualVoiceProcessor for speech-to-text."""

    def __init__(self, config=None):
        super().__init__(config)
        # One processor instance is created up front and reused per call.
        self.vp = MultilingualVoiceProcessor()

    def __call__(self, audio_data: np.ndarray, language=None):
        """Transcribe raw audio and report the detected/used language."""
        # The processor API is async; bridge it into this synchronous tool.
        # NOTE(review): asyncio.run raises if invoked from a running event
        # loop — confirm callers are synchronous.
        transcription, detected = asyncio.run(
            self.vp.transcribe(audio_data, language)
        )
        return {"text": transcription, "language": detected}
 
116
class DetectEmotionTool(BaseTool):
    """Analyze the emotional content of a piece of user text.

    Prompts a local TinyGPT2 model for an emotion analysis; the model
    output is currently ignored and a hard-coded stub dict is returned.
    """

    def __init__(self, config=None):
        super().__init__(config)

    def __call__(self, text: str) -> dict:
        # A fresh model instance is built on every call — presumably cheap
        # for TinyGPT2, but consider caching if this becomes hot. TODO confirm.
        model = TinyGPT2Model()
        prompt = f"""
        Analyze the emotional state in this text: "{text}"
        Identify: 1. Primary emotion (joy, sadness, etc) 2. Intensity
        3. Feelings 4. Concerns. Format as JSON.
        """
        # For a real implementation, parse the response!
        response = model.generate(prompt)
        # Stub (replace with correct parsing logic) — `response` is computed
        # but never used; the fixed placeholder below is returned instead.
        return {
            "primary_emotion": "detected_emotion",
            "intensity": "medium",
            "feelings": ["feeling1", "feeling2"],
            "concerns": ["concern1", "concern2"]
        }
135
 
136
class GenerateReflectiveQuestionsTool(BaseTool):
    """Produce coaching questions matched to the user's detected emotion."""

    def __init__(self, config=None):
        super().__init__(config)

    def __call__(self, context: dict) -> list:
        # Look up questions keyed by the detected primary emotion; any
        # emotion not in the map falls back to generic open-ended prompts.
        emotion = context.get("primary_emotion", "neutral")
        questions_map = {
            "anxiety": [
                "What specific thoughts are creating this anxiety?",
                "What would feeling calm look like in this situation?",
                "What has helped you manage anxiety before?"
            ],
            "sadness": [
                "What would comfort mean to you right now?",
                "What are you grieving or missing?",
                "How can you be gentle with yourself today?"
            ],
            "confusion": [
                "What would clarity feel like?",
                "What's the main question you're grappling with?",
                "What does your intuition tell you?"
            ]
        }
        return questions_map.get(emotion, [
            "How are you feeling in this moment?",
            "What would support look like for you?",
            "What's most important to explore right now?"
        ])
163
+
164
class VoiceTools:
    """Container bundling the voice-related CrewAI tools for one agent.

    Each attribute is an independently callable tool instance, so CrewAI
    can register them one by one.
    """

    def __init__(self, config=None):
        # Keep the config on the container: the pre-refactor VoiceTools
        # exposed `self.config`, and dropping it would break any caller
        # that still reads it.
        self.config = config
        self.transcribe_audio = TranscribeAudioTool(config)
        self.detect_emotion = DetectEmotionTool(config)
        self.generate_reflective_questions = GenerateReflectiveQuestionsTool(config)