| | import gradio as gr |
| | import os |
| | import base64 |
| | from datetime import datetime |
| | import requests |
| | import trello |
| | from dotenv import load_dotenv |
| | import urllib3 |
| | import wave |
| | import audioop |
| | import io |
| | import speech_recognition as sr |
| |
|
| | |
# Suppress warnings for unverified HTTPS requests (improve_task_description
# calls the SambaNova API with verify=False, which would otherwise warn).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Pull TRELLO_API_KEY / TRELLO_TOKEN (and optionally SAMBANOVA_API_KEY) from .env.
load_dotenv()

# SECURITY: a SambaNova API key was previously hard-coded here and committed to
# source control -- it should be revoked/rotated. The environment variable now
# takes precedence; the literal fallback only preserves existing behaviour.
SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY", "34115dcb-baab-4390-ab5c-e501666f9f4e")
SAMBANOVA_URL = "https://api.sambanova.ai/v1/chat/completions"

# Trello client shared by all board/card helpers below.
trello_client = trello.TrelloClient(
    api_key=os.getenv('TRELLO_API_KEY'),
    token=os.getenv('TRELLO_TOKEN')
)
| |
|
def get_trello_members():
    """Return a mapping of member display name -> member id for the first board.

    Falls back to the username when a member has no full name. Returns an
    empty dict (after logging) on any Trello API failure.
    """
    try:
        all_boards = trello_client.list_boards()
        if not all_boards:
            raise Exception("No Trello boards found")

        first_board = all_boards[0]
        name_to_id = {}
        for m in first_board.get_members():
            display = m.full_name if m.full_name else m.username
            name_to_id[display] = m.id
        return name_to_id
    except Exception as e:
        print(f"Error fetching Trello members: {str(e)}")
        return {}
| |
|
def process_audio_data(audio_path):
    """Normalise a WAV file to mono, 16-bit, 16 kHz raw PCM.

    Returns a (raw_bytes, sample_rate) tuple; sample_rate is always 16000
    after conversion. Logs and re-raises any wave/audioop failure.

    NOTE: audioop was removed in Python 3.13 (PEP 594); this helper needs a
    replacement there.
    """
    try:
        with wave.open(audio_path, 'rb') as wav:
            channels = wav.getnchannels()
            width = wav.getsampwidth()
            rate = wav.getframerate()
            frames = wav.readframes(wav.getnframes())

        # Downmix stereo to mono (equal weight on both channels).
        if channels == 2:
            frames = audioop.tomono(frames, width, 1, 1)

        # Convert samples to 16-bit width.
        if width != 2:
            frames = audioop.lin2lin(frames, width, 2)

        # Resample to the 16 kHz rate expected downstream.
        if rate != 16000:
            frames, _ = audioop.ratecv(frames, 2, 1, rate, 16000, None)
            rate = 16000

        return frames, rate

    except Exception as e:
        print(f"Error processing audio: {str(e)}")
        raise
| |
|
def transcribe_audio(audio_file):
    """Transcribe a WAV file to text using Google Speech Recognition.

    ``audio_file`` may be a plain path or a (path, ...) tuple as produced by
    some Gradio audio components. Raises Exception with a user-facing
    message on any failure.
    """
    try:
        recognizer = sr.Recognizer()

        # Gradio sometimes hands us a tuple; unwrap to the file path.
        audio_path = audio_file[0] if isinstance(audio_file, tuple) else audio_file

        print(f"Processing audio file: {audio_path}")

        try:
            with sr.AudioFile(audio_path) as source:
                recognizer.adjust_for_ambient_noise(source)
                audio_data = recognizer.record(source)

            text = recognizer.recognize_google(
                audio_data,
                language='en-US',
                show_all=False,
                with_confidence=False
            )

            if not text:
                raise Exception("No transcription results returned")

            return text.strip()

        except sr.UnknownValueError:
            raise Exception("Speech could not be understood. Please try speaking more clearly.")
        except sr.RequestError as e:
            raise Exception(f"Could not request results from Google Speech Recognition service; {e}")

    except Exception as e:
        print(f"Transcription error details: {str(e)}")
        raise Exception(f"Transcription error: {str(e)}")
| |
|
def analyze_emotion(text):
    """Classify text urgency ("urgent" / "high" / "normal") from its emotions.

    Sends the text to a Hugging Face go_emotions model, sums the scores of
    emotions that clear their per-emotion thresholds, and buckets the totals
    into an urgency level. Falls back to "normal" on any API/parsing failure.
    """
    API_URL = "https://api-inference.huggingface.co/models/SamLowe/roberta-base-go_emotions"
    headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_API_KEY')}"}

    # Minimum score an emotion must exceed before it counts toward urgency.
    urgent_thresholds = {
        'anger': 0.15,
        'fear': 0.40,
        'annoyance': 0.10,
        'disapproval': 0.30,
        'nervousness': 0.25,
        'disgust': 0.20,
        'disappointment': 0.40,
        'grief': 0.05,
        'remorse': 0.10,
        'sadness': 0.40
    }
    high_thresholds = {
        'desire': 0.25,
        'excitement': 0.35,
        'surprise': 0.15,
        'curiosity': 0.25,
        'optimism': 0.20,
        'pride': 0.10,
        'joy': 0.40,
        'love': 0.25,
        'admiration': 0.25,
        'gratitude': 0.45
    }

    try:
        response = requests.post(API_URL, headers=headers, json={"inputs": text})
        emotions = response.json()

        if not (isinstance(emotions, list) and len(emotions) > 0):
            return "normal"

        urgent_total = 0
        high_total = 0
        for entry in emotions[0]:
            label = entry['label']
            score = entry['score']
            if label == 'neutral':
                continue
            if label in urgent_thresholds and score > urgent_thresholds[label]:
                urgent_total += score
            elif label in high_thresholds and score > high_thresholds[label]:
                high_total += score

        # Bucket accumulated scores into the final urgency level.
        if urgent_total > 0.4:
            return "urgent"
        if high_total > 0.3 or urgent_total > 0.2:
            return "high"
        return "normal"
    except Exception as e:
        print(f"Error in emotion analysis: {str(e)}")
        return "normal"
| |
|
def improve_task_description(text):
    """Structure a raw task description via the SambaNova LLM API.

    Combines the emotion-based urgency estimate with the model's own
    assessment (the model may only *escalate* the level, never lower it).
    Returns a (description, urgency) tuple; raises Exception on failure.
    """
    try:
        emotion_urgency = analyze_emotion(text)

        prompt = f"""Please analyze and structure this task description, including determining its urgency level.

Original task: {text}

Initial emotion-based urgency assessment: {emotion_urgency}

Please provide:
1. A clear, concise task title
2. Key objectives
3. Suggested deadline (if not specified)
4. Any important details or requirements
5. Urgency level assessment (choose one: normal, high, urgent) based on:
- Time-sensitive language (ASAP, immediately, urgent, etc.)
- Deadlines mentioned
- Impact and consequences described
- Business criticality
- Emotional context and tone

Format the response with "URGENCY_LEVEL: [level]" as the first line, followed by the structured description.
Consider the emotion-based urgency assessment provided above when making the final urgency determination.
"""

        headers = {
            'Authorization': f'Bearer {SAMBANOVA_API_KEY}',
            'Content-Type': 'application/json'
        }
        payload = {
            'messages': [
                {'role': 'user', 'content': prompt}
            ],
            'model': 'Meta-Llama-3.1-8B-Instruct',
            'max_tokens': 2000,
            'temperature': 0.7
        }

        # NOTE(review): verify=False disables TLS certificate checking for this
        # call -- confirm whether that is really required for this endpoint.
        response = requests.post(
            SAMBANOVA_URL,
            headers=headers,
            json=payload,
            verify=False,
            timeout=620
        )

        if response.status_code != 200:
            raise Exception(f"SambaNova API request failed: {response.text}")

        response_text = response.json()['choices'][0]['message']['content']

        lines = response_text.split('\n')
        first_line = lines[0].strip()

        # Start from the emotion-based level; only escalate from there.
        urgency = emotion_urgency
        if first_line.startswith("URGENCY_LEVEL:"):
            model_level = first_line.split(":")[1].strip().lower()
            if model_level in ["normal", "high", "urgent"]:
                rank = {"normal": 0, "high": 1, "urgent": 2}
                if rank[model_level] > rank[emotion_urgency]:
                    urgency = model_level
            description = '\n'.join(lines[1:]).strip()
        else:
            description = response_text

        return description, urgency
    except Exception as e:
        raise Exception(f"Error improving task description: {str(e)}")
| |
|
def create_trello_card(task_description, selected_members, location=None, urgency="normal"):
    """Create a Trello card with the improved task description.

    Args:
        task_description: Structured description; its first line becomes the title.
        selected_members: Iterable of Trello member ids to assign to the card.
        location: Optional free-text location; defaults to "Remote/Virtual".
        urgency: "normal", "high" or "urgent"; drives the title marker,
            priority text and label colour.

    Returns:
        The URL of the created card.

    Raises:
        Exception: if no board/list is available or card creation fails.
    """
    try:
        boards = trello_client.list_boards()
        if not boards:
            raise Exception("No Trello boards found")

        # Cards always go to the first list of the first board.
        board = boards[0]
        print(f"Using board: {board.name}")

        lists = board.list_lists()
        if not lists:
            raise Exception("No lists found in the board")

        todo_list = lists[0]
        print(f"Using list: {todo_list.name}")

        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
        title = task_description.split('\n')[0]

        urgency_markers = {
            "normal": "📘",
            "high": "⚠️",
            "urgent": "🔴"
        }
        urgency_marker = urgency_markers.get(urgency.lower(), "📘")
        formatted_title = f"[{timestamp}] {urgency_marker} {title}"

        # FIX: removed dead `location_coords` variable -- it was always None,
        # making the old `card.set_pos(location_coords)` branch unreachable.
        location_text = location if location else "Remote/Virtual"

        urgency_status = {
            "normal": "Normal Priority",
            "high": "High Priority",
            "urgent": "URGENT"
        }
        status_text = urgency_status.get(urgency.lower(), "Normal Priority")

        formatted_description = f"""🎯 TASK DETAILS
------------------------
{task_description}

📋 METADATA
------------------------
🕒 Created: {timestamp}
🏷️ Source: TaskWhisper AI
⚡ Priority: {status_text}
📍 Location: {location_text}

✅ CHECKLIST
------------------------
- [ ] Task reviewed
- [ ] Requirements clear
- [ ] Timeline confirmed
- [ ] Resources identified

📝 NOTES
------------------------
Add your progress notes here...
"""

        card = todo_list.add_card(
            name=formatted_title,
            desc=formatted_description
        )

        # Tag the card with the board label matching the urgency colour, if any.
        available_labels = board.get_labels()
        urgency_colors = {
            "normal": "blue",
            "high": "yellow",
            "urgent": "red"
        }
        label_color = urgency_colors.get(urgency.lower(), "blue")

        priority_label = next((label for label in available_labels if label.color == label_color), None)
        if priority_label:
            card.add_label(priority_label)
        else:
            print(f"Warning: {label_color} label not found on board")

        # Assign each requested member; skip (with a warning) ids not on the board.
        if selected_members:
            for member_id in selected_members:
                try:
                    member = next((m for m in board.get_members() if m.id == member_id), None)
                    if member:
                        card.add_member(member)
                    else:
                        print(f"Warning: Member with ID {member_id} not found on board")
                except Exception as e:
                    print(f"Error adding member {member_id}: {str(e)}")

        return card.url
    except Exception as e:
        print(f"Trello card creation error details: {str(e)}")
        raise Exception(f"Error creating Trello card: {str(e)}")
| |
|
def process_input(input_text, selected_members):
    """Improve the task text, create a Trello card, and return a summary.

    Returns a human-readable report string; on failure returns an error
    string instead of raising.
    """
    try:
        improved_description, urgency = improve_task_description(input_text)

        card_url = create_trello_card(improved_description, selected_members, urgency=urgency)

        # Map the selected member ids back to display names for the summary.
        members_dict = get_trello_members()
        assigned = []
        for name, member_id in members_dict.items():
            if member_id in selected_members:
                assigned.append(name)

        urgency_emoji = {"normal": "📘", "high": "⚠️", "urgent": "🔴"}

        return f"""
Original Input:
--------------
{input_text}

Improved Task Description:
------------------------
{improved_description}

Task Created in Trello:
----------------------
Priority: {urgency_emoji.get(urgency, "📘")} {urgency.upper()}
Assigned to: {', '.join(assigned) if assigned else 'Not assigned'}
Card URL: {card_url}
"""
    except Exception as e:
        return f"Error processing input: {str(e)}"
| |
|
def process_audio(audio_file, selected_members):
    """Transcribe an audio file and hand the text to process_input."""
    try:
        # Guard clause: nothing to transcribe.
        if audio_file is None:
            return "Error: No audio file or text provided"

        print(f"Audio file type: {type(audio_file)}")
        print(f"Audio file content: {audio_file}")

        transcript = transcribe_audio(audio_file)
        return process_input(transcript, selected_members)
    except Exception as e:
        print(f"Audio processing error details: {str(e)}")
        return f"Error processing audio: {str(e)}"
| |
|
def process_audio_with_members(audio, selected_members):
    """Resolve member display names to ids, then transcribe and process audio.

    Maps known Speech Recognition failures to friendlier messages; always
    returns a string (never raises).
    """
    try:
        if audio is None:
            return "Error: Please provide an audio input (record or upload)"

        print(f"Received audio input: {type(audio)}")
        print(f"Audio content: {audio}")

        # Translate the dropdown's display names into Trello member ids.
        members_dict = get_trello_members()
        member_ids = []
        for display_name in (selected_members or []):
            if display_name in members_dict:
                member_ids.append(members_dict[display_name])
            else:
                print(f"Warning: Member {display_name} not found in members dictionary")

        try:
            return process_audio(audio, member_ids)
        except Exception as e:
            error_msg = str(e)
            if "Speech could not be understood" in error_msg:
                return "Could not understand the speech. Please try again with clearer audio."
            if "Could not request results" in error_msg:
                return "Network error. Please check your internet connection and try again."
            return f"Error processing audio: {error_msg}"

    except Exception as e:
        print(f"Error in process_audio_with_members: {str(e)}")
        return f"Error processing audio with members: {str(e)}"
| |
|
def process_text_with_members(text, selected_members):
    """Resolve member display names to ids, then process the typed task text.

    Always returns a string (never raises).
    """
    try:
        members_dict = get_trello_members()

        print(f"Members dict: {members_dict}")
        print(f"Selected members: {selected_members}")

        # Translate the dropdown's display names into Trello member ids.
        member_ids = []
        for display_name in (selected_members or []):
            if display_name in members_dict:
                member_ids.append(members_dict[display_name])
            else:
                print(f"Warning: Member {display_name} not found in members dictionary")

        return process_input(text, member_ids)
    except Exception as e:
        print(f"Error in process_text_with_members: {str(e)}")
        return f"Error processing text with members: {str(e)}"
| |
|
| | |
# ---------------------------------------------------------------------------
# Gradio UI: two input tabs (audio / text), each with its own member-assignment
# dropdown, sharing a single output textbox for the task summary.
# ---------------------------------------------------------------------------
with gr.Blocks(title="TaskWhisper - Smart Task Manager") as interface:
    gr.Markdown("# 🎙️ TaskWhisper - Smart Task Manager")
    gr.Markdown("Record audio or type your task. The AI will help improve and structure your task description.")

    # Fetched once at UI build time; dropdown choices are display names, which
    # the process_*_with_members handlers map back to Trello member ids.
    members = get_trello_members()

    with gr.Tab("Audio Input"):
        audio_input = gr.Audio(
            label="Record or Upload Audio",
            sources=["microphone", "upload"],
            type="filepath",  # handlers receive a path on disk, not raw samples
            format="wav",
            interactive=True
        )
        gr.Markdown("""
*Instructions:*
- Use microphone to record directly
- Or upload an audio file (WAV format)
- Speak clearly for better results
- Keep background noise minimal
""")
        member_dropdown_audio = gr.Dropdown(
            choices=list(members.keys()),
            multiselect=True,
            label="Assign to Members",
            info="Select one or more members to assign the task",
            value=[]
        )
        audio_button = gr.Button("Process Audio")

    with gr.Tab("Text Input"):
        text_input = gr.Textbox(
            lines=3,
            placeholder="Type your task here (e.g., 'Need to prepare quarterly report with sales data by next Friday')",
            label="Text Input"
        )
        member_dropdown_text = gr.Dropdown(
            choices=list(members.keys()),
            multiselect=True,
            label="Assign to Members",
            info="Select one or more members to assign the task",
            value=[]
        )
        text_button = gr.Button("Process Text")

    # Shared output area for both tabs.
    output = gr.Textbox(
        label="Task Details",
        lines=15
    )

    # Wire each tab's button to its handler.
    audio_button.click(
        fn=process_audio_with_members,
        inputs=[audio_input, member_dropdown_audio],
        outputs=output
    )

    text_button.click(
        fn=process_text_with_members,
        inputs=[text_input, member_dropdown_text],
        outputs=output
    )
| |
|
# Launch the Gradio app; share=True also exposes a temporary public URL.
if __name__ == "__main__":
    interface.launch(share=True)