| |
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
|
|
|
|
| |
|
|
|
|
| |
| |
| |
| |
|
|
|
|
| |
|
|
|
|
import asyncio
import difflib
import hashlib
import json
import logging
import os
import re
import shutil
import socket
import sqlite3
import tempfile
import time
import uuid
from datetime import datetime, timedelta
from pathlib import Path

import fastapi
import gradio as gr
import markdown
import nest_asyncio
import numpy as np
import openai
import pandas as pd
import requests
import uvicorn
import yaml
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from fpdf import FPDF
from google.cloud import storage, exceptions
from google.cloud.exceptions import NotFound
from google.oauth2 import service_account
from openai import OpenAI
from reportlab.lib import colors
from reportlab.lib.pagesizes import letter
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen import canvas
from requests.auth import HTTPBasicAuth
from todoist_api_python.api import TodoistAPI
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read
from twilio.rest import Client
from twilio.twiml.messaging_response import MessagingResponse
|
|
| |
# Root logging config at DEBUG so the workflow tracing below is visible.
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)


# --- Required credentials; a missing variable raises KeyError at import time ---
openai_api_key = os.environ["OPENAI_API_KEY"]
todoist_api_key = os.environ["TODOIST_API_KEY"]
EVERNOTE_API_TOKEN = os.environ["EVERNOTE_API_TOKEN"]

# NOTE(review): the env-var names spell Twilio as "TWILLO"; deployments must
# use that exact spelling (renaming here would break existing configs).
account_sid = os.environ["TWILLO_ACCOUNT_SID"]
auth_token = os.environ["TWILLO_AUTH_TOKEN"]
twilio_phone_number = os.environ["TWILLO_PHONE_NUMBER"]

# Raw JSON string of a GCP service-account key (parsed in save_to_google_storage).
# NOTE(review): GOOGLE_APPLICATION_CREDENTIALS conventionally holds a file
# path, but this code treats it as inline JSON — confirm deployment matches.
google_credentials_json = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
twillo_client = Client(account_sid, auth_token)
|
|
| |
|
|
| |
def load_reasoning_json(filepath):
    """Read a JSON file from disk and return its parsed contents."""
    with open(filepath, "r") as fh:
        return json.load(fh)
|
|
| |
def load_action_map(filepath):
    """Load an action-map JSON file and resolve each function name string
    to the callable of the same name in this module's globals."""
    with open(filepath, "r") as fh:
        raw_map = json.load(fh)
    resolved = {}
    for action, func_name in raw_map.items():
        resolved[action] = globals()[func_name]
    return resolved
|
|
|
|
| |
|
|
|
|
| |
|
|
def find_reference(task_topic):
    """Stub: pretend to look up a reference for the given topic."""
    print(f"Finding reference for topic: {task_topic}")
    return f"Reference found for topic: {task_topic}"
|
|
def generate_summary(reference):
    """Stub: produce a placeholder summary of a reference."""
    print(f"Generating summary for reference: {reference}")
    return f"Summary of {reference}"
|
|
def suggest_relevance(summary):
    """Stub: relate a summary back to the current project."""
    print(f"Suggesting relevance of summary: {summary}")
    return f"Relevance of {summary} suggested"
|
|
def tool_research(task_topic):
    """Stub: return canned competitive-tool analysis data."""
    print("Performing tool research")
    return "Tool analysis data"
|
|
def generate_comparison_table(tool_analysis):
    """Stub: build a placeholder comparison table from an analysis."""
    print(f"Generating comparison table for analysis: {tool_analysis}")
    return f"Comparison table for {tool_analysis}"
|
|
def generate_integration_memo(tool_analysis):
    """Stub: build a placeholder integration memo from an analysis."""
    print(f"Generating integration memo for analysis: {tool_analysis}")
    return f"Integration memo for {tool_analysis}"
|
|
def analyze_issue(task_topic):
    """Stub: return canned issue-analysis data."""
    print("Analyzing issue")
    return "Issue analysis data"
|
|
def generate_issue_memo(issue_analysis):
    """Stub: build a placeholder memo from an issue analysis."""
    print(f"Generating issue memo for analysis: {issue_analysis}")
    return f"Issue memo for {issue_analysis}"
|
|
def list_ideas(task_topic):
    """Stub: return a fixed list of brainstorming ideas."""
    print("Listing ideas")
    return ["Idea 1", "Idea 2", "Idea 3"]
|
|
def construct_matrix(ideas):
    """Stub: return a fixed impact/effort matrix for the ideas."""
    print(f"Constructing matrix for ideas: {ideas}")
    return {"Idea 1": "High Impact/Low Effort", "Idea 2": "Low Impact/High Effort", "Idea 3": "High Impact/High Effort"}
|
|
def prioritize_ideas(matrix):
    """Stub: return a fixed priority ordering of the ideas."""
    print(f"Prioritizing ideas based on matrix: {matrix}")
    return ["Idea 3", "Idea 1", "Idea 2"]
|
|
def setup_action_plan(prioritized_ideas):
    """Stub: acknowledge an action plan for the prioritized ideas."""
    print(f"Setting up action plan for ideas: {prioritized_ideas}")
    return f"Action plan created for {prioritized_ideas}"
|
|
def unsupported_task(task_topic):
    """Fallback handler for actions missing from the action map."""
    print("Task not supported")
    return "Unsupported task"
|
|
|
|
| |
|
|
|
|
# Module-level Todoist client shared by the task helpers below.
todoist_api = TodoistAPI(todoist_api_key)
|
|
| |
def fetch_todoist_task():
    """Return a one-line description of the most recent Todoist task.

    Falls back to an informational string when there are no tasks, and to an
    error string when the API call fails.
    """
    try:
        tasks = todoist_api.get_tasks()
        if not tasks:
            return "No tasks found in Todoist."
        return f"Recent Task: {tasks[0].content}"
    except Exception as e:
        return f"Error fetching tasks: {str(e)}"
|
|
def add_to_todoist(task_topic, todoist_priority=3):
    """Create a Todoist task with the given content and priority.

    Returns a human-readable status message in both the success and
    failure cases (never raises).
    """
    try:
        todoist_api.add_task(
            content=task_topic,
            priority=todoist_priority,
        )
        status = f"Task added: {task_topic} with priority {todoist_priority}"
        logger.debug(status)
        return status
    except Exception as e:
        return f"An error occurred: {e}"
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
|
|
| |
| |
| |
| |
|
|
|
|
| |
|
|
|
|
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
def add_to_evernote(task_topic):
    """Stub: report that a note was created in Evernote (no API call is made)."""
    return f"Task added to Evernote with title '{task_topic}'."
|
|
|
|
| |
|
|
|
|
| |
# Workflow decision tree and the mapping from action names to the callables
# defined above; both are loaded from JSON files at import time.
TASK_WORKFLOW_TREE = load_reasoning_json('curify_ideas_reasoning.json')
action_map = load_action_map('action_map.json')
|
|
| |
|
|
|
|
def generate_task_hash(task_description):
    """Return the hex MD5 digest of the task description.

    Non-string inputs are coerced with str(); on any unexpected failure the
    literal fallback 'output' is returned so document naming still works.
    """
    try:
        if not isinstance(task_description, str):
            logger.warning("task_description is not a string, attempting conversion.")
            task_description = str(task_description)

        digest = hashlib.md5(task_description.encode("utf-8", errors="ignore")).hexdigest()
        logger.debug(f"Generated task hash: {digest}")
        return digest
    except Exception as e:
        logger.error(f"Error generating task hash: {e}", exc_info=True)
        return 'output'
| |
def save_to_google_storage(bucket_name, file_path, destination_blob_name, expiration_minutes=1440):
    """Upload a local file to Google Cloud Storage and return a V4 signed URL.

    The bucket is created on demand when it does not exist; the signed URL
    expires after `expiration_minutes` (default 24 hours).
    """
    # Build an authenticated client from the service-account JSON held in the env.
    sa_info = json.loads(google_credentials_json)
    creds = service_account.Credentials.from_service_account_info(sa_info)
    gcs_client = storage.Client(credentials=creds, project=creds.project_id)

    try:
        bucket = gcs_client.get_bucket(bucket_name)
    except NotFound:
        # Missing bucket is recoverable: create it and continue.
        print(f"❌ Bucket '{bucket_name}' not found. Please check the bucket name.")
        bucket = gcs_client.create_bucket(bucket_name)
        print(f"✅ Bucket '{bucket_name}' created.")
    except Exception as e:
        print(f"❌ An unexpected error occurred: {e}")
        raise

    # Upload, then mint a time-limited read-only link.
    blob = bucket.blob(destination_blob_name)
    blob.upload_from_filename(file_path)
    signed_url = blob.generate_signed_url(
        version="v4",
        expiration=timedelta(minutes=expiration_minutes),
        method="GET",
    )
    print(f"✅ File uploaded to Google Cloud Storage. Signed URL: {signed_url}")
    return signed_url
|
|
|
|
| |
def is_simplified(text):
    """Return True when more than half of the characters are CJK ideographs.

    NOTE(review): the range U+4E00-U+9FFF covers both simplified and
    traditional Chinese, so this does not actually distinguish the two —
    confirm whether script detection (e.g. via a converter lib) was intended.
    """
    han_re = re.compile('[\u4e00-\u9fff]')
    han_count = sum(1 for ch in text if han_re.match(ch))
    return han_count > len(text) * 0.5
|
|
| |
def choose_font_for_content(content):
    """Pick the Simplified-Chinese font when the text is majority-CJK,
    otherwise the Traditional-Chinese font (which also covers Latin text)."""
    if is_simplified(content):
        return 'NotoSansSC'
    return 'NotoSansTC'
|
|
| |
def generate_document(task_description, md_content, user_name='jayw', bucket_name='curify'):
    """Render md_content (a dict of section -> text or list) to a PDF, upload
    it to GCS, and return a signed download URL.

    The PDF is named after the MD5 hash of task_description so repeated runs
    of the same task overwrite the same object.
    Raises RuntimeError when the required Noto font files cannot be loaded.
    """
    logger.debug("Starting to generate document")

    task_hash = generate_task_hash(task_description)
    # MD5 hex is 32 chars; 64 is a defensive cap in case the hash source changes.
    max_hash_length = 64
    truncated_hash = task_hash[:max_hash_length]

    local_filename = f"{truncated_hash}.pdf"
    c = canvas.Canvas(local_filename, pagesize=letter)

    # Register CJK-capable fonts up front; fail fast if the .ttf files are missing.
    sc_font_path = 'NotoSansSC-Regular.ttf'
    tc_font_path = 'NotoSansTC-Regular.ttf'
    try:
        pdfmetrics.registerFont(TTFont('NotoSansSC', sc_font_path))
        pdfmetrics.registerFont(TTFont('NotoSansTC', tc_font_path))
        c.setFont('NotoSansSC', 12)
    except Exception as e:
        logger.error(f"Error loading font files: {e}")
        raise RuntimeError("Failed to load one or more fonts. Ensure the font files are accessible.")

    y_position = 750  # start near the top of a letter-sized page

    for key, value in md_content.items():
        # Section heading.
        c.setFont(choose_font_for_content(key), 14)
        c.drawString(100, y_position, f"# {key}")
        y_position -= 20

        # Section body; pick a font based on the content's script.
        c.setFont(choose_font_for_content(str(value)), 12)
        if isinstance(value, list):
            for item in value:
                c.drawString(100, y_position, f"- {item}")
                y_position -= 15
        else:
            # BUG FIX: coerce to str — drawString raises on non-string values,
            # and execution_results may contain non-text entries.
            c.drawString(100, y_position, str(value))
            y_position -= 15

        # Start a new page before running off the bottom margin.
        if y_position < 100:
            c.showPage()
            c.setFont('NotoSansSC', 12)
            y_position = 750

    c.save()

    # Upload under a per-user prefix and hand back the signed URL.
    destination_blob_name = f"{user_name}/{truncated_hash}.pdf"
    public_url = save_to_google_storage(bucket_name, local_filename, destination_blob_name)
    logger.debug("Finished generating document")
    return public_url
| |
| |
|
|
|
|
def execute_with_retry(sql, params=(), attempts=5, delay=1, db_name='curify_ideas.db'):
    """Execute a single SQL statement against the SQLite database, retrying
    when the database is locked.

    Retries up to `attempts` times with `delay` seconds between tries when
    SQLite reports "database is locked"; any other OperationalError (or the
    final locked attempt) propagates to the caller.
    """
    for attempt in range(attempts):
        try:
            with sqlite3.connect(db_name) as conn:
                cursor = conn.cursor()
                cursor.execute(sql, params)
                conn.commit()
            break
        except sqlite3.OperationalError as e:
            if "database is locked" in str(e) and attempt < attempts - 1:
                time.sleep(delay)
            else:
                # Bare raise preserves the original traceback (unlike `raise e`).
                raise
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
|
|
| |
|
|
|
|
def convert_to_listed_json(input_string):
    """Extract the first '['-to-last-']' span of input_string and parse it as JSON.

    Parameters:
        input_string (str): Text expected to contain a JSON array somewhere inside.

    Returns:
        list | None: The parsed JSON value, or None when no bracketed span
        exists or the span is not valid JSON.
    """
    try:
        trimmed_string = input_string[input_string.index('['):input_string.rindex(']') + 1]
        return json.loads(trimmed_string)
    except ValueError:
        # BUG FIX: str.index/rindex raise ValueError when the brackets are
        # absent; previously that escaped uncaught. json.JSONDecodeError is a
        # ValueError subclass, so malformed JSON is covered by the same clause.
        return None
| |
|
|
def validate_and_extract_json(json_string):
    """Extract the first '{'-to-last-'}' span of json_string and parse it as JSON.

    Args:
        json_string (str): Text expected to contain a JSON object somewhere inside
            (e.g. an LLM response with surrounding prose).

    Returns:
        dict | None: The parsed object, or None when no braced span exists or
        the span is not valid JSON.
    """
    try:
        trimmed_string = json_string[json_string.index('{'):json_string.rindex('}') + 1]
        return json.loads(trimmed_string)
    except ValueError:
        # BUG FIX: str.index/rindex raise ValueError when braces are missing;
        # previously that escaped uncaught. json.JSONDecodeError subclasses
        # ValueError, so malformed JSON takes the same path.
        return None
|
|
def json_to_pandas(dat_json, dat_schema=None):
    """Convert a list of JSON records into a DataFrame.

    Parameters:
        dat_json: Parsed JSON (typically a list of dicts) to tabulate.
        dat_schema (dict | None): Fallback single-row schema used when
            conversion fails; defaults to {'name': "", 'description': ""}.
            BUG FIX: the default was previously a shared mutable dict
            argument; None is used as a sentinel instead.

    Returns:
        pd.DataFrame: The converted table, or a one-row fallback frame.
    """
    if dat_schema is None:
        dat_schema = {'name': "", 'description': ""}
    try:
        return pd.DataFrame(dat_json)
    except Exception:
        return pd.DataFrame([dat_schema])
|
|
|
|
| |
|
|
|
|
# Shared OpenAI client; reads the same OPENAI_API_KEY validated at import time.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)
|
|
| |
def call_openai_api(prompt, model="gpt-4o", max_tokens=5000, retries=3, backoff_factor=2):
    """Send a prompt to the OpenAI chat API with retry/backoff handling.

    Parameters:
        prompt (str): The user input or task prompt to send to the model.
        model (str): The OpenAI model to use (default "gpt-4o").
        max_tokens (int): The maximum number of tokens in the response.
        retries (int): Number of attempts for transient errors.
        backoff_factor (int): Multiplier for the linear backoff between attempts.

    Returns:
        str: The model's response content, stripped of surrounding whitespace.

    Raises:
        RuntimeError: When no successful response is obtained within `retries`.
    """
    for attempt in range(1, retries + 1):
        try:
            # BUG FIX: model and max_tokens were hard-coded to "gpt-4o"/5000,
            # silently ignoring the caller's arguments; pass them through.
            response = client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=max_tokens,
            )
            return response.choices[0].message.content.strip()

        except (openai.RateLimitError, openai.APIConnectionError) as e:
            # Transient: worth retrying after a backoff.
            logging.warning(f"Transient error: {e}. Attempt {attempt} of {retries}. Retrying...")
        except (openai.BadRequestError, openai.AuthenticationError) as e:
            # Unrecoverable: retrying the same request cannot succeed.
            logging.error(f"Unrecoverable error: {e}. Check your inputs or API key.")
            break
        except Exception as e:
            logging.error(f"Unexpected error: {e}. Attempt {attempt} of {retries}. Retrying...")

        if attempt < retries:
            time.sleep(backoff_factor * attempt)  # linear backoff: 2s, 4s, ...

    raise RuntimeError(f"Failed to fetch response from OpenAI API after {retries} attempts.")
|
|
def fn_analyze_task(project_context, task_description):
    """Ask the LLM to classify a task against the known projects.

    Returns a dict with keys description, project_association, is_task,
    is_document, task_category and task_topic (per the prompt's requested
    schema), or None when the response contains no parseable JSON object.
    """
    prompt = (
        f"You are working in the context of {project_context}. "
        f"Your task is to analyze the task: {task_description} "
        "Please analyze the following aspects: "
        "1) Determine which project this item belongs to. If the idea does not belong to any existing project, categorize it under 'Other'. "
        "2) Assess whether this idea can be treated as a concrete task. "
        "3) Evaluate whether a document can be generated as an intermediate result. "
        "4) Identify the appropriate category of the task. Possible categories are: 'Blogs/Papers', 'Tools', 'Brainstorming', 'Issues', and 'Others'. "
        "5) Extract the topic of the task. "
        "Please provide the output in JSON format using the structure below: "
        "{"
        " \"description\": \"\", "
        " \"project_association\": \"\", "
        " \"is_task\": \"Yes/No\", "
        " \"is_document\": \"Yes/No\", "
        " \"task_category\": \"\", "
        " \"task_topic\": \"\" "
        "}"
    )
    res_task_analysis = call_openai_api(prompt)

    try:
        # Pull the first {...} object out of the (possibly prose-wrapped) reply.
        json_task_analysis = validate_and_extract_json(res_task_analysis)

        return json_task_analysis
    except ValueError as e:
        # NOTE(review): validate_and_extract_json returns None rather than
        # raising, so this handler looks unreachable — confirm before relying on it.
        logger.debug("ValueError occurred: %s", str(e), exc_info=True)
        return None
|
|
|
|
| |
|
|
| |
def fn_process_task(project_desc_table, task_description, bucket_name='curify'):
    """Analyze a task, walk the workflow tree executing actions, and build deliverables.

    Returns a 3-tuple (task_analysis dict, execution_status DataFrame,
    execution_results dict); when the LLM analysis comes back empty, returns
    ({}, empty DataFrame, {}).
    """
    # Serialize the project table so the LLM sees it as plain-text context.
    project_context = project_desc_table.to_string(index=False)
    task_analysis = fn_analyze_task(project_context, task_description)

    if task_analysis:
        execution_status = []
        # Seed results with the analysis fields; workflow outputs are merged in.
        execution_results = task_analysis.copy()
        execution_results['deliverables'] = ''

        # Recursive walk of TASK_WORKFLOW_TREE: "check" nodes branch on a value
        # already in execution_results; "action" nodes invoke an action_map
        # callable and record its output.
        def traverse(node, previous_output=None):
            if not node:
                return

            if "check" in node:
                # Branch: follow the child keyed by the checked value, falling
                # back to the node's "default" child.
                if node["check"] in execution_results:
                    value = execution_results[node["check"]]
                    traverse(node.get(value, node.get("default")), previous_output)
                else:
                    logger.error(f"Key '{node['check']}' not found in execution_results.")
                    return

            elif "action" in node:
                action_name = node["action"]
                # The action's single input defaults to the extracted task topic.
                input_key = node.get("input", 'task_topic')

                if input_key in execution_results.keys():
                    inputs = {input_key: execution_results[input_key]}
                else:
                    logger.error(f"Workflow action {action_name} input key {input_key} not in execution_results.")
                    return

                logger.debug(f"Executing: {action_name} with inputs: {inputs}")

                # Unknown action names degrade to the unsupported_task stub.
                action_func = action_map.get(action_name, unsupported_task)
                try:
                    output = action_func(**inputs)
                except Exception as e:
                    logger.error(f"Error executing action '{action_name}': {e}")
                    return

                execution_status.append({"action": action_name, "output": output})

                if 'output' in node:
                    # Named outputs feed later "check"/"action" nodes.
                    execution_results[node['output']] = output
                else:
                    # Unnamed outputs accumulate into the final deliverable.
                    # NOTE(review): '+=' assumes output is a str; a list output
                    # (e.g. list_ideas) would raise TypeError here — confirm the
                    # workflow JSON always names non-string outputs.
                    execution_results['deliverables'] += output

                if "next" in node and node["next"]:
                    traverse(node["next"], previous_output)

        try:
            traverse(TASK_WORKFLOW_TREE["start"])
            execution_results['doc_url'] = generate_document(task_description, execution_results)
        except Exception as e:
            logger.error(f"Traverse Error: {e}")
        finally:
            # Returning from `finally` guarantees a 3-tuple result even after a
            # logged traverse failure (and would swallow any in-flight exception).
            return task_analysis, pd.DataFrame(execution_status), execution_results
    else:
        logger.error("Empty task analysis.")
        return {}, pd.DataFrame(), {}
|
|
| |
|
|
|
|
| |
# In-memory idea store; not written to anywhere in this file (placeholder).
ideas_df = pd.DataFrame(columns=["Idea ID", "Content", "Tags"])
|
|
def extract_ideas(context, text):
    """Extract project ideas from text, with or without a context, via the LLM.

    Parameters:
        context (str): Context of the extraction; may be empty/falsy.
        text (str): Text to extract ideas from.

    Returns:
        str: The raw LLM response, expected to contain a JSON list of
        {name, description} objects (parsed later by convert_to_listed_json).
    """
    if context:
        # Context-aware variant of the prompt.
        prompt = (
            f"You are working in the context of {context}. "
            "Please extract the ongoing projects with project name and description."
            "Please only the listed JSON as output string."
            f"Ongoing projects: {text}"
        )
    else:
        # Context-free variant.
        prompt = (
            "Given the following information about the user."
            "Please extract the ongoing projects with project name and description."
            "Please only the listed JSON as output string."
            f"Ongoing projects: {text}"
        )

    return call_openai_api(prompt)
|
|
def df_to_string(df, empty_message=''):
    """Render a DataFrame without its index, or return empty_message when it has no rows.

    Parameters:
        df (pd.DataFrame): The DataFrame to render.
        empty_message (str): Returned verbatim when df is empty.

    Returns:
        str: df.to_string(index=False) or empty_message.
    """
    return empty_message if df.empty else df.to_string(index=False)
|
|
|
|
| |
|
|
|
|
| |
# Cross-request state shared between the Gradio UI and the WhatsApp webhook.
shared_state = {"project_desc_table": pd.DataFrame(), "task_analysis_txt": "", "execution_status": pd.DataFrame(), "execution_results": {}}
|
|
| |
def fetch_updated_state():
    """Log each shared-state entry and return the state as a 4-tuple:
    (project_desc_table, task_analysis_txt, execution_status, execution_results)."""
    for name, val in shared_state.items():
        if isinstance(val, pd.DataFrame):
            logger.debug(f"{name}: DataFrame:\n{val.to_string()}")
        elif isinstance(val, dict):
            logger.debug(f"{name}: Dictionary: {val}")
        elif isinstance(val, str):
            logger.debug(f"{name}: String: {val}")
        else:
            logger.debug(f"{name}: Unsupported type: {val}")
    return (
        shared_state['project_desc_table'],
        shared_state['task_analysis_txt'],
        shared_state['execution_status'],
        shared_state['execution_results'],
    )
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
def update_gradio_state(project_desc_table, task_analysis_txt, execution_status, execution_results):
    """Persist the latest UI artifacts into the module-level shared_state; returns True."""
    shared_state.update(
        project_desc_table=project_desc_table,
        task_analysis_txt=task_analysis_txt,
        execution_status=execution_status,
        execution_results=execution_results,
    )
    return True
|
|
|
|
| |
|
|
|
|
| |
| |
|
|
| |
| |
|
|
| |
| |
def project_extraction(project_description):
    """Extract projects from free text into a DataFrame and reset shared state.

    Runs the LLM extraction, parses its JSON list, tabulates it, and clears
    any previous task analysis/execution state.
    """
    raw_projects = extract_ideas('AI-powered tools for productivity', project_description)
    listed_projects = convert_to_listed_json(raw_projects)

    table = json_to_pandas(listed_projects)
    update_gradio_state(table, "", pd.DataFrame(), {})
    return table
|
|
|
|
| |
|
|
|
|
| |
|
|
| |
|
|
| |
| |
|
|
| |
|
|
def message_back(task_message, execution_status, doc_url, from_whatsapp):
    """Send a WhatsApp summary of an executed task back to the sender.

    Parameters:
        task_message: The user's processed input text.
        execution_status: DataFrame of {"action", "output"} rows from fn_process_task.
        doc_url: Signed URL of the generated PDF (or a failure note).
        from_whatsapp: Recipient address taken from the inbound message's "From".

    Raises:
        HTTPException(500) when Twilio rejects the send.
    """
    # Number the executed workflow steps for readability.
    task_steps_list = "\n".join(
        [f"{i + 1}. {step['action']} - {step.get('output', '')}" for i, step in enumerate(execution_status.to_dict(orient="records"))]
    )

    # WhatsApp renders *...* as bold.
    body_message = (
        f"*Task Message:*\n{task_message}\n\n"
        f"*Execution Status:*\n{task_steps_list}\n\n"
        f"*Doc URL:*\n{doc_url}\n\n"
    )

    # Send via the module-level Twilio client.
    try:
        twillo_client.messages.create(
            from_=twilio_phone_number,
            to=from_whatsapp,
            body=body_message
        )
    except Exception as e:
        logger.error(f"Twilio Error: {e}")
        raise HTTPException(status_code=500, detail=f"Error sending WhatsApp message: {str(e)}")

    return {"status": "success"}
|
|
| |
# Whisper ASR pipeline, loaded once at import time (downloads the model on first use).
whisper_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-medium")
|
|
| |
def transcribe_audio_from_media_url(media_url):
    """Download a Twilio media attachment and transcribe it with Whisper.

    Returns the transcription text, or None on any failure (download error,
    non-2xx status, or transcription error).
    """
    audio_file_path = None
    try:
        # Twilio media URLs require basic auth with the account credentials.
        media_response = requests.get(media_url, auth=HTTPBasicAuth(account_sid, auth_token))
        media_response.raise_for_status()

        # BUG FIX: write to a unique temp file instead of the fixed
        # "temp_audio_file.mp3" so concurrent webhook requests cannot clobber
        # each other's audio; the file is always removed afterwards.
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
            tmp.write(media_response.content)
            audio_file_path = tmp.name

        transcription = whisper_pipeline(audio_file_path, return_timestamps=True)
        logger.debug(f"Transcription: {transcription['text']}")
        return transcription["text"]

    except Exception as e:
        logger.error(f"An error occurred: {e}")
        return None
    finally:
        if audio_file_path and os.path.exists(audio_file_path):
            os.remove(audio_file_path)
|
|
|
|
| |
|
|
|
|
# FastAPI app serving the JSON state endpoint and the WhatsApp webhook.
app = FastAPI()
|
|
@app.get("/state")
async def fetch_state():
    """Expose the raw shared state for debugging/inspection."""
    return shared_state
|
|
# BUG FIX: @app.route is a deprecated Starlette shim; register as a POST route.
@app.post("/whatsapp-webhook/")
async def whatsapp_webhook(request: Request):
    """Twilio WhatsApp webhook.

    Transcribes a voice note when present, runs the task workflow against the
    stored project table, persists the results to shared_state, and replies
    to the sender via WhatsApp.
    """
    form_data = await request.form()
    print("Received data:", form_data)

    incoming_msg = form_data.get("Body", "").strip()
    from_number = form_data.get("From", "")
    media_url = form_data.get("MediaUrl0", "")
    media_type = form_data.get("MediaContentType0", "")

    transcription = None
    if media_type.startswith("audio"):
        try:
            transcription = transcribe_audio_from_media_url(media_url)
        except Exception as e:
            return JSONResponse(
                {"error": f"Failed to process voice input: {str(e)}"}, status_code=500
            )

    # Prefer the voice transcription over the typed body when available.
    processed_input = transcription if transcription else incoming_msg
    logger.debug(f"Processed input: {processed_input}")

    try:
        # BUG FIX: fetch_updated_state() returns four values; the previous
        # two-name unpacking raised ValueError on every request.
        project_desc_table, _, _, _ = fetch_updated_state()

        # Without a project table there is nothing to analyze against.
        if project_desc_table.empty:
            return JSONResponse(content={})

        task_analysis_txt, execution_status, execution_results = fn_process_task(project_desc_table, processed_input)
        # BUG FIX: update_gradio_state takes four positional arguments; the
        # project table was previously omitted, raising TypeError.
        update_gradio_state(project_desc_table, task_analysis_txt, execution_status, execution_results)

        doc_url = 'Fail to generate doc'
        if 'doc_url' in execution_results:
            doc_url = execution_results['doc_url']

        response = message_back(processed_input, execution_status, doc_url, from_number)
        logger.debug(response)

        return JSONResponse(content=str(response))

    except Exception as e:
        logger.error(f"Error during task processing: {e}")
        return JSONResponse(content={"error": str(e)}, status_code=500)
|
|
| |
|
|
|
|
| |
def mock_login(email):
    """Mock login: any @gmail.com address succeeds and advances to onboarding.

    Returns (status message, login-page visibility update, onboarding-page
    visibility update) for the Gradio click handler.
    """
    if not email.endswith("@gmail.com"):
        return "❌ Invalid Gmail address. Please try again.", gr.update(), gr.update()
    return f"✅ Logged in as {email}", gr.update(visible=False), gr.update(visible=True)
|
|
| |
def onboarding_survey(role, industry, project_description):
    """Extract projects from the survey answers and advance to the integrations page.

    Only project_description is used today; role/industry are collected for later.
    """
    table = project_extraction(project_description)
    return table, gr.update(visible=False), gr.update(visible=True)
|
|
| |
def integrate_todoist():
    """Stub integration hook; always reports success."""
    return "✅ Successfully connected to Todoist!"
|
|
def integrate_evernote():
    """Stub integration hook; always reports success."""
    return "✅ Successfully connected to Evernote!"
|
|
def integrate_calendar():
    """Stub integration hook; always reports success."""
    return "✅ Successfully connected to Google Calendar!"
|
|
def load_svg_with_size(file_path, width="600px", height="400px"):
    """Read an SVG file and wrap its markup in a fixed-size scrollable div."""
    with open(file_path, "r", encoding="utf-8") as fh:
        svg_markup = fh.read()

    return f"""
    <div style="width: {width}; height: {height}; overflow: auto;">
        {svg_markup}
    </div>
    """
|
|
|
|
| |
|
|
|
|
| |
def create_gradio_interface(state=None):
    """Build the Gradio Blocks UI: login → onboarding → integrations → main app.

    The `state` parameter is currently unused; module-level shared_state
    carries data between the UI and the webhook instead.
    Returns the un-launched gr.Blocks object.
    """
    with gr.Blocks(
        # CSS keeps tables fluid and suppresses horizontal scrolling everywhere.
        css="""
        .gradio-table td {
            white-space: normal !important;
            word-wrap: break-word !important;
        }
        .gradio-table {
            width: 100% !important; /* Adjust to 100% to fit the container */
            table-layout: fixed !important; /* Fixed column widths */
            overflow-x: hidden !important; /* Disable horizontal scrolling */
        }
        .gradio-container {
            overflow-x: hidden !important; /* Disable horizontal scroll for entire container */
            padding: 0 !important; /* Remove any default padding */
        }
        .gradio-column {
            max-width: 100% !important; /* Ensure columns take up full width */
            overflow: hidden !important; /* Hide overflow to prevent horizontal scroll */
        }
        .gradio-row {
            overflow-x: hidden !important; /* Prevent horizontal scroll on rows */
        }
        """) as demo:

        # Step 1: mock Gmail login (only page visible at start).
        with gr.Group(visible=True) as login_page:
            gr.Markdown("### **1️⃣ Login with Gmail**")
            email_input = gr.Textbox(label="Enter your Gmail Address", placeholder="example@gmail.com")
            login_button = gr.Button("Login")
            login_result = gr.Textbox(label="Login Status", interactive=False, visible=False)

        # Step 2: onboarding survey (revealed after successful login).
        with gr.Group(visible=False) as onboarding_page:
            gr.Markdown("### **2️⃣ Tell Us About Yourself**")
            role = gr.Textbox(label="What is your role?", placeholder="e.g. Developer, Designer")
            industry = gr.Textbox(label="Which industry are you in?", placeholder="e.g. Software, Finance")
            project_description = gr.Textbox(label="Describe your project", placeholder="e.g. A task management app")
            submit_survey = gr.Button("Submit")

        # Step 3: third-party integrations (all stubbed).
        with gr.Group(visible=False) as integrations_page:
            gr.Markdown("### **3️⃣ Connect Integrations**")
            gr.Markdown("Click on the buttons below to connect each tool:")

            todoist_button = gr.Button("Connect to Todoist")
            todoist_result = gr.Textbox(label="Todoist Status", interactive=False, visible=False)

            evernote_button = gr.Button("Connect to Evernote")
            evernote_result = gr.Textbox(label="Evernote Status", interactive=False, visible=False)

            calendar_button = gr.Button("Connect to Google Calendar")
            calendar_result = gr.Textbox(label="Google Calendar Status", interactive=False, visible=False)

            skip_integrations = gr.Button("Skip ➡️")
            # NOTE(review): next_button has no click handler wired below — only
            # skip_integrations advances to the QR-code page. Confirm intended.
            next_button = gr.Button("Proceed to QR Code")

        # Step 4: main application page.
        with gr.Group(visible=False) as qr_code_page:
            gr.Markdown("## Curify: Unified AI Tools for Productivity")

            with gr.Tab("Curify Idea"):
                with gr.Row():
                    # Left column: WhatsApp QR code.
                    with gr.Column():
                        gr.Markdown("#### ** QR Code**")
                        svg_file_path = "qr.svg"
                        svg_content = load_svg_with_size(svg_file_path, width="200px", height="200px")
                        gr.HTML(svg_content)

                    # Middle column: project table + task entry.
                    with gr.Column():
                        gr.Markdown("## Projects Overview")
                        project_desc_table = gr.DataFrame(
                            type="pandas"
                        )

                        gr.Markdown("## Enter task message.")
                        idea_input = gr.Textbox(
                            label=None,
                            placeholder="Describe the task you want to execute (e.g., Research Paper Review)")

                        task_btn = gr.Button("Generate Task Steps")
                        fetch_state_btn = gr.Button("Fetch Updated State")

                    # Right column: analysis and execution output.
                    with gr.Column():
                        gr.Markdown("## Task analysis")
                        task_analysis_txt = gr.Textbox(
                            label=None,
                            placeholder="Here is the execution status of your task...")

                        gr.Markdown("## Execution status")
                        execution_status = gr.DataFrame(
                            type="pandas"
                        )
                        gr.Markdown("## Execution output")
                        execution_results = gr.JSON(
                            label=None
                        )
                        state_output = gr.State()  # currently unused placeholder

            # Run the workflow for the typed task.
            task_btn.click(
                fn_process_task,
                inputs=[project_desc_table, idea_input],
                outputs=[task_analysis_txt, execution_status, execution_results]
            )

            # Pull the latest shared state (e.g. after a WhatsApp interaction).
            fetch_state_btn.click(
                fetch_updated_state,
                inputs=None,
                outputs=[project_desc_table, task_analysis_txt, execution_status, execution_results]
            )

        # Page-flow wiring: login → onboarding.
        login_button.click(
            mock_login,
            inputs=email_input,
            outputs=[login_result, login_page, onboarding_page]
        )

        # Onboarding → integrations (also seeds the project table).
        submit_survey.click(
            onboarding_survey,
            inputs=[role, industry, project_description],
            outputs=[project_desc_table, onboarding_page, integrations_page]
        )

        # Stubbed integration buttons just flip their status textboxes.
        todoist_button.click(integrate_todoist, outputs=todoist_result)
        evernote_button.click(integrate_evernote, outputs=evernote_result)
        calendar_button.click(integrate_calendar, outputs=calendar_result)

        # Integrations → main page.
        skip_integrations.click(
            lambda: (gr.update(visible=False), gr.update(visible=True)),
            outputs=[integrations_page, qr_code_page]
        )

    return demo
| |
| |
|
|
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
# Build and launch the Gradio UI on port 7860.
# NOTE(review): demo.launch() starts its own server on 7860 while
# uvicorn.run() at the bottom of the file also binds 7860 — confirm which of
# the two is intended to serve in production.
demo = create_gradio_interface()
gradio_asgi_app = demo.launch(share=False, inbrowser=False, server_name="0.0.0.0", server_port=7860, inline=False)

logging.debug(f"Gradio version: {gr.__version__}")
logging.debug(f"FastAPI version: {fastapi.__version__}")
|
|
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
@app.get("/", response_class=RedirectResponse)
async def index():
    """Redirect the root path to the Gradio UI mount point (307 keeps the method)."""
    return RedirectResponse(url="/gradio", status_code=307)
|
|
| |
if __name__ == "__main__":
    # Serve the FastAPI app directly.
    # NOTE(review): port 7860 may already be bound by demo.launch() above —
    # confirm only one server is meant to own the port.
    uvicorn.run(app, host="0.0.0.0", port=7860)