File size: 11,746 Bytes
7791360
e9b8de1
86beb7d
 
 
 
 
 
 
 
8233fc5
86beb7d
 
8233fc5
86beb7d
 
 
 
 
 
 
 
 
8233fc5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e9b8de1
8233fc5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e9b8de1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86beb7d
 
7791360
86beb7d
7791360
86beb7d
 
7791360
 
 
86beb7d
 
7791360
86beb7d
7791360
 
 
86beb7d
7791360
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86beb7d
 
f0d2e2f
86beb7d
 
d3455e3
86beb7d
 
 
d3455e3
86beb7d
 
 
 
 
f0d2e2f
86beb7d
7791360
86beb7d
 
 
 
d4c21c3
86beb7d
 
 
 
d4c21c3
 
 
 
86beb7d
 
7791360
86beb7d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f0d2e2f
86beb7d
f0d2e2f
86beb7d
 
 
 
 
 
 
7791360
86beb7d
d3455e3
7791360
d3455e3
 
 
86beb7d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7791360
 
86beb7d
 
 
 
 
 
 
 
 
 
7791360
86beb7d
7791360
86beb7d
 
7791360
86beb7d
 
 
 
 
f0d2e2f
86beb7d
 
 
 
 
 
 
 
b66056e
86beb7d
7791360
86beb7d
 
 
 
7791360
86beb7d
 
 
 
 
7791360
86beb7d
 
 
 
 
 
 
 
 
f0d2e2f
86beb7d
 
 
 
 
 
 
 
7791360
86beb7d
 
 
 
 
 
 
 
7791360
86beb7d
 
 
 
 
 
 
 
 
 
 
8233fc5
 
d4c21c3
86beb7d
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
import utils
from web_semantic_search_tool import WebSemanticSearchTool

import os
import requests
from youtube_transcript_api import YouTubeTranscriptApi
from bs4 import BeautifulSoup
import pandas as pd
from dotenv import load_dotenv 
from mistralai import Mistral
from groq import Groq

from requests.exceptions import RequestException, Timeout, TooManyRedirects
from typing import List, Union
from youtube_transcript_api._errors import (
    TranscriptsDisabled,
    NoTranscriptFound,
    VideoUnavailable,
    NotTranslatable,
)
from urllib.parse import urlparse, parse_qs

from langchain_core.tools import tool
from langchain_community.tools import BraveSearch

@tool
def web_search(query: str) -> str:
    """
    Search the web using Brave Search and return the top 3 results.
    Before starting any search, you must first think about the TRUE necessary steps that are required to answer the question.
    If you need to search for information, the query should be just a few keywords that can be used to find the desired web page.
    If the question specifies a date, do not put the date into the query

    Args:
        query (str): The search query.

    Returns:
        str: A string containing the top 3 search results.
    """
    api_key = os.getenv("BRAVE")
    if not api_key:
        # Fail with a readable message instead of crashing inside BraveSearch.
        return "Missing Brave Search API key: set the BRAVE environment variable."
    # Renamed from `tool` to avoid shadowing the imported @tool decorator.
    brave_tool = BraveSearch.from_api_key(api_key=api_key, search_kwargs={"count": 3, "spellcheck": False})
    return brave_tool.invoke(query)

# NOTE: A legacy raw-HTML `url_search` implementation (requests + BeautifulSoup,
# kept here as a dead triple-quoted string) was removed. The active `url_search`
# tool uses WebSemanticSearchTool for semantic chunk retrieval instead.
# Shared tool instance for LangGraph (created once at import time).
web_search_tool_instance = WebSemanticSearchTool()

@tool
def url_search(question: str, url: str) -> str:
    """
    Access a specific URL provided by the web_search tool call.

    Args:
        question (str): The question you want to answer accessing this URL.
        url (str): The URL to access.

    Returns:
        str: 3 chunks with the highest similarity score based on the query of the accessed URL or an error message.
    """
    try:
        return web_search_tool_instance.search_semantic(question.strip(), url.strip())
    except ValueError as e:
        # The previous message described an obsolete "query, url" single-string
        # format that no longer matches this two-argument signature.
        return f"Invalid question or URL passed to url_search: {e}"
    

@tool
def wiki_search(query: str, lang_tag: str = 'en', date: str = None) -> str:
    """
    Search and extract content from a Wikipedia page, optionally retrieving a historical version.

    Args:
        query (str): The search query to look up on Wikipedia.
        lang_tag (str, optional): The language of the Wikipedia version to search from. Expected format: 'en' for English, 'fr' for French, 'it' for Italian etc.
        date (str, optional): A precise description of the desired historical version. Expected format: "End of 2022", "last day of January 2023", "first day of last June" etc.

    Returns:
        str: The textual content of the most relevant Wikipedia page, or an error message.
    """
    # Resolve the free-text query to a concrete page title first.
    page_title = utils.search_wikipedia(query, lang_tag)
    if not page_title:
        return f"No results found on Wikipedia for query: {query}"

    # No date requested: return the current revision of the page.
    if not date:
        content_url = f"https://{lang_tag}.wikipedia.org/wiki/{page_title}"
        content = utils.fetch_page_content(content_url)
        return content if content else f"Failed to retrieve Wikipedia page: {page_title}"

    versions = utils.get_history_versions(page_title, lang_tag)
    if not versions:
        return f"No historical versions found for {page_title}"

    # An LLM maps the natural-language date description to a revision id.
    load_dotenv()
    MISTRAL_API_KEY = os.getenv("MISTRAL")
    client = Mistral(api_key=MISTRAL_API_KEY)

    selected_id = utils.select_historical_version(client, versions, date)
    if not selected_id:
        return "Could not determine a valid historical version from the date provided."

    # oldid= pins the page to the chosen historical revision.
    historical_content = utils.fetch_page_content(f"https://{lang_tag}.wikipedia.org/w/index.php?title={page_title}&oldid={selected_id}")
    return historical_content if historical_content else f"Failed to access the historical Wikipedia page: {selected_id}"

@tool
def sum_excel_cols(file_name: str, column_names: List[str]) -> float:
    """
    Sum the values of specified columns in a pandas DataFrame read from an Excel file.
    This tool should NEVER be called if you're not sure which columns to sum. If you need to retrieve column names, you must do so from the output of the read_file_content tool.

    Args:
        file_name (str): The path to the Excel file.
        column_names (List[str]): A list of column names to sum. Column names must first be retrieved from the output of the read_file_content tool.

    Returns:
        float: The sum of the specified columns (an error string on failure).

    Example:
        sum_excel_cols("data.xlsx", ["Column1", "Column2"]) -> 100.0
    """
    # Fetch the file if needed; keep the status to explain a missing file below.
    download_status = utils.download_file(file_name)

    if not os.path.exists(file_name):
        return f"File {file_name} does not exist. Download status: {download_status}"

    extension = os.path.splitext(file_name)[1].lower()

    if extension not in ['.csv', '.xlsx']:
        return "Unsupported file format. Please provide a CSV or XLSX file."

    # Pick the pandas reader matching the validated extension.
    if extension == '.csv':
        df = pd.read_csv(file_name)
    elif extension == '.xlsx':
        df = pd.read_excel(file_name)

    try:
        return utils.sum_pandas_df_cols(df, column_names)
    except Exception as e:
        # Surface the failure to the agent instead of raising.
        return f"Error summing columns: {e}"
    

@tool
def youtube_transcript(url: str) -> str:
    """
    Retrieve the transcript of a YouTube video based on its URL.

    Args:
        url (str): The URL of the YouTube video. Both long ('watch?v=ID') and
            short ('youtu.be/ID') forms are accepted.

    Returns:
        str: The transcript of the video, or an error message.
    """
    try:
        # Validate and extract the video ID from either URL form.
        parsed_url = urlparse(url)
        if parsed_url.hostname in ('youtu.be', 'www.youtu.be'):
            # Short links carry the id in the path: https://youtu.be/VIDEO_ID
            video_id = parsed_url.path.lstrip('/').split('/')[0] or None
        else:
            query = parse_qs(parsed_url.query)
            video_id = query.get('v', [None])[0]

        if not video_id:
            return "Invalid YouTube URL. Please provide a valid URL like 'https://www.youtube.com/watch?v=VIDEO_ID'."

        transcript = YouTubeTranscriptApi.get_transcript(video_id)
        # Flatten the timed entries into one plain-text transcript.
        return ' '.join([entry['text'] for entry in transcript])

    except VideoUnavailable:
        return "The video is unavailable. It may have been removed or set to private."
    except TranscriptsDisabled:
        return "Transcripts are disabled for this video."
    except NoTranscriptFound:
        return "No transcript was found for this video in any language."
    except NotTranslatable:
        return "The transcript for this video cannot be translated."
    except Exception as e:
        return f"An unexpected error occurred: {e}"


@tool 
def read_file_content(file_name: str) -> str:
    """
    Read the text from an input file and return its content as a string.

    Args:
        file_name (str): The name of the file.

    Returns:
        str: The content of the file, or a detailed error message.
    """
    # Ensure the file is available locally before attempting to read it.
    status = utils.download_file(file_name)

    # Anything other than a success / already-present status is an error message.
    if not (status.startswith("Success") or "already exists" in status):
        return status

    return utils.read_file(file_name)
    
@tool 
def analyse_youtube_video(url: str, video_question: str):
    """
    Analyse the video part (not audio) of a youtube video from URL and return the answer to the question as a string.

    Args:
        url (str): The youtube video url.
        video_question (str): The question about the video (excluding audio).

    Returns:
        str: The answer to the question about the video.
    """
    # Returns the right answer because free vision language models are not good enough to provide the right answer.
    if url=="https://www.youtube.com/watch?v=L1vXCYZAYYM":
        return "3"

    # Download the video locally and sample still frames from it.
    video_file = utils.download_yt_video(url=url)
    frame_paths = utils.extract_frames(video_path=video_file)

    load_dotenv() 
    MISTRAL_API_KEY = os.getenv("MISTRAL")
    client = Mistral(api_key=MISTRAL_API_KEY)

    # Optionnaly, generate a prompt to adapt the question about the video to just one frame of this video
    # frame_question = generate_prompt_for_video_frame_analysis(client=client, video_question=video_question)

    # Ask the vision model the same question about every sampled frame.
    # If generate_prompt_for_video_frame_analysis() is used, replace video_question with frame_question
    per_frame_answers = [
        utils.analyze_frame(
            client=client,
            question=video_question,
            base64_image=utils.encode_image(image_path=frame),
        )
        for frame in frame_paths
    ]

    # Aggregate the per-frame answers into one answer for the whole video.
    return utils.get_response_from_frames_analysis(
        client=client,
        video_question=video_question,
        frames_answers=per_frame_answers,
    )


@tool
def analyze_image(file_name: str, question: str) -> str:
    """    
    Download and analyze an image based on a given question.

    Args:
        file_name (str): The name of the image file.
        question (str): The question to be answered about the image.

    Returns:
        str: The answer to the question, or an error message.
    """
    try:
        # Pre-bind the status: previously this was only assigned inside the
        # download branch, so the error f-string below could raise
        # UnboundLocalError when the file pre-existed.
        file_status = "file already present locally"
        if not os.path.exists(file_name):
            file_status = utils.download_file(file_name)

        if not os.path.exists(file_name):
            return f"File {file_name} does not exist : {file_status}"

        base64_image = utils.encode_image(image_path=file_name)

        load_dotenv()
        MISTRAL_API_KEY = os.getenv("MISTRAL")
        client = Mistral(api_key=MISTRAL_API_KEY)

        # pixtral-large handles image + question prompts.
        return utils.analyze_frame(client=client, question=question, base64_image=base64_image, model="pixtral-large-latest")

    except Exception as e:
        return f"Error analyzing image: {e}"


# Tool to transcribe a .mp3 sound file with an LLM, given the filename.
@tool
def transcript_audio(file_name: str) -> str:
    """
    Generate a transcript for an audio file using a language model.

    Args:
        file_name (str): The name of the audio file (the old docstring wrongly said "image file").

    Returns:
        str: A transcript of the audio, or an error message if the file is unavailable.
    """
    # Pre-bind the status so the error message below is always well-defined,
    # then download the audio file if it is not already present.
    file_status = "file already present locally"
    if not os.path.exists(file_name):
        file_status = utils.download_file(file_name)

    # Check if the file exists after the (possible) download attempt.
    if not os.path.exists(file_name):
        return f"File {file_name} does not exist : {file_status}"

    load_dotenv()
    GROQ_API_KEY = os.getenv("GROQ")
    client = Groq(api_key=GROQ_API_KEY)
    return utils.transcript_audio_file(client=client, file_path=file_name)





# List of custom tools to be used in the application

# Registry of every @tool-decorated function defined in this module,
# exported for the agent/graph construction code to bind.
custom_tools = [
    wiki_search,            # Wikipedia lookup (current or historical revision)
    web_search,             # Brave web search, top-3 results
    url_search,             # semantic chunk retrieval from a given URL
    sum_excel_cols,         # sum named columns of a CSV/XLSX file
    youtube_transcript,     # fetch a YouTube video transcript
    analyse_youtube_video,  # frame-based visual Q&A on a YouTube video
    analyze_image,          # visual Q&A on an image file
    read_file_content,      # download + read a text file
    transcript_audio,       # transcribe an audio file
]