kokluch committed on
Commit
146f00f
·
1 Parent(s): 201adcb

Add Youtube tool

Browse files
Files changed (3) hide show
  1. agent.py +5 -8
  2. requirements.txt +2 -1
  3. yt_tool.py +93 -0
agent.py CHANGED
@@ -16,8 +16,9 @@ from langgraph.graph.message import add_messages
16
  from langgraph.prebuilt import ToolNode
17
  from langgraph.prebuilt import tools_condition
18
  from mediawikiapi import MediaWikiAPI
19
- from transformers import pipeline
20
  from wikipedia_tool import WikipediaTool
 
 
21
 
22
  @tool
23
  def read_xlsx_file(file_path: str) -> str:
@@ -83,7 +84,8 @@ class Agent:
83
  TavilySearch(),
84
  read_xlsx_file,
85
  addition,
86
- multiple
 
87
  ]
88
 
89
  self.llm_with_tools = llm.bind_tools(self.tools)
@@ -198,12 +200,7 @@ class Agent:
198
  temp_dir = tempfile.gettempdir() # Get the temporary directory path
199
  audio_path = os.path.join(temp_dir, os.path.basename(filename))
200
 
201
- pipe = pipeline(
202
- task="automatic-speech-recognition",
203
- model="openai/whisper-large-v3"
204
- )
205
-
206
- result = pipe(audio_path)
207
 
208
  audio_message = HumanMessage(result["text"])
209
 
 
16
  from langgraph.prebuilt import ToolNode
17
  from langgraph.prebuilt import tools_condition
18
  from mediawikiapi import MediaWikiAPI
 
19
  from wikipedia_tool import WikipediaTool
20
+ from yt_tool import speech_recognition_pipe, yt_transcribe
21
+
22
 
23
  @tool
24
  def read_xlsx_file(file_path: str) -> str:
 
84
  TavilySearch(),
85
  read_xlsx_file,
86
  addition,
87
+ multiple,
88
+ yt_transcribe
89
  ]
90
 
91
  self.llm_with_tools = llm.bind_tools(self.tools)
 
200
  temp_dir = tempfile.gettempdir() # Get the temporary directory path
201
  audio_path = os.path.join(temp_dir, os.path.basename(filename))
202
 
203
+ result = speech_recognition_pipe(audio_path)
 
 
 
 
 
204
 
205
  audio_message = HumanMessage(result["text"])
206
 
requirements.txt CHANGED
@@ -16,4 +16,5 @@ openpyxl
16
  protobuf~=5.29.4
17
  genai~=2.1.0
18
  transformers~=4.52.4
19
- torch
 
 
16
  protobuf~=5.29.4
17
  genai~=2.1.0
18
  transformers~=4.52.4
19
+ torch
20
+ yt-dlp
yt_tool.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import yt_dlp as youtube_dl
3
+ from transformers import pipeline
4
+ from transformers.pipelines.audio_utils import ffmpeg_read
5
+ from langchain_core.tools import tool
6
+
7
+ import tempfile
8
+ import os
9
+
10
# credit https://huggingface.co/spaces/hf-audio/whisper-large-v3

MODEL_NAME = "openai/whisper-tiny.en"
BATCH_SIZE = 8
FILE_LIMIT_MB = 1000
YT_LENGTH_LIMIT_S = 3600  # limit to 1 hour YouTube files

# BUG FIX: `torch.mps.is_available()` only exists on recent torch (2.x) and
# raises AttributeError on older versions; the stable API is
# `torch.backends.mps.is_available()`. Also prefer CUDA when present —
# the original fell back to CPU on CUDA-only machines.
if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "mps"
else:
    device = "cpu"

# Shared ASR pipeline; chunk_length_s=30 enables long-form transcription
# by splitting audio into 30-second windows.
speech_recognition_pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)
25
+
26
def download_yt_audio(yt_url, filename):
    """Download a YouTube video's media to *filename*.

    Args:
        yt_url (str): URL of the YouTube video.
        filename (str): Destination path for the downloaded file.

    Raises:
        RuntimeError: If metadata extraction or the download fails, or if
            the video is longer than YT_LENGTH_LIMIT_S seconds.
    """
    import time  # local import: only needed for the length-limit message

    info_loader = youtube_dl.YoutubeDL()

    try:
        info = info_loader.extract_info(yt_url, download=False)
    except youtube_dl.utils.DownloadError as err:
        # BUG FIX: original did `raise str(err)` — raising a str is a
        # TypeError in Python 3; wrap in a real exception and chain it.
        raise RuntimeError(str(err)) from err

    # "duration_string" is "S", "M:S" or "H:M:S"; left-pad to [H, M, S].
    file_h_m_s = [int(part) for part in info["duration_string"].split(":")]
    while len(file_h_m_s) < 3:
        file_h_m_s.insert(0, 0)
    file_length_s = file_h_m_s[0] * 3600 + file_h_m_s[1] * 60 + file_h_m_s[2]

    if file_length_s > YT_LENGTH_LIMIT_S:
        # BUG FIX: `time` was used here but never imported (NameError), and
        # the original `raise f"..."` raised a str (TypeError).
        yt_length_limit_hms = time.strftime("%HH:%MM:%SS", time.gmtime(YT_LENGTH_LIMIT_S))
        file_length_hms = time.strftime("%HH:%MM:%SS", time.gmtime(file_length_s))
        raise RuntimeError(
            f"Maximum YouTube length is {yt_length_limit_hms}, got {file_length_hms} YouTube video."
        )

    ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}

    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.download([yt_url])
        except youtube_dl.utils.ExtractorError as err:
            raise RuntimeError(str(err)) from err
56
+
57
+ def _return_yt_html_embed(yt_url):
58
+ video_id = yt_url.split("?v=")[-1]
59
+ HTML_str = (
60
+ f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
61
+ " </center>"
62
+ )
63
+ return HTML_str
64
+
65
@tool
def yt_transcribe(yt_url, max_filesize=75.0):
    """
    Transcribes the audio from a given YouTube video URL.

    Args:
        yt_url (str): The URL of the YouTube video.
        max_filesize (float, optional): The maximum allowed filesize of the video in MB.
            Defaults to 75.0. NOTE(review): currently unused — no size check is
            performed; kept only for interface compatibility.

    Returns:
        tuple: A tuple containing:
            - str: An HTML embed string for the YouTube video.
            - str: The transcribed text of the video's audio.
    """
    html_embed_str = _return_yt_html_embed(yt_url)

    # Download into a temp dir that is removed as soon as the raw bytes are read.
    with tempfile.TemporaryDirectory() as tmpdirname:
        filepath = os.path.join(tmpdirname, "video.mp4")
        download_yt_audio(yt_url, filepath)
        with open(filepath, "rb") as f:
            inputs = f.read()

    # Decode the downloaded container to a raw waveform at the model's rate.
    sampling_rate = speech_recognition_pipe.feature_extractor.sampling_rate
    inputs = ffmpeg_read(inputs, sampling_rate)
    inputs = {"array": inputs, "sampling_rate": sampling_rate}

    # CONSISTENCY FIX: use the module-level BATCH_SIZE instead of a
    # hard-coded 8, so the constant actually governs batching.
    text = speech_recognition_pipe(inputs, batch_size=BATCH_SIZE, return_timestamps=True)["text"]

    return html_embed_str, text