Source code for langchain.document_loaders.base
"""Abstract interface for document loader implementations."""
from abc import ABC, abstractmethod
from typing import Iterator, List, Optional
from langchain.document_loaders.blob_loaders import Blob
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
[docs]class BaseLoader(ABC):
"""Interface for loading Documents.
Implementations should implement the lazy-loading method using generators
to avoid loading all Documents into memory at once.
The `load` method will remain as is for backwards compatibility, but its
implementation should be just `list(self.lazy_load())`.
"""
# Sub-classes should implement this method
# as return list(self.lazy_load()).
# This method returns a List which is materialized in memory.
[docs] @abstractmethod
def load(self) -> List[Document]:
"""Load data into Document objects."""
[docs] def load_and_split(
self, text_splitter: Optional[TextSplitter] = None
) -> List[Document]:
"""Load Documents and split into chunks. Chunks are returned as Documents.
Args:
text_splitter: TextSplitter instance to use for splitting documents.
Defaults to RecursiveCharacterTextSplitter.
Returns:
List of Documents.
"""
if text_splitter is None:
_text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
else:
_text_splitter = text_splitter
docs = self.load()
return _text_splitter.split_documents(docs)
# Attention: This method will be upgraded into an abstractmethod once it's
# implemented in all the existing subclasses.
[docs] def lazy_load(
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/base.html
|
01e330ed0de2-1
|
# implemented in all the existing subclasses.
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""A lazy loader for Documents."""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement lazy_load()"
)
[docs]class BaseBlobParser(ABC):
"""Abstract interface for blob parsers.
A blob parser provides a way to parse raw data stored in a blob into one
or more documents.
The parser can be composed with blob loaders, making it easy to re-use
a parser independent of how the blob was originally loaded.
"""
[docs] @abstractmethod
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazy parsing interface.
Subclasses are required to implement this method.
Args:
blob: Blob instance
Returns:
Generator of documents
"""
[docs] def parse(self, blob: Blob) -> List[Document]:
"""Eagerly parse the blob into a document or documents.
This is a convenience method for interactive development environment.
Production applications should favor the lazy_parse method instead.
Subclasses should generally not over-ride this parse method.
Args:
blob: Blob instance
Returns:
List of documents
"""
return list(self.lazy_parse(blob))
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/base.html
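For illustration, a minimal custom loader built on this interface. The class name and in-memory data source are hypothetical; only BaseLoader and Document come from the module above:

from typing import Iterator, List

from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document


class InMemoryLoader(BaseLoader):
    """Hypothetical loader that wraps a list of strings."""

    def __init__(self, texts: List[str]):
        self.texts = texts

    def lazy_load(self) -> Iterator[Document]:
        # Yield one Document per string to avoid materializing everything.
        for text in self.texts:
            yield Document(page_content=text, metadata={"source": "memory"})

    def load(self) -> List[Document]:
        # As recommended by BaseLoader, load() is just list(lazy_load()).
        return list(self.lazy_load())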
Source code for langchain.document_loaders.url_selenium
"""Loader that uses Selenium to load a page, then uses unstructured to load the html.
"""
import logging
from typing import TYPE_CHECKING, List, Literal, Optional, Union
if TYPE_CHECKING:
from selenium.webdriver import Chrome, Firefox
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class SeleniumURLLoader(BaseLoader):
"""Loader that uses Selenium and to load a page and unstructured to load the html.
This is useful for loading pages that require javascript to render.
Attributes:
urls (List[str]): List of URLs to load.
continue_on_failure (bool): If True, continue loading other URLs on failure.
browser (str): The browser to use, either 'chrome' or 'firefox'.
binary_location (Optional[str]): The location of the browser binary.
executable_path (Optional[str]): The path to the browser executable.
headless (bool): If True, the browser will run in headless mode.
arguments [List[str]]: List of arguments to pass to the browser.
"""
[docs] def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
browser: Literal["chrome", "firefox"] = "chrome",
binary_location: Optional[str] = None,
executable_path: Optional[str] = None,
headless: bool = True,
arguments: List[str] = [],
):
"""Load a list of URLs using Selenium and unstructured."""
try:
import selenium # noqa:F401
except ImportError:
raise ImportError(
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url_selenium.html
|
159912d219c2-1
|
import selenium # noqa:F401
except ImportError:
raise ImportError(
"selenium package not found, please install it with "
"`pip install selenium`"
)
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.browser = browser
self.binary_location = binary_location
self.executable_path = executable_path
self.headless = headless
self.arguments = arguments
def _get_driver(self) -> Union["Chrome", "Firefox"]:
"""Create and return a WebDriver instance based on the specified browser.
Raises:
ValueError: If an invalid browser is specified.
Returns:
Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
"""
if self.browser.lower() == "chrome":
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options as ChromeOptions
chrome_options = ChromeOptions()
for arg in self.arguments:
chrome_options.add_argument(arg)
if self.headless:
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
if self.binary_location is not None:
chrome_options.binary_location = self.binary_location
if self.executable_path is None:
return Chrome(options=chrome_options)
return Chrome(executable_path=self.executable_path, options=chrome_options)
elif self.browser.lower() == "firefox":
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options as FirefoxOptions
firefox_options = FirefoxOptions()
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url_selenium.html
|
159912d219c2-2
|
firefox_options = FirefoxOptions()
for arg in self.arguments:
firefox_options.add_argument(arg)
if self.headless:
firefox_options.add_argument("--headless")
if self.binary_location is not None:
firefox_options.binary_location = self.binary_location
if self.executable_path is None:
return Firefox(options=firefox_options)
return Firefox(
executable_path=self.executable_path, options=firefox_options
)
else:
raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.")
[docs] def load(self) -> List[Document]:
"""Load the specified URLs using Selenium and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from unstructured.partition.html import partition_html
docs: List[Document] = list()
driver = self._get_driver()
for url in self.urls:
try:
driver.get(url)
page_content = driver.page_source
elements = partition_html(text=page_content)
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
driver.quit()
return docs
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url_selenium.html
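A short usage sketch; the URL is a placeholder, and a local Chrome installation is assumed for the default settings:

from langchain.document_loaders.url_selenium import SeleniumURLLoader

# Headless Chrome renders the page before unstructured extracts the text.
loader = SeleniumURLLoader(urls=["https://example.com"], headless=True)
docs = loader.load()
print(docs[0].metadata["source"])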
Source code for langchain.document_loaders.blackboard
"""Loads all documents from a blackboard course."""
import contextlib
import re
from pathlib import Path
from typing import Any, List, Optional, Tuple
from urllib.parse import unquote
from langchain.docstore.document import Document
from langchain.document_loaders.directory import DirectoryLoader
from langchain.document_loaders.pdf import PyPDFLoader
from langchain.document_loaders.web_base import WebBaseLoader
[docs]class BlackboardLoader(WebBaseLoader):
"""Loads all documents from a Blackboard course.
This loader is not compatible with all Blackboard courses. It is only
compatible with courses that use the new Blackboard interface.
To use this loader, you must have the BbRouter cookie. You can get this
cookie by logging into the course and then copying the value of the
BbRouter cookie from the browser's developer tools.
Example:
.. code-block:: python
from langchain.document_loaders import BlackboardLoader
loader = BlackboardLoader(
blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1",
bbrouter="expires:12345...",
)
documents = loader.load()
""" # noqa: E501
base_url: str
"""Base url of the blackboard course."""
folder_path: str
"""Path to the folder containing the documents."""
load_all_recursively: bool
"""If True, load all documents recursively."""
[docs] def __init__(
self,
blackboard_course_url: str,
bbrouter: str,
load_all_recursively: bool = True,
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html
|
13fc9bc7b719-1
|
bbrouter: str,
load_all_recursively: bool = True,
basic_auth: Optional[Tuple[str, str]] = None,
cookies: Optional[dict] = None,
continue_on_failure: Optional[bool] = False,
):
"""Initialize with blackboard course url.
The BbRouter cookie is required for most blackboard courses.
Args:
blackboard_course_url: Blackboard course url.
bbrouter: BbRouter cookie.
load_all_recursively: If True, load all documents recursively.
basic_auth: Basic auth credentials.
cookies: Cookies.
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
Raises:
ValueError: If blackboard course url is invalid.
"""
super().__init__(blackboard_course_url)
# Get base url
try:
self.base_url = blackboard_course_url.split("/webapps/blackboard")[0]
except IndexError:
raise IndexError(
"Invalid blackboard course url. "
"Please provide a url that starts with "
"https://<blackboard_url>/webapps/blackboard"
)
if basic_auth is not None:
self.session.auth = basic_auth
# Combine cookies
if cookies is None:
cookies = {}
cookies.update({"BbRouter": bbrouter})
self.session.cookies.update(cookies)
self.load_all_recursively = load_all_recursively
self.continue_on_failure = continue_on_failure
self.check_bs4()
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html
|
13fc9bc7b719-2
|
self.continue_on_failure = continue_on_failure
self.check_bs4()
[docs] def check_bs4(self) -> None:
"""Check if BeautifulSoup4 is installed.
Raises:
ImportError: If BeautifulSoup4 is not installed.
"""
try:
import bs4 # noqa: F401
except ImportError:
raise ImportError(
"BeautifulSoup4 is required for BlackboardLoader. "
"Please install it with `pip install beautifulsoup4`."
)
[docs] def load(self) -> List[Document]:
"""Load data into Document objects.
Returns:
List of Documents.
"""
if self.load_all_recursively:
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
relative_paths = self._get_paths(soup_info)
documents = []
for path in relative_paths:
url = self.base_url + path
print(f"Fetching documents from {url}")
soup_info = self._scrape(url)
with contextlib.suppress(ValueError):
documents.extend(self._get_documents(soup_info))
return documents
else:
print(f"Fetching documents from {self.web_path}")
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
return self._get_documents(soup_info)
def _get_folder_path(self, soup: Any) -> str:
"""Get the folder path to save the Documents in.
Args:
soup: BeautifulSoup4 soup object.
Returns:
Folder path.
"""
# Get the course name
course_name = soup.find("span", {"id": "crumb_1"})
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html
|
13fc9bc7b719-3
|
course_name = soup.find("span", {"id": "crumb_1"})
if course_name is None:
raise ValueError("No course name found.")
course_name = course_name.text.strip()
# Prepare the folder path
course_name_clean = (
unquote(course_name)
.replace(" ", "_")
.replace("/", "_")
.replace(":", "_")
.replace(",", "_")
.replace("?", "_")
.replace("'", "_")
.replace("!", "_")
.replace('"', "_")
)
# Get the folder path
folder_path = Path(".") / course_name_clean
return str(folder_path)
def _get_documents(self, soup: Any) -> List[Document]:
"""Fetch content from page and return Documents.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of documents.
"""
attachments = self._get_attachments(soup)
self._download_attachments(attachments)
documents = self._load_documents()
return documents
def _get_attachments(self, soup: Any) -> List[str]:
"""Get all attachments from a page.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of attachments.
"""
from bs4 import BeautifulSoup, Tag
# Get content list
content_list = soup.find("ul", {"class": "contentList"})
if content_list is None:
raise ValueError("No content list found.")
content_list: BeautifulSoup # type: ignore
# Get all attachments
attachments = []
for attachment in content_list.find_all("ul", {"class": "attachments"}):
attachment: Tag # type: ignore
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html
|
13fc9bc7b719-4
|
attachment: Tag # type: ignore
for link in attachment.find_all("a"):
link: Tag # type: ignore
href = link.get("href")
# Only add if href is not None and does not start with #
if href is not None and not href.startswith("#"):
attachments.append(href)
return attachments
def _download_attachments(self, attachments: List[str]) -> None:
"""Download all attachments.
Args:
attachments: List of attachments.
"""
# Make sure the folder exists
Path(self.folder_path).mkdir(parents=True, exist_ok=True)
# Download all attachments
for attachment in attachments:
self.download(attachment)
def _load_documents(self) -> List[Document]:
"""Load all documents in the folder.
Returns:
List of documents.
"""
# Create the document loader
loader = DirectoryLoader(
path=self.folder_path, glob="*.pdf", loader_cls=PyPDFLoader # type: ignore
)
# Load the documents
documents = loader.load()
# Return all documents
return documents
def _get_paths(self, soup: Any) -> List[str]:
"""Get all relative paths in the navbar."""
relative_paths = []
course_menu = soup.find("ul", {"class": "courseMenu"})
if course_menu is None:
raise ValueError("No course menu found.")
for link in course_menu.find_all("a"):
href = link.get("href")
if href is not None and href.startswith("/"):
relative_paths.append(href)
return relative_paths
[docs] def download(self, path: str) -> None:
"""Download a file from an url.
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html
|
13fc9bc7b719-5
|
"""Download a file from an url.
Args:
path: Path to the file.
"""
# Get the file content
response = self.session.get(self.base_url + path, allow_redirects=True)
# Get the filename
filename = self.parse_filename(response.url)
# Write the file to disk
with open(Path(self.folder_path) / filename, "wb") as f:
f.write(response.content)
[docs] def parse_filename(self, url: str) -> str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
"""
if (url_path := Path(url)) and url_path.suffix == ".pdf":
return url_path.name
else:
return self._parse_filename_from_url(url)
def _parse_filename_from_url(self, url: str) -> str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
Raises:
ValueError: If the filename could not be parsed.
"""
filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url)
if filename_matches:
filename = filename_matches.group(1)
else:
raise ValueError(f"Could not parse filename from {url}")
if ".pdf" not in filename:
raise ValueError(f"Incorrect file type: {filename}")
filename = filename.split(".pdf")[0] + ".pdf"
filename = unquote(filename)
filename = filename.replace("%20", " ")
return filename
if __name__ == "__main__":
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html
|
13fc9bc7b719-6
|
return filename
if __name__ == "__main__":
loader = BlackboardLoader(
"https://<YOUR BLACKBOARD URL"
" HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID"
" HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset",
"<YOUR BBROUTER COOKIE HERE>",
load_all_recursively=True,
)
documents = loader.load()
print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html
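For context on _parse_filename_from_url: `filename%2A%3DUTF-8%27%27` is the percent-encoded form of the RFC 6266 parameter `filename*=UTF-8''`, so the regex captures everything after that marker. A quick sketch with a made-up attachment URL:

import re
from urllib.parse import unquote

# Hypothetical redirect URL for a Blackboard attachment.
url = "https://bb.example.com/bbcswebdav/xid-1?filename%2A%3DUTF-8%27%27Lecture%25201.pdf"
captured = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url).group(1)
filename = unquote(captured.split(".pdf")[0] + ".pdf").replace("%20", " ")
print(filename)  # -> "Lecture 1.pdf"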
Source code for langchain.document_loaders.diffbot
"""Loader that uses Diffbot to load webpages in text format."""
import logging
from typing import Any, List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class DiffbotLoader(BaseLoader):
"""Loads Diffbot file json."""
[docs] def __init__(
self, api_token: str, urls: List[str], continue_on_failure: bool = True
):
"""Initialize with API token, ids, and key.
Args:
api_token: Diffbot API token.
urls: List of URLs to load.
continue_on_failure: Whether to continue loading other URLs if one fails.
Defaults to True.
"""
self.api_token = api_token
self.urls = urls
self.continue_on_failure = continue_on_failure
def _diffbot_api_url(self, diffbot_api: str) -> str:
return f"https://api.diffbot.com/v3/{diffbot_api}"
def _get_diffbot_data(self, url: str) -> Any:
"""Get Diffbot file from Diffbot REST API."""
# TODO: Add support for other Diffbot APIs
diffbot_url = self._diffbot_api_url("article")
params = {
"token": self.api_token,
"url": url,
}
response = requests.get(diffbot_url, params=params, timeout=10)
# TODO: handle non-ok errors
return response.json() if response.ok else {}
[docs] def load(self) -> List[Document]:
"""Extract text from Diffbot on all the URLs and return Documents"""
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/diffbot.html
|
c306242295f5-1
|
"""Extract text from Diffbot on all the URLs and return Documents"""
docs: List[Document] = list()
for url in self.urls:
try:
data = self._get_diffbot_data(url)
text = data["objects"][0]["text"] if "objects" in data else ""
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
return docs
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/diffbot.html
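A short usage sketch; the token and URL are placeholders:

from langchain.document_loaders.diffbot import DiffbotLoader

# The article API extracts clean text from each URL.
loader = DiffbotLoader(api_token="<DIFFBOT_API_TOKEN>", urls=["https://example.com"])
docs = loader.load()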
Source code for langchain.document_loaders.larksuite
"""Loads LarkSuite (FeiShu) document json dump."""
import json
import urllib.request
from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class LarkSuiteDocLoader(BaseLoader):
"""Loads LarkSuite (FeiShu) document."""
[docs] def __init__(self, domain: str, access_token: str, document_id: str):
"""Initialize with domain, access_token (tenant / user), and document_id.
Args:
domain: The domain to load the LarkSuite.
access_token: The access_token to use.
document_id: The document_id to load.
"""
self.domain = domain
self.access_token = access_token
self.document_id = document_id
def _get_larksuite_api_json_data(self, api_url: str) -> Any:
"""Get LarkSuite (FeiShu) API response json data."""
headers = {"Authorization": f"Bearer {self.access_token}"}
request = urllib.request.Request(api_url, headers=headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data
[docs] def lazy_load(self) -> Iterator[Document]:
"""Lazy load LarkSuite (FeiShu) document."""
api_url_prefix = f"{self.domain}/open-apis/docx/v1/documents"
metadata_json = self._get_larksuite_api_json_data(
f"{api_url_prefix}/{self.document_id}"
)
raw_content_json = self._get_larksuite_api_json_data(
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/larksuite.html
|
86c31f8fe515-1
|
)
raw_content_json = self._get_larksuite_api_json_data(
f"{api_url_prefix}/{self.document_id}/raw_content"
)
text = raw_content_json["data"]["content"]
metadata = {
"document_id": self.document_id,
"revision_id": metadata_json["data"]["document"]["revision_id"],
"title": metadata_json["data"]["document"]["title"],
}
yield Document(page_content=text, metadata=metadata)
[docs] def load(self) -> List[Document]:
"""Load LarkSuite (FeiShu) document."""
return list(self.lazy_load())
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/larksuite.html
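A usage sketch; the domain, token, and document id are placeholders:

from langchain.document_loaders.larksuite import LarkSuiteDocLoader

loader = LarkSuiteDocLoader(
    domain="https://open.larksuite.com",
    access_token="<ACCESS_TOKEN>",
    document_id="<DOCUMENT_ID>",
)
docs = loader.load()
print(docs[0].metadata["title"])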
Source code for langchain.document_loaders.joplin
import json
import urllib.request
from datetime import datetime
from typing import Iterator, List, Optional

from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.utils import get_from_env

LINK_NOTE_TEMPLATE = "joplin://x-callback-url/openNote?id={id}"


class JoplinLoader(BaseLoader):
    """
    Loader that fetches notes from Joplin.

    In order to use this loader, you need to have Joplin running with the
    Web Clipper enabled (look for "Web Clipper" in the app settings).

    To get the access token, you need to go to the Web Clipper options and
    under "Advanced Options" you will find the access token.

    You can find more information about the Web Clipper service here:
    https://joplinapp.org/clipper/
    """

    def __init__(
        self,
        access_token: Optional[str] = None,
        port: int = 41184,
        host: str = "localhost",
    ) -> None:
        """
        Args:
            access_token: The access token to use.
            port: The port where the Web Clipper service is running. Default is 41184.
            host: The host where the Web Clipper service is running.
                Default is localhost.
        """
        access_token = access_token or get_from_env(
            "access_token", "JOPLIN_ACCESS_TOKEN"
        )
        base_url = f"http://{host}:{port}"

        self._get_note_url = (
            f"{base_url}/notes?token={access_token}"
            f"&fields=id,parent_id,title,body,created_time,updated_time&page={{page}}"
        )
        self._get_folder_url = (
            f"{base_url}/folders/{{id}}?token={access_token}&fields=title"
        )
        self._get_tag_url = (
            f"{base_url}/notes/{{id}}/tags?token={access_token}&fields=title"
        )

    def _get_notes(self) -> Iterator[Document]:
        has_more = True
        page = 1
        while has_more:
            req_note = urllib.request.Request(self._get_note_url.format(page=page))
            with urllib.request.urlopen(req_note) as response:
                json_data = json.loads(response.read().decode())
                for note in json_data["items"]:
                    metadata = {
                        "source": LINK_NOTE_TEMPLATE.format(id=note["id"]),
                        "folder": self._get_folder(note["parent_id"]),
                        "tags": self._get_tags(note["id"]),
                        "title": note["title"],
                        "created_time": self._convert_date(note["created_time"]),
                        "updated_time": self._convert_date(note["updated_time"]),
                    }
                    yield Document(page_content=note["body"], metadata=metadata)
                has_more = json_data["has_more"]
                page += 1

    def _get_folder(self, folder_id: str) -> str:
        req_folder = urllib.request.Request(self._get_folder_url.format(id=folder_id))
        with urllib.request.urlopen(req_folder) as response:
            json_data = json.loads(response.read().decode())
            return json_data["title"]

    def _get_tags(self, note_id: str) -> List[str]:
        req_tag = urllib.request.Request(self._get_tag_url.format(id=note_id))
        with urllib.request.urlopen(req_tag) as response:
            json_data = json.loads(response.read().decode())
            return [tag["title"] for tag in json_data["items"]]

    def _convert_date(self, date: int) -> str:
        return datetime.fromtimestamp(date / 1000).strftime("%Y-%m-%d %H:%M:%S")

    def lazy_load(self) -> Iterator[Document]:
        yield from self._get_notes()

    def load(self) -> List[Document]:
        return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/joplin.html
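A usage sketch, assuming the Web Clipper service is running locally and JOPLIN_ACCESS_TOKEN is set in the environment:

from langchain.document_loaders.joplin import JoplinLoader

loader = JoplinLoader()  # defaults to localhost:41184
# lazy_load pages through the notes API without materializing everything.
for doc in loader.lazy_load():
    print(doc.metadata["title"], doc.metadata["folder"])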
Source code for langchain.document_loaders.ifixit
"""Loads iFixit data."""
from typing import List, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.web_base import WebBaseLoader
IFIXIT_BASE_URL = "https://www.ifixit.com/api/2.0"
[docs]class IFixitLoader(BaseLoader):
"""Load iFixit repair guides, device wikis and answers.
iFixit is the largest, open repair community on the web. The site contains nearly
100k repair manuals, 200k Questions & Answers on 42k devices, and all the data is
licensed under CC-BY.
This loader will allow you to download the text of a repair guide, text of Q&A's
and wikis from devices on iFixit using their open APIs and web scraping.
"""
[docs] def __init__(self, web_path: str):
"""Initialize with a web path."""
if not web_path.startswith("https://www.ifixit.com"):
raise ValueError("web path must start with 'https://www.ifixit.com'")
path = web_path.replace("https://www.ifixit.com", "")
allowed_paths = ["/Device", "/Guide", "/Answers", "/Teardown"]
""" TODO: Add /Wiki """
if not any(path.startswith(allowed_path) for allowed_path in allowed_paths):
raise ValueError(
"web path must start with /Device, /Guide, /Teardown or /Answers"
)
pieces = [x for x in path.split("/") if x]
"""Teardowns are just guides by a different name"""
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/ifixit.html
|
7a7b4153010f-1
|
"""Teardowns are just guides by a different name"""
self.page_type = pieces[0] if pieces[0] != "Teardown" else "Guide"
if self.page_type == "Guide" or self.page_type == "Answers":
self.id = pieces[2]
else:
self.id = pieces[1]
self.web_path = web_path
[docs] def load(self) -> List[Document]:
if self.page_type == "Device":
return self.load_device()
elif self.page_type == "Guide" or self.page_type == "Teardown":
return self.load_guide()
elif self.page_type == "Answers":
return self.load_questions_and_answers()
else:
raise ValueError("Unknown page type: " + self.page_type)
[docs] @staticmethod
def load_suggestions(query: str = "", doc_type: str = "all") -> List[Document]:
"""Load suggestions.
Args:
query: A query string
doc_type: The type of document to search for. Can be one of "all",
"device", "guide", "teardown", "answer", "wiki".
Returns:
"""
res = requests.get(
IFIXIT_BASE_URL + "/suggest/" + query + "?doctypes=" + doc_type
)
if res.status_code != 200:
raise ValueError(
'Could not load suggestions for "' + query + '"\n' + res.json()
)
data = res.json()
results = data["results"]
output = []
for result in results:
try:
loader = IFixitLoader(result["url"])
if loader.page_type == "Device":
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/ifixit.html
|
7a7b4153010f-2
|
if loader.page_type == "Device":
output += loader.load_device(include_guides=False)
else:
output += loader.load()
except ValueError:
continue
return output
[docs] def load_questions_and_answers(
self, url_override: Optional[str] = None
) -> List[Document]:
"""Load a list of questions and answers.
Args:
url_override: A URL to override the default URL.
Returns: List[Document]
"""
loader = WebBaseLoader(self.web_path if url_override is None else url_override)
soup = loader.scrape()
output = []
title = soup.find("h1", "post-title").text
output.append("# " + title)
output.append(soup.select_one(".post-content .post-text").text.strip())
answersHeader = soup.find("div", "post-answers-header")
if answersHeader:
output.append("\n## " + answersHeader.text.strip())
for answer in soup.select(".js-answers-list .post.post-answer"):
if answer.has_attr("itemprop") and "acceptedAnswer" in answer["itemprop"]:
output.append("\n### Accepted Answer")
elif "post-helpful" in answer["class"]:
output.append("\n### Most Helpful Answer")
else:
output.append("\n### Other Answer")
output += [
a.text.strip() for a in answer.select(".post-content .post-text")
]
output.append("\n")
text = "\n".join(output).strip()
metadata = {"source": self.web_path, "title": title}
return [Document(page_content=text, metadata=metadata)]
[docs] def load_device(
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/ifixit.html
|
7a7b4153010f-3
|
[docs] def load_device(
self, url_override: Optional[str] = None, include_guides: bool = True
) -> List[Document]:
"""Loads a device
Args:
url_override: A URL to override the default URL.
include_guides: Whether to include guides linked to from the device.
Defaults to True.
Returns:
"""
documents = []
if url_override is None:
url = IFIXIT_BASE_URL + "/wikis/CATEGORY/" + self.id
else:
url = url_override
res = requests.get(url)
data = res.json()
text = "\n".join(
[
data[key]
for key in ["title", "description", "contents_raw"]
if key in data
]
).strip()
metadata = {"source": self.web_path, "title": data["title"]}
documents.append(Document(page_content=text, metadata=metadata))
if include_guides:
"""Load and return documents for each guide linked to from the device"""
guide_urls = [guide["url"] for guide in data["guides"]]
for guide_url in guide_urls:
documents.append(IFixitLoader(guide_url).load()[0])
return documents
[docs] def load_guide(self, url_override: Optional[str] = None) -> List[Document]:
"""Load a guide
Args:
url_override: A URL to override the default URL.
Returns: List[Document]
"""
if url_override is None:
url = IFIXIT_BASE_URL + "/guides/" + self.id
else:
url = url_override
res = requests.get(url)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/ifixit.html
|
7a7b4153010f-4
|
else:
url = url_override
res = requests.get(url)
if res.status_code != 200:
raise ValueError(
"Could not load guide: " + self.web_path + "\n" + res.json()
)
data = res.json()
doc_parts = ["# " + data["title"], data["introduction_raw"]]
doc_parts.append("\n\n###Tools Required:")
if len(data["tools"]) == 0:
doc_parts.append("\n - None")
else:
for tool in data["tools"]:
doc_parts.append("\n - " + tool["text"])
doc_parts.append("\n\n###Parts Required:")
if len(data["parts"]) == 0:
doc_parts.append("\n - None")
else:
for part in data["parts"]:
doc_parts.append("\n - " + part["text"])
for row in data["steps"]:
doc_parts.append(
"\n\n## "
+ (
row["title"]
if row["title"] != ""
else "Step {}".format(row["orderby"])
)
)
for line in row["lines"]:
doc_parts.append(line["text_raw"])
doc_parts.append(data["conclusion_raw"])
text = "\n".join(doc_parts)
metadata = {"source": self.web_path, "title": data["title"]}
return [Document(page_content=text, metadata=metadata)]
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/ifixit.html
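A usage sketch; the guide URL is illustrative, and any /Device, /Guide, /Teardown or /Answers URL on www.ifixit.com works:

from langchain.document_loaders.ifixit import IFixitLoader

loader = IFixitLoader("https://www.ifixit.com/Guide/iPhone+6+Battery+Replacement/29363")
docs = loader.load()  # a single Document with the guide rendered as markdown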
Source code for langchain.document_loaders.pubmed
from typing import Iterator, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.pubmed import PubMedAPIWrapper


class PubMedLoader(BaseLoader):
    """Loads a query result from PubMed biomedical library into a list of Documents.

    Attributes:
        query: The query to be passed to the PubMed API.
        load_max_docs: The maximum number of documents to load.
    """

    def __init__(
        self,
        query: str,
        load_max_docs: Optional[int] = 3,
    ):
        """Initialize the PubMedLoader.

        Args:
            query: The query to be passed to the PubMed API.
            load_max_docs: The maximum number of documents to load.
                Defaults to 3.
        """
        self.query = query
        self.load_max_docs = load_max_docs
        self._client = PubMedAPIWrapper(
            top_k_results=load_max_docs,
        )

    def load(self) -> List[Document]:
        return list(self._client.lazy_load_docs(self.query))

    def lazy_load(self) -> Iterator[Document]:
        for doc in self._client.lazy_load_docs(self.query):
            yield doc
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/pubmed.html
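A usage sketch; the query string is arbitrary:

from langchain.document_loaders.pubmed import PubMedLoader

loader = PubMedLoader(query="chatgpt", load_max_docs=2)
docs = loader.load()  # up to 2 Documents from the PubMed API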
Source code for langchain.document_loaders.onedrive
"""Loads data from OneDrive"""
from __future__ import annotations
import logging
import os
import tempfile
from enum import Enum
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Type, Union
from pydantic import BaseModel, BaseSettings, Field, FilePath, SecretStr
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.onedrive_file import OneDriveFileLoader
if TYPE_CHECKING:
from O365 import Account
from O365.drive import Drive, Folder
SCOPES = ["offline_access", "Files.Read.All"]
logger = logging.getLogger(__name__)
class _OneDriveSettings(BaseSettings):
client_id: str = Field(..., env="O365_CLIENT_ID")
client_secret: SecretStr = Field(..., env="O365_CLIENT_SECRET")
class Config:
env_prefix = ""
case_sentive = False
env_file = ".env"
class _OneDriveTokenStorage(BaseSettings):
token_path: FilePath = Field(Path.home() / ".credentials" / "o365_token.txt")
class _FileType(str, Enum):
DOC = "doc"
DOCX = "docx"
PDF = "pdf"
class _SupportedFileTypes(BaseModel):
file_types: List[_FileType]
def fetch_mime_types(self) -> Dict[str, str]:
mime_types_mapping = {}
for file_type in self.file_types:
if file_type.value == "doc":
mime_types_mapping[file_type.value] = "application/msword"
elif file_type.value == "docx":
mime_types_mapping[
file_type.value
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive.html
|
f6c47e0c3569-1
|
mime_types_mapping[
file_type.value
] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document" # noqa: E501
elif file_type.value == "pdf":
mime_types_mapping[file_type.value] = "application/pdf"
return mime_types_mapping
[docs]class OneDriveLoader(BaseLoader, BaseModel):
"""Loads data from OneDrive."""
settings: _OneDriveSettings = Field(default_factory=_OneDriveSettings)
""" The settings for the OneDrive API client."""
drive_id: str = Field(...)
""" The ID of the OneDrive drive to load data from."""
folder_path: Optional[str] = None
""" The path to the folder to load data from."""
object_ids: Optional[List[str]] = None
""" The IDs of the objects to load data from."""
auth_with_token: bool = False
""" Whether to authenticate with a token or not. Defaults to False."""
def _auth(self) -> Type[Account]:
"""
Authenticates the OneDrive API client using the specified
authentication method and returns the Account object.
Returns:
Type[Account]: The authenticated Account object.
"""
try:
from O365 import FileSystemTokenBackend
except ImportError:
raise ImportError(
"O365 package not found, please install it with `pip install o365`"
)
if self.auth_with_token:
token_storage = _OneDriveTokenStorage()
token_path = token_storage.token_path
token_backend = FileSystemTokenBackend(
token_path=token_path.parent, token_filename=token_path.name
)
account = Account(
credentials=(
self.settings.client_id,
self.settings.client_secret.get_secret_value(),
),
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive.html
|
f6c47e0c3569-2
|
self.settings.client_id,
self.settings.client_secret.get_secret_value(),
),
scopes=SCOPES,
token_backend=token_backend,
**{"raise_http_errors": False},
)
else:
token_backend = FileSystemTokenBackend(
token_path=Path.home() / ".credentials"
)
account = Account(
credentials=(
self.settings.client_id,
self.settings.client_secret.get_secret_value(),
),
scopes=SCOPES,
token_backend=token_backend,
**{"raise_http_errors": False},
)
# make the auth
account.authenticate()
return account
def _get_folder_from_path(self, drive: Type[Drive]) -> Union[Folder, Drive]:
"""
Returns the folder or drive object located at the
specified path relative to the given drive.
Args:
drive (Type[Drive]): The root drive from which the folder path is relative.
Returns:
Union[Folder, Drive]: The folder or drive object
located at the specified path.
Raises:
FileNotFoundError: If the path does not exist.
"""
subfolder_drive = drive
if self.folder_path is None:
return subfolder_drive
subfolders = [f for f in self.folder_path.split("/") if f != ""]
if len(subfolders) == 0:
return subfolder_drive
items = subfolder_drive.get_items()
for subfolder in subfolders:
try:
subfolder_drive = list(filter(lambda x: subfolder in x.name, items))[0]
items = subfolder_drive.get_items()
except (IndexError, AttributeError):
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive.html
|
f6c47e0c3569-3
|
items = subfolder_drive.get_items()
except (IndexError, AttributeError):
raise FileNotFoundError("Path {} not exist.".format(self.folder_path))
return subfolder_drive
def _load_from_folder(self, folder: Type[Folder]) -> List[Document]:
"""
Loads all supported document files from the specified folder
and returns a list of Document objects.
Args:
folder (Type[Folder]): The folder object to load the documents from.
Returns:
List[Document]: A list of Document objects representing
the loaded documents.
"""
docs = []
file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"])
file_mime_types = file_types.fetch_mime_types()
items = folder.get_items()
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
for file in items:
if file.is_file:
if file.mime_type in list(file_mime_types.values()):
loader = OneDriveFileLoader(file=file)
docs.extend(loader.load())
return docs
def _load_from_object_ids(self, drive: Type[Drive]) -> List[Document]:
"""
Loads all supported document files from the specified OneDrive
drive based on their object IDs and returns a list
of Document objects.
Args:
drive (Type[Drive]): The OneDrive drive object
to load the documents from.
Returns:
List[Document]: A list of Document objects representing
the loaded documents.
"""
docs = []
file_types = _SupportedFileTypes(file_types=["doc", "docx", "pdf"])
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive.html
|
f6c47e0c3569-4
|
file_mime_types = file_types.fetch_mime_types()
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
for object_id in self.object_ids if self.object_ids else [""]:
file = drive.get_item(object_id)
if not file:
logger.warning(
"There isn't a file with "
f"object_id {object_id} in drive {drive}."
)
continue
if file.is_file:
if file.mime_type in list(file_mime_types.values()):
loader = OneDriveFileLoader(file=file)
docs.extend(loader.load())
return docs
[docs] def load(self) -> List[Document]:
"""
Loads all supported document files from the specified OneDrive drive
and return a list of Document objects.
Returns:
List[Document]: A list of Document objects
representing the loaded documents.
Raises:
ValueError: If the specified drive ID
does not correspond to a drive in the OneDrive storage.
"""
account = self._auth()
storage = account.storage()
drive = storage.get_drive(self.drive_id)
docs: List[Document] = []
if not drive:
raise ValueError(f"There isn't a drive with id {self.drive_id}.")
if self.folder_path:
folder = self._get_folder_from_path(drive=drive)
docs.extend(self._load_from_folder(folder=folder))
elif self.object_ids:
docs.extend(self._load_from_object_ids(drive=drive))
return docs
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive.html
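A usage sketch; the drive id and folder path are placeholders, and O365_CLIENT_ID / O365_CLIENT_SECRET are assumed to be set in the environment or a .env file (the first run triggers the interactive O365 authentication flow unless auth_with_token=True):

from langchain.document_loaders.onedrive import OneDriveLoader

loader = OneDriveLoader(drive_id="<DRIVE_ID>", folder_path="Documents/reports")
docs = loader.load()  # doc, docx and pdf files found in the folder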
Source code for langchain.document_loaders.airbyte
"""Loads local airbyte json files."""
from typing import Any, Callable, Iterator, List, Mapping, Optional
from libs.langchain.langchain.utils.utils import guard_import
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
RecordHandler = Callable[[Any, Optional[str]], Document]
[docs]class AirbyteCDKLoader(BaseLoader):
"""Loads records using an Airbyte source connector implemented using the CDK."""
[docs] def __init__(
self,
config: Mapping[str, Any],
source_class: Any,
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
from airbyte_cdk.models.airbyte_protocol import AirbyteRecordMessage
from airbyte_cdk.sources.embedded.base_integration import (
BaseEmbeddedIntegration,
)
from airbyte_cdk.sources.embedded.runner import CDKRunner
class CDKIntegration(BaseEmbeddedIntegration):
def _handle_record(
self, record: AirbyteRecordMessage, id: Optional[str]
) -> Document:
if record_handler:
return record_handler(record, id)
return Document(page_content="", metadata=record.data)
self._integration = CDKIntegration(
config=config,
runner=CDKRunner(source=source_class(), name=source_class.__name__),
)
self._stream_name = stream_name
self._state = state
[docs] def load(self) -> List[Document]:
return list(self.lazy_load())
[docs] def lazy_load(self) -> Iterator[Document]:
return self._integration._load_data(
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airbyte.html
|
6ecaff7bdb66-1
|
return self._integration._load_data(
stream_name=self._stream_name, state=self._state
)
[docs]class AirbyteHubspotLoader(AirbyteCDKLoader):
[docs] def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
source_class = guard_import(
"source_hubspot", pip_name="airbyte-source-hubspot"
).SourceHubspot
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
[docs]class AirbyteStripeLoader(AirbyteCDKLoader):
[docs] def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
source_class = guard_import(
"source_stripe", pip_name="airbyte-source-stripe"
).SourceStripe
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
[docs]class AirbyteTypeformLoader(AirbyteCDKLoader):
[docs] def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airbyte.html
|
6ecaff7bdb66-2
|
state: Optional[Any] = None,
) -> None:
source_class = guard_import(
"source_typeform", pip_name="airbyte-source-typeform"
).SourceTypeform
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
[docs]class AirbyteZendeskSupportLoader(AirbyteCDKLoader):
[docs] def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
source_class = guard_import(
"source_zendesk_support", pip_name="airbyte-source-zendesk-support"
).SourceZendeskSupport
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
[docs]class AirbyteShopifyLoader(AirbyteCDKLoader):
[docs] def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
source_class = guard_import(
"source_shopify", pip_name="airbyte-source-shopify"
).SourceShopify
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airbyte.html
|
6ecaff7bdb66-3
|
record_handler=record_handler,
state=state,
)
[docs]class AirbyteSalesforceLoader(AirbyteCDKLoader):
[docs] def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
source_class = guard_import(
"source_salesforce", pip_name="airbyte-source-salesforce"
).SourceSalesforce
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
[docs]class AirbyteGongLoader(AirbyteCDKLoader):
[docs] def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
source_class = guard_import(
"source_gong", pip_name="airbyte-source-gong"
).SourceGong
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airbyte.html
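A usage sketch for one of the concrete loaders; the config keys follow the Stripe source connector's spec, and the values are placeholders:

from langchain.document_loaders.airbyte import AirbyteStripeLoader

config = {
    "client_secret": "<STRIPE_API_KEY>",
    "account_id": "<ACCOUNT_ID>",
    "start_date": "2023-01-01T00:00:00Z",
}
loader = AirbyteStripeLoader(config=config, stream_name="invoices")
docs = loader.load()  # each record lands in Document.metadata by default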
Source code for langchain.document_loaders.whatsapp_chat
import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(date: str, sender: str, text: str) -> str:
    """Combine message information in a readable format ready to be used."""
    return f"{sender} on {date}: {text}\n\n"


class WhatsAppChatLoader(BaseLoader):
    """Loads a WhatsApp messages text file."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)
        text_content = ""

        with open(p, encoding="utf8") as f:
            lines = f.readlines()

        message_line_regex = r"""
            \[?
            (
                \d{1,4}
                [\/.]
                \d{1,2}
                [\/.]
                \d{1,4}
                ,\s
                \d{1,2}
                :\d{2}
                (?:
                    :\d{2}
                )?
                (?:[\s_](?:AM|PM))?
            )
            \]?
            [\s-]*
            ([~\w\s]+)
            [:]+
            \s
            (.+)
        """
        ignore_lines = ["This message was deleted", "<Media omitted>"]
        for line in lines:
            result = re.match(
                message_line_regex, line.strip(), flags=re.VERBOSE | re.IGNORECASE
            )
            if result:
                date, sender, text = result.groups()
                if text not in ignore_lines:
                    text_content += concatenate_rows(date, sender, text)

        metadata = {"source": str(p)}

        return [Document(page_content=text_content, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/whatsapp_chat.html
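A usage sketch; chat.txt stands for a WhatsApp chat export:

from langchain.document_loaders.whatsapp_chat import WhatsAppChatLoader

# The verbose regex above matches export lines such as
# "[1/23/23, 3:19 PM] Alice: Hello!" and the loader concatenates them into
# one Document: "Alice on 1/23/23, 3:19 PM: Hello!".
loader = WhatsAppChatLoader("chat.txt")
docs = loader.load()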
Source code for langchain.document_loaders.docugami
"""Loads processed documents from Docugami."""
import io
import logging
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import requests
from pydantic import BaseModel, root_validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
TD_NAME = "{http://www.w3.org/1999/xhtml}td"
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"
XPATH_KEY = "xpath"
DOCUMENT_ID_KEY = "id"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"
DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"
logger = logging.getLogger(__name__)
[docs]class DocugamiLoader(BaseLoader, BaseModel):
"""Loads processed docs from Docugami.
To use, you should have the ``lxml`` python package installed.
"""
api: str = DEFAULT_API_ENDPOINT
"""The Docugami API endpoint to use."""
access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
"""The Docugami API access token to use."""
docset_id: Optional[str]
"""The Docugami API docset ID to use."""
document_ids: Optional[Sequence[str]]
"""The Docugami API document IDs to use."""
file_paths: Optional[Sequence[Union[Path, str]]]
"""The local file paths to use."""
min_chunk_size: int = 32 # appended to the next chunk to avoid over-chunking
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
|
050ca86ec9c9-1
|
"""The minimum chunk size to use when parsing DGML. Defaults to 32."""
@root_validator
def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either local file paths are given, or remote API docset ID.
Args:
values: The values to validate.
Returns:
The validated values.
"""
if values.get("file_paths") and values.get("docset_id"):
raise ValueError("Cannot specify both file_paths and remote API docset_id")
if not values.get("file_paths") and not values.get("docset_id"):
raise ValueError("Must specify either file_paths or remote API docset_id")
if values.get("docset_id") and not values.get("access_token"):
raise ValueError("Must specify access token if using remote API docset_id")
return values
def _parse_dgml(
self, document: Mapping, content: bytes, doc_metadata: Optional[Mapping] = None
) -> List[Document]:
"""Parse a single DGML document into a list of Documents."""
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
# helpers
def _xpath_qname_for_chunk(chunk: Any) -> str:
"""Get the xpath qname for a chunk."""
qname = f"{chunk.prefix}:{chunk.tag.split('}')[-1]}"
parent = chunk.getparent()
if parent is not None:
doppelgangers = [x for x in parent if x.tag == chunk.tag]
if len(doppelgangers) > 1:
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
|
050ca86ec9c9-2
|
if len(doppelgangers) > 1:
idx_of_self = doppelgangers.index(chunk)
qname = f"{qname}[{idx_of_self + 1}]"
return qname
def _xpath_for_chunk(chunk: Any) -> str:
"""Get the xpath for a chunk."""
ancestor_chain = chunk.xpath("ancestor-or-self::*")
return "/" + "/".join(_xpath_qname_for_chunk(x) for x in ancestor_chain)
def _structure_value(node: Any) -> str:
"""Get the structure value for a node."""
structure = (
"table"
if node.tag == TABLE_NAME
else node.attrib["structure"]
if "structure" in node.attrib
else None
)
return structure
def _is_structural(node: Any) -> bool:
"""Check if a node is structural."""
return _structure_value(node) is not None
def _is_heading(node: Any) -> bool:
"""Check if a node is a heading."""
structure = _structure_value(node)
return structure is not None and structure.lower().startswith("h")
def _get_text(node: Any) -> str:
"""Get the text of a node."""
return " ".join(node.itertext()).strip()
def _has_structural_descendant(node: Any) -> bool:
"""Check if a node has a structural descendant."""
for child in node:
if _is_structural(child) or _has_structural_descendant(child):
return True
return False
def _leaf_structural_nodes(node: Any) -> List:
"""Get the leaf structural nodes of a node."""
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
|
050ca86ec9c9-3
|
"""Get the leaf structural nodes of a node."""
if _is_structural(node) and not _has_structural_descendant(node):
return [node]
else:
leaf_nodes = []
for child in node:
leaf_nodes.extend(_leaf_structural_nodes(child))
return leaf_nodes
def _create_doc(node: Any, text: str) -> Document:
"""Create a Document from a node and text."""
metadata = {
XPATH_KEY: _xpath_for_chunk(node),
DOCUMENT_ID_KEY: document["id"],
DOCUMENT_NAME_KEY: document["name"],
STRUCTURE_KEY: node.attrib.get("structure", ""),
TAG_KEY: re.sub(r"\{.*\}", "", node.tag),
}
if doc_metadata:
metadata.update(doc_metadata)
return Document(
page_content=text,
metadata=metadata,
)
# parse the tree and return chunks
tree = etree.parse(io.BytesIO(content))
root = tree.getroot()
chunks: List[Document] = []
prev_small_chunk_text = None
for node in _leaf_structural_nodes(root):
text = _get_text(node)
if prev_small_chunk_text:
text = prev_small_chunk_text + " " + text
prev_small_chunk_text = None
if _is_heading(node) or len(text) < self.min_chunk_size:
# Save headings or other small chunks to be appended to the next chunk
prev_small_chunk_text = text
else:
chunks.append(_create_doc(node, text))
if prev_small_chunk_text and len(chunks) > 0:
# small chunk at the end left over, just append to last chunk
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
|
050ca86ec9c9-4
|
# small chunk at the end left over, just append to last chunk
chunks[-1].page_content += " " + prev_small_chunk_text
return chunks
def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all document details for the given docset ID"""
url = f"{self.api}/docsets/{docset_id}/documents"
all_documents = []
while url:
response = requests.get(
url,
headers={"Authorization": f"Bearer {self.access_token}"},
)
if response.ok:
data = response.json()
all_documents.extend(data["documents"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_documents
def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all project details for the given docset ID"""
url = f"{self.api}/projects?docset.id={docset_id}"
all_projects = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_projects.extend(data["projects"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_projects
def _metadata_for_project(self, project: Dict) -> Dict:
"""Gets project metadata for all files"""
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
|
050ca86ec9c9-5
|
"""Gets project metadata for all files"""
project_id = project.get("id")
url = f"{self.api}/projects/{project_id}/artifacts/latest"
all_artifacts = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_artifacts.extend(data["artifacts"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
per_file_metadata = {}
for artifact in all_artifacts:
artifact_name = artifact.get("name")
artifact_url = artifact.get("url")
artifact_doc = artifact.get("document")
if artifact_name == "report-values.xml" and artifact_url and artifact_doc:
doc_id = artifact_doc["id"]
metadata: Dict = {}
# the evaluated XML for each document is named after the project
response = requests.request(
"GET",
f"{artifact_url}/content",
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
artifact_tree = etree.parse(io.BytesIO(response.content))
artifact_root = artifact_tree.getroot()
ns = artifact_root.nsmap
entries = artifact_root.xpath("//pr:Entry", namespaces=ns)
for entry in entries:
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
|
050ca86ec9c9-6
|
for entry in entries:
heading = entry.xpath("./pr:Heading", namespaces=ns)[0].text
value = " ".join(
entry.xpath("./pr:Value", namespaces=ns)[0].itertext()
).strip()
metadata[heading] = value
per_file_metadata[doc_id] = metadata
else:
raise Exception(
f"Failed to download {artifact_url}/content "
+ "(status: {response.status_code})"
)
return per_file_metadata
def _load_chunks_for_document(
self, docset_id: str, document: Dict, doc_metadata: Optional[Dict] = None
) -> List[Document]:
"""Load chunks for a document."""
document_id = document["id"]
url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
return self._parse_dgml(document, response.content, doc_metadata)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
[docs] def load(self) -> List[Document]:
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
# remote mode
_document_details = self._document_details_for_docset_id(self.docset_id)
if self.document_ids:
_document_details = [
d for d in _document_details if d["id"] in self.document_ids
]
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
|
050ca86ec9c9-7
|
]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata = {}
if _project_details:
# if there are any projects for this docset, load project metadata
for project in _project_details:
metadata = self._metadata_for_project(project)
combined_project_metadata.update(metadata)
for doc in _document_details:
doc_metadata = combined_project_metadata.get(doc["id"])
chunks += self._load_chunks_for_document(
self.docset_id, doc, doc_metadata
)
elif self.file_paths:
# local mode (for integration testing, or pre-downloaded XML)
for path in self.file_paths:
path = Path(path)
with open(path, "rb") as file:
chunks += self._parse_dgml(
{
DOCUMENT_ID_KEY: path.name,
DOCUMENT_NAME_KEY: path.name,
},
file.read(),
)
return chunks
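For orientation, a minimal usage sketch of the remote mode described above; the constructor arguments mirror the attributes used in this source, and the token and docset id are placeholders:
from langchain.document_loaders import DocugamiLoader

# Placeholder credentials; remote mode needs both access_token and docset_id.
loader = DocugamiLoader(access_token="YOUR_DOCUGAMI_TOKEN", docset_id="abc123")
docs = loader.load()  # one Document per DGML chunk, project metadata merged in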
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
Source code for langchain.document_loaders.web_base
"""Web base loader class."""
import asyncio
import logging
import warnings
from typing import Any, Dict, Iterator, List, Optional, Union
import aiohttp
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
default_header_template = {
"User-Agent": "",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
";q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Referer": "https://www.google.com/",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
}
def _build_metadata(soup: Any, url: str) -> dict:
"""Build metadata from BeautifulSoup output."""
metadata = {"source": url}
if title := soup.find("title"):
metadata["title"] = title.get_text()
if description := soup.find("meta", attrs={"name": "description"}):
metadata["description"] = description.get("content", "No description found.")
if html := soup.find("html"):
metadata["language"] = html.get("lang", "No language found.")
return metadata
[docs]class WebBaseLoader(BaseLoader):
"""Loader that uses urllib and beautiful soup to load webpages."""
web_paths: List[str]
requests_per_second: int = 2
"""Max number of concurrent requests to make."""
default_parser: str = "html.parser"
"""Default parser to use for BeautifulSoup."""
requests_kwargs: Dict[str, Any] = {}
"""kwargs for requests"""
raise_for_status: bool = False
"""Raise an exception if http status code denotes an error."""
bs_get_text_kwargs: Dict[str, Any] = {}
"""kwargs for beatifulsoup4 get_text"""
[docs] def __init__(
self,
web_path: Union[str, List[str]],
header_template: Optional[dict] = None,
verify_ssl: Optional[bool] = True,
proxies: Optional[dict] = None,
continue_on_failure: Optional[bool] = False,
):
"""Initialize with webpage path."""
# TODO: Deprecate web_path in favor of web_paths, and remove this
# left like this because there are a number of loaders that expect single
# urls
if isinstance(web_path, str):
self.web_paths = [web_path]
elif isinstance(web_path, List):
self.web_paths = web_path
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"bs4 package not found, please install it with " "`pip install bs4`"
)
headers = header_template or default_header_template
if not headers.get("User-Agent"):
try:
from fake_useragent import UserAgent
headers["User-Agent"] = UserAgent().random
except ImportError:
logger.info(
"fake_useragent not found, using default user agent."
"To get a realistic header for requests, "
"`pip install fake_useragent`."
)
self.session = requests.Session()
self.session.headers = dict(headers)
self.session.verify = verify_ssl
self.continue_on_failure = continue_on_failure
if proxies:
self.session.proxies.update(proxies)
@property
def web_path(self) -> str:
if len(self.web_paths) > 1:
raise ValueError("Multiple webpaths found.")
return self.web_paths[0]
async def _fetch(
self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
) -> str:
async with aiohttp.ClientSession() as session:
for i in range(retries):
try:
async with session.get(
url,
headers=self.session.headers,
ssl=None if self.session.verify else False,
) as response:
return await response.text()
except aiohttp.ClientConnectionError as e:
if i == retries - 1:
raise
else:
logger.warning(
f"Error fetching {url} with attempt "
f"{i + 1}/{retries}: {e}. Retrying..."
)
await asyncio.sleep(cooldown * backoff**i)
raise ValueError("retry count exceeded")
async def _fetch_with_rate_limit(
self, url: str, semaphore: asyncio.Semaphore
) -> str:
async with semaphore:
try:
return await self._fetch(url)
except Exception as e:
if self.continue_on_failure:
logger.warning(
f"Error fetching {url}, skipping due to"
f" continue_on_failure=True"
)
return ""
logger.exception(
f"Error fetching {url} and aborting, use continue_on_failure=True "
"to continue loading urls after encountering an error."
)
raise e
[docs] async def fetch_all(self, urls: List[str]) -> Any:
"""Fetch all urls concurrently with rate limiting."""
semaphore = asyncio.Semaphore(self.requests_per_second)
tasks = []
for url in urls:
task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
tasks.append(task)
try:
from tqdm.asyncio import tqdm_asyncio
return await tqdm_asyncio.gather(
*tasks, desc="Fetching pages", ascii=True, mininterval=1
)
except ImportError:
warnings.warn("For better logging of progress, `pip install tqdm`")
return await asyncio.gather(*tasks)
@staticmethod
def _check_parser(parser: str) -> None:
"""Check that parser is valid for bs4."""
valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"]
if parser not in valid_parsers:
raise ValueError(
"`parser` must be one of " + ", ".join(valid_parsers) + "."
)
[docs] def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]:
"""Fetch all urls, then return soups for all results."""
from bs4 import BeautifulSoup
results = asyncio.run(self.fetch_all(urls))
final_results = []
for i, result in enumerate(results):
url = urls[i]
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
final_results.append(BeautifulSoup(result, parser))
return final_results
def _scrape(self, url: str, parser: Union[str, None] = None) -> Any:
from bs4 import BeautifulSoup
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
html_doc = self.session.get(url, **self.requests_kwargs)
if self.raise_for_status:
html_doc.raise_for_status()
html_doc.encoding = html_doc.apparent_encoding
return BeautifulSoup(html_doc.text, parser)
[docs] def scrape(self, parser: Union[str, None] = None) -> Any:
"""Scrape data from webpage and return it in BeautifulSoup format."""
if parser is None:
parser = self.default_parser
return self._scrape(self.web_path, parser)
[docs] def lazy_load(self) -> Iterator[Document]:
"""Lazy load text from the url(s) in web_path."""
for path in self.web_paths:
soup = self._scrape(path)
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, path)
yield Document(page_content=text, metadata=metadata)
[docs] def load(self) -> List[Document]:
"""Load text from the url(s) in web_path."""
return list(self.lazy_load())
[docs] def aload(self) -> List[Document]:
"""Load text from the urls in web_path async into Documents."""
results = self.scrape_all(self.web_paths)
docs = []
for i in range(len(results)):
soup = results[i]
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, self.web_paths[i])
docs.append(Document(page_content=text, metadata=metadata))
return docs
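A minimal sketch of how this loader is typically driven; the URLs are placeholders:
from langchain.document_loaders import WebBaseLoader

loader = WebBaseLoader(["https://example.com", "https://example.org"])
docs = loader.load()     # sequential fetching, one Document per URL
# docs = loader.aload()  # concurrent fetching, throttled by requests_per_second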
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/web_base.html
Source code for langchain.document_loaders.figma
"""Loads Figma files json dump."""
import json
import urllib.request
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict
[docs]class FigmaFileLoader(BaseLoader):
"""Loads Figma file json."""
[docs] def __init__(self, access_token: str, ids: str, key: str):
"""Initialize with access token, ids, and key.
Args:
access_token: The access token for the Figma REST API.
ids: The ids of the Figma file.
key: The key for the Figma file
"""
self.access_token = access_token
self.ids = ids
self.key = key
def _construct_figma_api_url(self) -> str:
api_url = "https://api.figma.com/v1/files/%s/nodes?ids=%s" % (
self.key,
self.ids,
)
return api_url
def _get_figma_file(self) -> Any:
"""Get Figma file from Figma REST API."""
headers = {"X-Figma-Token": self.access_token}
request = urllib.request.Request(
self._construct_figma_api_url(), headers=headers
)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data
[docs] def load(self) -> List[Document]:
"""Load file"""
data = self._get_figma_file()
text = stringify_dict(data)
metadata = {"source": self._construct_figma_api_url()}
return [Document(page_content=text, metadata=metadata)]
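A usage sketch; the token, node ids, and file key below are placeholders (the key comes from the Figma file URL):
from langchain.document_loaders import FigmaFileLoader

loader = FigmaFileLoader(
    access_token="YOUR_FIGMA_TOKEN",  # placeholder
    ids="0:1",                        # placeholder node id(s)
    key="FILE_KEY",                   # placeholder file key
)
docs = loader.load()  # a single Document containing the stringified JSON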
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/figma.html
Source code for langchain.document_loaders.azlyrics
"""Loads AZLyrics."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
[docs]class AZLyricsLoader(WebBaseLoader):
"""Loads AZLyrics webpages."""
[docs] def load(self) -> List[Document]:
"""Load webpages into Documents."""
soup = self.scrape()
title = soup.title.text
lyrics = soup.find_all("div", {"class": ""})[2].text
text = title + lyrics
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]
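Because the constructor is inherited from WebBaseLoader, usage reduces to passing a page URL (placeholder below):
from langchain.document_loaders import AZLyricsLoader

loader = AZLyricsLoader("https://www.azlyrics.com/lyrics/artist/song.html")  # placeholder URL
docs = loader.load()  # one Document: title followed by lyrics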
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/azlyrics.html
Source code for langchain.document_loaders.csv_loader
import csv
from typing import Any, Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
[docs]class CSVLoader(BaseLoader):
"""Loads a CSV file into a list of documents.
Each document represents one row of the CSV file. Every row is converted
into key/value pairs, which are written to the document's page_content,
one pair per line.
The source for each document loaded from csv is set to the value of the
`file_path` argument for all documents by default.
You can override this by setting the `source_column` argument to the
name of a column in the CSV file.
The source of each document will then be set to the value of the column
with the name specified in `source_column`.
Output Example:
.. code-block:: txt
column1: value1
column2: value2
column3: value3
"""
[docs] def __init__(
self,
file_path: str,
source_column: Optional[str] = None,
csv_args: Optional[Dict] = None,
encoding: Optional[str] = None,
):
"""
Args:
file_path: The path to the CSV file.
source_column: The name of the column in the CSV file to use as the source.
Optional. Defaults to None.
csv_args: A dictionary of arguments to pass to the csv.DictReader.
Optional. Defaults to None.
encoding: The encoding of the CSV file. Optional. Defaults to None.
"""
self.file_path = file_path
self.source_column = source_column
self.encoding = encoding
self.csv_args = csv_args or {}
[docs] def load(self) -> List[Document]:
"""Load data into document objects."""
docs = []
with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
csv_reader = csv.DictReader(csvfile, **self.csv_args) # type: ignore
for i, row in enumerate(csv_reader):
content = "\n".join(f"{k.strip()}: {v.strip()}" for k, v in row.items())
try:
source = (
row[self.source_column]
if self.source_column is not None
else self.file_path
)
except KeyError:
raise ValueError(
f"Source column '{self.source_column}' not found in CSV file."
)
metadata = {"source": source, "row": i}
doc = Document(page_content=content, metadata=metadata)
docs.append(doc)
return docs
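A short sketch of the row-per-document behavior described above; the file path and column names are placeholders:
from langchain.document_loaders import CSVLoader

loader = CSVLoader(
    file_path="data.csv",         # placeholder path
    source_column="url",          # optional: take each source from this column
    csv_args={"delimiter": ";"},  # forwarded to csv.DictReader
)
docs = loader.load()  # one Document per row, one "column: value" line per field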
[docs]class UnstructuredCSVLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load CSV files. Like other
Unstructured loaders, UnstructuredCSVLoader can be used in both
"single" and "elements" mode. If you use the loader in "elements"
mode, the CSV file will be a single Unstructured Table element.
If you use the loader in "elements" mode, an HTML representation
of the table will be available in the "text_as_html" key in the
document metadata.
Examples
--------
from langchain.document_loaders.csv_loader import UnstructuredCSVLoader
loader = UnstructuredCSVLoader("stanley-cups.csv", mode="elements")
docs = loader.load()
"""
[docs] def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Args:
file_path: The path to the CSV file.
mode: The mode to use when loading the CSV file.
Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.8")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.csv import partition_csv
return partition_csv(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/csv_loader.html
Source code for langchain.document_loaders.helpers
"""Document loader helpers."""
import concurrent.futures
from typing import List, NamedTuple, Optional, cast
[docs]class FileEncoding(NamedTuple):
"""A file encoding as the NamedTuple."""
encoding: Optional[str]
"""The encoding of the file."""
confidence: float
"""The confidence of the encoding."""
language: Optional[str]
"""The language of the file."""
[docs]def detect_file_encodings(file_path: str, timeout: int = 5) -> List[FileEncoding]:
"""Try to detect the file encoding.
Returns a list of `FileEncoding` tuples with the detected encodings ordered
by confidence.
Args:
file_path: The path to the file to detect the encoding for.
timeout: The timeout in seconds for the encoding detection.
"""
import chardet
def read_and_detect(file_path: str) -> List[dict]:
with open(file_path, "rb") as f:
rawdata = f.read()
return cast(List[dict], chardet.detect_all(rawdata))
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(read_and_detect, file_path)
try:
encodings = future.result(timeout=timeout)
except concurrent.futures.TimeoutError:
raise TimeoutError(
f"Timeout reached while detecting encoding for {file_path}"
)
if all(encoding["encoding"] is None for encoding in encodings):
raise RuntimeError(f"Could not detect encoding for {file_path}")
return [FileEncoding(**enc) for enc in encodings if enc["encoding"] is not None]
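A sketch of feeding the detected encoding into open(); the path is a placeholder:
from langchain.document_loaders.helpers import detect_file_encodings

encodings = detect_file_encodings("some_file.txt")  # ordered by confidence
with open("some_file.txt", encoding=encodings[0].encoding) as f:
    text = f.read()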
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/helpers.html
Source code for langchain.document_loaders.azure_blob_storage_container
"""Loading logic for loading documents from an Azure Blob Storage container."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.azure_blob_storage_file import (
AzureBlobStorageFileLoader,
)
from langchain.document_loaders.base import BaseLoader
[docs]class AzureBlobStorageContainerLoader(BaseLoader):
"""Loading Documents from Azure Blob Storage."""
[docs] def __init__(self, conn_str: str, container: str, prefix: str = ""):
"""Initialize with connection string, container and blob prefix."""
self.conn_str = conn_str
"""Connection string for Azure Blob Storage."""
self.container = container
"""Container name."""
self.prefix = prefix
"""Prefix for blob names."""
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import ContainerClient
except ImportError as exc:
raise ImportError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
container = ContainerClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container
)
docs = []
blob_list = container.list_blobs(name_starts_with=self.prefix)
for blob in blob_list:
loader = AzureBlobStorageFileLoader(
self.conn_str, self.container, blob.name # type: ignore
)
docs.extend(loader.load())
return docs
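A usage sketch; the connection string and container name are placeholders:
from langchain.document_loaders import AzureBlobStorageContainerLoader

loader = AzureBlobStorageContainerLoader(
    conn_str="DefaultEndpointsProtocol=https;AccountName=...",  # placeholder
    container="my-container",  # placeholder
    prefix="reports/",         # only blobs whose names start with this prefix
)
docs = loader.load()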
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/azure_blob_storage_container.html
Source code for langchain.document_loaders.gcs_directory
"""Loading logic for loading documents from an GCS directory."""
from typing import Callable, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.gcs_file import GCSFileLoader
[docs]class GCSDirectoryLoader(BaseLoader):
"""Loads Documents from GCS."""
[docs] def __init__(
self,
project_name: str,
bucket: str,
prefix: str = "",
loader_func: Optional[Callable[[str], BaseLoader]] = None,
):
"""Initialize with bucket and key name.
Args:
project_name: The name of the project for the GCS bucket.
bucket: The name of the GCS bucket.
prefix: The prefix of the GCS bucket.
loader_func: A loader function that instantiates a loader based on a
file_path argument. If nothing is provided, the GCSFileLoader
uses its default loader.
"""
self.project_name = project_name
self.bucket = bucket
self.prefix = prefix
self._loader_func = loader_func
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from google.cloud import storage
except ImportError:
raise ImportError(
"Could not import google-cloud-storage python package. "
"Please install it with `pip install google-cloud-storage`."
)
client = storage.Client(project=self.project_name)
docs = []
for blob in client.list_blobs(self.bucket, prefix=self.prefix):
# we shall just skip directories since GCSFileLoader creates
# intermediate directories on the fly
if blob.name.endswith("/"):
continue
loader = GCSFileLoader(
self.project_name, self.bucket, blob.name, loader_func=self._loader_func
)
docs.extend(loader.load())
return docs
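A sketch showing the optional loader_func hook; the project and bucket names are placeholders:
from langchain.document_loaders import GCSDirectoryLoader, TextLoader

loader = GCSDirectoryLoader(
    project_name="my-project",  # placeholder
    bucket="my-bucket",         # placeholder
    prefix="docs/",
    loader_func=lambda file_path: TextLoader(file_path),  # optional override
)
docs = loader.load()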
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gcs_directory.html
Source code for langchain.document_loaders.duckdb_loader
from typing import Dict, List, Optional, cast
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class DuckDBLoader(BaseLoader):
"""Loads a query result from DuckDB into a list of documents.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
[docs] def __init__(
self,
query: str,
database: str = ":memory:",
read_only: bool = False,
config: Optional[Dict[str, str]] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
):
"""
Args:
query: The query to execute.
database: The database to connect to. Defaults to ":memory:".
read_only: Whether to open the database in read-only mode.
Defaults to False.
config: A dictionary of configuration options to pass to the database.
Optional.
page_content_columns: The columns to write into the `page_content`
of the document. Optional.
metadata_columns: The columns to write into the `metadata` of the document.
Optional.
"""
self.query = query
self.database = database
self.read_only = read_only
self.config = config or {}
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
[docs] def load(self) -> List[Document]:
try:
import duckdb
except ImportError:
raise ImportError(
"Could not import duckdb python package. "
"Please install it with `pip install duckdb`."
)
docs = []
with duckdb.connect(
database=self.database, read_only=self.read_only, config=self.config
) as con:
query_result = con.execute(self.query)
results = query_result.fetchall()
description = cast(list, query_result.description)
field_names = [c[0] for c in description]
if self.page_content_columns is None:
page_content_columns = field_names
else:
page_content_columns = self.page_content_columns
if self.metadata_columns is None:
metadata_columns = []
else:
metadata_columns = self.metadata_columns
for result in results:
page_content = "\n".join(
f"{column}: {result[field_names.index(column)]}"
for column in page_content_columns
)
metadata = {
column: result[field_names.index(column)]
for column in metadata_columns
}
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
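A self-contained sketch that runs against the default in-memory database; the SELECT is a stand-in for a real query:
from langchain.document_loaders import DuckDBLoader

loader = DuckDBLoader(
    query="SELECT 'Alice' AS author, 'Report' AS title",  # stand-in query
    page_content_columns=["title"],
    metadata_columns=["author"],
)
docs = loader.load()  # one Document per result row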
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/duckdb_loader.html
Source code for langchain.document_loaders.twitter
"""Twitter document loader."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import tweepy
from tweepy import OAuth2BearerHandler, OAuthHandler
def _dependable_tweepy_import() -> tweepy:
try:
import tweepy
except ImportError:
raise ImportError(
"tweepy package not found, please install it with `pip install tweepy`"
)
return tweepy
[docs]class TwitterTweetLoader(BaseLoader):
"""Twitter tweets loader.
Read tweets of a user's twitter handle.
First you need to go to
`https://developer.twitter.com/en/docs/twitter-api
/getting-started/getting-access-to-the-twitter-api`
to get your token, and create a v2 version of the app.
"""
[docs] def __init__(
self,
auth_handler: Union[OAuthHandler, OAuth2BearerHandler],
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
):
self.auth = auth_handler
self.twitter_users = twitter_users
self.number_tweets = number_tweets
[docs] def load(self) -> List[Document]:
"""Load tweets."""
tweepy = _dependable_tweepy_import()
api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser())
results: List[Document] = []
for username in self.twitter_users:
tweets = api.user_timeline(screen_name=username, count=self.number_tweets)
user = api.get_user(screen_name=username)
docs = self._format_tweets(tweets, user)
results.extend(docs)
return results
def _format_tweets(
self, tweets: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format tweets into a string."""
for tweet in tweets:
metadata = {
"created_at": tweet["created_at"],
"user_info": user_info,
}
yield Document(
page_content=tweet["text"],
metadata=metadata,
)
[docs] @classmethod
def from_bearer_token(
cls,
oauth2_bearer_token: str,
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
) -> TwitterTweetLoader:
"""Create a TwitterTweetLoader from OAuth2 bearer token."""
tweepy = _dependable_tweepy_import()
auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token)
return cls(
auth_handler=auth,
twitter_users=twitter_users,
number_tweets=number_tweets,
)
[docs] @classmethod
def from_secrets(
cls,
access_token: str,
access_token_secret: str,
consumer_key: str,
consumer_secret: str,
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
) -> TwitterTweetLoader:
"""Create a TwitterTweetLoader from access tokens and secrets."""
tweepy = _dependable_tweepy_import()
auth = tweepy.OAuthHandler(
access_token=access_token,
access_token_secret=access_token_secret,
consumer_key=consumer_key,
consumer_secret=consumer_secret,
)
return cls(
auth_handler=auth,
twitter_users=twitter_users,
number_tweets=number_tweets,
)
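A usage sketch via the bearer-token constructor shown above; the token and handle are placeholders:
from langchain.document_loaders import TwitterTweetLoader

loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="YOUR_BEARER_TOKEN",  # placeholder
    twitter_users=["some_handle"],            # placeholder
    number_tweets=50,
)
docs = loader.load()  # one Document per tweet, user info in metadata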
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/twitter.html
Source code for langchain.document_loaders.epub
"""Loads EPub files."""
from typing import List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
satisfies_min_unstructured_version,
)
[docs]class UnstructuredEPubLoader(UnstructuredFileLoader):
"""Loader that uses Unstructured to load EPUB files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredEPubLoader
loader = UnstructuredEPubLoader(
"example.epub", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-epub
"""
def _get_elements(self) -> List:
min_unstructured_version = "0.5.4"
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
"Partitioning epub files is only supported in "
f"unstructured>={min_unstructured_version}."
)
from unstructured.partition.epub import partition_epub
return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/epub.html
Source code for langchain.document_loaders.dataframe
"""Load from a Dataframe object"""
from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class DataFrameLoader(BaseLoader):
"""Load Pandas DataFrame."""
[docs] def __init__(self, data_frame: Any, page_content_column: str = "text"):
"""Initialize with dataframe object.
Args:
data_frame: Pandas DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "text".
"""
import pandas as pd
if not isinstance(data_frame, pd.DataFrame):
raise ValueError(
f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}"
)
self.data_frame = data_frame
self.page_content_column = page_content_column
[docs] def lazy_load(self) -> Iterator[Document]:
"""Lazy load records from dataframe."""
for _, row in self.data_frame.iterrows():
text = row[self.page_content_column]
metadata = row.to_dict()
metadata.pop(self.page_content_column)
yield Document(page_content=text, metadata=metadata)
[docs] def load(self) -> List[Document]:
"""Load full dataframe."""
return list(self.lazy_load())
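A self-contained sketch; every column other than page_content_column lands in metadata:
import pandas as pd
from langchain.document_loaders import DataFrameLoader

df = pd.DataFrame({"text": ["hello", "world"], "label": [0, 1]})
loader = DataFrameLoader(df, page_content_column="text")
docs = loader.load()  # two Documents; "label" ends up in each metadata dict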
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/dataframe.html
Source code for langchain.document_loaders.image
"""Loads image files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredImageLoader(UnstructuredFileLoader):
"""Loader that uses Unstructured to load PNG and JPG files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredImageLoader
loader = UnstructuredImageLoader(
"example.png", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-image
"""
def _get_elements(self) -> List:
from unstructured.partition.image import partition_image
return partition_image(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/image.html
Source code for langchain.document_loaders.tomarkdown
"""Loads HTML to markdown using 2markdown."""
from __future__ import annotations
from typing import Iterator, List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ToMarkdownLoader(BaseLoader):
"""Loads HTML to markdown using 2markdown."""
[docs] def __init__(self, url: str, api_key: str):
"""Initialize with url and api key."""
self.url = url
self.api_key = api_key
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load the file."""
response = requests.post(
"https://2markdown.com/api/2md",
headers={"X-Api-Key": self.api_key},
json={"url": self.url},
)
text = response.json()["article"]
metadata = {"source": self.url}
yield Document(page_content=text, metadata=metadata)
[docs] def load(self) -> List[Document]:
"""Load file."""
return list(self.lazy_load())
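A usage sketch with a placeholder url and api key:
from langchain.document_loaders import ToMarkdownLoader

loader = ToMarkdownLoader(
    url="https://example.com/article",  # placeholder
    api_key="YOUR_2MARKDOWN_KEY",       # placeholder
)
docs = loader.load()  # one Document holding the returned markdown 'article'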
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tomarkdown.html
Source code for langchain.document_loaders.xml
"""Loads Microsoft Excel files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
[docs]class UnstructuredXMLLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load XML files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredXMLLoader
loader = UnstructuredXMLLoader(
"example.xml", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-xml
"""
[docs] def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
validate_unstructured_version(min_unstructured_version="0.6.7")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.xml import partition_xml
return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/xml.html
Source code for langchain.document_loaders.readthedocs
"""Loads ReadTheDocs documentation directory dump."""
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ReadTheDocsLoader(BaseLoader):
"""Loads ReadTheDocs documentation directory dump."""
[docs] def __init__(
self,
path: Union[str, Path],
encoding: Optional[str] = None,
errors: Optional[str] = None,
custom_html_tag: Optional[Tuple[str, dict]] = None,
**kwargs: Optional[Any]
):
"""
Initialize ReadTheDocsLoader
The loader loops over all files under `path` and extracts the actual content of
the files by retrieving main html tags. Default main html tags include
`<main id="main-content">` and `<div role="main">`. You
can also define your own html tags by passing custom_html_tag, e.g.
`("div", {"class": "main"})`. The loader iterates html tags with the order of
custom html tags (if exists) and default html tags. If any of the tags is not
empty, the loop will break and retrieve the content out of that tag.
Args:
path: The location of pulled readthedocs folder.
encoding: The encoding with which to open the documents.
errors: Specify how encoding and decoding errors are to be handled—this
cannot be used in binary mode.
custom_html_tag: Optional custom html tag to retrieve the content from
files.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"Could not import python packages. "
"Please install it with `pip install beautifulsoup4`. "
)
try:
_ = BeautifulSoup(
"<html><body>Parser builder library test.</body></html>", **kwargs
)
except Exception as e:
raise ValueError("Parsing kwargs do not appear valid") from e
self.file_path = Path(path)
self.encoding = encoding
self.errors = errors
self.custom_html_tag = custom_html_tag
self.bs_kwargs = kwargs
[docs] def load(self) -> List[Document]:
"""Load documents."""
docs = []
for p in self.file_path.rglob("*"):
if p.is_dir():
continue
with open(p, encoding=self.encoding, errors=self.errors) as f:
text = self._clean_data(f.read())
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
def _clean_data(self, data: str) -> str:
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, **self.bs_kwargs)
# default tags
html_tags = [
("div", {"role": "main"}),
("main", {"id": "main-content"}),
]
if self.custom_html_tag is not None:
html_tags.append(self.custom_html_tag)
text = None
# reversed order. check the custom one first
for tag, attrs in html_tags[::-1]:
text = soup.find(tag, attrs)
# if found, break
if text is not None:
break
if text is not None:
text = text.get_text()
else:
text = ""
# trim empty lines
return "\n".join([t for t in text.split("\n") if t])
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/readthedocs.html
Source code for langchain.document_loaders.modern_treasury
"""Loader that fetches data from Modern Treasury"""
import json
import urllib.request
from base64 import b64encode
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_value
MODERN_TREASURY_ENDPOINTS = {
"payment_orders": "https://app.moderntreasury.com/api/payment_orders",
"expected_payments": "https://app.moderntreasury.com/api/expected_payments",
"returns": "https://app.moderntreasury.com/api/returns",
"incoming_payment_details": "https://app.moderntreasury.com/api/\
incoming_payment_details",
"counterparties": "https://app.moderntreasury.com/api/counterparties",
"internal_accounts": "https://app.moderntreasury.com/api/internal_accounts",
"external_accounts": "https://app.moderntreasury.com/api/external_accounts",
"transactions": "https://app.moderntreasury.com/api/transactions",
"ledgers": "https://app.moderntreasury.com/api/ledgers",
"ledger_accounts": "https://app.moderntreasury.com/api/ledger_accounts",
"ledger_transactions": "https://app.moderntreasury.com/api/ledger_transactions",
"events": "https://app.moderntreasury.com/api/events",
"invoices": "https://app.moderntreasury.com/api/invoices",
}
[docs]class ModernTreasuryLoader(BaseLoader):
"""Loader that fetches data from Modern Treasury."""
[docs] def __init__(
self,
resource: str,
organization_id: Optional[str] = None,
api_key: Optional[str] = None,
) -> None:
"""
Args:
resource: The Modern Treasury resource to load.
organization_id: The Modern Treasury organization ID. It can also be
specified via the environment variable
"MODERN_TREASURY_ORGANIZATION_ID".
api_key: The Modern Treasury API key. It can also be specified via
the environment variable "MODERN_TREASURY_API_KEY".
"""
self.resource = resource
organization_id = organization_id or get_from_env(
"organization_id", "MODERN_TREASURY_ORGANIZATION_ID"
)
api_key = api_key or get_from_env("api_key", "MODERN_TREASURY_API_KEY")
credentials = f"{organization_id}:{api_key}".encode("utf-8")
basic_auth_token = b64encode(credentials).decode("utf-8")
self.headers = {"Authorization": f"Basic {basic_auth_token}"}
def _make_request(self, url: str) -> List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_value(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
[docs] def load(self) -> List[Document]:
return self._get_resource()
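A usage sketch; with no explicit arguments, credentials come from the environment variables documented in the constructor:
from langchain.document_loaders import ModernTreasuryLoader

loader = ModernTreasuryLoader(resource="payment_orders")  # any MODERN_TREASURY_ENDPOINTS key
docs = loader.load()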
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/modern_treasury.html
Source code for langchain.document_loaders.reddit
"""Reddit document loader."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import praw
def _dependable_praw_import() -> praw:
try:
import praw
except ImportError:
raise ImportError(
"praw package not found, please install it with `pip install praw`"
)
return praw
[docs]class RedditPostsLoader(BaseLoader):
"""Reddit posts loader.
Read posts from a subreddit or a Reddit user.
First, you need to go to
https://www.reddit.com/prefs/apps/
and create your application.
"""
[docs] def __init__(
self,
client_id: str,
client_secret: str,
user_agent: str,
search_queries: Sequence[str],
mode: str,
categories: Sequence[str] = ["new"],
number_posts: Optional[int] = 10,
):
"""
Initialize with client_id, client_secret, user_agent, search_queries, mode,
categories, number_posts.
Example: https://www.reddit.com/r/learnpython/
Args:
client_id: Reddit client id.
client_secret: Reddit client secret.
user_agent: Reddit user agent.
search_queries: The search queries.
mode: The mode to run in; either "subreddit" or "username".
categories: The categories. Default: ["new"]
number_posts: The number of posts. Default: 10
"""
self.client_id = client_id
self.client_secret = client_secret
self.user_agent = user_agent
self.search_queries = search_queries
self.mode = mode
self.categories = categories
self.number_posts = number_posts
[docs] def load(self) -> List[Document]:
"""Load reddits."""
praw = _dependable_praw_import()
reddit = praw.Reddit(
client_id=self.client_id,
client_secret=self.client_secret,
user_agent=self.user_agent,
)
results: List[Document] = []
if self.mode == "subreddit":
for search_query in self.search_queries:
for category in self.categories:
docs = self._subreddit_posts_loader(
search_query=search_query, category=category, reddit=reddit
)
results.extend(docs)
elif self.mode == "username":
for search_query in self.search_queries:
for category in self.categories:
docs = self._user_posts_loader(
search_query=search_query, category=category, reddit=reddit
)
results.extend(docs)
else:
raise ValueError(
"mode not correct, please enter 'username' or 'subreddit' as mode"
)
return results
def _subreddit_posts_loader(
self, search_query: str, category: str, reddit: praw.reddit.Reddit
) -> Iterable[Document]:
subreddit = reddit.subreddit(search_query)
method = getattr(subreddit, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {
"post_subreddit": post.subreddit_name_prefixed,
"post_category": category,
"post_title": post.title,
"post_score": post.score,
"post_id": post.id,
"post_url": post.url,
"post_author": post.author,
}
yield Document(
page_content=post.selftext,
metadata=metadata,
)
def _user_posts_loader(
self, search_query: str, category: str, reddit: praw.reddit.Reddit
) -> Iterable[Document]:
user = reddit.redditor(search_query)
method = getattr(user.submissions, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {
"post_subreddit": post.subreddit_name_prefixed,
"post_category": category,
"post_title": post.title,
"post_score": post.score,
"post_id": post.id,
"post_url": post.url,
"post_author": post.author,
}
yield Document(
page_content=post.selftext,
metadata=metadata,
)
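A usage sketch with placeholder credentials:
from langchain.document_loaders import RedditPostsLoader

loader = RedditPostsLoader(
    client_id="YOUR_CLIENT_ID",           # placeholder
    client_secret="YOUR_CLIENT_SECRET",   # placeholder
    user_agent="extractor by u/yourname", # placeholder
    search_queries=["learnpython"],       # subreddit names in this mode
    mode="subreddit",                     # or "username"
    categories=["new", "hot"],
    number_posts=10,
)
docs = loader.load()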
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/reddit.html
Source code for langchain.document_loaders.gitbook
"""Loads GitBook."""
from typing import Any, List, Optional
from urllib.parse import urljoin, urlparse
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
[docs]class GitbookLoader(WebBaseLoader):
"""Load GitBook data.
1. load from either a single page, or
2. load all (relative) paths in the navbar.
"""
[docs] def __init__(
self,
web_page: str,
load_all_paths: bool = False,
base_url: Optional[str] = None,
content_selector: str = "main",
continue_on_failure: Optional[bool] = False,
):
"""Initialize with web page and whether to load all paths.
Args:
web_page: The web page to load or the starting point from where
relative paths are discovered.
load_all_paths: If set to True, all relative paths in the navbar
are loaded instead of only `web_page`.
base_url: If `load_all_paths` is True, the relative paths are
appended to this base url. Defaults to `web_page`.
content_selector: The CSS selector for the content to load.
Defaults to "main".
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
"""
self.base_url = base_url or web_page
if self.base_url.endswith("/"):
self.base_url = self.base_url[:-1]
if load_all_paths:
# set web_path to the sitemap if we want to crawl all paths
web_paths = f"{self.base_url}/sitemap.xml"
else:
web_paths = web_page
super().__init__(web_paths)
self.load_all_paths = load_all_paths
self.content_selector = content_selector
self.continue_on_failure = continue_on_failure
[docs] def load(self) -> List[Document]:
"""Fetch text from one single GitBook page."""
if self.load_all_paths:
soup_info = self.scrape()
relative_paths = self._get_paths(soup_info)
urls = [urljoin(self.base_url, path) for path in relative_paths]
soup_infos = self.scrape_all(urls)
_documents = [
self._get_document(soup_info, url)
for soup_info, url in zip(soup_infos, urls)
]
else:
soup_info = self.scrape()
_documents = [self._get_document(soup_info, self.web_path)]
documents = [d for d in _documents if d]
return documents
def _get_document(
self, soup: Any, custom_url: Optional[str] = None
) -> Optional[Document]:
"""Fetch content from page and return Document."""
page_content_raw = soup.find(self.content_selector)
if not page_content_raw:
return None
content = page_content_raw.get_text(separator="\n").strip()
title_if_exists = page_content_raw.find("h1")
title = title_if_exists.text if title_if_exists else ""
metadata = {"source": custom_url or self.web_path, "title": title}
return Document(page_content=content, metadata=metadata)
def _get_paths(self, soup: Any) -> List[str]:
"""Fetch all relative paths in the navbar."""
return [urlparse(loc.text).path for loc in soup.find_all("loc")]
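A sketch of both modes described in the class docstring; the space URL is a placeholder:
from langchain.document_loaders import GitbookLoader

# Single page:
loader = GitbookLoader("https://docs.example.org")  # placeholder
# All pages listed in sitemap.xml:
loader = GitbookLoader("https://docs.example.org", load_all_paths=True)
docs = loader.load()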
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gitbook.html
Source code for langchain.document_loaders.cube_semantic
import json
import logging
import time
from typing import List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class CubeSemanticLoader(BaseLoader):
"""Load Cube semantic layer metadata.
Args:
cube_api_url: REST API endpoint.
Use the REST API of your Cube's deployment.
Please find out more information here:
https://cube.dev/docs/http-api/rest#configuration-base-path
cube_api_token: Cube API token.
Authentication tokens are generated based on your Cube's API secret.
Please find out more information here:
https://cube.dev/docs/security#generating-json-web-tokens-jwt
load_dimension_values: Whether to load dimension values for every string
dimension or not.
dimension_values_limit: Maximum number of dimension values to load.
dimension_values_max_retries: Maximum number of retries to load dimension
values.
dimension_values_retry_delay: Delay between retries to load dimension values.
"""
[docs] def __init__(
self,
cube_api_url: str,
cube_api_token: str,
load_dimension_values: bool = True,
dimension_values_limit: int = 10_000,
dimension_values_max_retries: int = 10,
dimension_values_retry_delay: int = 3,
):
self.cube_api_url = cube_api_url
self.cube_api_token = cube_api_token
self.load_dimension_values = load_dimension_values
self.dimension_values_limit = dimension_values_limit
self.dimension_values_max_retries = dimension_values_max_retries
self.dimension_values_retry_delay = dimension_values_retry_delay
def _get_dimension_values(self, dimension_name: str) -> List[str]:
"""Makes a call to Cube's REST API load endpoint to retrieve
values for dimensions.
These values can be used to achieve a more accurate filtering.
"""
logger.info("Loading dimension values for: {dimension_name}...")
headers = {
"Content-Type": "application/json",
"Authorization": self.cube_api_token,
}
query = {
"query": {
"dimensions": [dimension_name],
"limit": self.dimension_values_limit,
}
}
retries = 0
while retries < self.dimension_values_max_retries:
response = requests.request(
"POST",
f"{self.cube_api_url}/load",
headers=headers,
data=json.dumps(query),
)
if response.status_code == 200:
response_data = response.json()
if (
"error" in response_data
and response_data["error"] == "Continue wait"
):
logger.info("Retrying...")
retries += 1
time.sleep(self.dimension_values_retry_delay)
continue
else:
dimension_values = [
item[dimension_name] for item in response_data["data"]
]
return dimension_values
else:
logger.error("Request failed with status code:", response.status_code)
break
if retries == self.dimension_values_max_retries:
logger.info("Maximum retries reached.")
return []
[docs] def load(self) -> List[Document]:
"""Makes a call to Cube's REST API metadata endpoint.
Returns:
A list of documents with attributes:
- page_content=column_title + column_description
- metadata
- table_name
- column_name
- column_data_type
- column_member_type
- column_title
- column_description
- column_values
"""
headers = {
"Content-Type": "application/json",
"Authorization": self.cube_api_token,
}
response = requests.get(f"{self.cube_api_url}/meta", headers=headers)
response.raise_for_status()
raw_meta_json = response.json()
cubes = raw_meta_json.get("cubes", [])
docs = []
for cube in cubes:
if cube.get("type") != "view":
continue
cube_name = cube.get("name")
measures = cube.get("measures", [])
dimensions = cube.get("dimensions", [])
for item in measures + dimensions:
column_member_type = "measure" if item in measures else "dimension"
dimension_values = []
item_name = str(item.get("name"))
item_type = str(item.get("type"))
if (
self.load_dimension_values
and column_member_type == "dimension"
and item_type == "string"
):
dimension_values = self._get_dimension_values(item_name)
metadata = dict(
table_name=str(cube_name),
column_name=item_name,
column_data_type=item_type,
column_title=str(item.get("title")),
column_description=str(item.get("description")),
column_member_type=column_member_type,
column_values=dimension_values,
)
page_content = f"{str(item.get('title'))}, "
page_content += f"{str(item.get('description'))}"
docs.append(Document(page_content=page_content, metadata=metadata))
return docs
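A usage sketch with a placeholder deployment URL and token; disabling load_dimension_values skips the per-dimension /load calls:
from langchain.document_loaders import CubeSemanticLoader

loader = CubeSemanticLoader(
    cube_api_url="https://example.cubecloud.dev/cubejs-api/v1",  # placeholder
    cube_api_token="YOUR_JWT",                                   # placeholder
    load_dimension_values=False,
)
docs = loader.load()  # one Document per measure/dimension of each view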
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/cube_semantic.html
Source code for langchain.document_loaders.conllu
"""Load CoNLL-U files."""
import csv
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class CoNLLULoader(BaseLoader):
"""Load CoNLL-U files."""
[docs] def __init__(self, file_path: str):
"""Initialize with a file path."""
self.file_path = file_path
[docs] def load(self) -> List[Document]:
"""Load from a file path."""
with open(self.file_path, encoding="utf8") as f:
tsv = list(csv.reader(f, delimiter="\t"))
# If len(line) > 1, the line is not a comment
lines = [line for line in tsv if len(line) > 1]
text = ""
for i, line in enumerate(lines):
# Do not add a space after a punctuation mark or at the end of the sentence
if line[9] == "SpaceAfter=No" or i == len(lines) - 1:
text += line[1]
else:
text += line[1] + " "
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
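A one-line usage sketch with a placeholder path:
from langchain.document_loaders import CoNLLULoader

loader = CoNLLULoader("example.conllu")  # placeholder path
docs = loader.load()  # a single Document with the reconstructed sentence text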
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/conllu.html
Source code for langchain.document_loaders.python
import tokenize
from langchain.document_loaders.text import TextLoader
[docs]class PythonLoader(TextLoader):
"""
Load Python files, respecting any non-default encoding if specified.
"""
[docs] def __init__(self, file_path: str):
"""Initialize with a file path.
Args:
file_path: The path to the file to load.
"""
with open(file_path, "rb") as f:
encoding, _ = tokenize.detect_encoding(f.readline)
super().__init__(file_path=file_path, encoding=encoding)
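A usage sketch with a placeholder path:
from langchain.document_loaders import PythonLoader

loader = PythonLoader("some_module.py")  # placeholder path
docs = loader.load()  # encoding detected from the PEP 263 coding cookie or BOM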
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/python.html
Source code for langchain.document_loaders.bilibili
import json
import re
import warnings
from typing import List, Tuple
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class BiliBiliLoader(BaseLoader):
"""Loads bilibili transcripts."""
[docs] def __init__(self, video_urls: List[str]):
"""Initialize with bilibili url.
Args:
video_urls: List of bilibili urls.
"""
self.video_urls = video_urls
[docs] def load(self) -> List[Document]:
"""Load Documents from bilibili url."""
results = []
for url in self.video_urls:
transcript, video_info = self._get_bilibili_subs_and_info(url)
doc = Document(page_content=transcript, metadata=video_info)
results.append(doc)
return results
def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]:
try:
from bilibili_api import sync, video
except ImportError:
raise ImportError(
"requests package not found, please install it with "
"`pip install bilibili-api-python`"
)
bvid = re.search(r"BV\w+", url)
if bvid is not None:
v = video.Video(bvid=bvid.group())
else:
aid = re.search(r"av[0-9]+", url)
if aid is not None:
try:
v = video.Video(aid=int(aid.group()[2:]))
except AttributeError:
raise ValueError(f"{url} is not bilibili url.")
else:
raise ValueError(f"{url} is not bilibili url.")
video_info = sync(v.get_info())
video_info.update({"url": url})
sub = sync(v.get_subtitle(video_info["cid"]))
# Get subtitle url
sub_list = sub["subtitles"]
if sub_list:
sub_url = sub_list[0]["subtitle_url"]
if not sub_url.startswith("http"):
sub_url = "https:" + sub_url
result = requests.get(sub_url)
raw_sub_titles = json.loads(result.content)["body"]
raw_transcript = " ".join([c["content"] for c in raw_sub_titles])
raw_transcript_with_meta_info = (
f"Video Title: {video_info['title']},"
f"description: {video_info['desc']}\n\n"
f"Transcript: {raw_transcript}"
)
return raw_transcript_with_meta_info, video_info
else:
raw_transcript = ""
warnings.warn(
f"""
No subtitles found for video: {url}.
Return Empty transcript.
"""
)
return raw_transcript, video_info
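A usage sketch; the video URL is a placeholder matching the BV pattern the loader expects:
from langchain.document_loaders import BiliBiliLoader

loader = BiliBiliLoader(["https://www.bilibili.com/video/BV1xx411c7mD"])  # placeholder
docs = loader.load()  # transcript as page_content, video info as metadata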
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/bilibili.html
Source code for langchain.document_loaders.generic
from __future__ import annotations
from pathlib import Path
from typing import Iterator, List, Literal, Optional, Sequence, Union
from langchain.document_loaders.base import BaseBlobParser, BaseLoader
from langchain.document_loaders.blob_loaders import BlobLoader, FileSystemBlobLoader
from langchain.document_loaders.parsers.registry import get_parser
from langchain.schema import Document
from langchain.text_splitter import TextSplitter
_PathLike = Union[str, Path]
DEFAULT = Literal["default"]
[docs]class GenericLoader(BaseLoader):
"""A generic document loader.
A generic document loader that allows combining an arbitrary blob loader with
a blob parser.
Examples:
.. code-block:: python
from langchain.document_loaders import GenericLoader
from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
loader = GenericLoader.from_filesystem(
path="path/to/directory",
glob="**/[!.]*",
suffixes=[".pdf"],
show_progress=True,
)
docs = loader.lazy_load()
next(docs)
Example instantiations to change which files are loaded:
.. code-block:: python
# Recursively load all text files in a directory.
loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/*.txt")
# Recursively load all non-hidden files in a directory.
loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/[!.]*")
# Load all files in a directory without recursion.
loader = GenericLoader.from_filesystem("/path/to/dir", glob="*")
Example instantiations to change which parser is used:
.. code-block:: python
from langchain.document_loaders.parsers.pdf import PyPDFParser
# Recursively load all text files in a directory.
loader = GenericLoader.from_filesystem(
"/path/to/dir",
glob="**/*.pdf",
parser=PyPDFParser()
)
"""
[docs] def __init__(
self,
blob_loader: BlobLoader,
blob_parser: BaseBlobParser,
) -> None:
"""A generic document loader.
Args:
blob_loader: A blob loader which knows how to yield blobs
blob_parser: A blob parser which knows how to parse blobs into documents
"""
self.blob_loader = blob_loader
self.blob_parser = blob_parser
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily. Use this when working at a large scale."""
for blob in self.blob_loader.yield_blobs():
yield from self.blob_parser.lazy_parse(blob)
[docs] def load(self) -> List[Document]:
"""Load all documents."""
return list(self.lazy_load())
[docs] def load_and_split(
self, text_splitter: Optional[TextSplitter] = None
) -> List[Document]:
"""Load all documents and split them into sentences."""
raise NotImplementedError(
"Loading and splitting is not yet implemented for generic loaders. "
"When they will be implemented they will be added via the initializer. "
"This method should not be used going forward."
)
[docs] @classmethod
def from_filesystem(
cls,
path: _PathLike,
*,
glob: str = "**/[!.]*",
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
parser: Union[DEFAULT, BaseBlobParser] = "default",
) -> GenericLoader:
"""Create a generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
Returns:
A generic document loader.
"""
blob_loader = FileSystemBlobLoader(
path, glob=glob, suffixes=suffixes, show_progress=show_progress
)
if isinstance(parser, str):
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser)
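# Sketch of composing GenericLoader with a custom parser (editor's
# illustration; "TxtParser" is a hypothetical name, not part of this module):
#
#     from langchain.document_loaders.blob_loaders import Blob
#
#     class TxtParser(BaseBlobParser):
#         def lazy_parse(self, blob: Blob) -> Iterator[Document]:
#             # one Document per blob, tagged with where the blob came from
#             yield Document(
#                 page_content=blob.as_string(),
#                 metadata={"source": blob.source},
#             )
#
#     loader = GenericLoader(
#         FileSystemBlobLoader("/path/to/dir", glob="**/*.txt"),
#         TxtParser(),
#     )
#     for doc in loader.lazy_load():  # blobs are parsed one at a time
#         ...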
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/generic.html
|
ec9849910e08-0
|
Source code for langchain.document_loaders.url_playwright
"""Loader that uses Playwright to load a page, then uses unstructured to load the html.
"""
import logging
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class PlaywrightURLLoader(BaseLoader):
"""Loader that uses Playwright and to load a page and unstructured to load the html.
This is useful for loading pages that require javascript to render.
Attributes:
urls (List[str]): List of URLs to load.
continue_on_failure (bool): If True, continue loading other URLs on failure.
headless (bool): If True, the browser will run in headless mode.
"""
[docs] def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
headless: bool = True,
remove_selectors: Optional[List[str]] = None,
):
"""Load a list of URLs using Playwright and unstructured."""
try:
import playwright # noqa:F401
except ImportError:
raise ImportError(
"playwright package not found, please install it with "
"`pip install playwright`"
)
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headless = headless
self.remove_selectors = remove_selectors
[docs] def load(self) -> List[Document]:
"""Load the specified URLs using Playwright and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from playwright.sync_api import sync_playwright
from unstructured.partition.html import partition_html
docs: List[Document] = list()
with sync_playwright() as p:
browser = p.chromium.launch(headless=self.headless)
for url in self.urls:
try:
page = browser.new_page()
page.goto(url)
for selector in self.remove_selectors or []:
elements = page.locator(selector).all()
for element in elements:
if element.is_visible():
element.evaluate("element => element.remove()")
page_source = page.content()
elements = partition_html(text=page_source)
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(
f"Error fetching or processing {url}, exception: {e}"
)
else:
raise e
browser.close()
return docs
[docs] async def aload(self) -> List[Document]:
"""Load the specified URLs with Playwright and create Documents asynchronously.
Use this function when in a jupyter notebook environment.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from playwright.async_api import async_playwright
from unstructured.partition.html import partition_html
docs: List[Document] = list()
async with async_playwright() as p:
browser = await p.chromium.launch(headless=self.headless)
for url in self.urls:
try:
page = await browser.new_page()
await page.goto(url)
for selector in self.remove_selectors or []:
elements = await page.locator(selector).all()
for element in elements:
if await element.is_visible():
await element.evaluate("element => element.remove()")
page_source = await page.content()
elements = partition_html(text=page_source)
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(
f"Error fetching or processing {url}, exception: {e}"
)
else:
raise e
await browser.close()
return docs
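# Usage sketch (editor's illustration; the parameters are the ones defined in
# the initializer above):
#
#     loader = PlaywrightURLLoader(
#         urls=["https://example.com"],
#         remove_selectors=["header", "footer"],  # strip boilerplate elements
#     )
#     docs = loader.load()           # sync API
#     # docs = await loader.aload()  # async API, e.g. inside a Jupyter notebook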
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url_playwright.html
|
370668585036-0
|
Source code for langchain.document_loaders.directory
"""Load documents from a directory."""
import concurrent.futures
import logging
from pathlib import Path
from typing import Any, List, Optional, Type, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.html_bs import BSHTMLLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
FILE_LOADER_TYPE = Union[
Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]
]
logger = logging.getLogger(__name__)
def _is_visible(p: Path) -> bool:
parts = p.parts
for _p in parts:
if _p.startswith("."):
return False
return True
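# Quick illustration of the helper above (editor's note): a path counts as
# visible only if none of its components starts with a dot, e.g.
#     _is_visible(Path("docs/readme.md")) -> True
#     _is_visible(Path(".git/config"))    -> False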
[docs]class DirectoryLoader(BaseLoader):
"""Load documents from a directory."""
[docs] def __init__(
self,
path: str,
glob: str = "**/[!.]*",
silent_errors: bool = False,
load_hidden: bool = False,
loader_cls: FILE_LOADER_TYPE = UnstructuredFileLoader,
loader_kwargs: Union[dict, None] = None,
recursive: bool = False,
show_progress: bool = False,
use_multithreading: bool = False,
max_concurrency: int = 4,
):
"""Initialize with a path to directory and how to glob over it.
Args:
path: Path to directory.
glob: Glob pattern to use to find files. Defaults to "**/[!.]*"
(all files except hidden).
silent_errors: Whether to silently ignore errors. Defaults to False.
load_hidden: Whether to load hidden files. Defaults to False.
loader_cls: Loader class to use for loading files.
Defaults to UnstructuredFileLoader.
loader_kwargs: Keyword arguments to pass to loader_cls. Defaults to None.
recursive: Whether to recursively search for files. Defaults to False.
show_progress: Whether to show a progress bar. Defaults to False.
use_multithreading: Whether to use multithreading. Defaults to False.
max_concurrency: The maximum number of threads to use. Defaults to 4.
"""
if loader_kwargs is None:
loader_kwargs = {}
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.loader_cls = loader_cls
self.loader_kwargs = loader_kwargs
self.silent_errors = silent_errors
self.recursive = recursive
self.show_progress = show_progress
self.use_multithreading = use_multithreading
self.max_concurrency = max_concurrency
[docs] def load_file(
self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]
) -> None:
"""Load a file.
Args:
item: File path.
path: Directory path.
docs: List of documents to append to.
pbar: Progress bar. Defaults to None.
"""
if item.is_file():
if _is_visible(item.relative_to(path)) or self.load_hidden:
try:
logger.debug(f"Processing file: {str(item)}")
sub_docs = self.loader_cls(str(item), **self.loader_kwargs).load()
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(f"Error loading file {str(item)}: {e}")
else:
raise e
finally:
if pbar:
pbar.update(1)
[docs] def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.path)
if not p.exists():
raise FileNotFoundError(f"Directory not found: '{self.path}'")
if not p.is_dir():
raise ValueError(f"Expected directory, got file: '{self.path}'")
docs: List[Document] = []
items = list(p.rglob(self.glob) if self.recursive else p.glob(self.glob))
pbar = None
if self.show_progress:
try:
from tqdm import tqdm
pbar = tqdm(total=len(items))
except ImportError as e:
logger.warning(
"To log the progress of DirectoryLoader you need to install tqdm, "
"`pip install tqdm`"
)
if self.silent_errors:
logger.warning(e)
else:
raise ImportError(
"To log the progress of DirectoryLoader "
"you need to install tqdm, "
"`pip install tqdm`"
)
if self.use_multithreading:
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.max_concurrency
) as executor:
executor.map(lambda i: self.load_file(i, p, docs, pbar), items)
else:
for i in items:
self.load_file(i, p, docs, pbar)
if pbar:
pbar.close()
return docs
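# Usage sketch (editor's illustration; TextLoader is imported at the top of
# this module):
#
#     loader = DirectoryLoader(
#         "/path/to/dir",
#         glob="**/*.txt",
#         loader_cls=TextLoader,
#         recursive=True,
#         show_progress=True,       # requires tqdm
#         use_multithreading=True,  # load files across max_concurrency threads
#     )
#     docs = loader.load()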
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/directory.html
|
316adfeb76b6-0
|
Source code for langchain.document_loaders.notebook
"""Loads .ipynb notebook files."""
import json
from pathlib import Path
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]def concatenate_cells(
cell: dict, include_outputs: bool, max_output_length: int, traceback: bool
) -> str:
"""Combine cells information in a readable format ready to be used.
Args:
cell: A dictionary
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maximum length of the output to be displayed.
traceback: Whether to return a traceback of the error.
Returns:
A string with the cell information.
"""
cell_type = cell["cell_type"]
source = cell["source"]
output = cell["outputs"]
if include_outputs and cell_type == "code" and output:
if "ename" in output[0].keys():
error_name = output[0]["ename"]
error_value = output[0]["evalue"]
if traceback:
traceback = output[0]["traceback"]
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f" with description '{error_value}'\n"
f"and traceback '{traceback}'\n\n"
)
else:
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f"with description '{error_value}'\n\n"
)
elif output[0]["output_type"] == "stream":
output = output[0]["text"]
min_output = min(max_output_length, len(output))
return (
f"'{cell_type}' cell: '{source}'\n with "
f"output: '{output[:min_output]}'\n\n"
)
else:
return f"'{cell_type}' cell: '{source}'\n\n"
return ""
[docs]def remove_newlines(x: Any) -> Any:
"""Recursively removes newlines, no matter the data structure they are stored in."""
import pandas as pd
if isinstance(x, str):
return x.replace("\n", "")
elif isinstance(x, list):
return [remove_newlines(elem) for elem in x]
elif isinstance(x, pd.DataFrame):
return x.applymap(remove_newlines)
else:
return x
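# Quick illustration of the helper above (editor's note): strings and nested
# lists are cleaned, anything else passes through unchanged, e.g.
#     remove_newlines(["a\nb", ["c\n"]]) -> ["ab", ["c"]]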
[docs]class NotebookLoader(BaseLoader):
"""Loads .ipynb notebook files."""
[docs] def __init__(
self,
path: str,
include_outputs: bool = False,
max_output_length: int = 10,
remove_newline: bool = False,
traceback: bool = False,
):
"""Initialize with path.
Args:
path: The path to load the notebook from.
include_outputs: Whether to include the outputs of the cell.
Defaults to False.
max_output_length: Maximum length of the output to be displayed.
Defaults to 10.
remove_newline: Whether to remove newlines from the notebook.
Defaults to False.
traceback: Whether to return a traceback of the error.
Defaults to False.
"""
self.file_path = path
self.include_outputs = include_outputs
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/notebook.html
|