# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/rst.py
"""Loads RST files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredRSTLoader(UnstructuredFileLoader):
"""Load `RST` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredRSTLoader
loader = UnstructuredRSTLoader(
"example.rst", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-rst
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Initialize with a file path.
Args:
file_path: The path to the file to load.
mode: The mode to use for partitioning. See unstructured for details.
Defaults to "single".
**unstructured_kwargs: Additional keyword arguments to pass
to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.7.5")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.rst import partition_rst
return partition_rst(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
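

# Usage sketch: a minimal run in "elements" mode, assuming `unstructured`
# (>=0.7.5) is installed and that "example.rst" exists locally; the file name
# and strategy value are illustrative, not part of the original module.
if __name__ == "__main__":
    loader = UnstructuredRSTLoader("example.rst", mode="elements", strategy="fast")
    for doc in loader.load():
        # In "elements" mode each Document carries the element type (e.g.
        # "Title", "NarrativeText") in its metadata.
        print(doc.metadata.get("category"), "|", doc.page_content[:60])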

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/rtf.py
"""Loads rich text files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
satisfies_min_unstructured_version,
)
class UnstructuredRTFLoader(UnstructuredFileLoader):
"""Load `RTF` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredRTFLoader
loader = UnstructuredRTFLoader(
"example.rtf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-rtf
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Initialize with a file path.
Args:
file_path: The path to the file to load.
mode: The mode to use for partitioning. See unstructured for details.
Defaults to "single".
**unstructured_kwargs: Additional keyword arguments to pass
to unstructured.
"""
min_unstructured_version = "0.5.12"
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
"Partitioning rtf files is only supported in "
f"unstructured>={min_unstructured_version}."
)
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.rtf import partition_rtf
return partition_rtf(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/browserbase.py
from typing import Iterator, Optional, Sequence
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class BrowserbaseLoader(BaseLoader):
"""Load pre-rendered web pages using a headless browser hosted on Browserbase.
Depends on `browserbase` package.
Get your API key from https://browserbase.com
"""
def __init__(
self,
urls: Sequence[str],
text_content: bool = False,
api_key: Optional[str] = None,
project_id: Optional[str] = None,
session_id: Optional[str] = None,
proxy: Optional[bool] = None,
):
self.urls = urls
self.text_content = text_content
self.session_id = session_id
self.proxy = proxy
try:
from browserbase import Browserbase
except ImportError:
raise ImportError(
"You must run "
"`pip install --upgrade "
"browserbase` "
"to use the Browserbase loader."
)
self.browserbase = Browserbase(api_key, project_id)
def lazy_load(self) -> Iterator[Document]:
"""Load pages from URLs"""
pages = self.browserbase.load_urls(
self.urls, self.text_content, self.session_id, self.proxy
)
for i, page in enumerate(pages):
yield Document(
page_content=page,
metadata={
"url": self.urls[i],
},
)
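

# Usage sketch, assuming the `browserbase` package is installed; the URL and
# credentials below are placeholders, and `text_content=True` requests plain
# text instead of the raw page content.
if __name__ == "__main__":
    loader = BrowserbaseLoader(
        urls=["https://example.com"],
        text_content=True,
        api_key="<browserbase api key>",
        project_id="<project id>",
    )
    for doc in loader.lazy_load():
        print(doc.metadata["url"], len(doc.page_content))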

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/chatgpt.py
import datetime
import json
from typing import List
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
def concatenate_rows(message: dict, title: str) -> str:
"""
Combine message information in a readable format ready to be used.
Args:
message: Message to be concatenated
title: Title of the conversation
Returns:
Concatenated message
"""
if not message:
return ""
sender = message["author"]["role"] if message["author"] else "unknown"
text = message["content"]["parts"][0]
date = datetime.datetime.fromtimestamp(message["create_time"]).strftime(
"%Y-%m-%d %H:%M:%S"
)
return f"{title} - {sender} on {date}: {text}\n\n"
class ChatGPTLoader(BaseLoader):
"""Load conversations from exported `ChatGPT` data."""
def __init__(self, log_file: str, num_logs: int = -1):
"""Initialize a class object.
Args:
log_file: Path to the log file
            num_logs: Number of logs to load. If 0 or a negative number,
                load all logs.
"""
self.log_file = log_file
self.num_logs = num_logs
    def load(self) -> List[Document]:
        with open(self.log_file, encoding="utf8") as f:
            data = json.load(f)
        # Only a positive num_logs truncates; 0 or a negative value loads
        # every conversation in the export.
        if self.num_logs > 0:
            data = data[: self.num_logs]
documents = []
for d in data:
title = d["title"]
messages = d["mapping"]
text = "".join(
[
concatenate_rows(messages[key]["message"], title)
for idx, key in enumerate(messages)
if not (
idx == 0
and messages[key]["message"]["author"]["role"] == "system"
)
]
)
metadata = {"source": str(self.log_file)}
documents.append(Document(page_content=text, metadata=metadata))
return documents
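

# Usage sketch: how `concatenate_rows` renders one exported message. The dict
# below mirrors the shape of ChatGPT's conversations.json export ("author",
# "content" -> "parts", "create_time"); the values are illustrative.
if __name__ == "__main__":
    sample_message = {
        "author": {"role": "user"},
        "content": {"parts": ["Hello!"]},
        "create_time": 1700000000,
    }
    # Prints something like "Demo chat - user on 2023-11-14 22:13:20: Hello!"
    # (the exact date depends on the local timezone).
    print(concatenate_rows(sample_message, "Demo chat"))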

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/confluence.py
import logging
from enum import Enum
from io import BytesIO
from typing import Any, Callable, Dict, Iterator, List, Optional, Union
import requests
from langchain_core.documents import Document
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class ContentFormat(str, Enum):
"""Enumerator of the content formats of Confluence page."""
EDITOR = "body.editor"
EXPORT_VIEW = "body.export_view"
ANONYMOUS_EXPORT_VIEW = "body.anonymous_export_view"
STORAGE = "body.storage"
VIEW = "body.view"
def get_content(self, page: dict) -> str:
return page["body"][self.name.lower()]["value"]
class ConfluenceLoader(BaseLoader):
"""Load `Confluence` pages.
Port of https://llamahub.ai/l/confluence
    This currently supports username/api_key, OAuth2 login, and personal access
    token authentication.

    Specify a list of page_ids and/or a space_key to load the corresponding
    pages into Document objects; if both are specified, the union of both sets
    is returned.

    You can also specify a boolean `include_attachments` to include attachments.
    This is set to False by default; if set to True, all attachments are
    downloaded and ConfluenceLoader extracts the text from each attachment and
    adds it to the Document object. Currently supported attachment types are:
    PDF, PNG, JPEG/JPG, SVG, Word and Excel.

    The Confluence API supports different formats of page content. The storage
    format is the raw XML representation for storage. The view format is the
    HTML representation for viewing, with macros rendered as a user would see
    them. You can pass an enum `content_format` argument to specify the content
    format; this is set to `ContentFormat.STORAGE` by default. The supported
    values are: `ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
    `ContentFormat.ANONYMOUS_EXPORT_VIEW`, `ContentFormat.STORAGE`,
    and `ContentFormat.VIEW`.
Hint: space_key and page_id can both be found in the URL of a page in Confluence
- https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>
Example:
.. code-block:: python
from langchain_community.document_loaders import ConfluenceLoader
loader = ConfluenceLoader(
url="https://yoursite.atlassian.com/wiki",
username="me",
api_key="12345",
space_key="SPACE",
limit=50,
)
documents = loader.load()
            # Server on prem
loader = ConfluenceLoader(
url="https://confluence.yoursite.com/",
username="me",
api_key="your_password",
cloud=False,
space_key="SPACE",
limit=50,
)
documents = loader.load()
    :param url: Base URL of the Confluence instance
    :type url: str
    :param api_key: Confluence API key, defaults to None
    :type api_key: str, optional
    :param username: Confluence username, defaults to None
    :type username: str, optional
    :param oauth2: OAuth2 credentials dictionary, defaults to None
    :type oauth2: dict, optional
    :param token: Personal access token, defaults to None
    :type token: str, optional
    :param cloud: Whether this is a Confluence Cloud instance, defaults to True
    :type cloud: bool, optional
:param number_of_retries: How many times to retry, defaults to 3
:type number_of_retries: Optional[int], optional
:param min_retry_seconds: defaults to 2
:type min_retry_seconds: Optional[int], optional
:param max_retry_seconds: defaults to 10
:type max_retry_seconds: Optional[int], optional
:param confluence_kwargs: additional kwargs to initialize confluence with
:type confluence_kwargs: dict, optional
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
    :param include_restricted_content: Whether to include restricted pages,
        defaults to False
    :type include_restricted_content: bool, optional
    :param include_archived_content: Whether to include archived content,
        defaults to False
    :type include_archived_content: bool, optional
    :param include_attachments: Whether to include attachments, defaults to False
    :type include_attachments: bool, optional
    :param include_comments: Whether to include comments, defaults to False
    :type include_comments: bool, optional
:param content_format: Specify content format, defaults to
ContentFormat.STORAGE, the supported values are:
`ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
`ContentFormat.ANONYMOUS_EXPORT_VIEW`,
`ContentFormat.STORAGE`, and `ContentFormat.VIEW`.
:type content_format: ContentFormat
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
    :param max_pages: Maximum number of pages to retrieve in total,
        defaults to 1000
:type max_pages: int, optional
:param ocr_languages: The languages to use for the Tesseract agent. To use a
language, you'll first need to install the appropriate
Tesseract language pack.
:type ocr_languages: str, optional
:param keep_markdown_format: Whether to keep the markdown format, defaults to
False
:type keep_markdown_format: bool
    :param keep_newlines: Whether to keep newlines in the page content,
        defaults to False
:type keep_newlines: bool
:raises ValueError: Errors while validating input
:raises ImportError: Required dependencies not installed.
"""
def __init__(
self,
url: str,
api_key: Optional[str] = None,
username: Optional[str] = None,
session: Optional[requests.Session] = None,
oauth2: Optional[dict] = None,
token: Optional[str] = None,
cloud: Optional[bool] = True,
number_of_retries: Optional[int] = 3,
min_retry_seconds: Optional[int] = 2,
max_retry_seconds: Optional[int] = 10,
confluence_kwargs: Optional[dict] = None,
*,
space_key: Optional[str] = None,
page_ids: Optional[List[str]] = None,
label: Optional[str] = None,
cql: Optional[str] = None,
include_restricted_content: bool = False,
include_archived_content: bool = False,
include_attachments: bool = False,
include_comments: bool = False,
content_format: ContentFormat = ContentFormat.STORAGE,
limit: Optional[int] = 50,
max_pages: Optional[int] = 1000,
ocr_languages: Optional[str] = None,
keep_markdown_format: bool = False,
keep_newlines: bool = False,
):
self.space_key = space_key
self.page_ids = page_ids
self.label = label
self.cql = cql
self.include_restricted_content = include_restricted_content
self.include_archived_content = include_archived_content
self.include_attachments = include_attachments
self.include_comments = include_comments
self.content_format = content_format
self.limit = limit
self.max_pages = max_pages
self.ocr_languages = ocr_languages
self.keep_markdown_format = keep_markdown_format
self.keep_newlines = keep_newlines
confluence_kwargs = confluence_kwargs or {}
errors = ConfluenceLoader.validate_init_args(
url=url,
api_key=api_key,
username=username,
session=session,
oauth2=oauth2,
token=token,
)
if errors:
raise ValueError(f"Error(s) while validating input: {errors}")
try:
from atlassian import Confluence
except ImportError:
raise ImportError(
"`atlassian` package not found, please run "
"`pip install atlassian-python-api`"
)
self.base_url = url
self.number_of_retries = number_of_retries
self.min_retry_seconds = min_retry_seconds
self.max_retry_seconds = max_retry_seconds
if session:
self.confluence = Confluence(url=url, session=session, **confluence_kwargs)
elif oauth2:
self.confluence = Confluence(
url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
)
elif token:
self.confluence = Confluence(
url=url, token=token, cloud=cloud, **confluence_kwargs
)
else:
self.confluence = Confluence(
url=url,
username=username,
password=api_key,
cloud=cloud,
**confluence_kwargs,
)
@staticmethod
def validate_init_args(
url: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
session: Optional[requests.Session] = None,
oauth2: Optional[dict] = None,
token: Optional[str] = None,
) -> Union[List, None]:
"""Validates proper combinations of init arguments"""
errors = []
if url is None:
errors.append("Must provide `base_url`")
if (api_key and not username) or (username and not api_key):
errors.append(
"If one of `api_key` or `username` is provided, "
"the other must be as well."
)
non_null_creds = list(
x is not None for x in ((api_key or username), session, oauth2, token)
)
if sum(non_null_creds) > 1:
all_names = ("(api_key, username)", "session", "oauth2", "token")
provided = tuple(n for x, n in zip(non_null_creds, all_names) if x)
errors.append(
f"Cannot provide a value for more than one of: {all_names}. Received "
f"values for: {provided}"
)
if (
oauth2
and set(oauth2.keys())
== {
"token",
"client_id",
}
and set(oauth2["token"].keys())
!= {
"access_token",
"token_type",
}
):
# OAuth2 token authentication
errors.append(
"You have either omitted require keys or added extra "
"keys to the oauth2 dictionary. key values should be "
"`['client_id', 'token': ['access_token', 'token_type']]`"
)
if (
oauth2
and set(oauth2.keys())
!= {
"access_token",
"access_token_secret",
"consumer_key",
"key_cert",
}
and set(oauth2.keys())
!= {
"token",
"client_id",
}
):
errors.append(
"You have either omitted required keys or added extra "
"keys to the oauth2 dictionary. key values should be "
"`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']` "
"or `['client_id', 'token': ['access_token', 'token_type']]`"
)
return errors or None
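
    # For example (illustrative values): passing api_key="k" without a
    # username adds the paired-credentials error, while passing both
    # token="t" and oauth2={...} trips the more-than-one-credential check;
    # a valid combination returns None.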
def _resolve_param(self, param_name: str, kwargs: Any) -> Any:
return kwargs[param_name] if param_name in kwargs else getattr(self, param_name)
def _lazy_load(self, **kwargs: Any) -> Iterator[Document]:
if kwargs:
logger.warning(
f"Received runtime arguments {kwargs}. Passing runtime args to `load`"
f" is deprecated. Please pass arguments during initialization instead."
)
space_key = self._resolve_param("space_key", kwargs)
page_ids = self._resolve_param("page_ids", kwargs)
label = self._resolve_param("label", kwargs)
cql = self._resolve_param("cql", kwargs)
include_restricted_content = self._resolve_param(
"include_restricted_content", kwargs
)
include_archived_content = self._resolve_param(
"include_archived_content", kwargs
)
include_attachments = self._resolve_param("include_attachments", kwargs)
include_comments = self._resolve_param("include_comments", kwargs)
content_format = self._resolve_param("content_format", kwargs)
limit = self._resolve_param("limit", kwargs)
max_pages = self._resolve_param("max_pages", kwargs)
ocr_languages = self._resolve_param("ocr_languages", kwargs)
keep_markdown_format = self._resolve_param("keep_markdown_format", kwargs)
keep_newlines = self._resolve_param("keep_newlines", kwargs)
if not space_key and not page_ids and not label and not cql:
raise ValueError(
"Must specify at least one among `space_key`, `page_ids`, "
"`label`, `cql` parameters."
)
if space_key:
pages = self.paginate_request(
self.confluence.get_all_pages_from_space,
space=space_key,
limit=limit,
max_pages=max_pages,
status="any" if include_archived_content else "current",
expand=f"{content_format.value},version",
)
yield from self.process_pages(
pages,
include_restricted_content,
include_attachments,
include_comments,
content_format,
ocr_languages=ocr_languages,
keep_markdown_format=keep_markdown_format,
keep_newlines=keep_newlines,
)
if label:
pages = self.paginate_request(
self.confluence.get_all_pages_by_label,
label=label,
limit=limit,
max_pages=max_pages,
)
ids_by_label = [page["id"] for page in pages]
if page_ids:
page_ids = list(set(page_ids + ids_by_label))
else:
page_ids = list(set(ids_by_label))
if cql:
pages = self.paginate_request(
self._search_content_by_cql,
cql=cql,
limit=limit,
max_pages=max_pages,
include_archived_spaces=include_archived_content,
expand=f"{content_format.value},version",
)
yield from self.process_pages(
pages,
include_restricted_content,
include_attachments,
include_comments,
content_format,
ocr_languages,
keep_markdown_format,
keep_newlines=keep_newlines,
)
if page_ids:
for page_id in page_ids:
get_page = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1, # type: ignore[arg-type]
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(self.confluence.get_page_by_id)
page = get_page(
page_id=page_id, expand=f"{content_format.value},version"
)
if not include_restricted_content and not self.is_public_page(page):
continue
yield self.process_page(
page,
include_attachments,
include_comments,
content_format,
ocr_languages,
keep_markdown_format,
)
def load(self, **kwargs: Any) -> List[Document]:
return list(self._lazy_load(**kwargs))
def lazy_load(self) -> Iterator[Document]:
yield from self._lazy_load()
def _search_content_by_cql(
self, cql: str, include_archived_spaces: Optional[bool] = None, **kwargs: Any
) -> List[dict]:
url = "rest/api/content/search"
params: Dict[str, Any] = {"cql": cql}
params.update(kwargs)
if include_archived_spaces is not None:
params["includeArchivedSpaces"] = include_archived_spaces
response = self.confluence.get(url, params=params)
return response.get("results", [])
def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List:
"""Paginate the various methods to retrieve groups of pages.
Unfortunately, due to page size, sometimes the Confluence API
doesn't match the limit value. If `limit` is >100 confluence
seems to cap the response to 100. Also, due to the Atlassian Python
package, we don't get the "next" values from the "_links" key because
they only return the value from the result key. So here, the pagination
starts from 0 and goes until the max_pages, getting the `limit` number
of pages with each request. We have to manually check if there
are more docs based on the length of the returned list of pages, rather than
just checking for the presence of a `next` key in the response like this page
would have you do:
https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/
:param retrieval_method: Function used to retrieve docs
:type retrieval_method: callable
:return: List of documents
:rtype: List
"""
max_pages = kwargs.pop("max_pages")
docs: List[dict] = []
while len(docs) < max_pages:
get_pages = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1,
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(retrieval_method)
batch = get_pages(**kwargs, start=len(docs))
if not batch:
break
docs.extend(batch)
return docs[:max_pages]
def is_public_page(self, page: dict) -> bool:
"""Check if a page is publicly accessible."""
restrictions = self.confluence.get_all_restrictions_for_content(page["id"])
return (
page["status"] == "current"
and not restrictions["read"]["restrictions"]["user"]["results"]
and not restrictions["read"]["restrictions"]["group"]["results"]
)
def process_pages(
self,
pages: List[dict],
include_restricted_content: bool,
include_attachments: bool,
include_comments: bool,
content_format: ContentFormat,
ocr_languages: Optional[str] = None,
keep_markdown_format: Optional[bool] = False,
keep_newlines: bool = False,
) -> Iterator[Document]:
"""Process a list of pages into a list of documents."""
for page in pages:
if not include_restricted_content and not self.is_public_page(page):
continue
yield self.process_page(
page,
include_attachments,
include_comments,
content_format,
ocr_languages=ocr_languages,
keep_markdown_format=keep_markdown_format,
keep_newlines=keep_newlines,
)
def process_page(
self,
page: dict,
include_attachments: bool,
include_comments: bool,
content_format: ContentFormat,
ocr_languages: Optional[str] = None,
keep_markdown_format: Optional[bool] = False,
keep_newlines: bool = False,
) -> Document:
if keep_markdown_format:
try:
from markdownify import markdownify
except ImportError:
raise ImportError(
"`markdownify` package not found, please run "
"`pip install markdownify`"
)
if include_comments or not keep_markdown_format:
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"`beautifulsoup4` package not found, please run "
"`pip install beautifulsoup4`"
)
if include_attachments:
attachment_texts = self.process_attachment(page["id"], ocr_languages)
else:
attachment_texts = []
content = content_format.get_content(page)
if keep_markdown_format:
# Use markdownify to keep the page Markdown style
text = markdownify(content, heading_style="ATX") + "".join(attachment_texts)
else:
if keep_newlines:
text = BeautifulSoup(
content.replace("</p>", "\n</p>").replace("<br />", "\n"), "lxml"
).get_text(" ") + "".join(attachment_texts)
else:
text = BeautifulSoup(content, "lxml").get_text(
" ", strip=True
) + "".join(attachment_texts)
if include_comments:
comments = self.confluence.get_page_comments(
page["id"], expand="body.view.value", depth="all"
)["results"]
comment_texts = [
BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text(
" ", strip=True
)
for comment in comments
]
text = text + "".join(comment_texts)
metadata = {
"title": page["title"],
"id": page["id"],
"source": self.base_url.strip("/") + page["_links"]["webui"],
}
if "version" in page and "when" in page["version"]:
metadata["when"] = page["version"]["when"]
return Document(
page_content=text,
metadata=metadata,
)
def process_attachment(
self,
page_id: str,
ocr_languages: Optional[str] = None,
) -> List[str]:
try:
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`Pillow` package not found, " "please run `pip install Pillow`"
)
# depending on setup you may also need to set the correct path for
# poppler and tesseract
attachments = self.confluence.get_attachments_from_content(page_id)["results"]
texts = []
for attachment in attachments:
media_type = attachment["metadata"]["mediaType"]
absolute_url = self.base_url + attachment["_links"]["download"]
title = attachment["title"]
try:
if media_type == "application/pdf":
text = title + self.process_pdf(absolute_url, ocr_languages)
elif (
media_type == "image/png"
or media_type == "image/jpg"
or media_type == "image/jpeg"
):
text = title + self.process_image(absolute_url, ocr_languages)
elif (
media_type == "application/vnd.openxmlformats-officedocument"
".wordprocessingml.document"
):
text = title + self.process_doc(absolute_url)
elif media_type == "application/vnd.ms-excel":
text = title + self.process_xls(absolute_url)
elif media_type == "image/svg+xml":
text = title + self.process_svg(absolute_url, ocr_languages)
else:
continue
texts.append(text)
except requests.HTTPError as e:
if e.response.status_code == 404:
print(f"Attachment not found at {absolute_url}") # noqa: T201
continue
else:
raise
return texts
def process_pdf(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract
from pdf2image import convert_from_bytes
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` package not found, "
"please run `pip install pytesseract pdf2image`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
images = convert_from_bytes(response.content)
except ValueError:
return text
for i, image in enumerate(images):
image_text = pytesseract.image_to_string(image, lang=ocr_languages)
text += f"Page {i + 1}:\n{image_text}\n\n"
return text
def process_image(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract
from PIL import Image
except ImportError:
raise ImportError(
"`pytesseract` or `Pillow` package not found, "
"please run `pip install pytesseract Pillow`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
image = Image.open(BytesIO(response.content))
except OSError:
return text
return pytesseract.image_to_string(image, lang=ocr_languages)
def process_doc(self, link: str) -> str:
try:
import docx2txt
except ImportError:
raise ImportError(
"`docx2txt` package not found, please run `pip install docx2txt`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
return docx2txt.process(file_data)
def process_xls(self, link: str) -> str:
import io
import os
try:
import xlrd
except ImportError:
raise ImportError("`xlrd` package not found, please run `pip install xlrd`")
try:
import pandas as pd
except ImportError:
raise ImportError(
"`pandas` package not found, please run `pip install pandas`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
filename = os.path.basename(link)
        # The download link may carry a query string after the extension, e.g.
        # ".csv?version=2&modificationDate=1631800010678&cacheVersion=1&api=v2",
        # so check the prefix of the extension rather than an exact match.
        file_extension = os.path.splitext(filename)[1]
        if file_extension.startswith(".csv"):
content_string = response.content.decode("utf-8")
df = pd.read_csv(io.StringIO(content_string))
text += df.to_string(index=False, header=False) + "\n\n"
else:
workbook = xlrd.open_workbook(file_contents=response.content)
for sheet in workbook.sheets():
text += f"{sheet.name}:\n"
for row in range(sheet.nrows):
for col in range(sheet.ncols):
text += f"{sheet.cell_value(row, col)}\t"
text += "\n"
text += "\n"
return text
def process_svg(
self,
link: str,
ocr_languages: Optional[str] = None,
) -> str:
try:
import pytesseract
from PIL import Image
from reportlab.graphics import renderPM
from svglib.svglib import svg2rlg
except ImportError:
raise ImportError(
"`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, "
"please run `pip install pytesseract Pillow reportlab svglib`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
drawing = svg2rlg(BytesIO(response.content))
img_data = BytesIO()
renderPM.drawToFile(drawing, img_data, fmt="PNG")
img_data.seek(0)
image = Image.open(img_data)
return pytesseract.image_to_string(image, lang=ocr_languages)
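

# Usage sketch: load a space as Markdown, assuming `atlassian-python-api` and
# `markdownify` are installed. The URL, username, API key, and space key below
# are placeholders.
if __name__ == "__main__":
    loader = ConfluenceLoader(
        url="https://yoursite.atlassian.com/wiki",
        username="me@example.com",
        api_key="<api key>",
        space_key="SPACE",
        keep_markdown_format=True,
        limit=50,
        max_pages=100,
    )
    for doc in loader.lazy_load():
        print(doc.metadata["title"], "->", doc.metadata["source"])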

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/pubmed.py
from typing import Iterator, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.pubmed import PubMedAPIWrapper
class PubMedLoader(BaseLoader):
"""Load from the `PubMed` biomedical library.
Attributes:
query: The query to be passed to the PubMed API.
load_max_docs: The maximum number of documents to load.
"""
def __init__(
self,
query: str,
load_max_docs: Optional[int] = 3,
):
"""Initialize the PubMedLoader.
Args:
query: The query to be passed to the PubMed API.
load_max_docs: The maximum number of documents to load.
Defaults to 3.
"""
self.query = query
self.load_max_docs = load_max_docs
self._client = PubMedAPIWrapper( # type: ignore[call-arg]
top_k_results=load_max_docs, # type: ignore[arg-type]
)
def lazy_load(self) -> Iterator[Document]:
for doc in self._client.lazy_load_docs(self.query):
yield doc
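

# Usage sketch: fetch a few PubMed abstracts; the query string is
# illustrative, and PubMedAPIWrapper pulls in its own optional dependencies
# (e.g. `xmltodict`).
if __name__ == "__main__":
    loader = PubMedLoader("covid vaccine efficacy", load_max_docs=3)
    for doc in loader.lazy_load():
        print(doc.metadata.get("Title"), "-", doc.page_content[:80])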

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/rocksetdb.py
from typing import Any, Callable, Iterator, List, Optional, Tuple
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
def default_joiner(docs: List[Tuple[str, Any]]) -> str:
"""Default joiner for content columns."""
return "\n".join([doc[1] for doc in docs])
class ColumnNotFoundError(Exception):
"""Column not found error."""
def __init__(self, missing_key: str, query: str):
super().__init__(f'Column "{missing_key}" not selected in query:\n{query}')
class RocksetLoader(BaseLoader):
"""Load from a `Rockset` database.
To use, you should have the `rockset` python package installed.
Example:
.. code-block:: python
# This code will load 3 records from the "langchain_demo"
# collection as Documents, with the `text` column used as
# the content
from langchain_community.document_loaders import RocksetLoader
from rockset import RocksetClient, Regions, models
loader = RocksetLoader(
RocksetClient(Regions.usw2a1, "<api key>"),
models.QueryRequestSql(
query="select * from langchain_demo limit 3"
),
["text"]
)
"""
def __init__(
self,
client: Any,
query: Any,
content_keys: List[str],
metadata_keys: Optional[List[str]] = None,
content_columns_joiner: Callable[[List[Tuple[str, Any]]], str] = default_joiner,
):
"""Initialize with Rockset client.
Args:
client: Rockset client object.
query: Rockset query object.
content_keys: The collection columns to be written into the `page_content`
of the Documents.
metadata_keys: The collection columns to be written into the `metadata` of
the Documents. By default, this is all the keys in the document.
            content_columns_joiner: Method that joins content_keys and their
                values into a string. It takes in a List[Tuple[str, Any]],
                representing a list of (column name, column value) tuples.
By default, this is a method that joins each column value with a new
line. This method is only relevant if there are multiple content_keys.
"""
try:
from rockset import QueryPaginator, RocksetClient
from rockset.models import QueryRequestSql
except ImportError:
raise ImportError(
"Could not import rockset client python package. "
"Please install it with `pip install rockset`."
)
if not isinstance(client, RocksetClient):
raise ValueError(
f"client should be an instance of rockset.RocksetClient, "
f"got {type(client)}"
)
if not isinstance(query, QueryRequestSql):
raise ValueError(
f"query should be an instance of rockset.model.QueryRequestSql, "
f"got {type(query)}"
)
self.client = client
self.query = query
self.content_keys = content_keys
self.content_columns_joiner = content_columns_joiner
self.metadata_keys = metadata_keys
self.paginator = QueryPaginator
self.request_model = QueryRequestSql
try:
self.client.set_application("langchain")
except AttributeError:
# ignore
pass
def lazy_load(self) -> Iterator[Document]:
query_results = self.client.Queries.query(
sql=self.query
).results # execute the SQL query
for doc in query_results: # for each doc in the response
try:
yield Document(
page_content=self.content_columns_joiner(
[(col, doc[col]) for col in self.content_keys]
),
metadata={col: doc[col] for col in self.metadata_keys}
if self.metadata_keys is not None
else doc,
) # try to yield the Document
except (
KeyError
) as e: # either content_columns or metadata_columns is invalid
raise ColumnNotFoundError(
e.args[0], self.query
) # raise that the column isn't in the db schema
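

# Usage sketch: join two content columns with " - " instead of the default
# newline joiner. The region, API key, and collection are placeholders, and
# this assumes the `rockset` client package is installed.
if __name__ == "__main__":
    from rockset import Regions, RocksetClient, models

    loader = RocksetLoader(
        RocksetClient(Regions.usw2a1, "<api key>"),
        models.QueryRequestSql(query="SELECT title, text FROM langchain_demo LIMIT 3"),
        content_keys=["title", "text"],
        metadata_keys=["_id"],
        content_columns_joiner=lambda cols: " - ".join(str(val) for _, val in cols),
    )
    for doc in loader.lazy_load():
        print(doc.page_content, "|", doc.metadata)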

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/hugging_face_dataset.py
import json
from typing import Iterator, Mapping, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class HuggingFaceDatasetLoader(BaseLoader):
"""Load from `Hugging Face Hub` datasets."""
def __init__(
self,
path: str,
page_content_column: str = "text",
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[
Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]
] = None,
cache_dir: Optional[str] = None,
keep_in_memory: Optional[bool] = None,
save_infos: bool = False,
use_auth_token: Optional[Union[bool, str]] = None,
num_proc: Optional[int] = None,
):
"""Initialize the HuggingFaceDatasetLoader.
Args:
path: Path or name of the dataset.
page_content_column: Page content column name. Default is "text".
name: Name of the dataset configuration.
data_dir: Data directory of the dataset configuration.
data_files: Path(s) to source data file(s).
cache_dir: Directory to read/write data.
keep_in_memory: Whether to copy the dataset in-memory.
save_infos: Save the dataset information (checksums/size/splits/...).
Default is False.
use_auth_token: Bearer token for remote files on the Dataset Hub.
num_proc: Number of processes.
"""
self.path = path
self.page_content_column = page_content_column
self.name = name
self.data_dir = data_dir
self.data_files = data_files
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.save_infos = save_infos
self.use_auth_token = use_auth_token
self.num_proc = num_proc
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily."""
try:
from datasets import load_dataset
except ImportError:
raise ImportError(
"Could not import datasets python package. "
"Please install it with `pip install datasets`."
)
dataset = load_dataset(
path=self.path,
name=self.name,
data_dir=self.data_dir,
data_files=self.data_files,
cache_dir=self.cache_dir,
keep_in_memory=self.keep_in_memory,
save_infos=self.save_infos,
use_auth_token=self.use_auth_token,
num_proc=self.num_proc,
)
yield from (
Document(
page_content=self.parse_obj(row.pop(self.page_content_column)),
metadata=row,
)
for key in dataset.keys()
for row in dataset[key]
)
    def parse_obj(self, page_content: Union[str, object]) -> str:
        if isinstance(page_content, str):
            return page_content
        # Non-string values (dicts, lists, ...) are serialized to JSON so the
        # page content is always a string.
        return json.dumps(page_content)
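

# Usage sketch: load a public dataset as Documents, assuming the `datasets`
# package is installed; "imdb" and its "text" column are illustrative.
if __name__ == "__main__":
    loader = HuggingFaceDatasetLoader("imdb", page_content_column="text")
    first = next(loader.lazy_load())
    print(first.page_content[:100], "...", first.metadata)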

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/airtable.py
from typing import Any, Iterator
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
class AirtableLoader(BaseLoader):
"""Load the `Airtable` tables."""
def __init__(
self, api_token: str, table_id: str, base_id: str, **kwargs: Any
) -> None:
"""Initialize with API token and the IDs for table and base.
Args:
api_token: Airtable API token.
table_id: Airtable table ID.
            base_id: Airtable base ID.
kwargs: Additional parameters to pass to Table.all(). Refer to the
pyairtable documentation for available options:
https://pyairtable.readthedocs.io/en/latest/api.html#pyairtable.Table.all
""" # noqa: E501
self.api_token = api_token
self.table_id = table_id
self.base_id = base_id
self.kwargs = kwargs
def lazy_load(self) -> Iterator[Document]:
"""Lazy load Documents from table."""
from pyairtable import Table
table = Table(self.api_token, self.base_id, self.table_id)
records = table.all(**self.kwargs)
for record in records:
metadata = {
"source": self.base_id + "_" + self.table_id,
"base_id": self.base_id,
"table_id": self.table_id,
}
if "view" in self.kwargs:
metadata["view"] = self.kwargs["view"]
# Need to convert record from dict to str
yield Document(page_content=str(record), metadata=metadata)
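

# Usage sketch with placeholder credentials and IDs; requires the
# `pyairtable` package. The `view` kwarg is forwarded to `Table.all()`.
if __name__ == "__main__":
    loader = AirtableLoader(
        api_token="<api token>",
        table_id="tblXXXXXXXXXXXXXX",
        base_id="appXXXXXXXXXXXXXX",
        view="Grid view",
    )
    for doc in loader.lazy_load():
        print(doc.metadata["source"])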

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/imsdb.py
from typing import List
from langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
class IMSDbLoader(WebBaseLoader):
"""Load `IMSDb` webpages."""
def load(self) -> List[Document]:
"""Load webpage."""
soup = self.scrape()
text = soup.select_one("td[class='scrtext']").text
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/gutenberg.py
from typing import List
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class GutenbergLoader(BaseLoader):
"""Load from `Gutenberg.org`."""
def __init__(self, file_path: str):
"""Initialize with a file path."""
if not file_path.startswith("https://www.gutenberg.org"):
raise ValueError("file path must start with 'https://www.gutenberg.org'")
if not file_path.endswith(".txt"):
raise ValueError("file path must end with '.txt'")
self.file_path = file_path
def load(self) -> List[Document]:
"""Load file."""
from urllib.request import urlopen
elements = urlopen(self.file_path)
text = "\n\n".join([str(el.decode("utf-8-sig")) for el in elements])
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/surrealdb.py
import asyncio
import json
import logging
from typing import Any, Dict, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class SurrealDBLoader(BaseLoader):
"""Load SurrealDB documents."""
def __init__(
self,
filter_criteria: Optional[Dict] = None,
**kwargs: Any,
) -> None:
try:
from surrealdb import Surreal
except ImportError as e:
raise ImportError(
"""Cannot import from surrealdb.
please install with `pip install surrealdb`."""
) from e
self.dburl = kwargs.pop("dburl", "ws://localhost:8000/rpc")
        if self.dburl.startswith("ws"):
self.sdb = Surreal(self.dburl)
else:
raise ValueError("Only websocket connections are supported at this time.")
self.filter_criteria = filter_criteria or {}
if "table" in self.filter_criteria:
raise ValueError(
"key `table` is not a valid criteria for `filter_criteria` argument."
)
self.ns = kwargs.pop("ns", "langchain")
self.db = kwargs.pop("db", "database")
self.table = kwargs.pop("table", "documents")
self.kwargs = kwargs
async def initialize(self) -> None:
"""
Initialize connection to surrealdb database
and authenticate if credentials are provided
"""
await self.sdb.connect()
if "db_user" in self.kwargs and "db_pass" in self.kwargs:
user = self.kwargs.get("db_user")
password = self.kwargs.get("db_pass")
await self.sdb.signin({"user": user, "pass": password})
await self.sdb.use(self.ns, self.db)
def load(self) -> List[Document]:
async def _load() -> List[Document]:
await self.initialize()
return await self.aload()
return asyncio.run(_load())
async def aload(self) -> List[Document]:
"""Load data into Document objects."""
query = "SELECT * FROM type::table($table)"
if self.filter_criteria is not None and len(self.filter_criteria) > 0:
query += " WHERE "
for idx, key in enumerate(self.filter_criteria):
query += f""" {"AND" if idx > 0 else ""} {key} = ${key}"""
metadata = {
"ns": self.ns,
"db": self.db,
"table": self.table,
}
results = await self.sdb.query(
query, {"table": self.table, **self.filter_criteria}
)
return [
(
Document(
page_content=json.dumps(result),
metadata={"id": result["id"], **result["metadata"], **metadata},
)
)
for result in results[0]["result"]
]
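

# Usage sketch: the synchronous `load` wraps `initialize` + `aload` in
# `asyncio.run`, so it must not be called from inside a running event loop.
# The connection URL and credentials are placeholders; requires `surrealdb`.
if __name__ == "__main__":
    loader = SurrealDBLoader(
        dburl="ws://localhost:8000/rpc",
        ns="langchain",
        db="database",
        table="documents",
        db_user="root",
        db_pass="root",
    )
    for doc in loader.load():
        print(doc.metadata["id"])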

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/youtube.py
"""Loads YouTube transcript."""
from __future__ import annotations
import logging
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Generator, List, Optional, Sequence, Union
from urllib.parse import parse_qs, urlparse
from xml.etree.ElementTree import ParseError # OK: trusted-source
from langchain_core.documents import Document
from pydantic import model_validator
from pydantic.dataclasses import dataclass
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]
@dataclass
class GoogleApiClient:
"""Generic Google API Client.
    To use, you should have the ``google-auth-oauthlib``,
    ``youtube-transcript-api``, and ``google-api-python-client``
    python packages installed.
As the google api expects credentials you need to set up a google account and
register your Service. "https://developers.google.com/docs/api/quickstart/python"
*Security Note*: Note that parsing of the transcripts relies on the standard
xml library but the input is viewed as trusted in this case.
Example:
.. code-block:: python
from langchain_community.document_loaders import GoogleApiClient
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
"""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
def __post_init__(self) -> None:
self.creds = self._load_credentials()
@model_validator(mode="before")
@classmethod
def validate_channel_or_videoIds_is_set(cls, values: Dict[str, Any]) -> Any:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("credentials_path") and not values.get(
"service_account_path"
):
raise ValueError("Must specify either channel_name or video_ids")
return values
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
creds = None
if self.service_account_path.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_path)
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
ALLOWED_SCHEMES = {"http", "https"}
ALLOWED_NETLOCS = {
"youtu.be",
"m.youtube.com",
"youtube.com",
"www.youtube.com",
"www.youtube-nocookie.com",
"vid.plus",
}
def _parse_video_id(url: str) -> Optional[str]:
"""Parse a YouTube URL and return the video ID if valid, otherwise None."""
parsed_url = urlparse(url)
if parsed_url.scheme not in ALLOWED_SCHEMES:
return None
if parsed_url.netloc not in ALLOWED_NETLOCS:
return None
path = parsed_url.path
if path.endswith("/watch"):
query = parsed_url.query
parsed_query = parse_qs(query)
if "v" in parsed_query:
ids = parsed_query["v"]
video_id = ids if isinstance(ids, str) else ids[0]
else:
return None
else:
path = parsed_url.path.lstrip("/")
video_id = path.split("/")[-1]
if len(video_id) != 11: # Video IDs are 11 characters long
return None
return video_id
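
# Examples of the mapping implemented above (illustrative URLs):
#   "https://www.youtube.com/watch?v=dQw4w9WgXcQ" -> "dQw4w9WgXcQ"
#   "https://youtu.be/dQw4w9WgXcQ"                -> "dQw4w9WgXcQ"
#   "https://www.youtube.com/watch"               -> None (no "v" parameter)
#   "https://example.com/watch?v=dQw4w9WgXcQ"     -> None (netloc not allowed)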
class TranscriptFormat(Enum):
"""Output formats of transcripts from `YoutubeLoader`."""
TEXT = "text"
LINES = "lines"
CHUNKS = "chunks"
class YoutubeLoader(BaseLoader):
"""Load `YouTube` video transcripts."""
def __init__(
self,
video_id: str,
add_video_info: bool = False,
language: Union[str, Sequence[str]] = "en",
translation: Optional[str] = None,
transcript_format: TranscriptFormat = TranscriptFormat.TEXT,
continue_on_failure: bool = False,
chunk_size_seconds: int = 120,
):
"""Initialize with YouTube video ID."""
self.video_id = video_id
self._metadata = {"source": video_id}
self.add_video_info = add_video_info
        if isinstance(language, str):
            self.language = [language]
        else:
            self.language = language
self.translation = translation
self.transcript_format = transcript_format
self.continue_on_failure = continue_on_failure
self.chunk_size_seconds = chunk_size_seconds
@staticmethod
def extract_video_id(youtube_url: str) -> str:
"""Extract video ID from common YouTube URLs."""
video_id = _parse_video_id(youtube_url)
if not video_id:
raise ValueError(
f'Could not determine the video ID for the URL "{youtube_url}".'
)
return video_id
@classmethod
def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
"""Given a YouTube URL, construct a loader.
See `YoutubeLoader()` constructor for a list of keyword arguments.
"""
video_id = cls.extract_video_id(youtube_url)
return cls(video_id, **kwargs)
def _make_chunk_document(
self, chunk_pieces: List[Dict], chunk_start_seconds: int
) -> Document:
"""Create Document from chunk of transcript pieces."""
m, s = divmod(chunk_start_seconds, 60)
h, m = divmod(m, 60)
return Document(
page_content=" ".join(
map(lambda chunk_piece: chunk_piece["text"].strip(" "), chunk_pieces)
),
metadata={
**self._metadata,
"start_seconds": chunk_start_seconds,
"start_timestamp": f"{h:02d}:{m:02d}:{s:02d}",
"source":
# replace video ID with URL to start time
f"https://www.youtube.com/watch?v={self.video_id}"
f"&t={chunk_start_seconds}s",
},
)
def _get_transcript_chunks(
self, transcript_pieces: List[Dict]
) -> Generator[Document, None, None]:
chunk_pieces: List[Dict[str, Any]] = []
chunk_start_seconds = 0
chunk_time_limit = self.chunk_size_seconds
for transcript_piece in transcript_pieces:
piece_end = transcript_piece["start"] + transcript_piece["duration"]
if piece_end > chunk_time_limit:
if chunk_pieces:
yield self._make_chunk_document(chunk_pieces, chunk_start_seconds)
chunk_pieces = []
chunk_start_seconds = chunk_time_limit
chunk_time_limit += self.chunk_size_seconds
chunk_pieces.append(transcript_piece)
if len(chunk_pieces) > 0:
yield self._make_chunk_document(chunk_pieces, chunk_start_seconds)
def load(self) -> List[Document]:
"""Load YouTube transcripts into `Document` objects."""
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
YouTubeTranscriptApi,
)
except ImportError:
raise ImportError(
'Could not import "youtube_transcript_api" Python package. '
"Please install it with `pip install youtube-transcript-api`."
)
if self.add_video_info:
# Get more video meta info
# Such as title, description, thumbnail url, publish_date
video_info = self._get_video_info()
self._metadata.update(video_info)
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
except TranscriptsDisabled:
return []
try:
transcript = transcript_list.find_transcript(self.language)
except NoTranscriptFound:
transcript = transcript_list.find_transcript(["en"])
if self.translation is not None:
transcript = transcript.translate(self.translation)
transcript_pieces: List[Dict[str, Any]] = transcript.fetch()
if self.transcript_format == TranscriptFormat.TEXT:
transcript = " ".join(
map(
lambda transcript_piece: transcript_piece["text"].strip(" "),
transcript_pieces,
)
)
return [Document(page_content=transcript, metadata=self._metadata)]
elif self.transcript_format == TranscriptFormat.LINES:
return list(
map(
lambda transcript_piece: Document(
page_content=transcript_piece["text"].strip(" "),
metadata=dict(
filter(
lambda item: item[0] != "text", transcript_piece.items()
)
),
),
transcript_pieces,
)
)
elif self.transcript_format == TranscriptFormat.CHUNKS:
return list(self._get_transcript_chunks(transcript_pieces))
else:
raise ValueError("Unknown transcript format.")
def _get_video_info(self) -> Dict:
"""Get important video information.
Components include:
- title
- description
- thumbnail URL,
- publish_date
- channel author
- and more.
"""
try:
from pytube import YouTube
except ImportError:
raise ImportError(
'Could not import "pytube" Python package. '
"Please install it with `pip install pytube`."
)
yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
video_info = {
"title": yt.title or "Unknown",
"description": yt.description or "Unknown",
"view_count": yt.views or 0,
"thumbnail_url": yt.thumbnail_url or "Unknown",
"publish_date": yt.publish_date.strftime("%Y-%m-%d %H:%M:%S")
if yt.publish_date
else "Unknown",
"length": yt.length or 0,
"author": yt.author or "Unknown",
}
return video_info
@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
"""Load all Videos from a `YouTube` Channel.
    To use, you should have the ``google-api-python-client`` and
    ``youtube-transcript-api`` python packages installed.
As the service needs a google_api_client, you first have to initialize
the GoogleApiClient.
    Additionally you have to either provide a channel name or a list of video ids.
"https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain_community.document_loaders import GoogleApiClient
from langchain_community.document_loaders import GoogleApiYoutubeLoader
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
google_api_client=google_api_client,
channel_name = "CodeAesthetic"
)
            loader.load()
"""
google_api_client: GoogleApiClient
channel_name: Optional[str] = None
video_ids: Optional[List[str]] = None
add_video_info: bool = True
captions_language: str = "en"
continue_on_failure: bool = False
def __post_init__(self) -> None:
self.youtube_client = self._build_youtube_client(self.google_api_client.creds)
def _build_youtube_client(self, creds: Any) -> Any:
try:
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
return build("youtube", "v3", credentials=creds)
@model_validator(mode="before")
@classmethod
def validate_channel_or_videoIds_is_set(cls, values: Dict[str, Any]) -> Any:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("channel_name") and not values.get("video_ids"):
raise ValueError("Must specify either channel_name or video_ids")
return values
    def _get_transcript_for_video_id(self, video_id: str) -> str:
from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
        try:
            transcript = transcript_list.find_transcript([self.captions_language])
        except NoTranscriptFound:
            # Fall back to the first available transcript, translated into the
            # requested captions language.
            transcript = next(iter(transcript_list)).translate(
                self.captions_language
            )
transcript_pieces = transcript.fetch()
return " ".join([t["text"].strip(" ") for t in transcript_pieces])
def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
        captions = self._get_transcript_for_video_id(video_id)
video_response = (
self.youtube_client.videos()
.list(
part="id,snippet",
id=video_id,
)
.execute()
)
return Document(
page_content=captions,
metadata=video_response.get("items")[0],
)
def _get_channel_id(self, channel_name: str) -> str:
request = self.youtube_client.search().list(
part="id",
q=channel_name,
type="channel",
maxResults=1, # we only need one result since channel names are unique
)
response = request.execute()
channel_id = response["items"][0]["id"]["channelId"]
return channel_id
def _get_uploads_playlist_id(self, channel_id: str) -> str:
request = self.youtube_client.channels().list(
part="contentDetails",
id=channel_id,
)
response = request.execute()
return response["items"][0]["contentDetails"]["relatedPlaylists"]["uploads"]
def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]:
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
)
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"youtube-transcript-api` "
"to use the youtube loader"
)
channel_id = self._get_channel_id(channel)
uploads_playlist_id = self._get_uploads_playlist_id(channel_id)
request = self.youtube_client.playlistItems().list(
part="id,snippet",
playlistId=uploads_playlist_id,
maxResults=50,
)
        documents = []
while request is not None:
response = request.execute()
# Add each video ID to the list
for item in response["items"]:
video_id = item["snippet"]["resourceId"]["videoId"]
meta_data = {"videoId": video_id}
if self.add_video_info:
item["snippet"].pop("thumbnails")
meta_data.update(item["snippet"])
try:
                    page_content = self._get_transcript_for_video_id(video_id)
                    documents.append(
Document(
page_content=page_content,
metadata=meta_data,
)
)
                except (TranscriptsDisabled, NoTranscriptFound, ParseError) as e:
                    if self.continue_on_failure:
                        logger.error(
                            f"Error fetching transcript for {video_id}, "
                            f"exception: {e}"
                        )
                    else:
                        raise e
request = self.youtube_client.search().list_next(request, response)
        return documents
def load(self) -> List[Document]:
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend(
[
self._get_document_for_video_id(video_id)
for video_id in self.video_ids
]
)
else:
raise ValueError("Must specify either channel_name or video_ids")
return document_list
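

# Usage sketch: split a transcript into ~2 minute chunks, assuming
# `youtube-transcript-api` is installed; the video URL is a placeholder.
if __name__ == "__main__":
    loader = YoutubeLoader.from_youtube_url(
        "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
        transcript_format=TranscriptFormat.CHUNKS,
        chunk_size_seconds=120,
    )
    for doc in loader.load():
        print(doc.metadata["start_timestamp"], doc.page_content[:60])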

# File: lc_public_repos/langchain/libs/community/langchain_community/document_loaders/docusaurus.py
"""Load Documents from Docusaurus Documentation."""
from typing import Any, List, Optional
from langchain_community.document_loaders.sitemap import SitemapLoader
class DocusaurusLoader(SitemapLoader):
"""Load from Docusaurus Documentation.
It leverages the SitemapLoader to loop through the generated pages of a
Docusaurus Documentation website and extracts the content by looking for specific
HTML tags. By default, the parser searches for the main content of the Docusaurus
page, which is normally the <article>. You can also define your own
custom HTML tags by providing them as a list, for example: ["div", ".main", "a"].
"""
def __init__(
self,
url: str,
custom_html_tags: Optional[List[str]] = None,
**kwargs: Any,
):
"""Initialize DocusaurusLoader
Args:
url: The base URL of the Docusaurus website.
custom_html_tags: Optional custom html tags to extract content from pages.
kwargs: Additional args to extend the underlying SitemapLoader, for example:
filter_urls, blocksize, meta_function, is_local, continue_on_failure
"""
if not kwargs.get("is_local"):
url = f"{url}/sitemap.xml"
self.custom_html_tags = custom_html_tags or ["main article"]
super().__init__(
url,
parsing_function=kwargs.get("parsing_function") or self._parsing_function,
**kwargs,
)
def _parsing_function(self, content: Any) -> str:
"""Parses specific elements from a Docusaurus page."""
        relevant_elements = content.select(",".join(self.custom_html_tags))
        # Extract text only from the selected elements; the previous loop
        # compared each element against the very list it was drawn from, so
        # nothing was ever decomposed and the whole page text was returned.
        return "\n".join(element.get_text() for element in relevant_elements)
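# Usage sketch (hypothetical site URL; `filter_urls` is one of the
# SitemapLoader kwargs mentioned in the class docstring):
#
#     loader = DocusaurusLoader(
#         "https://python.langchain.com",
#         filter_urls=["https://python.langchain.com/docs/integrations"],
#     )
#     docs = loader.load()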
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/datadog_logs.py | from datetime import datetime, timedelta
from typing import List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class DatadogLogsLoader(BaseLoader):
"""Load `Datadog` logs.
Logs are written into the `page_content` and into the `metadata`.
"""
def __init__(
self,
query: str,
api_key: str,
app_key: str,
from_time: Optional[int] = None,
to_time: Optional[int] = None,
limit: int = 100,
) -> None:
"""Initialize Datadog document loader.
Requirements:
- Must have datadog_api_client installed. Install with `pip install datadog_api_client`.
Args:
query: The query to run in Datadog.
api_key: The Datadog API key.
app_key: The Datadog APP key.
from_time: Optional. The start of the time range to query.
Supports date math and regular timestamps (milliseconds) like '1688732708951'
Defaults to 20 minutes ago.
to_time: Optional. The end of the time range to query.
Supports date math and regular timestamps (milliseconds) like '1688732708951'
Defaults to now.
limit: The maximum number of logs to return.
Defaults to 100.
""" # noqa: E501
try:
from datadog_api_client import Configuration
except ImportError as ex:
raise ImportError(
"Could not import datadog_api_client python package. "
"Please install it with `pip install datadog_api_client`."
) from ex
self.query = query
configuration = Configuration()
configuration.api_key["apiKeyAuth"] = api_key
configuration.api_key["appKeyAuth"] = app_key
self.configuration = configuration
self.from_time = from_time
self.to_time = to_time
self.limit = limit
def parse_log(self, log: dict) -> Document:
"""
Create Document objects from Datadog log items.
"""
attributes = log.get("attributes", {})
metadata = {
"id": log.get("id", ""),
"status": attributes.get("status"),
"service": attributes.get("service", ""),
"tags": attributes.get("tags", []),
"timestamp": attributes.get("timestamp", ""),
}
message = attributes.get("message", "")
inside_attributes = attributes.get("attributes", {})
content_dict = {**inside_attributes, "message": message}
content = ", ".join(f"{k}: {v}" for k, v in content_dict.items())
return Document(page_content=content, metadata=metadata)
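    # Example of the mapping performed above (hypothetical log item):
    #
    #     log = {
    #         "id": "AAAA",
    #         "attributes": {
    #             "status": "error",
    #             "service": "agent",
    #             "tags": ["env:prod"],
    #             "timestamp": "2023-07-07T12:25:08Z",
    #             "message": "connection refused",
    #             "attributes": {"host": "web-1"},
    #         },
    #     }
    #     # parse_log(log).page_content == "host: web-1, message: connection refused"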
def load(self) -> List[Document]:
"""
Get logs from Datadog.
Returns:
A list of Document objects.
- page_content
- metadata
- id
- service
- status
- tags
- timestamp
"""
try:
from datadog_api_client import ApiClient
from datadog_api_client.v2.api.logs_api import LogsApi
from datadog_api_client.v2.model.logs_list_request import LogsListRequest
from datadog_api_client.v2.model.logs_list_request_page import (
LogsListRequestPage,
)
from datadog_api_client.v2.model.logs_query_filter import LogsQueryFilter
from datadog_api_client.v2.model.logs_sort import LogsSort
except ImportError as ex:
raise ImportError(
"Could not import datadog_api_client python package. "
"Please install it with `pip install datadog_api_client`."
) from ex
now = datetime.now()
twenty_minutes_before = now - timedelta(minutes=20)
now_timestamp = int(now.timestamp() * 1000)
twenty_minutes_before_timestamp = int(twenty_minutes_before.timestamp() * 1000)
_from = (
self.from_time
if self.from_time is not None
else twenty_minutes_before_timestamp
)
body = LogsListRequest(
filter=LogsQueryFilter(
query=self.query,
_from=_from,
to=f"{self.to_time if self.to_time is not None else now_timestamp}",
),
sort=LogsSort.TIMESTAMP_ASCENDING,
page=LogsListRequestPage(
limit=self.limit,
),
)
with ApiClient(configuration=self.configuration) as api_client:
api_instance = LogsApi(api_client)
response = api_instance.list_logs(body=body).to_dict()
docs: List[Document] = []
for row in response["data"]:
docs.append(self.parse_log(row))
return docs
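# Usage sketch (placeholder keys; `query` uses Datadog log search syntax and
# `from_time`/`to_time` are millisecond timestamps, as documented above):
#
#     loader = DatadogLogsLoader(
#         query="service:agent status:error",
#         api_key="<DD-API-KEY>",
#         app_key="<DD-APP-KEY>",
#         from_time=1688732708951,
#         limit=100,
#     )
#     documents = loader.load()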
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/glue_catalog.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
if TYPE_CHECKING:
from boto3.session import Session
class GlueCatalogLoader(BaseLoader):
"""Load table schemas from AWS Glue.
This loader fetches the schema of each table within a specified AWS Glue database.
The schema details include column names and their data types, similar to pandas
dtype representation.
AWS credentials are automatically loaded using boto3, following the standard AWS
method:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific AWS profile is required, it can be specified and will be used to
establish the session.
"""
def __init__(
self,
database: str,
*,
session: Optional[Session] = None,
profile_name: Optional[str] = None,
table_filter: Optional[List[str]] = None,
):
"""Initialize Glue database loader.
Args:
database: The name of the Glue database from which to load table schemas.
session: Optional. A boto3 Session object. If not provided, a new
session will be created.
profile_name: Optional. The name of the AWS profile to use for credentials.
table_filter: Optional. List of table names to fetch schemas for,
fetching all if None.
"""
self.database = database
self.profile_name = profile_name
self.table_filter = table_filter
if session:
self.glue_client = session.client("glue")
else:
self.glue_client = self._initialize_glue_client()
def _initialize_glue_client(self) -> Any:
"""Initialize the AWS Glue client.
Returns:
The initialized AWS Glue client.
Raises:
ValueError: If there is an issue with AWS session/client initialization.
"""
try:
import boto3
except ImportError as e:
raise ImportError(
"boto3 is required to use the GlueCatalogLoader. "
"Please install it with `pip install boto3`."
) from e
try:
session = (
boto3.Session(profile_name=self.profile_name)
if self.profile_name
else boto3.Session()
)
return session.client("glue")
except Exception as e:
raise ValueError("Issue with AWS session/client initialization.") from e
def _fetch_tables(self) -> List[str]:
"""Retrieve all table names in the specified Glue database.
Returns:
A list of table names.
"""
paginator = self.glue_client.get_paginator("get_tables")
table_names = []
for page in paginator.paginate(DatabaseName=self.database):
for table in page["TableList"]:
if self.table_filter is None or table["Name"] in self.table_filter:
table_names.append(table["Name"])
return table_names
def _fetch_table_schema(self, table_name: str) -> Dict[str, str]:
"""Fetch the schema of a specified table.
Args:
table_name: The name of the table for which to fetch the schema.
Returns:
A dictionary mapping column names to their data types.
"""
response = self.glue_client.get_table(
DatabaseName=self.database, Name=table_name
)
columns = response["Table"]["StorageDescriptor"]["Columns"]
return {col["Name"]: col["Type"] for col in columns}
def lazy_load(self) -> Iterator[Document]:
"""Lazily load table schemas as Document objects.
Yields:
Document objects, each representing the schema of a table.
"""
table_names = self._fetch_tables()
for table_name in table_names:
schema = self._fetch_table_schema(table_name)
page_content = (
f"Database: {self.database}\nTable: {table_name}\nSchema:\n"
+ "\n".join(f"{col}: {dtype}" for col, dtype in schema.items())
)
doc = Document(
page_content=page_content, metadata={"table_name": table_name}
)
yield doc
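# Usage sketch (database and table names are illustrative; credentials are
# resolved by boto3 as described in the class docstring):
#
#     loader = GlueCatalogLoader(
#         database="my_glue_db",
#         profile_name="dev",
#         table_filter=["orders", "customers"],
#     )
#     for doc in loader.lazy_load():
#         print(doc.metadata["table_name"])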
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/markdown.py | from typing import List
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class UnstructuredMarkdownLoader(UnstructuredFileLoader):
"""Load `Markdown` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Setup:
Install ``langchain-community``.
.. code-block:: bash
pip install -U langchain-community
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import UnstructuredMarkdownLoader
loader = UnstructuredMarkdownLoader(
"./example_data/example.md",
mode="elements",
strategy="fast",
)
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Sample Markdown Document
{'source': './example_data/example.md', 'category_depth': 0, 'last_modified': '2024-08-14T15:04:18', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': './example_data', 'filename': 'example.md', 'category': 'Title', 'element_id': '3d0b313864598e704aa26c728ecb61e5'}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Sample Markdown Document
{'source': './example_data/example.md', 'category_depth': 0, 'last_modified': '2024-08-14T15:04:18', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': './example_data', 'filename': 'example.md', 'category': 'Title', 'element_id': '3d0b313864598e704aa26c728ecb61e5'}
References
----------
https://unstructured-io.github.io/unstructured/core/partition.html#partition-md
""" # noqa: E501
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.partition.md import partition_md
# NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
# versions of unstructured like 0.4.17-dev1
_unstructured_version = __unstructured_version__.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
if unstructured_version < (0, 4, 16):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning markdown files is only supported in unstructured>=0.4.16."
)
return partition_md(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/azure_ai_data.py | from typing import Iterator, Optional
from langchain_community.docstore.document import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileIOLoader
class AzureAIDataLoader(BaseLoader):
"""Load from Azure AI Data."""
def __init__(self, url: str, glob: Optional[str] = None):
"""Initialize with URL to a data asset or storage location
."""
self.url = url
"""URL to the data asset or storage location."""
self.glob_pattern = glob
"""Optional glob pattern to select files. Defaults to None."""
def lazy_load(self) -> Iterator[Document]:
"""A lazy loader for Documents."""
try:
from azureml.fsspec import AzureMachineLearningFileSystem
except ImportError as exc:
raise ImportError(
"Could not import azureml-fspec package."
"Please install it with `pip install azureml-fsspec`."
) from exc
fs = AzureMachineLearningFileSystem(self.url)
if self.glob_pattern:
remote_paths_list = fs.glob(self.glob_pattern)
else:
remote_paths_list = fs.ls()
for remote_path in remote_paths_list:
with fs.open(remote_path) as f:
loader = UnstructuredFileIOLoader(file=f)
yield from loader.load()
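# Usage sketch (the datastore URI is a placeholder; `glob` narrows which
# files are picked up from the storage location):
#
#     loader = AzureAIDataLoader(
#         url="azureml://datastores/workspaceblobstore/paths/docs/",
#         glob="*.pdf",
#     )
#     docs = list(loader.lazy_load())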
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/doc_intelligence.py | from typing import Iterator, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers import (
AzureAIDocumentIntelligenceParser,
)
class AzureAIDocumentIntelligenceLoader(BaseLoader):
"""Load a PDF with Azure Document Intelligence."""
def __init__(
self,
api_endpoint: str,
api_key: str,
file_path: Optional[str] = None,
url_path: Optional[str] = None,
bytes_source: Optional[bytes] = None,
api_version: Optional[str] = None,
api_model: str = "prebuilt-layout",
mode: str = "markdown",
*,
analysis_features: Optional[List[str]] = None,
) -> None:
"""
Initialize the object for file processing with Azure Document Intelligence
(formerly Form Recognizer).
This constructor initializes a AzureAIDocumentIntelligenceParser object to be
used for parsing files using the Azure Document Intelligence API. The load
method generates Documents whose content representations are determined by the
mode parameter.
Parameters:
-----------
api_endpoint: str
The API endpoint to use for DocumentIntelligenceClient construction.
api_key: str
The API key to use for DocumentIntelligenceClient construction.
file_path : Optional[str]
The path to the file that needs to be loaded.
Either file_path, url_path or bytes_source must be specified.
url_path : Optional[str]
The URL to the file that needs to be loaded.
Either file_path, url_path or bytes_source must be specified.
bytes_source : Optional[bytes]
The bytes array of the file that needs to be loaded.
Either file_path, url_path or bytes_source must be specified.
api_version: Optional[str]
The API version for DocumentIntelligenceClient. Setting None to use
the default value from `azure-ai-documentintelligence` package.
api_model: str
Unique document model name. Default value is "prebuilt-layout".
Note that overriding this default value may result in unsupported
behavior.
mode: Optional[str]
The type of content representation of the generated Documents.
Use either "single", "page", or "markdown". Default value is "markdown".
analysis_features: Optional[List[str]]
List of optional analysis features, each feature should be passed
as a str that conforms to the enum `DocumentAnalysisFeature` in
`azure-ai-documentintelligence` package. Default value is None.
Examples:
---------
>>> obj = AzureAIDocumentIntelligenceLoader(
... file_path="path/to/file",
... api_endpoint="https://endpoint.azure.com",
... api_key="APIKEY",
... api_version="2023-10-31-preview",
... api_model="prebuilt-layout",
... mode="markdown"
... )
"""
assert (
file_path is not None or url_path is not None or bytes_source is not None
), "file_path, url_path or bytes_source must be provided"
self.file_path = file_path
self.url_path = url_path
self.bytes_source = bytes_source
self.parser = AzureAIDocumentIntelligenceParser( # type: ignore[misc]
api_endpoint=api_endpoint,
api_key=api_key,
api_version=api_version,
api_model=api_model,
mode=mode,
analysis_features=analysis_features,
)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load the document as pages."""
if self.file_path is not None:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
yield from self.parser.parse(blob)
elif self.url_path is not None:
yield from self.parser.parse_url(self.url_path) # type: ignore[arg-type]
elif self.bytes_source is not None:
yield from self.parser.parse_bytes(self.bytes_source)
else:
raise ValueError("No data source provided.")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/gitbook.py | from typing import Any, Iterator, List, Optional
from urllib.parse import urljoin, urlparse
from langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
class GitbookLoader(WebBaseLoader):
"""Load `GitBook` data.
1. load from either a single page, or
2. load all (relative) paths in the navbar.
"""
def __init__(
self,
web_page: str,
load_all_paths: bool = False,
base_url: Optional[str] = None,
content_selector: str = "main",
continue_on_failure: bool = False,
show_progress: bool = True,
):
"""Initialize with web page and whether to load all paths.
Args:
web_page: The web page to load or the starting point from where
relative paths are discovered.
load_all_paths: If set to True, all relative paths in the navbar
are loaded instead of only `web_page`.
base_url: If `load_all_paths` is True, the relative paths are
appended to this base url. Defaults to `web_page`.
content_selector: The CSS selector for the content to load.
Defaults to "main".
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
show_progress: whether to show a progress bar while loading. Default: True
"""
self.base_url = base_url or web_page
if self.base_url.endswith("/"):
self.base_url = self.base_url[:-1]
if load_all_paths:
# set web_path to the sitemap if we want to crawl all paths
web_page = f"{self.base_url}/sitemap.xml"
super().__init__(
web_paths=(web_page,),
continue_on_failure=continue_on_failure,
show_progress=show_progress,
)
self.load_all_paths = load_all_paths
self.content_selector = content_selector
def lazy_load(self) -> Iterator[Document]:
"""Fetch text from one single GitBook page."""
if self.load_all_paths:
soup_info = self.scrape()
relative_paths = self._get_paths(soup_info)
urls = [urljoin(self.base_url, path) for path in relative_paths]
soup_infos = self.scrape_all(urls)
for soup_info, url in zip(soup_infos, urls):
doc = self._get_document(soup_info, url)
if doc:
yield doc
else:
soup_info = self.scrape()
doc = self._get_document(soup_info, self.web_path)
if doc:
yield doc
def _get_document(
self, soup: Any, custom_url: Optional[str] = None
) -> Optional[Document]:
"""Fetch content from page and return Document."""
page_content_raw = soup.find(self.content_selector)
if not page_content_raw:
return None
content = page_content_raw.get_text(separator="\n").strip()
title_if_exists = page_content_raw.find("h1")
title = title_if_exists.text if title_if_exists else ""
metadata = {"source": custom_url or self.web_path, "title": title}
return Document(page_content=content, metadata=metadata)
def _get_paths(self, soup: Any) -> List[str]:
"""Fetch all relative paths in the navbar."""
return [urlparse(loc.text).path for loc in soup.find_all("loc")]
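# Usage sketch (hypothetical GitBook site; with load_all_paths=True the
# loader reads <base_url>/sitemap.xml and crawls every page it lists):
#
#     loader = GitbookLoader(
#         "https://docs.gitbook.com",
#         load_all_paths=True,
#     )
#     docs = list(loader.lazy_load())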
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/notiondb.py | import logging
from typing import Any, Dict, List, Optional
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"
# Configure logging
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
class NotionDBLoader(BaseLoader):
"""Load from `Notion DB`.
Reads content from pages within a Notion Database.
Args:
integration_token (str): Notion integration token.
database_id (str): Notion database id.
request_timeout_sec (int): Timeout for Notion requests in seconds.
Defaults to 10.
filter_object (Dict[str, Any]): Filter object used to limit returned
entries based on specified criteria.
E.g.: {
"timestamp": "last_edited_time",
"last_edited_time": {
"on_or_after": "2024-02-07"
}
} -> will only return entries that were last edited
on or after 2024-02-07
Notion docs: https://developers.notion.com/reference/post-database-query-filter
Defaults to None, which will return ALL entries.
"""
def __init__(
self,
integration_token: str,
database_id: str,
request_timeout_sec: Optional[int] = 10,
*,
filter_object: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize with parameters."""
if not integration_token:
raise ValueError("integration_token must be provided")
if not database_id:
raise ValueError("database_id must be provided")
self.token = integration_token
self.database_id = database_id
self.headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
self.request_timeout_sec = request_timeout_sec
self.filter_object = filter_object or {}
def load(self) -> List[Document]:
"""Load documents from the Notion database.
Returns:
List[Document]: List of documents.
"""
page_summaries = self._retrieve_page_summaries()
return list(self.load_page(page_summary) for page_summary in page_summaries)
def _retrieve_page_summaries(
self, query_dict: Dict[str, Any] = {"page_size": 100}
) -> List[Dict[str, Any]]:
"""
Get all the pages from a Notion database
OR filter based on specified criteria.
"""
pages: List[Dict[str, Any]] = []
while True:
data = self._request(
DATABASE_URL.format(database_id=self.database_id),
method="POST",
query_dict=query_dict,
filter_object=self.filter_object,
)
pages.extend(data.get("results"))
if not data.get("has_more"):
break
query_dict["start_cursor"] = data.get("next_cursor")
return pages
def load_page(self, page_summary: Dict[str, Any]) -> Document:
"""Read a page.
Args:
page_summary: Page summary from Notion API.
"""
page_id = page_summary["id"]
# load properties as metadata
metadata: Dict[str, Any] = {}
for prop_name, prop_data in page_summary["properties"].items():
prop_type = prop_data["type"]
if prop_type == "rich_text":
value = (
prop_data["rich_text"][0]["plain_text"]
if prop_data["rich_text"]
else None
)
elif prop_type == "title":
value = (
prop_data["title"][0]["plain_text"] if prop_data["title"] else None
)
elif prop_type == "multi_select":
value = (
[item["name"] for item in prop_data["multi_select"]]
if prop_data["multi_select"]
else []
)
elif prop_type == "url":
value = prop_data["url"]
elif prop_type == "unique_id":
value = (
f'{prop_data["unique_id"]["prefix"]}-{prop_data["unique_id"]["number"]}'
if prop_data["unique_id"]
else None
)
elif prop_type == "status":
value = prop_data["status"]["name"] if prop_data["status"] else None
elif prop_type == "people":
value = []
if prop_data["people"]:
for item in prop_data["people"]:
name = item.get("name")
if not name:
logger.warning(
"Missing 'name' in 'people' property "
f"for page {page_id}"
)
value.append(name)
elif prop_type == "date":
value = prop_data["date"] if prop_data["date"] else None
elif prop_type == "last_edited_time":
value = (
prop_data["last_edited_time"]
if prop_data["last_edited_time"]
else None
)
elif prop_type == "created_time":
value = prop_data["created_time"] if prop_data["created_time"] else None
elif prop_type == "checkbox":
value = prop_data["checkbox"]
elif prop_type == "email":
value = prop_data["email"]
elif prop_type == "number":
value = prop_data["number"]
elif prop_type == "select":
value = prop_data["select"]["name"] if prop_data["select"] else None
else:
value = None
metadata[prop_name.lower()] = value
metadata["id"] = page_id
return Document(page_content=self._load_blocks(page_id), metadata=metadata)
def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block and its children."""
result_lines_arr: List[str] = []
cur_block_id: str = block_id
while cur_block_id:
data = self._request(BLOCK_URL.format(block_id=cur_block_id))
for result in data["results"]:
result_obj = result[result["type"]]
if "rich_text" not in result_obj:
continue
cur_result_text_arr: List[str] = []
for rich_text in result_obj["rich_text"]:
if "text" in rich_text:
cur_result_text_arr.append(
"\t" * num_tabs + rich_text["text"]["content"]
)
if result["has_children"]:
children_text = self._load_blocks(
result["id"], num_tabs=num_tabs + 1
)
cur_result_text_arr.append(children_text)
result_lines_arr.append("\n".join(cur_result_text_arr))
cur_block_id = data.get("next_cursor")
return "\n".join(result_lines_arr)
def _request(
self,
url: str,
method: str = "GET",
query_dict: Dict[str, Any] = {},
*,
filter_object: Optional[Dict[str, Any]] = None,
) -> Any:
json_payload = query_dict.copy()
if filter_object:
json_payload["filter"] = filter_object
res = requests.request(
method,
url,
headers=self.headers,
json=json_payload,
timeout=self.request_timeout_sec,
)
res.raise_for_status()
return res.json()
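# Usage sketch (token and database id are placeholders; `filter_object`
# follows the Notion database-query filter schema shown in the class
# docstring):
#
#     loader = NotionDBLoader(
#         integration_token="<NOTION_TOKEN>",
#         database_id="<DATABASE_ID>",
#         filter_object={
#             "timestamp": "last_edited_time",
#             "last_edited_time": {"on_or_after": "2024-02-07"},
#         },
#     )
#     docs = loader.load()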
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/html_bs.py | import importlib.util
import logging
from pathlib import Path
from typing import Dict, Iterator, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class BSHTMLLoader(BaseLoader):
"""
    BSHTMLLoader document loader integration
Setup:
Install ``langchain-community`` and ``bs4``.
.. code-block:: bash
pip install -U langchain-community bs4
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import BSHTMLLoader
loader = BSHTMLLoader(
file_path="./example_data/fake-content.html",
)
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Test Title
My First Heading
My first paragraph.
{'source': './example_data/fake-content.html', 'title': 'Test Title'}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Test Title
My First Heading
My first paragraph.
{'source': './example_data/fake-content.html', 'title': 'Test Title'}
""" # noqa: E501
def __init__(
self,
file_path: Union[str, Path],
open_encoding: Union[str, None] = None,
bs_kwargs: Union[dict, None] = None,
get_text_separator: str = "",
) -> None:
"""initialize with path, and optionally, file encoding to use, and any kwargs
to pass to the BeautifulSoup object.
Args:
file_path: The path to the file to load.
open_encoding: The encoding to use when opening the file.
bs_kwargs: Any kwargs to pass to the BeautifulSoup object.
get_text_separator: The separator to use when calling get_text on the soup.
"""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.file_path = file_path
self.open_encoding = open_encoding
if bs_kwargs is None:
if not importlib.util.find_spec("lxml"):
raise ImportError(
"By default BSHTMLLoader uses the 'lxml' package. Please either "
"install it with `pip install -U lxml` or pass in init arg "
"`bs_kwargs={'features': '...'}` to overwrite the default "
"BeautifulSoup kwargs."
)
bs_kwargs = {"features": "lxml"}
self.bs_kwargs = bs_kwargs
self.get_text_separator = get_text_separator
def lazy_load(self) -> Iterator[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with open(self.file_path, "r", encoding=self.open_encoding) as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": str(self.file_path),
"title": title,
}
yield Document(page_content=text, metadata=metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/oracleai.py | # Authors:
# Harichandan Roy (hroy)
# David Jiang (ddjiang)
#
# -----------------------------------------------------------------------------
# oracleai.py
# -----------------------------------------------------------------------------
from __future__ import annotations
import hashlib
import json
import logging
import os
import random
import struct
import time
import traceback
from html.parser import HTMLParser
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_text_splitters import TextSplitter
if TYPE_CHECKING:
from oracledb import Connection
logger = logging.getLogger(__name__)
"""ParseOracleDocMetadata class"""
class ParseOracleDocMetadata(HTMLParser):
"""Parse Oracle doc metadata..."""
def __init__(self) -> None:
super().__init__()
self.reset()
self.match = False
self.metadata: Dict[str, Any] = {}
def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
if tag == "meta":
entry: Optional[str] = ""
for name, value in attrs:
if name == "name":
entry = value
if name == "content":
if entry:
self.metadata[entry] = value
elif tag == "title":
self.match = True
def handle_data(self, data: str) -> None:
if self.match:
self.metadata["title"] = data
self.match = False
def get_metadata(self) -> Dict[str, Any]:
return self.metadata
"""OracleDocReader class"""
class OracleDocReader:
"""Read a file"""
@staticmethod
def generate_object_id(input_string: Union[str, None] = None) -> str:
out_length = 32 # output length
hash_len = 8 # hash value length
if input_string is None:
input_string = "".join(
random.choices(
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
k=16,
)
)
# timestamp
timestamp = int(time.time())
timestamp_bin = struct.pack(">I", timestamp) # 4 bytes
# hash_value
hashval_bin = hashlib.sha256(input_string.encode()).digest()
hashval_bin = hashval_bin[:hash_len] # 8 bytes
# counter
counter_bin = struct.pack(">I", random.getrandbits(32)) # 4 bytes
# binary object id
object_id = timestamp_bin + hashval_bin + counter_bin # 16 bytes
object_id_hex = object_id.hex() # 32 bytes
object_id_hex = object_id_hex.zfill(
out_length
) # fill with zeros if less than 32 bytes
object_id_hex = object_id_hex[:out_length]
return object_id_hex
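    # Layout of the generated id: 4-byte timestamp + 8-byte sha256 prefix +
    # 4-byte random counter = 16 bytes, rendered as 32 hex characters
    # (8 timestamp hex digits, 16 hash hex digits, 8 counter hex digits).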
@staticmethod
def read_file(
conn: Connection, file_path: str, params: dict
) -> Union[Document, None]:
"""Read a file using OracleReader
Args:
conn: Oracle Connection,
file_path: Oracle Directory,
params: ONNX file name.
Returns:
Plain text and metadata as Langchain Document.
"""
metadata: Dict[str, Any] = {}
try:
import oracledb
except ImportError as e:
raise ImportError(
"Unable to import oracledb, please install with "
"`pip install -U oracledb`."
) from e
        cursor = None
        try:
            oracledb.defaults.fetch_lobs = False
            cursor = conn.cursor()
with open(file_path, "rb") as f:
data = f.read()
if data is None:
return Document(page_content="", metadata=metadata)
mdata = cursor.var(oracledb.DB_TYPE_CLOB)
text = cursor.var(oracledb.DB_TYPE_CLOB)
cursor.execute(
"""
declare
input blob;
begin
input := :blob;
:mdata := dbms_vector_chain.utl_to_text(input, json(:pref));
:text := dbms_vector_chain.utl_to_text(input);
end;""",
blob=data,
pref=json.dumps(params),
mdata=mdata,
text=text,
)
cursor.close()
if mdata is None:
metadata = {}
else:
doc_data = str(mdata.getvalue())
if doc_data.startswith("<!DOCTYPE html") or doc_data.startswith(
"<HTML>"
):
p = ParseOracleDocMetadata()
p.feed(doc_data)
metadata = p.get_metadata()
doc_id = OracleDocReader.generate_object_id(conn.username + "$" + file_path)
metadata["_oid"] = doc_id
metadata["_file"] = file_path
if text is None:
return Document(page_content="", metadata=metadata)
else:
return Document(page_content=str(text.getvalue()), metadata=metadata)
        except Exception as ex:
            logger.info(f"An exception occurred :: {ex}")
            logger.info(f"Skip processing {file_path}")
            # cursor may not have been created if the failure happened early
            if cursor is not None:
                cursor.close()
            return None
"""OracleDocLoader class"""
class OracleDocLoader(BaseLoader):
"""Read documents using OracleDocLoader
Args:
conn: Oracle Connection,
params: Loader parameters.
"""
def __init__(self, conn: Connection, params: Dict[str, Any], **kwargs: Any):
self.conn = conn
self.params = json.loads(json.dumps(params))
super().__init__(**kwargs)
def load(self) -> List[Document]:
"""Load data into LangChain Document objects..."""
try:
import oracledb
except ImportError as e:
raise ImportError(
"Unable to import oracledb, please install with "
"`pip install -U oracledb`."
) from e
ncols = 0
results: List[Document] = []
metadata: Dict[str, Any] = {}
m_params = {"plaintext": "false"}
try:
# extract the parameters
if self.params is not None:
self.file = self.params.get("file")
self.dir = self.params.get("dir")
self.owner = self.params.get("owner")
self.tablename = self.params.get("tablename")
self.colname = self.params.get("colname")
else:
raise Exception("Missing loader parameters")
oracledb.defaults.fetch_lobs = False
if self.file:
doc = OracleDocReader.read_file(self.conn, self.file, m_params)
if doc is None:
return results
results.append(doc)
if self.dir:
skip_count = 0
for file_name in os.listdir(self.dir):
file_path = os.path.join(self.dir, file_name)
if os.path.isfile(file_path):
doc = OracleDocReader.read_file(self.conn, file_path, m_params)
if doc is None:
skip_count = skip_count + 1
logger.info(f"Total skipped: {skip_count}\n")
else:
results.append(doc)
if self.tablename:
                cursor = None
                try:
                    if self.owner is None or self.colname is None:
                        raise Exception("Missing owner or column name or both.")
                    cursor = self.conn.cursor()
self.mdata_cols = self.params.get("mdata_cols")
if self.mdata_cols is not None:
if len(self.mdata_cols) > 3:
raise Exception(
"Exceeds the max number of columns "
+ "you can request for metadata."
)
# execute a query to get column data types
sql = (
"select column_name, data_type from all_tab_columns "
+ "where owner = :ownername and "
+ "table_name = :tablename"
)
cursor.execute(
sql,
ownername=self.owner.upper(),
tablename=self.tablename.upper(),
)
# cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
if row[0] in self.mdata_cols:
if row[1] not in [
"NUMBER",
"BINARY_DOUBLE",
"BINARY_FLOAT",
"LONG",
"DATE",
"TIMESTAMP",
"VARCHAR2",
]:
raise Exception(
"The datatype for the column requested "
+ "for metadata is not supported."
)
self.mdata_cols_sql = ", rowid"
if self.mdata_cols is not None:
for col in self.mdata_cols:
self.mdata_cols_sql = self.mdata_cols_sql + ", " + col
# [TODO] use bind variables
sql = (
"select dbms_vector_chain.utl_to_text(t."
+ self.colname
+ ", json('"
+ json.dumps(m_params)
+ "')) mdata, dbms_vector_chain.utl_to_text(t."
+ self.colname
+ ") text"
+ self.mdata_cols_sql
+ " from "
+ self.owner
+ "."
+ self.tablename
+ " t"
)
cursor.execute(sql)
for row in cursor:
metadata = {}
if row is None:
doc_id = OracleDocReader.generate_object_id(
self.conn.username
+ "$"
+ self.owner
+ "$"
+ self.tablename
+ "$"
+ self.colname
)
metadata["_oid"] = doc_id
results.append(Document(page_content="", metadata=metadata))
else:
if row[0] is not None:
data = str(row[0])
if data.startswith("<!DOCTYPE html") or data.startswith(
"<HTML>"
):
p = ParseOracleDocMetadata()
p.feed(data)
metadata = p.get_metadata()
doc_id = OracleDocReader.generate_object_id(
self.conn.username
+ "$"
+ self.owner
+ "$"
+ self.tablename
+ "$"
+ self.colname
+ "$"
+ str(row[2])
)
metadata["_oid"] = doc_id
metadata["_rowid"] = row[2]
# process projected metadata cols
if self.mdata_cols is not None:
ncols = len(self.mdata_cols)
for i in range(0, ncols):
metadata[self.mdata_cols[i]] = row[i + 2]
if row[1] is None:
results.append(
Document(page_content="", metadata=metadata)
)
else:
results.append(
Document(
page_content=str(row[1]), metadata=metadata
)
)
                except Exception as ex:
                    logger.info(f"An exception occurred :: {ex}")
                    traceback.print_exc()
                    # cursor may be unset if the owner/colname check failed
                    if cursor is not None:
                        cursor.close()
                    raise
return results
except Exception as ex:
logger.info(f"An exception occurred :: {ex}")
traceback.print_exc()
raise
class OracleTextSplitter(TextSplitter):
"""Splitting text using Oracle chunker."""
def __init__(self, conn: Connection, params: Dict[str, Any], **kwargs: Any) -> None:
"""Initialize."""
self.conn = conn
self.params = params
super().__init__(**kwargs)
        try:
            import oracledb
        except ImportError as e:
            raise ImportError(
                "Unable to import oracledb, please install with "
                "`pip install -U oracledb`."
            ) from e
        self._oracledb = oracledb
        self._json = json  # stdlib json, imported at module level
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
try:
import oracledb
except ImportError as e:
raise ImportError(
"Unable to import oracledb, please install with "
"`pip install -U oracledb`."
) from e
splits = []
try:
# returns strings or bytes instead of a locator
self._oracledb.defaults.fetch_lobs = False
cursor = self.conn.cursor()
cursor.setinputsizes(content=oracledb.CLOB)
cursor.execute(
"select t.column_value from "
+ "dbms_vector_chain.utl_to_chunks(:content, json(:params)) t",
content=text,
params=self._json.dumps(self.params),
)
while True:
row = cursor.fetchone()
if row is None:
break
d = self._json.loads(row[0])
splits.append(d["chunk_data"])
return splits
except Exception as ex:
logger.info(f"An exception occurred :: {ex}")
traceback.print_exc()
raise
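# Usage sketch (connection setup elided; the `params` keys follow the JSON
# parameters of dbms_vector_chain.utl_to_chunks and are illustrative):
#
#     splitter = OracleTextSplitter(conn=connection, params={"by": "words", "max": "100"})
#     chunks = splitter.split_text("Some long document text ...")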
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/mediawikidump.py | import logging
from pathlib import Path
from typing import Iterator, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class MWDumpLoader(BaseLoader):
"""Load `MediaWiki` dump from an `XML` file.
Example:
.. code-block:: python
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import MWDumpLoader
loader = MWDumpLoader(
file_path="myWiki.xml",
encoding="utf8"
)
docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=0
)
texts = text_splitter.split_documents(docs)
:param file_path: XML local file path
:type file_path: str
:param encoding: Charset encoding, defaults to "utf8"
:type encoding: str, optional
:param namespaces: The namespace of pages you want to parse.
See https://www.mediawiki.org/wiki/Help:Namespaces#Localisation
for a list of all common namespaces
:type namespaces: List[int],optional
    :param skip_redirects: True to skip pages that redirect to other pages,
False to keep them. False by default
:type skip_redirects: bool, optional
:param stop_on_error: False to skip over pages that cause parsing errors,
True to stop. True by default
:type stop_on_error: bool, optional
"""
def __init__(
self,
file_path: Union[str, Path],
encoding: Optional[str] = "utf8",
namespaces: Optional[Sequence[int]] = None,
skip_redirects: Optional[bool] = False,
stop_on_error: Optional[bool] = True,
):
self.file_path = file_path if isinstance(file_path, str) else str(file_path)
self.encoding = encoding
# Namespaces range from -2 to 15, inclusive.
self.namespaces = namespaces
self.skip_redirects = skip_redirects
self.stop_on_error = stop_on_error
def _load_dump_file(self): # type: ignore[no-untyped-def]
try:
import mwxml
except ImportError as e:
raise ImportError(
"Unable to import 'mwxml'. Please install with" " `pip install mwxml`."
) from e
return mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))
def _load_single_page_from_dump(self, page) -> Document: # type: ignore[no-untyped-def, return]
"""Parse a single page."""
try:
import mwparserfromhell
except ImportError as e:
raise ImportError(
"Unable to import 'mwparserfromhell'. Please install with"
" `pip install mwparserfromhell`."
) from e
for revision in page:
code = mwparserfromhell.parse(revision.text)
text = code.strip_code(
normalize=True, collapse=True, keep_template_params=False
)
metadata = {"source": page.title}
return Document(page_content=text, metadata=metadata)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load from a file path."""
dump = self._load_dump_file()
for page in dump.pages:
if self.skip_redirects and page.redirect:
continue
if self.namespaces and page.namespace not in self.namespaces:
continue
try:
yield self._load_single_page_from_dump(page)
except Exception as e:
logger.error("Parsing error: {}".format(e))
if self.stop_on_error:
raise e
else:
continue
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/telegram.py | from __future__ import annotations
import asyncio
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import pandas as pd
from telethon.hints import EntityLike
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used."""
date = row["date"]
sender = row["from"]
text = row["text"]
return f"{sender} on {date}: {text}\n\n"
class TelegramChatFileLoader(BaseLoader):
"""Load from `Telegram chat` dump."""
def __init__(self, path: Union[str, Path]):
"""Initialize with a path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message["type"] == "message" and isinstance(message["text"], str)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
def text_to_docs(text: Union[str, List[str]]) -> List[Document]:
"""Convert a string or list of strings to a list of Documents with metadata."""
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=20,
)
if isinstance(text, str):
# Take a single string as one page
text = [text]
page_docs = [Document(page_content=page) for page in text]
# Add page numbers as metadata
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
# Split pages into chunks
doc_chunks = []
for doc in page_docs:
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
)
            # Add sources as metadata
doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
doc_chunks.append(doc)
return doc_chunks
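# Example: a single long string becomes several Documents whose metadata
# records page and chunk position, e.g.
#
#     docs = text_to_docs(long_text)
#     docs[0].metadata  # {'page': 1, 'chunk': 0, 'source': '1-0'}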
class TelegramChatApiLoader(BaseLoader):
"""Load `Telegram` chat json directory dump."""
def __init__(
self,
chat_entity: Optional[EntityLike] = None,
api_id: Optional[int] = None,
api_hash: Optional[str] = None,
username: Optional[str] = None,
file_path: str = "telegram_data.json",
):
"""Initialize with API parameters.
Args:
chat_entity: The chat entity to fetch data from.
api_id: The API ID.
api_hash: The API hash.
username: The username.
file_path: The file path to save the data to. Defaults to
"telegram_data.json".
"""
self.chat_entity = chat_entity
self.api_id = api_id
self.api_hash = api_hash
self.username = username
self.file_path = file_path
async def fetch_data_from_telegram(self) -> None:
"""Fetch data from Telegram API and save it as a JSON file."""
from telethon.sync import TelegramClient
data = []
async with TelegramClient(self.username, self.api_id, self.api_hash) as client:
async for message in client.iter_messages(self.chat_entity):
is_reply = message.reply_to is not None
reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None
data.append(
{
"sender_id": message.sender_id,
"text": message.text,
"date": message.date.isoformat(),
"message.id": message.id,
"is_reply": is_reply,
"reply_to_id": reply_to_id,
}
)
with open(self.file_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def _get_message_threads(self, data: pd.DataFrame) -> dict:
"""Create a dictionary of message threads from the given data.
Args:
data (pd.DataFrame): A DataFrame containing the conversation \
data with columns:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
dict: A dictionary where the key is the parent message ID and \
the value is a list of message IDs in ascending order.
"""
def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]:
"""
Recursively find all replies to a given parent message ID.
Args:
parent_id (int): The parent message ID.
reply_data (pd.DataFrame): A DataFrame containing reply messages.
Returns:
list: A list of message IDs that are replies to the parent message ID.
"""
# Find direct replies to the parent message ID
direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][
"message.id"
].tolist()
# Recursively find replies to the direct replies
all_replies = []
for reply_id in direct_replies:
all_replies += [reply_id] + find_replies(reply_id, reply_data)
return all_replies
# Filter out parent messages
parent_messages = data[~data["is_reply"]]
# Filter out reply messages and drop rows with NaN in 'reply_to_id'
reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"])
# Convert 'reply_to_id' to integer
reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int)
# Create a dictionary of message threads with parent message IDs as keys and \
# lists of reply message IDs as values
message_threads = {
parent_id: [parent_id] + find_replies(parent_id, reply_messages)
for parent_id in parent_messages["message.id"]
}
return message_threads
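    # Example of the returned mapping (hypothetical message ids): each parent
    # maps to itself plus all transitive replies, e.g. {10: [10, 11, 13], 12: [12]}.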
def _combine_message_texts(
self, message_threads: Dict[int, List[int]], data: pd.DataFrame
) -> str:
"""
Combine the message texts for each parent message ID based \
on the list of message threads.
Args:
message_threads (dict): A dictionary where the key is the parent message \
ID and the value is a list of message IDs in ascending order.
data (pd.DataFrame): A DataFrame containing the conversation data:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
str: A combined string of message texts sorted by date.
"""
combined_text = ""
# Iterate through sorted parent message IDs
for parent_id, message_ids in message_threads.items():
# Get the message texts for the message IDs and sort them by date
message_texts = (
data[data["message.id"].isin(message_ids)]
.sort_values(by="date")["text"]
.tolist()
)
message_texts = [str(elem) for elem in message_texts]
# Combine the message texts
combined_text += " ".join(message_texts) + ".\n"
return combined_text.strip()
def load(self) -> List[Document]:
"""Load documents."""
if self.chat_entity is not None:
try:
import nest_asyncio
nest_asyncio.apply()
asyncio.run(self.fetch_data_from_telegram())
except ImportError:
raise ImportError(
"""`nest_asyncio` package not found.
please install with `pip install nest_asyncio`
"""
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
try:
import pandas as pd
except ImportError:
raise ImportError(
"""`pandas` package not found.
please install with `pip install pandas`
"""
)
normalized_messages = pd.json_normalize(d)
df = pd.DataFrame(normalized_messages)
message_threads = self._get_message_threads(df)
combined_texts = self._combine_message_texts(message_threads, df)
return text_to_docs(combined_texts)
# For backwards compatibility
TelegramChatLoader = TelegramChatFileLoader
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/dedoc.py | import html
import json
import os
from abc import ABC, abstractmethod
from typing import (
Dict,
Iterator,
Optional,
Tuple,
Union,
)
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class DedocBaseLoader(BaseLoader, ABC):
"""
Base Loader that uses `dedoc` (https://dedoc.readthedocs.io).
Loader enables extracting text, tables and attached files from the given file:
* `Text` can be split by pages, `dedoc` tree nodes, textual lines
(according to the `split` parameter).
* `Attached files` (when with_attachments=True)
are split according to the `split` parameter.
For attachments, langchain Document object has an additional metadata field
`type`="attachment".
* `Tables` (when with_tables=True) are not split - each table corresponds to one
langchain Document object.
For tables, Document object has additional metadata fields `type`="table"
and `text_as_html` with table HTML representation.
"""
def __init__(
self,
file_path: str,
*,
split: str = "document",
with_tables: bool = True,
with_attachments: Union[str, bool] = False,
recursion_deep_attachments: int = 10,
pdf_with_text_layer: str = "auto_tabby",
language: str = "rus+eng",
pages: str = ":",
is_one_column_document: str = "auto",
document_orientation: str = "auto",
need_header_footer_analysis: Union[str, bool] = False,
need_binarization: Union[str, bool] = False,
need_pdf_table_analysis: Union[str, bool] = True,
delimiter: Optional[str] = None,
encoding: Optional[str] = None,
) -> None:
"""
Initialize with file path and parsing parameters.
Args:
file_path: path to the file for processing
split: type of document splitting into parts (each part is returned
separately), default value "document"
"document": document text is returned as a single langchain Document
object (don't split)
"page": split document text into pages (works for PDF, DJVU, PPTX, PPT,
ODP)
"node": split document text into tree nodes (title nodes, list item
nodes, raw text nodes)
"line": split document text into lines
with_tables: add tables to the result - each table is returned as a single
langchain Document object
Parameters used for document parsing via `dedoc`
(https://dedoc.readthedocs.io/en/latest/parameters/parameters.html):
with_attachments: enable attached files extraction
recursion_deep_attachments: recursion level for attached files
extraction, works only when with_attachments==True
pdf_with_text_layer: type of handler for parsing PDF documents,
available options
["true", "false", "tabby", "auto", "auto_tabby" (default)]
language: language of the document for PDF without a textual layer and
images, available options ["eng", "rus", "rus+eng" (default)],
the list of languages can be extended, please see
https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html
pages: page slice to define the reading range for parsing PDF documents
is_one_column_document: detect number of columns for PDF without
a textual layer and images, available options
["true", "false", "auto" (default)]
document_orientation: fix document orientation (90, 180, 270 degrees)
for PDF without a textual layer and images, available options
["auto" (default), "no_change"]
need_header_footer_analysis: remove headers and footers from the output
result for parsing PDF and images
need_binarization: clean pages background (binarize) for PDF without a
textual layer and images
need_pdf_table_analysis: parse tables for PDF without a textual layer
and images
delimiter: column separator for CSV, TSV files
encoding: encoding of TXT, CSV, TSV
"""
self.parsing_parameters = {
key: value
for key, value in locals().items()
if key not in {"self", "file_path", "split", "with_tables"}
}
self.valid_split_values = {"document", "page", "node", "line"}
if split not in self.valid_split_values:
raise ValueError(
f"Got {split} for `split`, but should be one of "
f"`{self.valid_split_values}`"
)
self.split = split
self.with_tables = with_tables
self.file_path = file_path
structure_type = "tree" if self.split == "node" else "linear"
self.parsing_parameters["structure_type"] = structure_type
self.parsing_parameters["need_content_analysis"] = with_attachments
def lazy_load(self) -> Iterator[Document]:
"""Lazily load documents."""
import tempfile
try:
from dedoc import DedocManager
except ImportError:
raise ImportError(
"`dedoc` package not found, please install it with `pip install dedoc`"
)
dedoc_manager = DedocManager(manager_config=self._make_config())
dedoc_manager.config["logger"].disabled = True
with tempfile.TemporaryDirectory() as tmpdir:
document_tree = dedoc_manager.parse(
file_path=self.file_path,
parameters={**self.parsing_parameters, "attachments_dir": tmpdir},
)
yield from self._split_document(
document_tree=document_tree.to_api_schema().dict(), split=self.split
)
@abstractmethod
def _make_config(self) -> dict:
"""
Make configuration for DedocManager according to the file extension and
parsing parameters.
"""
pass
def _json2txt(self, paragraph: dict) -> str:
"""Get text (recursively) of the document tree node."""
subparagraphs_text = "\n".join(
[
self._json2txt(subparagraph)
for subparagraph in paragraph["subparagraphs"]
]
)
text = (
f"{paragraph['text']}\n{subparagraphs_text}"
if subparagraphs_text
else paragraph["text"]
)
return text
def _parse_subparagraphs(
self, document_tree: dict, document_metadata: dict
) -> Iterator[Document]:
"""Parse recursively document tree obtained by `dedoc`."""
if len(document_tree["subparagraphs"]) > 0:
for subparagraph in document_tree["subparagraphs"]:
yield from self._parse_subparagraphs(
document_tree=subparagraph, document_metadata=document_metadata
)
else:
yield Document(
page_content=document_tree["text"],
metadata={**document_metadata, **document_tree["metadata"]},
)
def _split_document(
self,
document_tree: dict,
split: str,
additional_metadata: Optional[dict] = None,
) -> Iterator[Document]:
"""Split document into parts according to the `split` parameter."""
document_metadata = document_tree["metadata"]
if additional_metadata:
document_metadata = {**document_metadata, **additional_metadata}
if split == "document":
text = self._json2txt(paragraph=document_tree["content"]["structure"])
yield Document(page_content=text, metadata=document_metadata)
elif split == "page":
nodes = document_tree["content"]["structure"]["subparagraphs"]
page_id = nodes[0]["metadata"]["page_id"]
page_text = ""
for node in nodes:
if node["metadata"]["page_id"] == page_id:
page_text += self._json2txt(node)
else:
yield Document(
page_content=page_text,
metadata={**document_metadata, "page_id": page_id},
)
page_id = node["metadata"]["page_id"]
page_text = self._json2txt(node)
yield Document(
page_content=page_text,
metadata={**document_metadata, "page_id": page_id},
)
elif split == "line":
for node in document_tree["content"]["structure"]["subparagraphs"]:
line_metadata = node["metadata"]
yield Document(
page_content=self._json2txt(node),
metadata={**document_metadata, **line_metadata},
)
elif split == "node":
yield from self._parse_subparagraphs(
document_tree=document_tree["content"]["structure"],
document_metadata=document_metadata,
)
else:
raise ValueError(
f"Got {split} for `split`, but should be one of "
f"`{self.valid_split_values}`"
)
if self.with_tables:
for table in document_tree["content"]["tables"]:
table_text, table_html = self._get_table(table)
yield Document(
page_content=table_text,
metadata={
**table["metadata"],
"type": "table",
"text_as_html": table_html,
},
)
for attachment in document_tree["attachments"]:
yield from self._split_document(
document_tree=attachment,
split=self.split,
additional_metadata={"type": "attachment"},
)
def _get_table(self, table: dict) -> Tuple[str, str]:
"""Get text and HTML representation of the table."""
table_text = ""
for row in table["cells"]:
for cell in row:
table_text += " ".join(line["text"] for line in cell["lines"])
table_text += "\t"
table_text += "\n"
table_html = (
'<table border="1" style="border-collapse: collapse; width: 100%;'
'">\n<tbody>\n'
)
for row in table["cells"]:
table_html += "<tr>\n"
for cell in row:
cell_text = "\n".join(line["text"] for line in cell["lines"])
cell_text = html.escape(cell_text)
table_html += "<td"
if cell["invisible"]:
table_html += ' style="display: none" '
table_html += (
f' colspan="{cell["colspan"]}" rowspan='
f'"{cell["rowspan"]}">{cell_text}</td>\n'
)
table_html += "</tr>\n"
table_html += "</tbody>\n</table>"
return table_text, table_html
class DedocFileLoader(DedocBaseLoader):
"""
DedocFileLoader document loader integration to load files using `dedoc`.
    The file loader automatically detects the file type (provided the file
    has the correct extension).
    The list of supported file types is given at
https://dedoc.readthedocs.io/en/latest/index.html#id1.
Please see the documentation of DedocBaseLoader to get more details.
Setup:
Install ``dedoc`` package.
.. code-block:: bash
pip install -U dedoc
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import DedocFileLoader
loader = DedocFileLoader(
file_path="example.pdf",
# split=...,
# with_tables=...,
# pdf_with_text_layer=...,
# pages=...,
# ...
)
Load:
.. code-block:: python
docs = loader.load()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Some text
{
'file_name': 'example.pdf',
'file_type': 'application/pdf',
# ...
}
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Some text
{
'file_name': 'example.pdf',
'file_type': 'application/pdf',
# ...
}
"""
def _make_config(self) -> dict:
from dedoc.utils.langchain import make_manager_config
return make_manager_config(
file_path=self.file_path,
parsing_params=self.parsing_parameters,
split=self.split,
)
class DedocAPIFileLoader(DedocBaseLoader):
"""
Load files using `dedoc` API.
The file loader automatically detects the file type (even with the wrong extension).
By default, the loader makes a call to the locally hosted `dedoc` API.
More information about `dedoc` API can be found in `dedoc` documentation:
https://dedoc.readthedocs.io/en/latest/dedoc_api_usage/api.html
Please see the documentation of DedocBaseLoader to get more details.
Setup:
        You don't need to install the `dedoc` library to use this loader.
        Instead, the `dedoc` API needs to be running.
        You may use a Docker container for this purpose.
Please see `dedoc` documentation for more details:
https://dedoc.readthedocs.io/en/latest/getting_started/installation.html#install-and-run-dedoc-using-docker
.. code-block:: bash
docker pull dedocproject/dedoc
            docker run -p 1231:1231 dedocproject/dedoc
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import DedocAPIFileLoader
loader = DedocAPIFileLoader(
file_path="example.pdf",
# url=...,
# split=...,
# with_tables=...,
# pdf_with_text_layer=...,
# pages=...,
# ...
)
Load:
.. code-block:: python
docs = loader.load()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Some text
{
'file_name': 'example.pdf',
'file_type': 'application/pdf',
# ...
}
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Some text
{
'file_name': 'example.pdf',
'file_type': 'application/pdf',
# ...
}
"""
def __init__(
self,
file_path: str,
*,
url: str = "http://0.0.0.0:1231",
split: str = "document",
with_tables: bool = True,
with_attachments: Union[str, bool] = False,
recursion_deep_attachments: int = 10,
pdf_with_text_layer: str = "auto_tabby",
language: str = "rus+eng",
pages: str = ":",
is_one_column_document: str = "auto",
document_orientation: str = "auto",
need_header_footer_analysis: Union[str, bool] = False,
need_binarization: Union[str, bool] = False,
need_pdf_table_analysis: Union[str, bool] = True,
delimiter: Optional[str] = None,
encoding: Optional[str] = None,
) -> None:
"""Initialize with file path, API url and parsing parameters.
Args:
file_path: path to the file for processing
url: URL to call `dedoc` API
split: type of document splitting into parts (each part is returned
separately), default value "document"
"document": document is returned as a single langchain Document object
(don't split)
"page": split document into pages (works for PDF, DJVU, PPTX, PPT, ODP)
"node": split document into tree nodes (title nodes, list item nodes,
raw text nodes)
"line": split document into lines
with_tables: add tables to the result - each table is returned as a single
langchain Document object
Parameters used for document parsing via `dedoc`
(https://dedoc.readthedocs.io/en/latest/parameters/parameters.html):
with_attachments: enable attached files extraction
recursion_deep_attachments: recursion level for attached files
extraction, works only when with_attachments==True
pdf_with_text_layer: type of handler for parsing PDF documents,
available options
["true", "false", "tabby", "auto", "auto_tabby" (default)]
language: language of the document for PDF without a textual layer and
images, available options ["eng", "rus", "rus+eng" (default)],
the list of languages can be extended, please see
https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html
pages: page slice to define the reading range for parsing PDF documents
is_one_column_document: detect number of columns for PDF without
a textual layer and images, available options
["true", "false", "auto" (default)]
document_orientation: fix document orientation (90, 180, 270 degrees)
for PDF without a textual layer and images, available options
["auto" (default), "no_change"]
need_header_footer_analysis: remove headers and footers from the output
result for parsing PDF and images
need_binarization: clean pages background (binarize) for PDF without a
textual layer and images
need_pdf_table_analysis: parse tables for PDF without a textual layer
and images
delimiter: column separator for CSV, TSV files
encoding: encoding of TXT, CSV, TSV
"""
super().__init__(
file_path=file_path,
split=split,
with_tables=with_tables,
with_attachments=with_attachments,
recursion_deep_attachments=recursion_deep_attachments,
pdf_with_text_layer=pdf_with_text_layer,
language=language,
pages=pages,
is_one_column_document=is_one_column_document,
document_orientation=document_orientation,
need_header_footer_analysis=need_header_footer_analysis,
need_binarization=need_binarization,
need_pdf_table_analysis=need_pdf_table_analysis,
delimiter=delimiter,
encoding=encoding,
)
self.url = url
self.parsing_parameters["return_format"] = "json"
def lazy_load(self) -> Iterator[Document]:
"""Lazily load documents."""
doc_tree = self._send_file(
url=self.url, file_path=self.file_path, parameters=self.parsing_parameters
)
yield from self._split_document(document_tree=doc_tree, split=self.split)
def _make_config(self) -> dict:
return {}
def _send_file(
self, url: str, file_path: str, parameters: dict
) -> Dict[str, Union[list, dict, str]]:
"""Send POST-request to `dedoc` API and return the results"""
import requests
file_name = os.path.basename(file_path)
with open(file_path, "rb") as file:
files = {"file": (file_name, file)}
r = requests.post(f"{url}/upload", files=files, data=parameters)
if r.status_code != 200:
raise ValueError(f"Error during file handling: {r.content.decode()}")
result = json.loads(r.content.decode())
return result
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/vsdx.py | import os
import tempfile
from abc import ABC
from pathlib import Path
from typing import List, Union
from urllib.parse import urlparse
import requests
from langchain_community.docstore.document import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers import VsdxParser
class VsdxLoader(BaseLoader, ABC):
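    """Load `Visio` (.vsdx) files using `VsdxParser`.

    A minimal usage sketch (assuming a local "example.vsdx" file; an http(s)
    URL is also accepted and is downloaded to a temporary file first):

    .. code-block:: python

        from langchain_community.document_loaders import VsdxLoader

        loader = VsdxLoader(file_path="example.vsdx")
        docs = loader.load()
    """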
def __init__(self, file_path: Union[str, Path]):
"""Initialize with file path."""
self.file_path = str(file_path)
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
self.parser = VsdxParser() # type: ignore[misc]
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def load(self) -> List[Document]:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
return list(self.parser.parse(blob))
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/epub.py | from typing import List
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
satisfies_min_unstructured_version,
)
class UnstructuredEPubLoader(UnstructuredFileLoader):
"""Load `EPub` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredEPubLoader
loader = UnstructuredEPubLoader(
"example.epub", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-epub
"""
def _get_elements(self) -> List:
min_unstructured_version = "0.5.4"
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
"Partitioning epub files is only supported in "
f"unstructured>={min_unstructured_version}."
)
from unstructured.partition.epub import partition_epub
return partition_epub(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/firecrawl.py | import warnings
from typing import Iterator, Literal, Optional
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_core.utils import get_from_env
class FireCrawlLoader(BaseLoader):
"""
FireCrawlLoader document loader integration
Setup:
Install ``firecrawl-py``,``langchain_community`` and set environment variable ``FIRECRAWL_API_KEY``.
.. code-block:: bash
pip install -U firecrawl-py langchain_community
export FIRECRAWL_API_KEY="your-api-key"
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import FireCrawlLoader
loader = FireCrawlLoader(
url = "https://firecrawl.dev",
mode = "crawl"
# other params = ...
)
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Introducing [Smart Crawl!](https://www.firecrawl.dev/smart-crawl)
Join the waitlist to turn any web
{'ogUrl': 'https://www.firecrawl.dev/', 'title': 'Home - Firecrawl', 'robots': 'follow, index', 'ogImage': 'https://www.firecrawl.dev/og.png?123', 'ogTitle': 'Firecrawl', 'sitemap': {'lastmod': '2024-08-12T00:28:16.681Z', 'changefreq': 'weekly'}, 'keywords': 'Firecrawl,Markdown,Data,Mendable,Langchain', 'sourceURL': 'https://www.firecrawl.dev/', 'ogSiteName': 'Firecrawl', 'description': 'Firecrawl crawls and converts any website into clean markdown.', 'ogDescription': 'Turn any website into LLM-ready data.', 'pageStatusCode': 200, 'ogLocaleAlternate': []}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Introducing [Smart Crawl!](https://www.firecrawl.dev/smart-crawl)
Join the waitlist to turn any web
{'ogUrl': 'https://www.firecrawl.dev/', 'title': 'Home - Firecrawl', 'robots': 'follow, index', 'ogImage': 'https://www.firecrawl.dev/og.png?123', 'ogTitle': 'Firecrawl', 'sitemap': {'lastmod': '2024-08-12T00:28:16.681Z', 'changefreq': 'weekly'}, 'keywords': 'Firecrawl,Markdown,Data,Mendable,Langchain', 'sourceURL': 'https://www.firecrawl.dev/', 'ogSiteName': 'Firecrawl', 'description': 'Firecrawl crawls and converts any website into clean markdown.', 'ogDescription': 'Turn any website into LLM-ready data.', 'pageStatusCode': 200, 'ogLocaleAlternate': []}
""" # noqa: E501
def legacy_crawler_options_adapter(self, params: dict) -> dict:
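        """Translate deprecated (v0) crawler params to their v1 equivalents.

        A ``DeprecationWarning`` is emitted when any legacy key is present.
        """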
use_legacy_options = False
legacy_keys = [
"includes",
"excludes",
"allowBackwardCrawling",
"allowExternalContentLinks",
"pageOptions",
]
for key in legacy_keys:
if params.get(key):
use_legacy_options = True
break
if use_legacy_options:
warnings.warn(
"Deprecated parameters detected. See Firecrawl v1 docs for updates.",
DeprecationWarning,
)
if "includes" in params:
if params["includes"] is True:
params["includePaths"] = params["includes"]
del params["includes"]
if "excludes" in params:
if params["excludes"] is True:
params["excludePaths"] = params["excludes"]
del params["excludes"]
if "allowBackwardCrawling" in params:
if params["allowBackwardCrawling"] is True:
params["allowBackwardLinks"] = params["allowBackwardCrawling"]
del params["allowBackwardCrawling"]
if "allowExternalContentLinks" in params:
if params["allowExternalContentLinks"] is True:
params["allowExternalLinks"] = params["allowExternalContentLinks"]
del params["allowExternalContentLinks"]
if "pageOptions" in params:
if isinstance(params["pageOptions"], dict):
params["scrapeOptions"] = self.legacy_scrape_options_adapter(
params["pageOptions"]
)
del params["pageOptions"]
return params
def legacy_scrape_options_adapter(self, params: dict) -> dict:
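        """Translate deprecated (v0) scrape params to their v1 equivalents.

        Legacy ``extractorOptions`` are folded into ``prompt``/``schema``,
        and ``include*`` flags into the v1 ``formats`` list.
        """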
use_legacy_options = False
formats = ["markdown"]
if "extractorOptions" in params:
if "mode" in params["extractorOptions"]:
if (
params["extractorOptions"]["mode"] == "llm-extraction"
or params["extractorOptions"]["mode"]
== "llm-extraction-from-raw-html"
or params["extractorOptions"]["mode"]
== "llm-extraction-from-markdown"
):
use_legacy_options = True
if "extractionPrompt" in params["extractorOptions"]:
if params["extractorOptions"]["extractionPrompt"]:
params["prompt"] = params["extractorOptions"][
"extractionPrompt"
]
else:
params["prompt"] = params["extractorOptions"].get(
"extractionPrompt",
"Extract page information based on the schema.",
)
if "extractionSchema" in params["extractorOptions"]:
if params["extractorOptions"]["extractionSchema"]:
params["schema"] = params["extractorOptions"][
"extractionSchema"
]
if "userPrompt" in params["extractorOptions"]:
if params["extractorOptions"]["userPrompt"]:
params["prompt"] = params["extractorOptions"]["userPrompt"]
del params["extractorOptions"]
scrape_keys = [
"includeMarkdown",
"includeHtml",
"includeRawHtml",
"includeExtract",
"includeLinks",
"screenshot",
"fullPageScreenshot",
"onlyIncludeTags",
"removeTags",
]
for key in scrape_keys:
if params.get(key):
use_legacy_options = True
break
if use_legacy_options:
warnings.warn(
"Deprecated parameters detected. See Firecrawl v1 docs for updates.",
DeprecationWarning,
)
if "includeMarkdown" in params:
if params["includeMarkdown"] is False:
formats.remove("markdown")
del params["includeMarkdown"]
if "includeHtml" in params:
if params["includeHtml"] is True:
formats.append("html")
del params["includeHtml"]
if "includeRawHtml" in params:
if params["includeRawHtml"] is True:
formats.append("rawHtml")
del params["includeRawHtml"]
if "includeExtract" in params:
if params["includeExtract"] is True:
formats.append("extract")
del params["includeExtract"]
if "includeLinks" in params:
if params["includeLinks"] is True:
formats.append("links")
del params["includeLinks"]
if "screenshot" in params:
if params["screenshot"] is True:
formats.append("screenshot")
del params["screenshot"]
if "fullPageScreenshot" in params:
if params["fullPageScreenshot"] is True:
formats.append("screenshot@fullPage")
del params["fullPageScreenshot"]
if "onlyIncludeTags" in params:
if params["onlyIncludeTags"] is True:
params["includeTags"] = params["onlyIncludeTags"]
del params["onlyIncludeTags"]
if "removeTags" in params:
if params["removeTags"] is True:
params["excludeTags"] = params["removeTags"]
del params["removeTags"]
if "formats" not in params:
params["formats"] = formats
return params
def __init__(
self,
url: str,
*,
api_key: Optional[str] = None,
api_url: Optional[str] = None,
mode: Literal["crawl", "scrape", "map"] = "crawl",
params: Optional[dict] = None,
):
"""Initialize with API key and url.
Args:
url: The url to be crawled.
            api_key: The Firecrawl API key. If not specified, it will be read
                from the env var FIRECRAWL_API_KEY. Get an API key at
                https://firecrawl.dev.
            api_url: The Firecrawl API URL. If not specified, it will be read
                from the env var FIRECRAWL_API_URL, defaulting to
                https://api.firecrawl.dev.
mode: The mode to run the loader in. Default is "crawl".
Options include "scrape" (single url),
"crawl" (all accessible sub pages),
"map" (returns list of links that are semantically related).
params: The parameters to pass to the Firecrawl API.
Examples include crawlerOptions.
For more details, visit: https://github.com/mendableai/firecrawl-py
"""
try:
from firecrawl import FirecrawlApp
except ImportError:
raise ImportError(
"`firecrawl` package not found, please run `pip install firecrawl-py`"
)
if mode not in ("crawl", "scrape", "search", "map"):
raise ValueError(
f"Invalid mode '{mode}'. Allowed: 'crawl', 'scrape', 'search', 'map'."
)
if not url:
raise ValueError("Url must be provided")
api_key = api_key or get_from_env("api_key", "FIRECRAWL_API_KEY")
self.firecrawl = FirecrawlApp(api_key=api_key, api_url=api_url)
self.url = url
self.mode = mode
self.params = params or {}
def lazy_load(self) -> Iterator[Document]:
if self.mode == "scrape":
firecrawl_docs = [
self.firecrawl.scrape_url(
self.url, params=self.legacy_scrape_options_adapter(self.params)
)
]
elif self.mode == "crawl":
if not self.url:
raise ValueError("URL is required for crawl mode")
crawl_response = self.firecrawl.crawl_url(
self.url, params=self.legacy_crawler_options_adapter(self.params)
)
firecrawl_docs = crawl_response.get("data", [])
elif self.mode == "map":
if not self.url:
raise ValueError("URL is required for map mode")
firecrawl_docs = self.firecrawl.map_url(self.url, params=self.params)
elif self.mode == "search":
raise ValueError(
"Search mode is not supported in this version, please downgrade."
)
else:
raise ValueError(
f"Invalid mode '{self.mode}'. Allowed: 'crawl', 'scrape', 'map'."
)
for doc in firecrawl_docs:
if self.mode == "map":
page_content = doc
metadata = {}
else:
page_content = (
doc.get("markdown") or doc.get("html") or doc.get("rawHtml", "")
)
metadata = doc.get("metadata", {})
if not page_content:
continue
yield Document(
page_content=page_content,
metadata=metadata,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/url.py | """Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, List
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class UnstructuredURLLoader(BaseLoader):
"""Load files from remote URLs using `Unstructured`.
Use the unstructured partition function to detect the MIME type
and route the file to the appropriate partitioner.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredURLLoader
loader = UnstructuredURLLoader(
urls=["<url-1>", "<url-2>"], mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
mode: str = "single",
show_progress_bar: bool = False,
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
from unstructured.__version__ import __version__ as __unstructured_version__
self.__version = __unstructured_version__
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self._validate_mode(mode)
self.mode = mode
headers = unstructured_kwargs.pop("headers", {})
        if headers:
warn_about_headers = False
if self.__is_non_html_available():
warn_about_headers = not self.__is_headers_available_for_non_html()
else:
warn_about_headers = not self.__is_headers_available_for_html()
if warn_about_headers:
logger.warning(
"You are using an old version of unstructured. "
"The headers parameter is ignored"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headers = headers
self.unstructured_kwargs = unstructured_kwargs
self.show_progress_bar = show_progress_bar
def _validate_mode(self, mode: str) -> None:
_valid_modes = {"single", "elements"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
def __is_headers_available_for_html(self) -> bool:
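        # Strip any pre-release suffix (e.g. "0.5.7-dev1") before comparing.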
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 7)
def __is_headers_available_for_non_html(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 13)
def __is_non_html_available(self) -> bool:
_unstructured_version = self.__version.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
return unstructured_version >= (0, 5, 12)
def load(self) -> List[Document]:
"""Load file."""
from unstructured.partition.auto import partition
from unstructured.partition.html import partition_html
docs: List[Document] = list()
if self.show_progress_bar:
try:
from tqdm import tqdm
except ImportError as e:
raise ImportError(
"Package tqdm must be installed if show_progress_bar=True. "
"Please install with 'pip install tqdm' or set "
"show_progress_bar=False."
) from e
urls = tqdm(self.urls)
else:
urls = self.urls
for url in urls:
try:
if self.__is_non_html_available():
if self.__is_headers_available_for_non_html():
elements = partition(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition(url=url, **self.unstructured_kwargs)
else:
if self.__is_headers_available_for_html():
elements = partition_html(
url=url, headers=self.headers, **self.unstructured_kwargs
)
else:
elements = partition_html(url=url, **self.unstructured_kwargs)
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
else:
raise e
if self.mode == "single":
text = "\n\n".join([str(el) for el in elements])
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
elif self.mode == "elements":
for element in elements:
metadata = element.metadata.to_dict()
metadata["category"] = element.category
docs.append(Document(page_content=str(element), metadata=metadata))
return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/psychic.py | from typing import Iterator, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class PsychicLoader(BaseLoader):
"""Load from `Psychic.dev`."""
def __init__(
self, api_key: str, account_id: str, connector_id: Optional[str] = None
):
"""Initialize with API key, connector id, and account id.
Args:
api_key: The Psychic API key.
account_id: The Psychic account id.
connector_id: The Psychic connector id.
"""
try:
from psychicapi import ConnectorId, Psychic
except ImportError:
raise ImportError(
"`psychicapi` package not found, please run `pip install psychicapi`"
)
self.psychic = Psychic(secret_key=api_key)
self.connector_id = ConnectorId(connector_id)
self.account_id = account_id
def lazy_load(self) -> Iterator[Document]:
psychic_docs = self.psychic.get_documents(
connector_id=self.connector_id, account_id=self.account_id
)
for doc in psychic_docs.documents:
yield Document(
page_content=doc["content"],
metadata={"title": doc["title"], "source": doc["uri"]},
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/toml.py | import json
from pathlib import Path
from typing import Iterator, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class TomlLoader(BaseLoader):
"""Load `TOML` files.
It can load a single source file or several files in a single
directory.
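
    A minimal usage sketch (assuming a local "config.toml" file):

    .. code-block:: python

        from langchain_community.document_loaders import TomlLoader

        loader = TomlLoader("config.toml")
        docs = loader.load()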
"""
def __init__(self, source: Union[str, Path]):
"""Initialize the TomlLoader with a source file or directory."""
self.source = Path(source)
def lazy_load(self) -> Iterator[Document]:
"""Lazily load the TOML documents from the source file or directory."""
import tomli
if self.source.is_file() and self.source.suffix == ".toml":
files = [self.source]
elif self.source.is_dir():
files = list(self.source.glob("**/*.toml"))
else:
raise ValueError("Invalid source path or file type")
for file_path in files:
with file_path.open("r", encoding="utf-8") as file:
content = file.read()
try:
data = tomli.loads(content)
doc = Document(
page_content=json.dumps(data),
metadata={"source": str(file_path)},
)
yield doc
except tomli.TOMLDecodeError as e:
print(f"Error parsing TOML file {file_path}: {e}") # noqa: T201
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/modern_treasury.py | import json
import urllib.request
from base64 import b64encode
from typing import List, Optional
from langchain_core.documents import Document
from langchain_core.utils import get_from_env, stringify_value
from langchain_community.document_loaders.base import BaseLoader
MODERN_TREASURY_ENDPOINTS = {
"payment_orders": "https://app.moderntreasury.com/api/payment_orders",
"expected_payments": "https://app.moderntreasury.com/api/expected_payments",
"returns": "https://app.moderntreasury.com/api/returns",
"incoming_payment_details": "https://app.moderntreasury.com/api/\
incoming_payment_details",
"counterparties": "https://app.moderntreasury.com/api/counterparties",
"internal_accounts": "https://app.moderntreasury.com/api/internal_accounts",
"external_accounts": "https://app.moderntreasury.com/api/external_accounts",
"transactions": "https://app.moderntreasury.com/api/transactions",
"ledgers": "https://app.moderntreasury.com/api/ledgers",
"ledger_accounts": "https://app.moderntreasury.com/api/ledger_accounts",
"ledger_transactions": "https://app.moderntreasury.com/api/ledger_transactions",
"events": "https://app.moderntreasury.com/api/events",
"invoices": "https://app.moderntreasury.com/api/invoices",
}
class ModernTreasuryLoader(BaseLoader):
"""Load from `Modern Treasury`."""
def __init__(
self,
resource: str,
organization_id: Optional[str] = None,
api_key: Optional[str] = None,
) -> None:
"""
Args:
resource: The Modern Treasury resource to load.
organization_id: The Modern Treasury organization ID. It can also be
specified via the environment variable
"MODERN_TREASURY_ORGANIZATION_ID".
api_key: The Modern Treasury API key. It can also be specified via
the environment variable "MODERN_TREASURY_API_KEY".
"""
self.resource = resource
organization_id = organization_id or get_from_env(
"organization_id", "MODERN_TREASURY_ORGANIZATION_ID"
)
api_key = api_key or get_from_env("api_key", "MODERN_TREASURY_API_KEY")
credentials = f"{organization_id}:{api_key}".encode("utf-8")
basic_auth_token = b64encode(credentials).decode("utf-8")
self.headers = {"Authorization": f"Basic {basic_auth_token}"}
def _make_request(self, url: str) -> List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_value(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
def load(self) -> List[Document]:
return self._get_resource()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/arcgis_loader.py | """Document Loader for ArcGIS FeatureLayers."""
from __future__ import annotations
import json
import re
import warnings
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import arcgis
_NOT_PROVIDED = "(Not Provided)"
class ArcGISLoader(BaseLoader):
"""Load records from an ArcGIS FeatureLayer."""
def __init__(
self,
layer: Union[str, arcgis.features.FeatureLayer],
gis: Optional[arcgis.gis.GIS] = None,
where: str = "1=1",
out_fields: Optional[Union[List[str], str]] = None,
return_geometry: bool = False,
result_record_count: Optional[int] = None,
lyr_desc: Optional[str] = None,
**kwargs: Any,
):
try:
import arcgis
except ImportError as e:
raise ImportError(
"arcgis is required to use the ArcGIS Loader. "
"Install it with pip or conda."
) from e
try:
from bs4 import BeautifulSoup
self.BEAUTIFULSOUP = BeautifulSoup
except ImportError:
warnings.warn("BeautifulSoup not found. HTML will not be parsed.")
self.BEAUTIFULSOUP = None
self.gis = gis or arcgis.gis.GIS()
if isinstance(layer, str):
self.url = layer
self.layer = arcgis.features.FeatureLayer(layer, gis=gis)
else:
self.url = layer.url
self.layer = layer
self.layer_properties = self._get_layer_properties(lyr_desc)
self.where = where
if isinstance(out_fields, str):
self.out_fields = out_fields
elif out_fields is None:
self.out_fields = "*"
else:
self.out_fields = ",".join(out_fields)
self.return_geometry = return_geometry
self.result_record_count = result_record_count
self.return_all_records = not isinstance(result_record_count, int)
query_params = dict(
where=self.where,
out_fields=self.out_fields,
return_geometry=self.return_geometry,
return_all_records=self.return_all_records,
result_record_count=self.result_record_count,
)
query_params.update(kwargs)
self.query_params = query_params
def _get_layer_properties(self, lyr_desc: Optional[str] = None) -> dict:
"""Get the layer properties from the FeatureLayer."""
import arcgis
layer_number_pattern = re.compile(r"/\d+$")
props = self.layer.properties
if lyr_desc is None:
# retrieve description from the FeatureLayer if not provided
try:
if self.BEAUTIFULSOUP:
lyr_desc = self.BEAUTIFULSOUP(props["description"]).text
else:
lyr_desc = props["description"]
lyr_desc = lyr_desc or _NOT_PROVIDED
except KeyError:
lyr_desc = _NOT_PROVIDED
try:
item_id = props["serviceItemId"]
item = self.gis.content.get(item_id) or arcgis.features.FeatureLayer(
re.sub(layer_number_pattern, "", self.url),
)
try:
raw_desc = item.description
except AttributeError:
raw_desc = item.properties.description
if self.BEAUTIFULSOUP:
item_desc = self.BEAUTIFULSOUP(raw_desc).text
else:
item_desc = raw_desc
item_desc = item_desc or _NOT_PROVIDED
except KeyError:
item_desc = _NOT_PROVIDED
return {
"layer_description": lyr_desc,
"item_description": item_desc,
"layer_properties": props,
}
def lazy_load(self) -> Iterator[Document]:
"""Lazy load records from FeatureLayer."""
query_response = self.layer.query(**self.query_params)
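        # Each feature is consumed as a plain dict: its attributes become the
        # page content, and geometry is attached only when requested.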
features = (feature.as_dict for feature in query_response)
for feature in features:
attributes = feature["attributes"]
page_content = json.dumps(attributes)
metadata = {
"accessed": f"{datetime.now(timezone.utc).isoformat()}Z",
"name": self.layer_properties["layer_properties"]["name"],
"url": self.url,
"layer_description": self.layer_properties["layer_description"],
"item_description": self.layer_properties["item_description"],
"layer_properties": self.layer_properties["layer_properties"],
}
if self.return_geometry:
try:
metadata["geometry"] = feature["geometry"]
except KeyError:
warnings.warn(
"Geometry could not be retrieved from the feature layer."
)
yield Document(page_content=page_content, metadata=metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/unstructured.py | """Loader that uses unstructured to load files."""
from __future__ import annotations
import logging
import os
from abc import ABC, abstractmethod
from pathlib import Path
from typing import IO, Any, Callable, Iterator, List, Optional, Sequence, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.documents import Document
from typing_extensions import TypeAlias
from langchain_community.document_loaders.base import BaseLoader
Element: TypeAlias = Any
logger = logging.getLogger(__name__)
def satisfies_min_unstructured_version(min_version: str) -> bool:
"""Check if the installed `Unstructured` version exceeds the minimum version
for the feature in question."""
from unstructured.__version__ import __version__ as __unstructured_version__
min_version_tuple = tuple([int(x) for x in min_version.split(".")])
# NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
# versions of unstructured like 0.4.17-dev1
_unstructured_version = __unstructured_version__.split("-")[0]
unstructured_version_tuple = tuple(
[int(x) for x in _unstructured_version.split(".")]
)
return unstructured_version_tuple >= min_version_tuple
def validate_unstructured_version(min_unstructured_version: str) -> None:
"""Raise an error if the `Unstructured` version does not exceed the
specified minimum."""
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
f"unstructured>={min_unstructured_version} is required in this loader."
)
class UnstructuredBaseLoader(BaseLoader, ABC):
"""Base Loader that uses `Unstructured`."""
def __init__(
self,
mode: str = "single", # deprecated
post_processors: Optional[List[Callable[[str], str]]] = None,
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
# `single` - elements are combined into one (default)
# `elements` - maintain individual elements
# `paged` - elements are combined by page
_valid_modes = {"single", "elements", "paged"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
if not satisfies_min_unstructured_version("0.5.4"):
if "strategy" in unstructured_kwargs:
unstructured_kwargs.pop("strategy")
self._check_if_both_mode_and_chunking_strategy_are_by_page(
mode, unstructured_kwargs
)
self.mode = mode
self.unstructured_kwargs = unstructured_kwargs
self.post_processors = post_processors or []
@abstractmethod
def _get_elements(self) -> List[Element]:
"""Get elements."""
@abstractmethod
def _get_metadata(self) -> dict[str, Any]:
"""Get file_path metadata if available."""
def _post_process_elements(self, elements: List[Element]) -> List[Element]:
"""Apply post processing functions to extracted unstructured elements.
Post processing functions are str -> str callables passed
in using the post_processors kwarg when the loader is instantiated.
"""
for element in elements:
for post_processor in self.post_processors:
element.apply(post_processor)
return elements
def lazy_load(self) -> Iterator[Document]:
"""Load file."""
elements = self._get_elements()
self._post_process_elements(elements)
if self.mode == "elements":
for element in elements:
metadata = self._get_metadata()
# NOTE(MthwRobinson) - the attribute check is for backward compatibility
# with unstructured<0.4.9. The metadata attributed was added in 0.4.9.
if hasattr(element, "metadata"):
metadata.update(element.metadata.to_dict())
if hasattr(element, "category"):
metadata["category"] = element.category
if element.to_dict().get("element_id"):
metadata["element_id"] = element.to_dict().get("element_id")
yield Document(page_content=str(element), metadata=metadata)
elif self.mode == "paged":
logger.warning(
"`mode='paged'` is deprecated in favor of the 'by_page' chunking"
" strategy. Learn more about chunking here:"
" https://docs.unstructured.io/open-source/core-functionality/chunking"
)
text_dict: dict[int, str] = {}
meta_dict: dict[int, dict[str, Any]] = {}
for element in elements:
metadata = self._get_metadata()
if hasattr(element, "metadata"):
metadata.update(element.metadata.to_dict())
page_number = metadata.get("page_number", 1)
# Check if this page_number already exists in text_dict
if page_number not in text_dict:
# If not, create new entry with initial text and metadata
text_dict[page_number] = str(element) + "\n\n"
meta_dict[page_number] = metadata
else:
# If exists, append to text and update the metadata
text_dict[page_number] += str(element) + "\n\n"
meta_dict[page_number].update(metadata)
# Convert the dict to a list of Document objects
for key in text_dict.keys():
yield Document(page_content=text_dict[key], metadata=meta_dict[key])
elif self.mode == "single":
metadata = self._get_metadata()
text = "\n\n".join([str(el) for el in elements])
yield Document(page_content=text, metadata=metadata)
else:
raise ValueError(f"mode of {self.mode} not supported.")
def _check_if_both_mode_and_chunking_strategy_are_by_page(
self, mode: str, unstructured_kwargs: dict[str, Any]
) -> None:
if (
mode == "paged"
and unstructured_kwargs.get("chunking_strategy") == "by_page"
):
raise ValueError(
"Only one of `chunking_strategy='by_page'` or `mode='paged'` may be"
" set. `chunking_strategy` is preferred."
)
@deprecated(
since="0.2.8",
removal="1.0",
alternative_import="langchain_unstructured.UnstructuredLoader",
)
class UnstructuredFileLoader(UnstructuredBaseLoader):
"""Load files using `Unstructured`.
The file loader uses the unstructured partition function and will automatically
detect the file type. You can run the loader in different modes: "single",
"elements", and "paged". The default "single" mode will return a single langchain
Document object. If you use "elements" mode, the unstructured library will split
the document into elements such as Title and NarrativeText and return those as
individual langchain Document objects. In addition to these post-processing modes
(which are specific to the LangChain Loaders), Unstructured has its own "chunking"
    parameters for post-processing elements into more useful chunks for use cases such
as Retrieval Augmented Generation (RAG). You can pass in additional unstructured
kwargs to configure different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredFileLoader
loader = UnstructuredFileLoader(
"example.pdf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://docs.unstructured.io/open-source/core-functionality/partitioning
https://docs.unstructured.io/open-source/core-functionality/chunking
"""
def __init__(
self,
file_path: Union[str, List[str], Path, List[Path]],
*,
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
self.file_path = file_path
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List[Element]:
from unstructured.partition.auto import partition
if isinstance(self.file_path, list):
elements: List[Element] = []
for file in self.file_path:
if isinstance(file, Path):
file = str(file)
elements.extend(partition(filename=file, **self.unstructured_kwargs))
return elements
else:
if isinstance(self.file_path, Path):
self.file_path = str(self.file_path)
return partition(filename=self.file_path, **self.unstructured_kwargs)
def _get_metadata(self) -> dict[str, Any]:
return {"source": self.file_path}
def get_elements_from_api(
file_path: Union[str, List[str], Path, List[Path], None] = None,
file: Union[IO[bytes], Sequence[IO[bytes]], None] = None,
api_url: str = "https://api.unstructuredapp.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
) -> List[Element]:
"""Retrieve a list of elements from the `Unstructured API`."""
if is_list := isinstance(file_path, list):
file_path = [str(path) for path in file_path]
if isinstance(file, Sequence) or is_list:
from unstructured.partition.api import partition_multiple_via_api
_doc_elements = partition_multiple_via_api(
filenames=file_path, # type: ignore
files=file, # type: ignore
api_key=api_key,
api_url=api_url,
**unstructured_kwargs,
)
elements = []
for _elements in _doc_elements:
elements.extend(_elements)
return elements
else:
from unstructured.partition.api import partition_via_api
return partition_via_api(
filename=str(file_path) if file_path is not None else None,
file=file,
api_key=api_key,
api_url=api_url,
**unstructured_kwargs,
)
@deprecated(
since="0.2.8",
removal="1.0",
alternative_import="langchain_unstructured.UnstructuredLoader",
)
class UnstructuredAPIFileLoader(UnstructuredBaseLoader):
"""Load files using `Unstructured` API.
By default, the loader makes a call to the hosted Unstructured API. If you are
    running the unstructured API locally, you can change the API URL by passing in the
url parameter when you initialize the loader. The hosted Unstructured API requires
an API key. See the links below to learn more about our API offerings and get an
API key.
You can run the loader in different modes: "single", "elements", and "paged". The
default "single" mode will return a single langchain Document object. If you use
"elements" mode, the unstructured library will split the document into elements such
as Title and NarrativeText and return those as individual langchain Document
objects. In addition to these post-processing modes (which are specific to the
LangChain Loaders), Unstructured has its own "chunking" parameters for
    post-processing elements into more useful chunks for use cases such as Retrieval
Augmented Generation (RAG). You can pass in additional unstructured kwargs to
configure different unstructured settings.
    Examples
    --------
from langchain_community.document_loaders import UnstructuredAPIFileLoader
loader = UnstructuredAPIFileLoader(
"example.pdf", mode="elements", strategy="fast", api_key="MY_API_KEY",
)
docs = loader.load()
References
----------
https://docs.unstructured.io/api-reference/api-services/sdk
https://docs.unstructured.io/api-reference/api-services/overview
https://docs.unstructured.io/open-source/core-functionality/partitioning
https://docs.unstructured.io/open-source/core-functionality/chunking
"""
def __init__(
self,
file_path: Union[str, List[str]],
*,
mode: str = "single",
url: str = "https://api.unstructuredapp.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
validate_unstructured_version(min_unstructured_version="0.10.15")
self.file_path = file_path
self.url = url
self.api_key = os.getenv("UNSTRUCTURED_API_KEY") or api_key
super().__init__(mode=mode, **unstructured_kwargs)
def _get_metadata(self) -> dict[str, Any]:
return {"source": self.file_path}
def _get_elements(self) -> List[Element]:
return get_elements_from_api(
file_path=self.file_path,
api_key=self.api_key,
api_url=self.url,
**self.unstructured_kwargs,
)
def _post_process_elements(self, elements: List[Element]) -> List[Element]:
"""Apply post processing functions to extracted unstructured elements.
Post processing functions are str -> str callables passed
in using the post_processors kwarg when the loader is instantiated.
"""
for element in elements:
for post_processor in self.post_processors:
element.apply(post_processor)
return elements
@deprecated(
since="0.2.8",
removal="1.0",
alternative_import="langchain_unstructured.UnstructuredLoader",
)
class UnstructuredFileIOLoader(UnstructuredBaseLoader):
"""Load file-like objects opened in read mode using `Unstructured`.
The file loader uses the unstructured partition function and will automatically
detect the file type. You can run the loader in different modes: "single",
"elements", and "paged". The default "single" mode will return a single langchain
Document object. If you use "elements" mode, the unstructured library will split
the document into elements such as Title and NarrativeText and return those as
individual langchain Document objects. In addition to these post-processing modes
(which are specific to the LangChain Loaders), Unstructured has its own "chunking"
    parameters for post-processing elements into more useful chunks for use cases
such as Retrieval Augmented Generation (RAG). You can pass in additional
unstructured kwargs to configure different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredFileIOLoader
with open("example.pdf", "rb") as f:
loader = UnstructuredFileIOLoader(
f, mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://docs.unstructured.io/open-source/core-functionality/partitioning
https://docs.unstructured.io/open-source/core-functionality/chunking
"""
def __init__(
self,
file: IO[bytes],
*,
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
self.file = file
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List[Element]:
from unstructured.partition.auto import partition
return partition(file=self.file, **self.unstructured_kwargs)
def _get_metadata(self) -> dict[str, Any]:
return {}
def _post_process_elements(self, elements: List[Element]) -> List[Element]:
"""Apply post processing functions to extracted unstructured elements.
Post processing functions are str -> str callables passed
in using the post_processors kwarg when the loader is instantiated.
"""
for element in elements:
for post_processor in self.post_processors:
element.apply(post_processor)
return elements
@deprecated(
since="0.2.8",
removal="1.0",
alternative_import="langchain_unstructured.UnstructuredLoader",
)
class UnstructuredAPIFileIOLoader(UnstructuredBaseLoader):
"""Send file-like objects with `unstructured-client` sdk to the Unstructured API.
By default, the loader makes a call to the hosted Unstructured API. If you are
    running the unstructured API locally, you can change the API URL by passing in the
url parameter when you initialize the loader. The hosted Unstructured API requires
an API key. See the links below to learn more about our API offerings and get an
API key.
You can run the loader in different modes: "single", "elements", and "paged". The
default "single" mode will return a single langchain Document object. If you use
"elements" mode, the unstructured library will split the document into elements
such as Title and NarrativeText and return those as individual langchain Document
objects. In addition to these post-processing modes (which are specific to the
LangChain Loaders), Unstructured has its own "chunking" parameters for
    post-processing elements into more useful chunks for use cases such as Retrieval
Augmented Generation (RAG). You can pass in additional unstructured kwargs to
configure different unstructured settings.
Examples
--------
    from langchain_community.document_loaders import UnstructuredAPIFileIOLoader
with open("example.pdf", "rb") as f:
loader = UnstructuredAPIFileIOLoader(
f, mode="elements", strategy="fast", api_key="MY_API_KEY",
)
docs = loader.load()
References
----------
https://docs.unstructured.io/api-reference/api-services/sdk
https://docs.unstructured.io/api-reference/api-services/overview
https://docs.unstructured.io/open-source/core-functionality/partitioning
https://docs.unstructured.io/open-source/core-functionality/chunking
"""
def __init__(
self,
file: Union[IO[bytes], Sequence[IO[bytes]]],
*,
mode: str = "single",
url: str = "https://api.unstructuredapp.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
if isinstance(file, Sequence):
validate_unstructured_version(min_unstructured_version="0.6.3")
validate_unstructured_version(min_unstructured_version="0.6.2")
self.file = file
self.url = url
self.api_key = os.getenv("UNSTRUCTURED_API_KEY") or api_key
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List[Element]:
if self.unstructured_kwargs.get("metadata_filename"):
return get_elements_from_api(
file=self.file,
file_path=self.unstructured_kwargs.pop("metadata_filename"),
api_key=self.api_key,
api_url=self.url,
**self.unstructured_kwargs,
)
else:
raise ValueError(
"If partitioning a file via api,"
" metadata_filename must be specified as well.",
)
def _get_metadata(self) -> dict[str, Any]:
return {}
def _post_process_elements(self, elements: List[Element]) -> List[Element]:
"""Apply post processing functions to extracted unstructured elements.
Post processing functions are str -> str callables passed
in using the post_processors kwarg when the loader is instantiated.
"""
for element in elements:
for post_processor in self.post_processors:
element.apply(post_processor)
return elements
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/obs_directory.py | # coding:utf-8
from typing import List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.obs_file import OBSFileLoader
class OBSDirectoryLoader(BaseLoader):
"""Load from `Huawei OBS directory`."""
def __init__(
self,
bucket: str,
endpoint: str,
config: Optional[dict] = None,
prefix: str = "",
):
"""Initialize the OBSDirectoryLoader with the specified settings.
Args:
bucket (str): The name of the OBS bucket to be used.
endpoint (str): The endpoint URL of your OBS bucket.
config (dict): The parameters for connecting to OBS, provided as a dictionary. The dictionary could have the following keys:
- "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "token" (str, optional): Your security token (required if using temporary credentials).
- "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored.
prefix (str, optional): The prefix to be added to the OBS key. Defaults to "".
Note:
Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials.
Example:
To create a new OBSDirectoryLoader:
```
config = {
"ak": "your-access-key",
"sk": "your-secret-key"
}
```
        directory_loader = OBSDirectoryLoader("your-bucket-name", "your-endpoint", config, "your-prefix")
""" # noqa: E501
try:
from obs import ObsClient
except ImportError:
raise ImportError(
"Could not import esdk-obs-python python package. "
"Please install it with `pip install esdk-obs-python`."
)
if not config:
config = dict()
if config.get("get_token_from_ecs"):
self.client = ObsClient(server=endpoint, security_provider_policy="ECS")
else:
self.client = ObsClient(
access_key_id=config.get("ak"),
secret_access_key=config.get("sk"),
security_token=config.get("token"),
server=endpoint,
)
self.bucket = bucket
self.prefix = prefix
def load(self) -> List[Document]:
"""Load documents."""
max_num = 1000
mark = None
docs = []
while True:
resp = self.client.listObjects(
self.bucket, prefix=self.prefix, marker=mark, max_keys=max_num
)
if resp.status < 300:
for content in resp.body.contents:
loader = OBSFileLoader(self.bucket, content.key, client=self.client)
docs.extend(loader.load())
if resp.body.is_truncated is True:
mark = resp.body.next_marker
else:
break
return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/sharepoint.py | """Loader that loads data from Sharepoint Document Library"""
from __future__ import annotations
import json
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional
import requests # type: ignore
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from pydantic import Field
from langchain_community.document_loaders.base_o365 import (
O365BaseLoader,
)
class SharePointLoader(O365BaseLoader, BaseLoader):
"""Load from `SharePoint`."""
document_library_id: str = Field(...)
""" The ID of the SharePoint document library to load data from."""
folder_path: Optional[str] = None
""" The path to the folder to load data from."""
object_ids: Optional[List[str]] = None
""" The IDs of the objects to load data from."""
folder_id: Optional[str] = None
""" The ID of the folder to load data from."""
load_auth: Optional[bool] = False
""" Whether to load authorization identities."""
token_path: Path = Path.home() / ".credentials" / "o365_token.txt"
""" The path to the token to make api calls"""
load_extended_metadata: Optional[bool] = False
""" Whether to load extended metadata. Size, Owner and full_path."""
@property
def _scopes(self) -> List[str]:
"""Return required scopes.
Returns:
List[str]: A list of required scopes.
"""
return ["sharepoint", "basic"]
def lazy_load(self) -> Iterator[Document]:
"""
Load documents lazily. Use this when working at a large scale.
Yields:
Document: A document object representing the parsed blob.
"""
try:
from O365.drive import Drive, Folder
except ImportError:
raise ImportError(
"O365 package not found, please install it with `pip install o365`"
)
drive = self._auth().storage().get_drive(self.document_library_id)
if not isinstance(drive, Drive):
raise ValueError(f"There isn't a Drive with id {self.document_library_id}.")
if self.folder_path:
target_folder = drive.get_item_by_path(self.folder_path)
if not isinstance(target_folder, Folder):
raise ValueError(f"There isn't a folder with path {self.folder_path}.")
for blob in self._load_from_folder(target_folder):
file_id = str(blob.metadata.get("id"))
if self.load_auth is True:
auth_identities = self.authorized_identities(file_id)
if self.load_extended_metadata is True:
extended_metadata = self.get_extended_metadata(file_id)
extended_metadata.update({"source_full_url": target_folder.web_url})
for parsed_blob in self._blob_parser.lazy_parse(blob):
if self.load_auth is True:
parsed_blob.metadata["authorized_identities"] = auth_identities
if self.load_extended_metadata is True:
parsed_blob.metadata.update(extended_metadata)
yield parsed_blob
if self.folder_id:
target_folder = drive.get_item(self.folder_id)
if not isinstance(target_folder, Folder):
raise ValueError(f"There isn't a folder with path {self.folder_path}.")
for blob in self._load_from_folder(target_folder):
file_id = str(blob.metadata.get("id"))
if self.load_auth is True:
auth_identities = self.authorized_identities(file_id)
if self.load_extended_metadata is True:
extended_metadata = self.get_extended_metadata(file_id)
extended_metadata.update({"source_full_url": target_folder.web_url})
for parsed_blob in self._blob_parser.lazy_parse(blob):
if self.load_auth is True:
parsed_blob.metadata["authorized_identities"] = auth_identities
if self.load_extended_metadata is True:
parsed_blob.metadata.update(extended_metadata)
yield parsed_blob
if self.object_ids:
for blob in self._load_from_object_ids(drive, self.object_ids):
file_id = str(blob.metadata.get("id"))
if self.load_auth is True:
auth_identities = self.authorized_identities(file_id)
if self.load_extended_metadata is True:
extended_metadata = self.get_extended_metadata(file_id)
for parsed_blob in self._blob_parser.lazy_parse(blob):
if self.load_auth is True:
parsed_blob.metadata["authorized_identities"] = auth_identities
if self.load_extended_metadata is True:
parsed_blob.metadata.update(extended_metadata)
yield parsed_blob
if not (self.folder_path or self.folder_id or self.object_ids):
target_folder = drive.get_root_folder()
if not isinstance(target_folder, Folder):
raise ValueError("Unable to fetch root folder")
for blob in self._load_from_folder(target_folder):
file_id = str(blob.metadata.get("id"))
if self.load_auth is True:
auth_identities = self.authorized_identities(file_id)
if self.load_extended_metadata is True:
extended_metadata = self.get_extended_metadata(file_id)
for blob_part in self._blob_parser.lazy_parse(blob):
blob_part.metadata.update(blob.metadata)
if self.load_auth is True:
blob_part.metadata["authorized_identities"] = auth_identities
if self.load_extended_metadata is True:
blob_part.metadata.update(extended_metadata)
blob_part.metadata.update(
{"source_full_url": target_folder.web_url}
)
yield blob_part
def authorized_identities(self, file_id: str) -> List:
"""
Retrieve the access identities (user/group emails) for a given file.
Args:
file_id (str): The ID of the file.
Returns:
List: A list of group names (email addresses) that have
access to the file.
"""
data = self._fetch_access_token()
access_token = data.get("access_token")
url = (
"https://graph.microsoft.com/v1.0/drives"
f"/{self.document_library_id}/items/{file_id}/permissions"
)
headers = {"Authorization": f"Bearer {access_token}"}
response = requests.request("GET", url, headers=headers)
access_list = response.json()
group_names = []
for access_data in access_list.get("value"):
if access_data.get("grantedToV2"):
site_data = (
(access_data.get("grantedToV2").get("siteUser"))
or (access_data.get("grantedToV2").get("user"))
or (access_data.get("grantedToV2").get("group"))
)
if site_data:
email = site_data.get("email")
if email:
group_names.append(email)
return group_names
def _fetch_access_token(self) -> Any:
"""
Fetch the access token from the token file.
Returns:
The access token as a dictionary.
"""
with open(self.token_path, encoding="utf-8") as f:
s = f.read()
data = json.loads(s)
return data
def get_extended_metadata(self, file_id: str) -> Dict:
"""
Retrieve extended metadata for a file in SharePoint.
As of today, the following fields are supported in the extended metadata:
- size: size of the source file.
- owner: display name of the owner of the source file.
- full_path: human-readable path of the source file.
Args:
file_id (str): The ID of the file.
Returns:
dict: A dictionary containing the extended metadata of the file,
including size, owner, and full path.
"""
data = self._fetch_access_token()
access_token = data.get("access_token")
url = (
"https://graph.microsoft.com/v1.0/drives/"
f"{self.document_library_id}/items/{file_id}"
"?$select=size,createdBy,parentReference,name"
)
headers = {"Authorization": f"Bearer {access_token}"}
response = requests.request("GET", url, headers=headers)
metadata = response.json()
staged_metadata = {
"size": metadata.get("size", 0),
"owner": metadata.get("createdBy", {})
.get("user", {})
.get("displayName", ""),
"full_path": metadata.get("parentReference", {})
.get("path", "")
.split(":")[-1]
+ "/"
+ metadata.get("name", ""),
}
return staged_metadata
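# Example usage (a minimal sketch, not part of the loader itself; the document
# library ID and folder path below are hypothetical placeholders, and O365
# authentication is assumed to be configured already):
#
#     loader = SharePointLoader(
#         document_library_id="YOUR_DOCUMENT_LIBRARY_ID",
#         folder_path="/Shared Documents/reports",
#         load_extended_metadata=True,
#     )
#     for doc in loader.lazy_load():
#         print(doc.metadata.get("source_full_url"), len(doc.page_content))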
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/word_document.py | """Loads Word documents."""
import os
import tempfile
from abc import ABC
from pathlib import Path
from typing import List, Union
from urllib.parse import urlparse
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class Docx2txtLoader(BaseLoader, ABC):
"""Load `DOCX` file using `docx2txt` and chunks at character level.
Defaults to check for local file, but if the file is a web path, it will download it
to a temporary file, and use that, then clean up the temporary file after completion
"""
def __init__(self, file_path: Union[str, Path]):
"""Initialize with file path."""
self.file_path = str(file_path)
self.original_file_path = self.file_path
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
def load(self) -> List[Document]:
"""Load given path as single page."""
try:
import docx2txt
except ImportError:
raise ImportError(
"docx2txt package not found, please install it with `pip install docx2txt`"
)
return [
Document(
page_content=docx2txt.process(self.file_path),
metadata={"source": self.original_file_path},
)
]
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
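# Example usage (a minimal sketch; the file path is a hypothetical placeholder
# and `docx2txt` must be installed with `pip install docx2txt`):
#
#     loader = Docx2txtLoader("example.docx")
#     docs = loader.load()  # a single Document containing the full text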
class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Load `Microsoft Word` file using `Unstructured`.
Works with both .docx and .doc files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(
"example.docx", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-docx
"""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC # type: ignore[arg-type]
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_doc = extension == ".doc"
if is_doc and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .doc files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/text.py | import logging
from pathlib import Path
from typing import Iterator, Optional, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.helpers import detect_file_encodings
logger = logging.getLogger(__name__)
class TextLoader(BaseLoader):
"""Load text file.
Args:
file_path: Path to the file to load.
encoding: File encoding to use. If `None`, the file will be loaded
with the default system encoding.
autodetect_encoding: Whether to try to autodetect the file encoding
if the specified encoding fails.
"""
def __init__(
self,
file_path: Union[str, Path],
encoding: Optional[str] = None,
autodetect_encoding: bool = False,
):
"""Initialize with file path."""
self.file_path = file_path
self.encoding = encoding
self.autodetect_encoding = autodetect_encoding
def lazy_load(self) -> Iterator[Document]:
"""Load from file path."""
text = ""
try:
with open(self.file_path, encoding=self.encoding) as f:
text = f.read()
except UnicodeDecodeError as e:
if self.autodetect_encoding:
detected_encodings = detect_file_encodings(self.file_path)
for encoding in detected_encodings:
logger.debug(f"Trying encoding: {encoding.encoding}")
try:
with open(self.file_path, encoding=encoding.encoding) as f:
text = f.read()
break
except UnicodeDecodeError:
continue
else:
raise RuntimeError(f"Error loading {self.file_path}") from e
except Exception as e:
raise RuntimeError(f"Error loading {self.file_path}") from e
metadata = {"source": str(self.file_path)}
yield Document(page_content=text, metadata=metadata)
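# Example usage (a minimal sketch; the file path is a hypothetical placeholder,
# and autodetect_encoding relies on the encoding-detection dependency used by
# detect_file_encodings):
#
#     loader = TextLoader("example.txt", autodetect_encoding=True)
#     docs = loader.load()  # one Document with the file contents and a "source" key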
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/org_mode.py | from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredOrgModeLoader(UnstructuredFileLoader):
"""Load `Org-Mode` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredOrgModeLoader
loader = UnstructuredOrgModeLoader(
"example.org", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-org
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the file to load.
mode: The mode to load the file from. Default is "single".
**unstructured_kwargs: Any additional keyword arguments to pass
to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.7.9")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.org import partition_org
return partition_org(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/rspace.py | import os
from typing import Any, Dict, Iterator, List, Optional, Union
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders.base import BaseLoader
class RSpaceLoader(BaseLoader):
"""Load content from RSpace notebooks, folders, documents or PDF Gallery files.
Map RSpace document <-> Langchain Document in 1-1. PDFs are imported using PyPDF.
Requirements are rspace_client (`pip install rspace_client`) and PyPDF if importing
PDF docs (`pip install pypdf`).
"""
def __init__(
self, global_id: str, api_key: Optional[str] = None, url: Optional[str] = None
):
"""api_key: RSpace API key - can also be supplied as environment variable
'RSPACE_API_KEY'
url: str
The URL of your RSpace instance - can also be supplied as environment
variable 'RSPACE_URL'
global_id: str
The global ID of the resource to load,
e.g. 'SD12344' (a single document); 'GL12345'(A PDF file in the gallery);
'NB4567' (a notebook); 'FL12244' (a folder)
"""
args: Dict[str, Optional[str]] = {
"api_key": api_key,
"url": url,
"global_id": global_id,
}
verified_args: Dict[str, str] = RSpaceLoader.validate_environment(args)
self.api_key = verified_args["api_key"]
self.url = verified_args["url"]
self.global_id: str = verified_args["global_id"]
@classmethod
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that API key and URL exist in environment."""
values["api_key"] = get_from_dict_or_env(values, "api_key", "RSPACE_API_KEY")
values["url"] = get_from_dict_or_env(values, "url", "RSPACE_URL")
if "global_id" not in values or values["global_id"] is None:
raise ValueError(
"No value supplied for global_id. Please supply an RSpace global ID"
)
return values
def _create_rspace_client(self) -> Any:
"""Create a RSpace client."""
try:
from rspace_client.eln import eln, field_content
except ImportError:
raise ImportError("You must run " "`pip install rspace_client`")
try:
eln = eln.ELNClient(self.url, self.api_key)
eln.get_status()
except Exception:
raise Exception(
f"Unable to initialize client - is url {self.url} or "
f"api key correct?"
)
return eln, field_content.FieldContent
def _get_doc(self, cli: Any, field_content: Any, d_id: Union[str, int]) -> Document:
content = ""
doc = cli.get_document(d_id)
content += f"<h2>{doc['name']}<h2/>"
for f in doc["fields"]:
content += f"{f['name']}\n"
fc = field_content(f["content"])
content += fc.get_text()
content += "\n"
return Document(
metadata={"source": f"rspace: {doc['name']}-{doc['globalId']}"},
page_content=content,
)
def _load_structured_doc(self) -> Iterator[Document]:
cli, field_content = self._create_rspace_client()
yield self._get_doc(cli, field_content, self.global_id)
def _load_folder_tree(self) -> Iterator[Document]:
cli, field_content = self._create_rspace_client()
if self.global_id:
docs_in_folder = cli.list_folder_tree(
folder_id=self.global_id[2:], typesToInclude=["document"]
)
doc_ids: List[int] = [d["id"] for d in docs_in_folder["records"]]
for doc_id in doc_ids:
yield self._get_doc(cli, field_content, doc_id)
def _load_pdf(self) -> Iterator[Document]:
cli, field_content = self._create_rspace_client()
file_info = cli.get_file_info(self.global_id)
_, ext = os.path.splitext(file_info["name"])
if ext.lower() == ".pdf":
outfile = f"{self.global_id}.pdf"
cli.download_file(self.global_id, outfile)
pdf_loader = PyPDFLoader(outfile)
for pdf in pdf_loader.lazy_load():
pdf.metadata["rspace_src"] = self.global_id
yield pdf
def lazy_load(self) -> Iterator[Document]:
if self.global_id and "GL" in self.global_id:
for d in self._load_pdf():
yield d
elif self.global_id and "SD" in self.global_id:
for d in self._load_structured_doc():
yield d
elif self.global_id and self.global_id[0:2] in ["FL", "NB"]:
for d in self._load_folder_tree():
yield d
else:
raise ValueError("Unknown global ID type")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/tensorflow_datasets.py | from typing import Callable, Dict, Iterator, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.tensorflow_datasets import TensorflowDatasets
class TensorflowDatasetLoader(BaseLoader):
"""Load from `TensorFlow Dataset`.
Attributes:
dataset_name: the name of the dataset to load
split_name: the name of the split to load.
load_max_docs: a limit to the number of loaded documents. Defaults to 100.
sample_to_document_function: a function that converts a dataset sample
into a Document
Example:
.. code-block:: python
from langchain_community.document_loaders import TensorflowDatasetLoader
def mlqaen_example_to_document(example: dict) -> Document:
return Document(
page_content=decode_to_str(example["context"]),
metadata={
"id": decode_to_str(example["id"]),
"title": decode_to_str(example["title"]),
"question": decode_to_str(example["question"]),
"answer": decode_to_str(example["answers"]["text"][0]),
},
)
tsds_client = TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="test",
load_max_docs=100,
sample_to_document_function=mlqaen_example_to_document,
)
"""
def __init__(
self,
dataset_name: str,
split_name: str,
load_max_docs: Optional[int] = 100,
sample_to_document_function: Optional[Callable[[Dict], Document]] = None,
):
"""Initialize the TensorflowDatasetLoader.
Args:
dataset_name: the name of the dataset to load
split_name: the name of the split to load.
load_max_docs: a limit to the number of loaded documents. Defaults to 100.
sample_to_document_function: a function that converts a dataset sample
into a Document.
"""
self.dataset_name: str = dataset_name
self.split_name: str = split_name
self.load_max_docs = load_max_docs
"""The maximum number of documents to load."""
self.sample_to_document_function: Optional[Callable[[Dict], Document]] = (
sample_to_document_function
)
"""Custom function that transform a dataset sample into a Document."""
self._tfds_client = TensorflowDatasets( # type: ignore[call-arg]
dataset_name=self.dataset_name,
split_name=self.split_name,
load_max_docs=self.load_max_docs, # type: ignore[arg-type]
sample_to_document_function=self.sample_to_document_function,
)
def lazy_load(self) -> Iterator[Document]:
yield from self._tfds_client.lazy_load()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/bilibili.py | import json
import re
import warnings
from typing import List, Tuple
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
# Pre-compile regular expressions for video ID extraction
BV_PATTERN = re.compile(r"BV\w+")
AV_PATTERN = re.compile(r"av[0-9]+")
class BiliBiliLoader(BaseLoader):
"""
Load transcripts and video info from BiliBili videos.
"""
def __init__(
self,
video_urls: List[str],
sessdata: str = "",
bili_jct: str = "",
buvid3: str = "",
):
"""
Initialize the loader with BiliBili video URLs and authentication cookies.
If no authentication cookies are provided, the loader cannot fetch transcripts
and will only fetch video info.
Args:
video_urls (List[str]): List of BiliBili video URLs.
sessdata (str): SESSDATA cookie value for authentication.
bili_jct (str): BILI_JCT cookie value for authentication.
buvid3 (str): BUVID3 cookie value for authentication.
"""
self.video_urls = video_urls
self.credential = None
try:
from bilibili_api import video
except ImportError:
raise ImportError(
"requests package not found, please install it with "
"`pip install bilibili-api-python`"
)
if sessdata and bili_jct and buvid3:
self.credential = video.Credential(
sessdata=sessdata, bili_jct=bili_jct, buvid3=buvid3
)
def load(self) -> List[Document]:
"""
Load and return a list of documents containing video transcripts.
Returns:
List[Document]: List of Document objects with transcripts and metadata.
"""
results = []
for url in self.video_urls:
transcript, video_info = self._get_bilibili_subs_and_info(url)
doc = Document(page_content=transcript, metadata=video_info)
results.append(doc)
return results
def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]:
"""
Retrieve video information and transcript for a given BiliBili URL.
"""
bvid = BV_PATTERN.search(url)
try:
from bilibili_api import sync, video
except ImportError:
raise ImportError(
"requests package not found, please install it with "
"`pip install bilibili-api-python`"
)
if bvid:
v = video.Video(bvid=bvid.group(), credential=self.credential)
else:
aid = AV_PATTERN.search(url)
if aid:
v = video.Video(aid=int(aid.group()[2:]), credential=self.credential)
else:
raise ValueError(f"Unable to find a valid video ID in URL: {url}")
video_info = sync(v.get_info())
video_info.update({"url": url})
# Return if no credential is provided
if not self.credential:
return "", video_info
# Fetching and processing subtitles
sub = sync(v.get_subtitle(video_info["cid"]))
sub_list = sub.get("subtitles", [])
if sub_list:
sub_url = sub_list[0].get("subtitle_url", "")
if not sub_url.startswith("http"):
sub_url = "https:" + sub_url
response = requests.get(sub_url)
if response.status_code == 200:
raw_sub_titles = json.loads(response.content).get("body", [])
raw_transcript = " ".join([c["content"] for c in raw_sub_titles])
raw_transcript_with_meta_info = (
f"Video Title: {video_info['title']}, "
f"description: {video_info['desc']}\n\n"
f"Transcript: {raw_transcript}"
)
return raw_transcript_with_meta_info, video_info
else:
warnings.warn(
f"Failed to fetch subtitles for {url}. "
f"HTTP Status Code: {response.status_code}"
)
else:
warnings.warn(
f"No subtitles found for video: {url}. Returning empty transcript."
)
# Return empty transcript if no subtitles are found
return "", video_info
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/pebblo.py | """Pebblo's safe dataloader is a wrapper for document loaders"""
import logging
import os
import uuid
from importlib.metadata import version
from typing import Any, Dict, Iterable, Iterator, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.pebblo import (
BATCH_SIZE_BYTES,
PLUGIN_VERSION,
App,
Framework,
IndexedDocument,
PebbloLoaderAPIWrapper,
generate_size_based_batches,
get_full_path,
get_loader_full_path,
get_loader_type,
get_runtime,
get_source_size,
)
logger = logging.getLogger(__name__)
class PebbloSafeLoader(BaseLoader):
"""Pebblo Safe Loader class is a wrapper around document loaders enabling the data
to be scrutinized.
"""
_discover_sent: bool = False
def __init__(
self,
langchain_loader: BaseLoader,
name: str,
owner: str = "",
description: str = "",
api_key: Optional[str] = None,
load_semantic: bool = False,
classifier_url: Optional[str] = None,
*,
classifier_location: str = "local",
anonymize_snippets: bool = False,
):
if not name or not isinstance(name, str):
raise NameError("Must specify a valid name.")
self.app_name = name
self.load_id = str(uuid.uuid4())
self.loader = langchain_loader
self.load_semantic = os.environ.get("PEBBLO_LOAD_SEMANTIC") or load_semantic
self.owner = owner
self.description = description
self.source_path = get_loader_full_path(self.loader)
self.docs: List[Document] = []
self.docs_with_id: List[IndexedDocument] = []
loader_name = str(type(self.loader)).split(".")[-1].split("'")[0]
self.source_type = get_loader_type(loader_name)
self.source_path_size = get_source_size(self.source_path)
self.batch_size = BATCH_SIZE_BYTES
self.loader_details = {
"loader": loader_name,
"source_path": self.source_path,
"source_type": self.source_type,
**(
{"source_path_size": str(self.source_path_size)}
if self.source_path_size > 0
else {}
),
}
# generate app
self.app = self._get_app_details()
# initialize Pebblo Loader API client
self.pb_client = PebbloLoaderAPIWrapper(
api_key=api_key,
classifier_location=classifier_location,
classifier_url=classifier_url,
anonymize_snippets=anonymize_snippets,
)
self.pb_client.send_loader_discover(self.app)
def load(self) -> List[Document]:
"""Load Documents.
Returns:
list: Documents fetched from load method of the wrapped `loader`.
"""
self.docs = self.loader.load()
# Classify docs in batches
self.classify_in_batches()
return self.docs
def classify_in_batches(self) -> None:
"""
Classify documents in batches.
This is to avoid API timeouts when sending large number of documents.
Batches are generated based on the page_content size.
"""
batches: List[List[Document]] = generate_size_based_batches(
self.docs, self.batch_size
)
processed_docs: List[Document] = []
total_batches = len(batches)
for i, batch in enumerate(batches):
is_last_batch: bool = i == total_batches - 1
self.docs = batch
self.docs_with_id = self._index_docs()
classified_docs = self.pb_client.classify_documents(
self.docs_with_id,
self.app,
self.loader_details,
loading_end=is_last_batch,
)
self._add_pebblo_specific_metadata(classified_docs)
if self.load_semantic:
batch_processed_docs = self._add_semantic_to_docs(classified_docs)
else:
batch_processed_docs = self._unindex_docs()
processed_docs.extend(batch_processed_docs)
self.docs = processed_docs
def lazy_load(self) -> Iterator[Document]:
"""Load documents in lazy fashion.
Raises:
NotImplementedError: raised when lazy_load is not implemented
within wrapped loader.
Yields:
list: Documents from loader's lazy loading.
"""
try:
doc_iterator = self.loader.lazy_load()
except NotImplementedError as exc:
err_str = f"{self.loader.__class__.__name__} does not implement lazy_load()"
logger.error(err_str)
raise NotImplementedError(err_str) from exc
while True:
try:
doc = next(doc_iterator)
except StopIteration:
self.docs = []
break
self.docs = list((doc,))
self.docs_with_id = self._index_docs()
classified_doc = self.pb_client.classify_documents(
self.docs_with_id, self.app, self.loader_details
)
self._add_pebblo_specific_metadata(classified_doc)
if self.load_semantic:
self.docs = self._add_semantic_to_docs(classified_doc)
else:
self.docs = self._unindex_docs()
yield self.docs[0]
@classmethod
def set_discover_sent(cls) -> None:
cls._discover_sent = True
def _get_app_details(self) -> App:
"""Fetch app details. Internal method.
Returns:
App: App details.
"""
framework, runtime = get_runtime()
app = App(
name=self.app_name,
owner=self.owner,
description=self.description,
load_id=self.load_id,
runtime=runtime,
framework=framework,
plugin_version=PLUGIN_VERSION,
client_version=Framework(
name="langchain_community",
version=version("langchain_community"),
),
)
return app
def _index_docs(self) -> List[IndexedDocument]:
"""
Indexes the documents and returns a list of IndexedDocument objects.
Returns:
List[IndexedDocument]: A list of IndexedDocument objects with unique IDs.
"""
docs_with_id = [
IndexedDocument(pb_id=str(i), **doc.dict())
for i, doc in enumerate(self.docs)
]
return docs_with_id
def _add_semantic_to_docs(self, classified_docs: Dict) -> List[Document]:
"""
Adds semantic metadata to the given list of documents.
Args:
classified_docs (Dict): A dictionary of dictionaries containing the
classified documents with pb_id as key.
Returns:
List[Document]: A list of Document objects with added semantic metadata.
"""
indexed_docs = {
doc.pb_id: Document(page_content=doc.page_content, metadata=doc.metadata)
for doc in self.docs_with_id
}
for classified_doc in classified_docs.values():
doc_id = classified_doc.get("pb_id")
if doc_id in indexed_docs:
self._add_semantic_to_doc(indexed_docs[doc_id], classified_doc)
semantic_metadata_docs = [doc for doc in indexed_docs.values()]
return semantic_metadata_docs
def _unindex_docs(self) -> List[Document]:
"""
Converts a list of IndexedDocument objects to a list of Document objects.
Returns:
List[Document]: A list of Document objects.
"""
docs = [
Document(page_content=doc.page_content, metadata=doc.metadata)
for doc in self.docs_with_id
]
return docs
def _add_semantic_to_doc(self, doc: Document, classified_doc: dict) -> Document:
"""
Adds semantic metadata to the given document in-place.
Args:
doc (Document): A Document object.
classified_doc (dict): A dictionary containing the classified document.
Returns:
Document: The Document object with added semantic metadata.
"""
doc.metadata["pebblo_semantic_entities"] = list(
classified_doc.get("entities", {}).keys()
)
doc.metadata["pebblo_semantic_topics"] = list(
classified_doc.get("topics", {}).keys()
)
return doc
def _add_pebblo_specific_metadata(self, classified_docs: dict) -> None:
"""Add Pebblo specific metadata to documents."""
for doc in self.docs_with_id:
doc_metadata = doc.metadata
if self.loader.__class__.__name__ == "SharePointLoader":
doc_metadata["full_path"] = get_full_path(
doc_metadata.get("source", self.source_path)
)
else:
doc_metadata["full_path"] = get_full_path(
doc_metadata.get(
"full_path", doc_metadata.get("source", self.source_path)
)
)
doc_metadata["pb_checksum"] = classified_docs.get(doc.pb_id, {}).get(
"pb_checksum", None
)
class PebbloTextLoader(BaseLoader):
"""
Loader for text data.
Since PebbloSafeLoader is a wrapper around document loaders, this loader is
used to load text data directly into Documents.
"""
def __init__(
self,
texts: Iterable[str],
*,
source: Optional[str] = None,
ids: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
metadatas: Optional[List[Dict[str, Any]]] = None,
) -> None:
"""
Args:
texts: Iterable of text data.
source: Source of the text data.
Optional. Defaults to None.
ids: List of unique identifiers for each text.
Optional. Defaults to None.
metadata: Metadata for all texts.
Optional. Defaults to None.
metadatas: List of metadata for each text.
Optional. Defaults to None.
"""
self.texts = texts
self.source = source
self.ids = ids
self.metadata = metadata
self.metadatas = metadatas
def lazy_load(self) -> Iterator[Document]:
"""
Lazy load text data into Documents.
Returns:
Iterator of Documents
"""
for i, text in enumerate(self.texts):
_id = None
# Copy the shared metadata so per-document updates don't mutate it
metadata = dict(self.metadata) if self.metadata else {}
if self.metadatas and i < len(self.metadatas) and self.metadatas[i]:
metadata.update(self.metadatas[i])
if self.ids and i < len(self.ids):
_id = self.ids[i]
yield Document(id=_id, page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""
Load text data into Documents.
Returns:
List of Documents
"""
documents = []
for doc in self.lazy_load():
documents.append(doc)
return documents
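# Example usage (a minimal sketch; the texts, app name and owner are hypothetical,
# and a Pebblo classifier is assumed to be reachable at its default local address):
#
#     text_loader = PebbloTextLoader(["some text to classify"], source="in-memory")
#     safe_loader = PebbloSafeLoader(text_loader, name="my-app", owner="me")
#     docs = safe_loader.load()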
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/etherscan.py | import os
import re
from typing import Iterator, List
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class EtherscanLoader(BaseLoader):
"""Load transactions from `Ethereum` mainnet.
The loader uses the Etherscan API to interact with Ethereum mainnet.
The ETHERSCAN_API_KEY environment variable must be set to use this loader.
"""
def __init__(
self,
account_address: str,
api_key: str = "docs-demo",
filter: str = "normal_transaction",
page: int = 1,
offset: int = 10,
start_block: int = 0,
end_block: int = 99999999,
sort: str = "desc",
):
self.account_address = account_address
self.api_key = os.environ.get("ETHERSCAN_API_KEY") or api_key
self.filter = filter
self.page = page
self.offset = offset
self.start_block = start_block
self.end_block = end_block
self.sort = sort
if not self.api_key:
raise ValueError("Etherscan API key not provided")
if not re.match(r"^0x[a-fA-F0-9]{40}$", self.account_address):
raise ValueError(f"Invalid contract address {self.account_address}")
if filter not in [
"normal_transaction",
"internal_transaction",
"erc20_transaction",
"eth_balance",
"erc721_transaction",
"erc1155_transaction",
]:
raise ValueError(f"Invalid filter {filter}")
def lazy_load(self) -> Iterator[Document]:
"""Lazy load Documents from table."""
result = []
if self.filter == "normal_transaction":
result = self.getNormTx()
elif self.filter == "internal_transaction":
result = self.getInternalTx()
elif self.filter == "erc20_transaction":
result = self.getERC20Tx()
elif self.filter == "eth_balance":
result = self.getEthBalance()
elif self.filter == "erc721_transaction":
result = self.getERC721Tx()
elif self.filter == "erc1155_transaction":
result = self.getERC1155Tx()
else:
raise ValueError(f"Invalid filter {filter}")
for doc in result:
yield doc
def getNormTx(self) -> List[Document]:
url = (
f"https://api.etherscan.io/api?module=account&action=txlist&address={self.account_address}"
f"&startblock={self.start_block}&endblock={self.end_block}&page={self.page}"
f"&offset={self.offset}&sort={self.sort}&apikey={self.api_key}"
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print("Error occurred while making the request:", e) # noqa: T201
items = response.json()["result"]
result = []
if len(items) == 0:
return [Document(page_content="")]
for item in items:
content = str(item)
metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
result.append(Document(page_content=content, metadata=metadata))
return result
def getEthBalance(self) -> List[Document]:
url = (
f"https://api.etherscan.io/api?module=account&action=balance"
f"&address={self.account_address}&tag=latest&apikey={self.api_key}"
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print("Error occurred while making the request:", e) # noqa: T201
return [Document(page_content=response.json()["result"])]
def getInternalTx(self) -> List[Document]:
url = (
f"https://api.etherscan.io/api?module=account&action=txlistinternal"
f"&address={self.account_address}&startblock={self.start_block}"
f"&endblock={self.end_block}&page={self.page}&offset={self.offset}"
f"&sort={self.sort}&apikey={self.api_key}"
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print("Error occurred while making the request:", e) # noqa: T201
items = response.json()["result"]
result = []
if len(items) == 0:
return [Document(page_content="")]
for item in items:
content = str(item)
metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
result.append(Document(page_content=content, metadata=metadata))
return result
def getERC20Tx(self) -> List[Document]:
url = (
f"https://api.etherscan.io/api?module=account&action=tokentx"
f"&address={self.account_address}&startblock={self.start_block}"
f"&endblock={self.end_block}&page={self.page}&offset={self.offset}"
f"&sort={self.sort}&apikey={self.api_key}"
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print("Error occurred while making the request:", e) # noqa: T201
items = response.json()["result"]
result = []
if len(items) == 0:
return [Document(page_content="")]
for item in items:
content = str(item)
metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
result.append(Document(page_content=content, metadata=metadata))
return result
def getERC721Tx(self) -> List[Document]:
url = (
f"https://api.etherscan.io/api?module=account&action=tokennfttx"
f"&address={self.account_address}&startblock={self.start_block}"
f"&endblock={self.end_block}&page={self.page}&offset={self.offset}"
f"&sort={self.sort}&apikey={self.api_key}"
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print("Error occurred while making the request:", e) # noqa: T201
items = response.json()["result"]
result = []
if len(items) == 0:
return [Document(page_content="")]
for item in items:
content = str(item)
metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
result.append(Document(page_content=content, metadata=metadata))
return result
def getERC1155Tx(self) -> List[Document]:
url = (
f"https://api.etherscan.io/api?module=account&action=token1155tx"
f"&address={self.account_address}&startblock={self.start_block}"
f"&endblock={self.end_block}&page={self.page}&offset={self.offset}"
f"&sort={self.sort}&apikey={self.api_key}"
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print("Error occurred while making the request:", e) # noqa: T201
items = response.json()["result"]
result = []
if len(items) == 0:
return [Document(page_content="")]
for item in items:
content = str(item)
metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
result.append(Document(page_content=content, metadata=metadata))
return result
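# Example usage (a minimal sketch; the account address is a hypothetical
# placeholder and ETHERSCAN_API_KEY is assumed to be set in the environment):
#
#     loader = EtherscanLoader(
#         "0x0000000000000000000000000000000000000000",
#         filter="erc20_transaction",
#     )
#     docs = list(loader.lazy_load())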
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/tencent_cos_file.py | import os
import tempfile
from typing import Any, Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class TencentCOSFileLoader(BaseLoader):
"""Load from `Tencent Cloud COS` file."""
def __init__(self, conf: Any, bucket: str, key: str):
"""Initialize with COS config, bucket and key name.
:param conf(CosConfig): COS config.
:param bucket(str): COS bucket.
:param key(str): COS file key.
"""
self.conf = conf
self.bucket = bucket
self.key = key
def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from qcloud_cos import CosS3Client
except ImportError:
raise ImportError(
"Could not import cos-python-sdk-v5 python package. "
"Please install it with `pip install cos-python-sdk-v5`."
)
# initialize a client
client = CosS3Client(self.conf)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.bucket}/{self.key}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
# Download the file to a destination
client.download_file(
Bucket=self.bucket, Key=self.key, DestFilePath=file_path
)
loader = UnstructuredFileLoader(file_path)
# UnstructuredFileLoader does not implement lazy_load yet
return iter(loader.load())
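# Example usage (a minimal sketch; region, credentials, bucket and key are
# hypothetical placeholders):
#
#     from qcloud_cos import CosConfig
#
#     conf = CosConfig(Region="ap-guangzhou", SecretId="...", SecretKey="...")
#     loader = TencentCOSFileLoader(conf, bucket="my-bucket", key="docs/a.pdf")
#     docs = list(loader.lazy_load())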
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/quip.py | import logging
import re
import xml.etree.cElementTree # OK: user-must-opt-in
from io import BytesIO
from typing import List, Optional, Sequence
from xml.etree.ElementTree import ElementTree # OK: user-must-opt-in
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
_MAXIMUM_TITLE_LENGTH = 64
class QuipLoader(BaseLoader):
"""Load `Quip` pages.
Port of https://github.com/quip/quip-api/tree/master/samples/baqup
"""
def __init__(
self,
api_url: str,
access_token: str,
request_timeout: Optional[int] = 60,
*,
allow_dangerous_xml_parsing: bool = False,
):
"""
Args:
api_url: https://platform.quip.com
access_token: token of access quip API. Please refer:
https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs
request_timeout: timeout of request, default 60s.
allow_dangerous_xml_parsing: Allow dangerous XML parsing, defaults to False
"""
try:
from quip_api.quip import QuipClient
except ImportError:
raise ImportError(
"`quip_api` package not found, please run " "`pip install quip_api`"
)
self.quip_client = QuipClient(
access_token=access_token, base_url=api_url, request_timeout=request_timeout
)
if not allow_dangerous_xml_parsing:
raise ValueError(
"The quip client uses the built-in XML parser which may cause"
"security issues when parsing XML data in some cases. "
"Please see "
"https://docs.python.org/3/library/xml.html#xml-vulnerabilities "
"For more information, set `allow_dangerous_xml_parsing` as True "
"if you are sure that your distribution of the standard library "
"is not vulnerable to XML vulnerabilities."
)
def load(
self,
folder_ids: Optional[List[str]] = None,
thread_ids: Optional[List[str]] = None,
max_docs: Optional[int] = 1000,
include_all_folders: bool = False,
include_comments: bool = False,
include_images: bool = False,
) -> List[Document]:
"""
Args:
:param folder_ids: List of specific folder IDs to load, defaults to None
:param thread_ids: List of specific thread IDs to load, defaults to None
:param max_docs: Maximum number of docs to retrieve in total, defaults 1000
:param include_all_folders: Include all folders that your access_token
can access, but doesn't include your private folder
:param include_comments: Include comments, defaults to False
:param include_images: Include images, defaults to False
"""
if not folder_ids and not thread_ids and not include_all_folders:
raise ValueError(
"Must specify at least one among `folder_ids`, `thread_ids` "
"or set `include_all`_folders as True"
)
thread_ids = thread_ids or []
if folder_ids:
for folder_id in folder_ids:
self.get_thread_ids_by_folder_id(folder_id, 0, thread_ids)
if include_all_folders:
user = self.quip_client.get_authenticated_user()
if "group_folder_ids" in user:
self.get_thread_ids_by_folder_id(
user["group_folder_ids"], 0, thread_ids
)
if "shared_folder_ids" in user:
self.get_thread_ids_by_folder_id(
user["shared_folder_ids"], 0, thread_ids
)
thread_ids = list(set(thread_ids[:max_docs]))
return self.process_threads(thread_ids, include_images, include_comments)
def get_thread_ids_by_folder_id(
self, folder_id: str, depth: int, thread_ids: List[str]
) -> None:
"""Get thread ids by folder id and update in thread_ids"""
from quip_api.quip import HTTPError, QuipError
try:
folder = self.quip_client.get_folder(folder_id)
except QuipError as e:
if e.code == 403:
logging.warning(
f"depth {depth}, Skipped over restricted folder {folder_id}, {e}"
)
else:
logging.warning(
f"depth {depth}, Skipped over folder {folder_id} "
f"due to unknown error {e.code}"
)
return
except HTTPError as e:
logging.warning(
f"depth {depth}, Skipped over folder {folder_id} "
f"due to HTTP error {e.code}"
)
return
title = folder["folder"].get("title", "Folder %s" % folder_id)
logging.info(f"depth {depth}, Processing folder {title}")
for child in folder["children"]:
if "folder_id" in child:
self.get_thread_ids_by_folder_id(
child["folder_id"], depth + 1, thread_ids
)
elif "thread_id" in child:
thread_ids.append(child["thread_id"])
def process_threads(
self, thread_ids: Sequence[str], include_images: bool, include_messages: bool
) -> List[Document]:
"""Process a list of thread into a list of documents."""
docs = []
for thread_id in thread_ids:
doc = self.process_thread(thread_id, include_images, include_messages)
if doc is not None:
docs.append(doc)
return docs
def process_thread(
self, thread_id: str, include_images: bool, include_messages: bool
) -> Optional[Document]:
thread = self.quip_client.get_thread(thread_id)
thread_id = thread["thread"]["id"]
title = thread["thread"]["title"]
link = thread["thread"]["link"]
update_ts = thread["thread"]["updated_usec"]
sanitized_title = QuipLoader._sanitize_title(title)
logger.info(
f"processing thread {thread_id} title {sanitized_title} "
f"link {link} update_ts {update_ts}"
)
if "html" in thread:
# Parse the document
try:
tree = self.quip_client.parse_document_html(thread["html"])
except xml.etree.cElementTree.ParseError as e:
logger.error(f"Error parsing thread {title} {thread_id}, skipping, {e}")
return None
metadata = {
"title": sanitized_title,
"update_ts": update_ts,
"id": thread_id,
"source": link,
}
# Download each image and replace with the new URL
text = ""
if include_images:
text = self.process_thread_images(tree)
if include_messages:
text = text + "/n" + self.process_thread_messages(thread_id)
return Document(
page_content=thread["html"] + text,
metadata=metadata,
)
return None
def process_thread_images(self, tree: ElementTree) -> str:
text = ""
try:
from PIL import Image
from pytesseract import pytesseract
except ImportError:
raise ImportError(
"`Pillow or pytesseract` package not found, "
"please run "
"`pip install Pillow` or `pip install pytesseract`"
)
for img in tree.iter("img"):
src = img.get("src")
if not src or not src.startswith("/blob"):
continue
_, _, thread_id, blob_id = src.split("/")
blob_response = self.quip_client.get_blob(thread_id, blob_id)
try:
image = Image.open(BytesIO(blob_response.read()))
text = text + "\n" + pytesseract.image_to_string(image)
except OSError as e:
logger.error(f"failed to convert image to text, {e}")
raise e
return text
def process_thread_messages(self, thread_id: str) -> str:
max_created_usec = None
messages = []
while True:
chunk = self.quip_client.get_messages(
thread_id, max_created_usec=max_created_usec, count=100
)
messages.extend(chunk)
if chunk:
max_created_usec = chunk[-1]["created_usec"] - 1
else:
break
messages.reverse()
texts = [message["text"] for message in messages]
return "\n".join(texts)
@staticmethod
def _sanitize_title(title: str) -> str:
sanitized_title = re.sub(r"\s", " ", title)
sanitized_title = re.sub(r"(?u)[^- \w.]", "", sanitized_title)
if len(sanitized_title) > _MAXIMUM_TITLE_LENGTH:
sanitized_title = sanitized_title[:_MAXIMUM_TITLE_LENGTH]
return sanitized_title
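# Example usage (a minimal sketch; the access token and folder ID are hypothetical
# placeholders, and XML parsing must be opted into explicitly):
#
#     loader = QuipLoader(
#         api_url="https://platform.quip.com",
#         access_token="YOUR_QUIP_ACCESS_TOKEN",
#         allow_dangerous_xml_parsing=True,
#     )
#     docs = loader.load(folder_ids=["FOLDER_ID"])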
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/snowflake_loader.py | from __future__ import annotations
from typing import Any, Dict, Iterator, List, Optional, Tuple
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class SnowflakeLoader(BaseLoader):
"""Load from `Snowflake` API.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
user: str,
password: str,
account: str,
warehouse: str,
role: str,
database: str,
schema: str,
parameters: Optional[Dict[str, Any]] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
):
"""Initialize Snowflake document loader.
Args:
query: The query to run in Snowflake.
user: Snowflake user.
password: Snowflake password.
account: Snowflake account.
warehouse: Snowflake warehouse.
role: Snowflake role.
database: Snowflake database
schema: Snowflake schema
parameters: Optional. Parameters to pass to the query.
page_content_columns: Optional. Columns written to Document `page_content`.
metadata_columns: Optional. Columns written to Document `metadata`.
"""
self.query = query
self.user = user
self.password = password
self.account = account
self.warehouse = warehouse
self.role = role
self.database = database
self.schema = schema
self.parameters = parameters
self.page_content_columns = (
page_content_columns if page_content_columns is not None else ["*"]
)
self.metadata_columns = metadata_columns if metadata_columns is not None else []
def _execute_query(self) -> List[Dict[str, Any]]:
try:
import snowflake.connector
except ImportError as ex:
raise ImportError(
"Could not import snowflake-connector-python package. "
"Please install it with `pip install snowflake-connector-python`."
) from ex
conn = snowflake.connector.connect(
user=self.user,
password=self.password,
account=self.account,
warehouse=self.warehouse,
role=self.role,
database=self.database,
schema=self.schema,
parameters=self.parameters,
)
try:
cur = conn.cursor()
cur.execute("USE DATABASE " + self.database)
cur.execute("USE SCHEMA " + self.schema)
cur.execute(self.query, self.parameters)
query_result = cur.fetchall()
column_names = [column[0] for column in cur.description]
query_result = [dict(zip(column_names, row)) for row in query_result]
except Exception as e:
print(f"An error occurred: {e}") # noqa: T201
query_result = []
finally:
cur.close()
return query_result
def _get_columns(
self, query_result: List[Dict[str, Any]]
) -> Tuple[List[str], List[str]]:
page_content_columns = (
self.page_content_columns if self.page_content_columns else []
)
metadata_columns = self.metadata_columns if self.metadata_columns else []
if not page_content_columns and query_result:
page_content_columns = list(query_result[0].keys())
if metadata_columns is None:
metadata_columns = []
return page_content_columns or [], metadata_columns
def lazy_load(self) -> Iterator[Document]:
query_result = self._execute_query()
if isinstance(query_result, Exception):
print(f"An error occurred during the query: {query_result}") # noqa: T201
return [] # type: ignore[return-value]
page_content_columns, metadata_columns = self._get_columns(query_result)
if "*" in page_content_columns:
page_content_columns = list(query_result[0].keys())
for row in query_result:
page_content = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in page_content_columns
)
metadata = {k: v for k, v in row.items() if k in metadata_columns}
doc = Document(page_content=page_content, metadata=metadata)
yield doc
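# Example usage (a minimal sketch; all connection values are hypothetical
# placeholders):
#
#     loader = SnowflakeLoader(
#         query="SELECT text, survey_id FROM my_table",
#         user="snowflake_user", password="...", account="my_account",
#         warehouse="my_wh", role="my_role", database="my_db", schema="my_schema",
#         metadata_columns=["SURVEY_ID"],
#     )
#     docs = loader.load()  # one Document per result row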
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/astradb.py | from __future__ import annotations
import json
import logging
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
)
from langchain_core._api.deprecation import deprecated
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.astradb import _AstraDBEnvironment
if TYPE_CHECKING:
from astrapy.db import AstraDB, AsyncAstraDB
logger = logging.getLogger(__name__)
@deprecated(
since="0.0.29",
removal="1.0",
alternative_import="langchain_astradb.AstraDBLoader",
)
class AstraDBLoader(BaseLoader):
def __init__(
self,
collection_name: str,
*,
token: Optional[str] = None,
api_endpoint: Optional[str] = None,
astra_db_client: Optional[AstraDB] = None,
async_astra_db_client: Optional[AsyncAstraDB] = None,
namespace: Optional[str] = None,
filter_criteria: Optional[Dict[str, Any]] = None,
projection: Optional[Dict[str, Any]] = None,
find_options: Optional[Dict[str, Any]] = None,
nb_prefetched: int = 1000,
extraction_function: Callable[[Dict], str] = json.dumps,
) -> None:
"""Load DataStax Astra DB documents.
Args:
collection_name: name of the Astra DB collection to use.
token: API token for Astra DB usage.
api_endpoint: full URL to the API endpoint,
such as `https://<DB-ID>-us-east1.apps.astra.datastax.com`.
astra_db_client: *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
async_astra_db_client: *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AsyncAstraDB' instance.
namespace: namespace (aka keyspace) where the
collection is. Defaults to the database's "default namespace".
filter_criteria: Criteria to filter documents.
projection: Specifies the fields to return.
find_options: Additional options for the query.
nb_prefetched: Max number of documents to pre-fetch. Defaults to 1000.
extraction_function: Function applied to collection documents to create
the `page_content` of the LangChain Document. Defaults to `json.dumps`.
"""
astra_env = _AstraDBEnvironment(
token=token,
api_endpoint=api_endpoint,
astra_db_client=astra_db_client,
async_astra_db_client=async_astra_db_client,
namespace=namespace,
)
self.astra_env = astra_env
self.collection = astra_env.astra_db.collection(collection_name)
self.collection_name = collection_name
self.filter = filter_criteria
self.projection = projection
self.find_options = find_options or {}
self.nb_prefetched = nb_prefetched
self.extraction_function = extraction_function
def lazy_load(self) -> Iterator[Document]:
for doc in self.collection.paginated_find(
filter=self.filter,
options=self.find_options,
projection=self.projection,
sort=None,
prefetched=self.nb_prefetched,
):
yield Document(
page_content=self.extraction_function(doc),
metadata={
"namespace": self.collection.astra_db.namespace,
"api_endpoint": self.collection.astra_db.base_url,
"collection": self.collection_name,
},
)
async def aload(self) -> List[Document]:
"""Load data into Document objects."""
return [doc async for doc in self.alazy_load()]
async def alazy_load(self) -> AsyncIterator[Document]:
async_collection = await self.astra_env.async_astra_db.collection(
self.collection_name
)
async for doc in async_collection.paginated_find(
filter=self.filter,
options=self.find_options,
projection=self.projection,
sort=None,
prefetched=self.nb_prefetched,
):
yield Document(
page_content=self.extraction_function(doc),
metadata={
"namespace": async_collection.astra_db.namespace,
"api_endpoint": async_collection.astra_db.base_url,
"collection": self.collection_name,
},
)
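# Example usage (a minimal sketch; this loader is deprecated in favor of
# langchain_astradb.AstraDBLoader, and the token and endpoint are hypothetical):
#
#     loader = AstraDBLoader(
#         collection_name="my_collection",
#         token="AstraCS:...",
#         api_endpoint="https://<DB-ID>-us-east1.apps.astra.datastax.com",
#     )
#     docs = loader.load()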
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/browserless.py | from typing import Iterator, List, Union
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class BrowserlessLoader(BaseLoader):
"""Load webpages with `Browserless` /content endpoint."""
def __init__(
self, api_token: str, urls: Union[str, List[str]], text_content: bool = True
):
"""Initialize with API token and the URLs to scrape"""
self.api_token = api_token
"""Browserless API token."""
self.urls = urls
"""List of URLs to scrape."""
self.text_content = text_content
def lazy_load(self) -> Iterator[Document]:
"""Lazy load Documents from URLs."""
for url in self.urls:
if self.text_content:
response = requests.post(
"https://chrome.browserless.io/scrape",
params={
"token": self.api_token,
},
json={
"url": url,
"elements": [
{
"selector": "body",
}
],
},
)
yield Document(
page_content=response.json()["data"][0]["results"][0]["text"],
metadata={
"source": url,
},
)
else:
response = requests.post(
"https://chrome.browserless.io/content",
params={
"token": self.api_token,
},
json={
"url": url,
},
)
yield Document(
page_content=response.text,
metadata={
"source": url,
},
)
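# Example usage (a minimal sketch; the API token and URL are hypothetical
# placeholders):
#
#     loader = BrowserlessLoader(
#         api_token="YOUR_BROWSERLESS_API_TOKEN",
#         urls=["https://example.com"],
#         text_content=True,
#     )
#     docs = loader.load()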
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/merge.py | from typing import AsyncIterator, Iterator, List
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class MergedDataLoader(BaseLoader):
"""Merge documents from a list of loaders"""
def __init__(self, loaders: List):
"""Initialize with a list of loaders"""
self.loaders = loaders
def lazy_load(self) -> Iterator[Document]:
"""Lazy load docs from each individual loader."""
for loader in self.loaders:
# Check if lazy_load is implemented
try:
data = loader.lazy_load()
except NotImplementedError:
data = loader.load()
for document in data:
yield document
async def alazy_load(self) -> AsyncIterator[Document]:
"""Lazy load docs from each individual loader."""
for loader in self.loaders:
async for document in loader.alazy_load():
yield document
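# Example usage (a minimal sketch; the wrapped loaders and file paths are
# hypothetical):
#
#     from langchain_community.document_loaders.text import TextLoader
#
#     loader = MergedDataLoader(loaders=[TextLoader("a.txt"), TextLoader("b.txt")])
#     docs = loader.load()  # documents from both loaders, in order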
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/tomarkdown.py | from __future__ import annotations
from typing import Iterator
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class ToMarkdownLoader(BaseLoader):
"""Load `HTML` using `2markdown API`."""
def __init__(self, url: str, api_key: str):
"""Initialize with url and api key."""
self.url = url
self.api_key = api_key
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load the file."""
response = requests.post(
"https://api.2markdown.com/v1/url2md",
headers={"X-Api-Key": self.api_key},
json={"url": self.url},
)
text = response.json()["article"]
metadata = {"source": self.url}
yield Document(page_content=text, metadata=metadata)
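# Example usage (a minimal sketch; the URL and API key are hypothetical
# placeholders):
#
#     loader = ToMarkdownLoader(url="https://example.com", api_key="YOUR_2MARKDOWN_KEY")
#     docs = loader.load()  # one Document with the page converted to Markdown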
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/tencent_cos_directory.py | from typing import Any, Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.tencent_cos_file import TencentCOSFileLoader
class TencentCOSDirectoryLoader(BaseLoader):
"""Load from `Tencent Cloud COS` directory."""
def __init__(self, conf: Any, bucket: str, prefix: str = ""):
"""Initialize with COS config, bucket and prefix.
:param conf(CosConfig): COS config.
:param bucket(str): COS bucket.
:param prefix(str): prefix.
"""
self.conf = conf
self.bucket = bucket
self.prefix = prefix
def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from qcloud_cos import CosS3Client
except ImportError:
raise ImportError(
"Could not import cos-python-sdk-v5 python package. "
"Please install it with `pip install cos-python-sdk-v5`."
)
client = CosS3Client(self.conf)
contents = []
marker = ""
while True:
response = client.list_objects(
Bucket=self.bucket, Prefix=self.prefix, Marker=marker, MaxKeys=1000
)
if "Contents" in response:
contents.extend(response["Contents"])
if response["IsTruncated"] == "false":
break
marker = response["NextMarker"]
for content in contents:
if content["Key"].endswith("/"):
continue
loader = TencentCOSFileLoader(self.conf, self.bucket, content["Key"])
yield loader.load()[0]
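
# A minimal usage sketch (not part of the original module); region, keys,
# bucket, and prefix are placeholders. CosConfig is the standard config
# object from the cos-python-sdk-v5 package:
if __name__ == "__main__":
    from qcloud_cos import CosConfig

    conf = CosConfig(
        Region="ap-guangzhou", SecretId="SECRET_ID", SecretKey="SECRET_KEY"
    )
    loader = TencentCOSDirectoryLoader(conf, bucket="my-bucket", prefix="docs/")
    for doc in loader.lazy_load():
        print(doc.metadata)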
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/diffbot.py | import logging
from typing import Any, List
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class DiffbotLoader(BaseLoader):
"""Load `Diffbot` json file."""
def __init__(
self, api_token: str, urls: List[str], continue_on_failure: bool = True
):
"""Initialize with API token, ids, and key.
Args:
api_token: Diffbot API token.
urls: List of URLs to load.
continue_on_failure: Whether to continue loading other URLs if one fails.
Defaults to True.
"""
self.api_token = api_token
self.urls = urls
self.continue_on_failure = continue_on_failure
def _diffbot_api_url(self, diffbot_api: str) -> str:
return f"https://api.diffbot.com/v3/{diffbot_api}"
def _get_diffbot_data(self, url: str) -> Any:
"""Get Diffbot file from Diffbot REST API."""
# TODO: Add support for other Diffbot APIs
diffbot_url = self._diffbot_api_url("article")
params = {
"token": self.api_token,
"url": url,
}
response = requests.get(diffbot_url, params=params, timeout=10)
# TODO: handle non-ok errors
return response.json() if response.ok else {}
def load(self) -> List[Document]:
"""Extract text from Diffbot on all the URLs and return Documents"""
docs: List[Document] = list()
for url in self.urls:
try:
data = self._get_diffbot_data(url)
text = data["objects"][0]["text"] if "objects" in data else ""
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
return docs
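
# A minimal usage sketch (not part of the original module); the token is a
# placeholder for a real Diffbot API token:
if __name__ == "__main__":
    loader = DiffbotLoader(
        api_token="DIFFBOT_API_TOKEN",
        urls=["https://www.example.com/some-article"],
    )
    docs = loader.load()  # one Document per URL, empty text on API failure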
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/notebook.py | """Loads .ipynb notebook files."""
import json
from pathlib import Path
from typing import Any, List, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
def concatenate_cells(
cell: dict, include_outputs: bool, max_output_length: int, traceback: bool
) -> str:
"""Combine cells information in a readable format ready to be used.
Args:
cell: A dictionary
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maximum length of the output to be displayed.
traceback: Whether to return a traceback of the error.
Returns:
A string with the cell information.
"""
cell_type = cell["cell_type"]
source = cell["source"]
if include_outputs:
try:
output = cell["outputs"]
        except KeyError:
            # Bind `output` so the checks below never see an unbound name when
            # a cell has no "outputs" key.
            output = []
if include_outputs and cell_type == "code" and output:
if "ename" in output[0].keys():
error_name = output[0]["ename"]
error_value = output[0]["evalue"]
if traceback:
traceback = output[0]["traceback"]
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f" with description '{error_value}'\n"
f"and traceback '{traceback}'\n\n"
)
else:
return (
f"'{cell_type}' cell: '{source}'\n, gives error '{error_name}',"
f"with description '{error_value}'\n\n"
)
elif output[0]["output_type"] == "stream":
output = output[0]["text"]
min_output = min(max_output_length, len(output))
return (
f"'{cell_type}' cell: '{source}'\n with "
f"output: '{output[:min_output]}'\n\n"
)
else:
return f"'{cell_type}' cell: '{source}'\n\n"
return ""
def remove_newlines(x: Any) -> Any:
"""Recursively remove newlines, no matter the data structure they are stored in."""
if isinstance(x, str):
return x.replace("\n", "")
elif isinstance(x, list):
return [remove_newlines(elem) for elem in x]
elif isinstance(x, dict):
return {k: remove_newlines(v) for (k, v) in x.items()}
else:
return x
class NotebookLoader(BaseLoader):
"""Load `Jupyter notebook` (.ipynb) files."""
def __init__(
self,
path: Union[str, Path],
include_outputs: bool = False,
max_output_length: int = 10,
remove_newline: bool = False,
traceback: bool = False,
):
"""Initialize with a path.
Args:
path: The path to load the notebook from.
include_outputs: Whether to include the outputs of the cell.
Defaults to False.
max_output_length: Maximum length of the output to be displayed.
Defaults to 10.
remove_newline: Whether to remove newlines from the notebook.
Defaults to False.
traceback: Whether to return a traceback of the error.
Defaults to False.
"""
self.file_path = path
self.include_outputs = include_outputs
self.max_output_length = max_output_length
self.remove_newline = remove_newline
self.traceback = traceback
def load(
self,
) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
filtered_data = [
{k: v for (k, v) in cell.items() if k in ["cell_type", "source", "outputs"]}
for cell in d["cells"]
]
if self.remove_newline:
filtered_data = list(map(remove_newlines, filtered_data))
text = "".join(
list(
map(
lambda x: concatenate_cells(
x, self.include_outputs, self.max_output_length, self.traceback
),
filtered_data,
)
)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
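
# A minimal usage sketch (not part of the original module); the notebook path
# is a hypothetical placeholder:
if __name__ == "__main__":
    loader = NotebookLoader(
        "analysis.ipynb",
        include_outputs=True,
        max_output_length=20,
        remove_newline=True,
    )
    docs = loader.load()  # a single Document covering all cells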
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/mhtml.py | import email
import logging
from pathlib import Path
from typing import Dict, Iterator, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class MHTMLLoader(BaseLoader):
"""Parse `MHTML` files with `BeautifulSoup`."""
def __init__(
self,
file_path: Union[str, Path],
open_encoding: Union[str, None] = None,
bs_kwargs: Union[dict, None] = None,
get_text_separator: str = "",
) -> None:
"""initialize with path, and optionally, file encoding to use, and any kwargs
to pass to the BeautifulSoup object.
Args:
file_path: Path to file to load.
open_encoding: The encoding to use when opening the file.
bs_kwargs: Any kwargs to pass to the BeautifulSoup object.
get_text_separator: The separator to use when getting the text
from the soup.
"""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.file_path = file_path
self.open_encoding = open_encoding
if bs_kwargs is None:
bs_kwargs = {"features": "lxml"}
self.bs_kwargs = bs_kwargs
self.get_text_separator = get_text_separator
def lazy_load(self) -> Iterator[Document]:
"""Load MHTML document into document objects."""
from bs4 import BeautifulSoup
with open(self.file_path, "r", encoding=self.open_encoding) as f:
message = email.message_from_string(f.read())
parts = message.get_payload()
if not isinstance(parts, list):
parts = [message]
for part in parts:
if part.get_content_type() == "text/html": # type: ignore[union-attr]
html = part.get_payload(decode=True).decode() # type: ignore[union-attr]
soup = BeautifulSoup(html, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": str(self.file_path),
"title": title,
}
yield Document(page_content=text, metadata=metadata)
return
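
# A minimal usage sketch (not part of the original module); the file path is
# a hypothetical placeholder:
if __name__ == "__main__":
    loader = MHTMLLoader("saved_page.mhtml")
    for doc in loader.lazy_load():
        print(doc.metadata["title"], len(doc.page_content))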
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/spider.py | from typing import Iterator, Literal, Optional
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_core.utils import get_from_env
class SpiderLoader(BaseLoader):
"""Load web pages as Documents using Spider AI.
Must have the Python package `spider-client` installed and a Spider API key.
See https://spider.cloud for more.
"""
def __init__(
self,
url: str,
*,
api_key: Optional[str] = None,
mode: Literal["scrape", "crawl"] = "scrape",
params: Optional[dict] = None,
):
"""Initialize with API key and URL.
Args:
url: The URL to be processed.
api_key: The Spider API key. If not specified, will be read from env
var `SPIDER_API_KEY`.
mode: The mode to run the loader in. Default is "scrape".
Options include "scrape" (single page) and "crawl" (with deeper
crawling following subpages).
params: Additional parameters for the Spider API.
"""
if params is None:
params = {
"return_format": "markdown",
"metadata": True,
} # Using the metadata param slightly slows down the output
try:
from spider import Spider
except ImportError:
raise ImportError(
"`spider` package not found, please run `pip install spider-client`"
)
if mode not in ("scrape", "crawl"):
raise ValueError(
f"Unrecognized mode '{mode}'. Expected one of 'scrape', 'crawl'."
)
# Use the environment variable if the API key isn't provided
api_key = api_key or get_from_env("api_key", "SPIDER_API_KEY")
self.spider = Spider(api_key=api_key)
self.url = url
self.mode = mode
self.params = params
def lazy_load(self) -> Iterator[Document]:
"""Load documents based on the specified mode."""
spider_docs = []
if self.mode == "scrape":
# Scrape a single page
response = self.spider.scrape_url(self.url, params=self.params)
if response:
spider_docs.append(response)
elif self.mode == "crawl":
# Crawl multiple pages
response = self.spider.crawl_url(self.url, params=self.params)
if response:
spider_docs.extend(response)
for doc in spider_docs:
if self.mode == "scrape":
# Ensure page_content is also not None
page_content = doc[0].get("content", "")
# Ensure metadata is also not None
metadata = doc[0].get("metadata", {})
if page_content is not None:
yield Document(page_content=page_content, metadata=metadata)
if self.mode == "crawl":
# Ensure page_content is also not None
page_content = doc.get("content", "")
# Ensure metadata is also not None
metadata = doc.get("metadata", {})
if page_content is not None:
yield Document(
page_content=page_content,
metadata=metadata,
)
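
# A minimal usage sketch (not part of the original module); the API key is a
# placeholder (it is read from the SPIDER_API_KEY env var if omitted):
if __name__ == "__main__":
    loader = SpiderLoader(
        url="https://spider.cloud",
        api_key="SPIDER_API_KEY_VALUE",
        mode="scrape",
    )
    docs = list(loader.lazy_load())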
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/cube_semantic.py | import json
import logging
import time
from typing import Iterator, List
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class CubeSemanticLoader(BaseLoader):
"""Load `Cube semantic layer` metadata.
Args:
cube_api_url: REST API endpoint.
Use the REST API of your Cube's deployment.
Please find out more information here:
https://cube.dev/docs/http-api/rest#configuration-base-path
cube_api_token: Cube API token.
Authentication tokens are generated based on your Cube's API secret.
Please find out more information here:
https://cube.dev/docs/security#generating-json-web-tokens-jwt
load_dimension_values: Whether to load dimension values for every string
dimension or not.
dimension_values_limit: Maximum number of dimension values to load.
dimension_values_max_retries: Maximum number of retries to load dimension
values.
dimension_values_retry_delay: Delay between retries to load dimension values.
"""
def __init__(
self,
cube_api_url: str,
cube_api_token: str,
load_dimension_values: bool = True,
dimension_values_limit: int = 10_000,
dimension_values_max_retries: int = 10,
dimension_values_retry_delay: int = 3,
):
self.cube_api_url = cube_api_url
self.cube_api_token = cube_api_token
self.load_dimension_values = load_dimension_values
self.dimension_values_limit = dimension_values_limit
self.dimension_values_max_retries = dimension_values_max_retries
self.dimension_values_retry_delay = dimension_values_retry_delay
def _get_dimension_values(self, dimension_name: str) -> List[str]:
"""Makes a call to Cube's REST API load endpoint to retrieve
values for dimensions.
        These values can be used to achieve more accurate filtering.
"""
logger.info("Loading dimension values for: {dimension_name}...")
headers = {
"Content-Type": "application/json",
"Authorization": self.cube_api_token,
}
query = {
"query": {
"dimensions": [dimension_name],
"limit": self.dimension_values_limit,
}
}
retries = 0
while retries < self.dimension_values_max_retries:
response = requests.request(
"POST",
f"{self.cube_api_url}/load",
headers=headers,
data=json.dumps(query),
)
if response.status_code == 200:
response_data = response.json()
if (
"error" in response_data
and response_data["error"] == "Continue wait"
):
logger.info("Retrying...")
retries += 1
time.sleep(self.dimension_values_retry_delay)
continue
else:
dimension_values = [
item[dimension_name] for item in response_data["data"]
]
return dimension_values
else:
logger.error("Request failed with status code:", response.status_code)
break
if retries == self.dimension_values_max_retries:
logger.info("Maximum retries reached.")
return []
def lazy_load(self) -> Iterator[Document]:
"""Makes a call to Cube's REST API metadata endpoint.
Returns:
A list of documents with attributes:
- page_content=column_title + column_description
- metadata
- table_name
- column_name
- column_data_type
- column_member_type
- column_title
- column_description
- column_values
- cube_data_obj_type
"""
headers = {
"Content-Type": "application/json",
"Authorization": self.cube_api_token,
}
logger.info(f"Loading metadata from {self.cube_api_url}...")
response = requests.get(f"{self.cube_api_url}/meta", headers=headers)
response.raise_for_status()
raw_meta_json = response.json()
cube_data_objects = raw_meta_json.get("cubes", [])
logger.info(f"Found {len(cube_data_objects)} cube data objects in metadata.")
if not cube_data_objects:
raise ValueError("No cubes found in metadata.")
for cube_data_obj in cube_data_objects:
cube_data_obj_name = cube_data_obj.get("name")
cube_data_obj_type = cube_data_obj.get("type")
cube_data_obj_is_public = cube_data_obj.get("public")
measures = cube_data_obj.get("measures", [])
dimensions = cube_data_obj.get("dimensions", [])
logger.info(f"Processing {cube_data_obj_name}...")
if not cube_data_obj_is_public:
logger.info(f"Skipping {cube_data_obj_name} because it is not public.")
continue
for item in measures + dimensions:
column_member_type = "measure" if item in measures else "dimension"
dimension_values = []
item_name = str(item.get("name"))
item_type = str(item.get("type"))
if (
self.load_dimension_values
and column_member_type == "dimension"
and item_type == "string"
):
dimension_values = self._get_dimension_values(item_name)
metadata = dict(
table_name=str(cube_data_obj_name),
column_name=item_name,
column_data_type=item_type,
column_title=str(item.get("title")),
column_description=str(item.get("description")),
column_member_type=column_member_type,
column_values=dimension_values,
cube_data_obj_type=cube_data_obj_type,
)
page_content = f"{str(item.get('title'))}, "
page_content += f"{str(item.get('description'))}"
yield Document(page_content=page_content, metadata=metadata)
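
# A minimal usage sketch (not part of the original module); the URL and token
# are placeholders for a real Cube deployment:
if __name__ == "__main__":
    loader = CubeSemanticLoader(
        cube_api_url="https://example.cubecloud.dev/cubejs-api/v1",
        cube_api_token="CUBE_API_TOKEN",
        load_dimension_values=False,
    )
    for doc in loader.lazy_load():
        print(doc.metadata["table_name"], doc.metadata["column_name"])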
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/onenote.py | """Loads data from OneNote Notebooks"""
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional
import requests
from langchain_core.documents import Document
from pydantic import (
BaseModel,
Field,
FilePath,
SecretStr,
model_validator,
)
from pydantic_settings import BaseSettings, SettingsConfigDict
from langchain_community.document_loaders.base import BaseLoader
class _OneNoteGraphSettings(BaseSettings):
client_id: str = Field(...)
client_secret: SecretStr = Field(...)
model_config = SettingsConfigDict(
case_sensitive=False,
populate_by_name=True,
env_file=".env",
env_prefix="MS_GRAPH_",
extra="ignore",
)
class OneNoteLoader(BaseLoader, BaseModel):
"""Load pages from OneNote notebooks."""
settings: _OneNoteGraphSettings = Field(default_factory=_OneNoteGraphSettings) # type: ignore[arg-type]
"""Settings for the Microsoft Graph API client."""
auth_with_token: bool = False
"""Whether to authenticate with a token or not. Defaults to False."""
access_token: str = ""
"""Personal access token"""
onenote_api_base_url: str = "https://graph.microsoft.com/v1.0/me/onenote"
"""URL of Microsoft Graph API for OneNote"""
authority_url: str = "https://login.microsoftonline.com/consumers/"
"""A URL that identifies a token authority"""
token_path: FilePath = Path.home() / ".credentials" / "onenote_graph_token.txt"
"""Path to the file where the access token is stored"""
notebook_name: Optional[str] = None
"""Filter on notebook name"""
section_name: Optional[str] = None
"""Filter on section name"""
page_title: Optional[str] = None
"""Filter on section name"""
object_ids: Optional[List[str]] = None
""" The IDs of the objects to load data from."""
@model_validator(mode="before")
@classmethod
def init(cls, values: Dict) -> Any:
"""Initialize the class."""
if "settings" in values and isinstance(values["settings"], dict):
values["settings"] = _OneNoteGraphSettings(**values["settings"])
return values
def lazy_load(self) -> Iterator[Document]:
"""
Get pages from OneNote notebooks.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- title
"""
self._auth()
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install bs4`"
)
if self.object_ids is not None:
for object_id in self.object_ids:
page_content_html = self._get_page_content(object_id)
soup = BeautifulSoup(page_content_html, "html.parser")
page_title = ""
title_tag = soup.title
if title_tag:
page_title = title_tag.get_text(strip=True)
page_content = soup.get_text(separator="\n", strip=True)
yield Document(
page_content=page_content, metadata={"title": page_title}
)
else:
request_url = self._url
while request_url != "":
response = requests.get(request_url, headers=self._headers, timeout=10)
response.raise_for_status()
pages = response.json()
for page in pages["value"]:
page_id = page["id"]
page_content_html = self._get_page_content(page_id)
soup = BeautifulSoup(page_content_html, "html.parser")
page_title = ""
title_tag = soup.title
                    if title_tag:
                        page_title = title_tag.get_text(strip=True)
                    page_content = soup.get_text(separator="\n", strip=True)
yield Document(
page_content=page_content, metadata={"title": page_title}
)
if "@odata.nextLink" in pages:
request_url = pages["@odata.nextLink"]
else:
request_url = ""
def _get_page_content(self, page_id: str) -> str:
"""Get page content from OneNote API"""
request_url = self.onenote_api_base_url + f"/pages/{page_id}/content"
response = requests.get(request_url, headers=self._headers, timeout=10)
response.raise_for_status()
return response.text
@property
def _headers(self) -> Dict[str, str]:
"""Return headers for requests to OneNote API"""
return {
"Authorization": f"Bearer {self.access_token}",
}
@property
def _scopes(self) -> List[str]:
"""Return required scopes."""
return ["Notes.Read"]
def _auth(self) -> None:
"""Authenticate with Microsoft Graph API"""
if self.access_token != "":
return
if self.auth_with_token:
with self.token_path.open("r") as token_file:
self.access_token = token_file.read()
else:
try:
from msal import ConfidentialClientApplication
except ImportError as e:
raise ImportError(
"MSAL package not found, please install it with `pip install msal`"
) from e
client_instance = ConfidentialClientApplication(
client_id=self.settings.client_id,
client_credential=self.settings.client_secret.get_secret_value(),
authority=self.authority_url,
)
authorization_request_url = client_instance.get_authorization_request_url(
self._scopes
)
print("Visit the following url to give consent:") # noqa: T201
print(authorization_request_url) # noqa: T201
authorization_url = input("Paste the authenticated url here:\n")
authorization_code = authorization_url.split("code=")[1].split("&")[0]
access_token_json = client_instance.acquire_token_by_authorization_code(
code=authorization_code, scopes=self._scopes
)
self.access_token = access_token_json["access_token"]
try:
if not self.token_path.parent.exists():
self.token_path.parent.mkdir(parents=True)
except Exception as e:
raise Exception(
f"Could not create the folder {self.token_path.parent} "
+ "to store the access token."
) from e
with self.token_path.open("w") as token_file:
token_file.write(self.access_token)
@property
def _url(self) -> str:
"""Create URL for getting page ids from the OneNoteApi API."""
query_params_list = []
filter_list = []
expand_list = []
query_params_list.append("$select=id")
if self.notebook_name is not None:
filter_list.append(
"parentNotebook/displayName%20eq%20"
+ f"'{self.notebook_name.replace(' ', '%20')}'"
)
expand_list.append("parentNotebook")
if self.section_name is not None:
filter_list.append(
"parentSection/displayName%20eq%20"
+ f"'{self.section_name.replace(' ', '%20')}'"
)
expand_list.append("parentSection")
if self.page_title is not None:
filter_list.append(
"title%20eq%20" + f"'{self.page_title.replace(' ', '%20')}'"
)
if len(expand_list) > 0:
query_params_list.append("$expand=" + ",".join(expand_list))
if len(filter_list) > 0:
query_params_list.append("$filter=" + "%20and%20".join(filter_list))
query_params = "&".join(query_params_list)
if query_params != "":
query_params = "?" + query_params
return f"{self.onenote_api_base_url}/pages{query_params}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/image.py | from typing import List
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class UnstructuredImageLoader(UnstructuredFileLoader):
"""Load `PNG` and `JPG` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredImageLoader
loader = UnstructuredImageLoader(
"example.png", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-image
"""
def _get_elements(self) -> List:
from unstructured.partition.image import partition_image
return partition_image(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/web_base.py | """Web base loader class."""
import asyncio
import logging
import warnings
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union
import aiohttp
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utils.user_agent import get_user_agent
logger = logging.getLogger(__name__)
default_header_template = {
"User-Agent": get_user_agent(),
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
";q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Referer": "https://www.google.com/",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
}
def _build_metadata(soup: Any, url: str) -> dict:
"""Build metadata from BeautifulSoup output."""
metadata = {"source": url}
if title := soup.find("title"):
metadata["title"] = title.get_text()
if description := soup.find("meta", attrs={"name": "description"}):
metadata["description"] = description.get("content", "No description found.")
if html := soup.find("html"):
metadata["language"] = html.get("lang", "No language found.")
return metadata
class WebBaseLoader(BaseLoader):
"""
WebBaseLoader document loader integration
Setup:
Install ``langchain_community``.
.. code-block:: bash
pip install -U langchain_community
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader(
web_path = "https://www.espn.com/"
# header_template = None,
# verify_ssl = True,
# proxies = None,
# continue_on_failure = False,
# autoset_encoding = True,
# encoding = None,
# web_paths = (),
# requests_per_second = 2,
# default_parser = "html.parser",
# requests_kwargs = None,
# raise_for_status = False,
# bs_get_text_kwargs = None,
# bs_kwargs = None,
# session = None,
# show_progress = True,
)
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
ESPN - Serving Sports Fans. Anytime. Anywhere.
{'source': 'https://www.espn.com/', 'title': 'ESPN - Serving Sports Fans. Anytime. Anywhere.', 'description': 'Visit ESPN for live scores, highlights and sports news. Stream exclusive games on ESPN+ and play fantasy sports.', 'language': 'en'}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
ESPN - Serving Sports Fans. Anytime. Anywhere.
{'source': 'https://www.espn.com/', 'title': 'ESPN - Serving Sports Fans. Anytime. Anywhere.', 'description': 'Visit ESPN for live scores, highlights and sports news. Stream exclusive games on ESPN+ and play fantasy sports.', 'language': 'en'}
""" # noqa: E501
def __init__(
self,
web_path: Union[str, Sequence[str]] = "",
header_template: Optional[dict] = None,
verify_ssl: bool = True,
proxies: Optional[dict] = None,
continue_on_failure: bool = False,
autoset_encoding: bool = True,
encoding: Optional[str] = None,
web_paths: Sequence[str] = (),
requests_per_second: int = 2,
default_parser: str = "html.parser",
requests_kwargs: Optional[Dict[str, Any]] = None,
raise_for_status: bool = False,
bs_get_text_kwargs: Optional[Dict[str, Any]] = None,
bs_kwargs: Optional[Dict[str, Any]] = None,
session: Any = None,
*,
show_progress: bool = True,
) -> None:
"""Initialize loader.
Args:
web_paths: Web paths to load from.
requests_per_second: Max number of concurrent requests to make.
default_parser: Default parser to use for BeautifulSoup.
requests_kwargs: kwargs for requests
raise_for_status: Raise an exception if http status code denotes an error.
            bs_get_text_kwargs: kwargs for beautifulsoup4 get_text
            bs_kwargs: kwargs for beautifulsoup4 web page parsing
show_progress: Show progress bar when loading pages.
"""
# web_path kept for backwards-compatibility.
if web_path and web_paths:
raise ValueError(
"Received web_path and web_paths. Only one can be specified. "
"web_path is deprecated, web_paths should be used."
)
if web_paths:
self.web_paths = list(web_paths)
elif isinstance(web_path, str):
self.web_paths = [web_path]
elif isinstance(web_path, Sequence):
self.web_paths = list(web_path)
else:
raise TypeError(
f"web_path must be str or Sequence[str] got ({type(web_path)}) or"
f" web_paths must be Sequence[str] got ({type(web_paths)})"
)
self.requests_per_second = requests_per_second
self.default_parser = default_parser
self.requests_kwargs = requests_kwargs or {}
self.raise_for_status = raise_for_status
self.show_progress = show_progress
self.bs_get_text_kwargs = bs_get_text_kwargs or {}
self.bs_kwargs = bs_kwargs or {}
if session:
self.session = session
else:
session = requests.Session()
header_template = header_template or default_header_template.copy()
if not header_template.get("User-Agent"):
try:
from fake_useragent import UserAgent
header_template["User-Agent"] = UserAgent().random
except ImportError:
logger.info(
"fake_useragent not found, using default user agent."
"To get a realistic header for requests, "
"`pip install fake_useragent`."
)
session.headers = dict(header_template)
session.verify = verify_ssl
if proxies:
session.proxies.update(proxies)
self.session = session
self.continue_on_failure = continue_on_failure
self.autoset_encoding = autoset_encoding
self.encoding = encoding
@property
def web_path(self) -> str:
if len(self.web_paths) > 1:
raise ValueError("Multiple webpaths found.")
return self.web_paths[0]
async def _fetch(
self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
) -> str:
async with aiohttp.ClientSession() as session:
for i in range(retries):
try:
kwargs: Dict = dict(
headers=self.session.headers,
cookies=self.session.cookies.get_dict(),
)
if not self.session.verify:
kwargs["ssl"] = False
async with session.get(url, **kwargs) as response:
if self.raise_for_status:
response.raise_for_status()
return await response.text()
except aiohttp.ClientConnectionError as e:
if i == retries - 1:
raise
else:
logger.warning(
f"Error fetching {url} with attempt "
f"{i + 1}/{retries}: {e}. Retrying..."
)
await asyncio.sleep(cooldown * backoff**i)
raise ValueError("retry count exceeded")
async def _fetch_with_rate_limit(
self, url: str, semaphore: asyncio.Semaphore
) -> str:
async with semaphore:
try:
return await self._fetch(url)
except Exception as e:
if self.continue_on_failure:
logger.warning(
f"Error fetching {url}, skipping due to"
f" continue_on_failure=True"
)
return ""
logger.exception(
f"Error fetching {url} and aborting, use continue_on_failure=True "
"to continue loading urls after encountering an error."
)
raise e
async def fetch_all(self, urls: List[str]) -> Any:
"""Fetch all urls concurrently with rate limiting."""
semaphore = asyncio.Semaphore(self.requests_per_second)
tasks = []
for url in urls:
task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
tasks.append(task)
try:
if self.show_progress:
from tqdm.asyncio import tqdm_asyncio
return await tqdm_asyncio.gather(
*tasks, desc="Fetching pages", ascii=True, mininterval=1
)
else:
return await asyncio.gather(*tasks)
except ImportError:
warnings.warn("For better logging of progress, `pip install tqdm`")
return await asyncio.gather(*tasks)
@staticmethod
def _check_parser(parser: str) -> None:
"""Check that parser is valid for bs4."""
valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"]
if parser not in valid_parsers:
raise ValueError(
"`parser` must be one of " + ", ".join(valid_parsers) + "."
)
def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]:
"""Fetch all urls, then return soups for all results."""
from bs4 import BeautifulSoup
results = asyncio.run(self.fetch_all(urls))
final_results = []
        for i, result in enumerate(results):
            url = urls[i]
            if parser is None:
                # Pick the parser per URL; reusing the `parser` argument here
                # would let the choice made for the first URL leak into all
                # later ones.
                url_parser = "xml" if url.endswith(".xml") else self.default_parser
            else:
                url_parser = parser
            self._check_parser(url_parser)
            final_results.append(BeautifulSoup(result, url_parser, **self.bs_kwargs))
return final_results
def _scrape(
self,
url: str,
parser: Union[str, None] = None,
bs_kwargs: Optional[dict] = None,
) -> Any:
from bs4 import BeautifulSoup
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
html_doc = self.session.get(url, **self.requests_kwargs)
if self.raise_for_status:
html_doc.raise_for_status()
if self.encoding is not None:
html_doc.encoding = self.encoding
elif self.autoset_encoding:
html_doc.encoding = html_doc.apparent_encoding
return BeautifulSoup(html_doc.text, parser, **(bs_kwargs or {}))
def scrape(self, parser: Union[str, None] = None) -> Any:
"""Scrape data from webpage and return it in BeautifulSoup format."""
if parser is None:
parser = self.default_parser
return self._scrape(self.web_path, parser=parser, bs_kwargs=self.bs_kwargs)
def lazy_load(self) -> Iterator[Document]:
"""Lazy load text from the url(s) in web_path."""
for path in self.web_paths:
soup = self._scrape(path, bs_kwargs=self.bs_kwargs)
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, path)
yield Document(page_content=text, metadata=metadata)
def aload(self) -> List[Document]: # type: ignore
"""Load text from the urls in web_path async into Documents."""
results = self.scrape_all(self.web_paths)
docs = []
for path, soup in zip(self.web_paths, results):
text = soup.get_text(**self.bs_get_text_kwargs)
metadata = _build_metadata(soup, path)
docs.append(Document(page_content=text, metadata=metadata))
return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/xorbits.py | from typing import Any
from langchain_community.document_loaders.dataframe import BaseDataFrameLoader
class XorbitsLoader(BaseDataFrameLoader):
"""Load `Xorbits` DataFrame."""
def __init__(self, data_frame: Any, page_content_column: str = "text"):
"""Initialize with dataframe object.
Requirements:
Must have xorbits installed. You can install with `pip install xorbits`.
Args:
data_frame: Xorbits DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "text".
"""
try:
import xorbits.pandas as pd
except ImportError as e:
raise ImportError(
"Cannot import xorbits, please install with 'pip install xorbits'."
) from e
if not isinstance(data_frame, pd.DataFrame):
            raise ValueError(
                "Expected data_frame to be a xorbits.pandas.DataFrame, "
                f"got {type(data_frame)}"
            )
super().__init__(data_frame, page_content_column=page_content_column)
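
# A minimal usage sketch (not part of the original module), assuming xorbits
# is installed:
if __name__ == "__main__":
    import xorbits.pandas as xpd

    df = xpd.DataFrame({"text": ["hello", "world"], "page": [1, 2]})
    loader = XorbitsLoader(df, page_content_column="text")
    docs = loader.load()  # one Document per row; other columns go to metadata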
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/bigquery.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.auth.credentials import Credentials
@deprecated(
since="0.0.32",
removal="1.0",
alternative_import="langchain_google_community.BigQueryLoader",
)
class BigQueryLoader(BaseLoader):
"""Load from the Google Cloud Platform `BigQuery`.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
project: Optional[str] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
credentials: Optional[Credentials] = None,
):
"""Initialize BigQuery document loader.
Args:
query: The query to run in BigQuery.
project: Optional. The project to run the query in.
page_content_columns: Optional. The columns to write into the `page_content`
of the document.
metadata_columns: Optional. The columns to write into the `metadata` of the
document.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
(`google.auth.compute_engine.Credentials`) or Service Account
(`google.oauth2.service_account.Credentials`) credentials directly.
"""
self.query = query
self.project = project
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
self.credentials = credentials
def load(self) -> List[Document]:
try:
from google.cloud import bigquery
except ImportError as ex:
raise ImportError(
"Could not import google-cloud-bigquery python package. "
"Please install it with `pip install google-cloud-bigquery`."
) from ex
bq_client = bigquery.Client(
credentials=self.credentials,
project=self.project,
client_info=get_client_info(module="bigquery"),
)
if not bq_client.project:
error_desc = (
"GCP project for Big Query is not set! Either provide a "
"`project` argument during BigQueryLoader instantiation, "
"or set a default project with `gcloud config set project` "
"command."
)
raise ValueError(error_desc)
query_result = bq_client.query(self.query).result()
docs: List[Document] = []
page_content_columns = self.page_content_columns
metadata_columns = self.metadata_columns
if page_content_columns is None:
page_content_columns = [column.name for column in query_result.schema]
if metadata_columns is None:
metadata_columns = []
for row in query_result:
page_content = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in page_content_columns
)
metadata = {k: v for k, v in row.items() if k in metadata_columns}
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
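
# A minimal usage sketch (not part of the original module); the query and
# table are placeholders, and default Google Cloud credentials are assumed:
if __name__ == "__main__":
    loader = BigQueryLoader(
        query="SELECT title, body FROM `my_project.my_dataset.articles`",
        page_content_columns=["body"],
        metadata_columns=["title"],
    )
    docs = loader.load()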
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/chm.py | from typing import TYPE_CHECKING, Dict, List, Union
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
if TYPE_CHECKING:
from chm import chm
class UnstructuredCHMLoader(UnstructuredFileLoader):
"""Load `CHM` files using `Unstructured`.
CHM means Microsoft Compiled HTML Help.
Examples
--------
from langchain_community.document_loaders import UnstructuredCHMLoader
loader = UnstructuredCHMLoader("example.chm")
docs = loader.load()
References
----------
https://github.com/dottedmag/pychm
http://www.jedrea.com/chmlib/
"""
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html
with CHMParser(self.file_path) as f: # type: ignore[arg-type]
return [
partition_html(text=item["content"], **self.unstructured_kwargs)
for item in f.load_all()
]
class CHMParser(object):
"""Microsoft Compiled HTML Help (CHM) Parser."""
path: str
file: "chm.CHMFile"
def __init__(self, path: str):
from chm import chm
self.path = path
self.file = chm.CHMFile()
self.file.LoadCHM(path)
def __enter__(self): # type: ignore[no-untyped-def]
return self
def __exit__(self, exc_type, exc_value, traceback): # type: ignore[no-untyped-def]
if self.file:
self.file.CloseCHM()
@property
def encoding(self) -> str:
return self.file.GetEncoding().decode("utf-8")
def index(self) -> List[Dict[str, str]]:
from urllib.parse import urlparse
from bs4 import BeautifulSoup
res = []
index = self.file.GetTopicsTree().decode(self.encoding)
        soup = BeautifulSoup(index, "html.parser")
# <OBJECT ..>
for obj in soup.find_all("object"):
# <param name="Name" value="<...>">
# <param name="Local" value="<...>">
name = ""
local = ""
for param in obj.find_all("param"):
if param["name"] == "Name":
name = param["value"]
if param["name"] == "Local":
local = param["value"]
if not name or not local:
continue
local = urlparse(local).path
if not local.startswith("/"):
local = "/" + local
res.append({"name": name, "local": local})
return res
def load(self, path: Union[str, bytes]) -> str:
if isinstance(path, str):
path = path.encode("utf-8")
obj = self.file.ResolveObject(path)[1]
return self.file.RetrieveObject(obj)[1].decode(self.encoding)
def load_all(self) -> List[Dict[str, str]]:
res = []
index = self.index()
for item in index:
content = self.load(item["local"])
res.append(
{"name": item["name"], "local": item["local"], "content": content}
)
return res
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/airbyte_json.py | import json
from pathlib import Path
from typing import List, Union
from langchain_core.documents import Document
from langchain_core.utils import stringify_dict
from langchain_community.document_loaders.base import BaseLoader
class AirbyteJSONLoader(BaseLoader):
"""Load local `Airbyte` json files."""
def __init__(self, file_path: Union[str, Path]):
"""Initialize with a file path. This should start with '/tmp/airbyte_local/'."""
self.file_path = file_path
"""Path to the directory containing the json files."""
def load(self) -> List[Document]:
text = ""
for line in open(self.file_path, "r"):
data = json.loads(line)["_airbyte_data"]
text += stringify_dict(data)
metadata = {"source": str(self.file_path)}
return [Document(page_content=text, metadata=metadata)]
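
# A minimal usage sketch (not part of the original module); the path is a
# placeholder for a local Airbyte JSON destination file:
if __name__ == "__main__":
    loader = AirbyteJSONLoader("/tmp/airbyte_local/my_stream/_airbyte_raw.jsonl")
    docs = loader.load()  # a single Document with all records stringified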
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/chromium.py | import asyncio
import logging
from typing import AsyncIterator, Iterator, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utils.user_agent import get_user_agent
logger = logging.getLogger(__name__)
class AsyncChromiumLoader(BaseLoader):
"""Scrape HTML pages from URLs using a
headless instance of the Chromium."""
def __init__(
self,
urls: List[str],
*,
headless: bool = True,
user_agent: Optional[str] = None,
):
"""Initialize the loader with a list of URL paths.
Args:
urls: A list of URLs to scrape content from.
headless: Whether to run browser in headless mode.
user_agent: The user agent to use for the browser
Raises:
ImportError: If the required 'playwright' package is not installed.
"""
self.urls = urls
self.headless = headless
self.user_agent = user_agent or get_user_agent()
try:
import playwright # noqa: F401
except ImportError:
raise ImportError(
"playwright is required for AsyncChromiumLoader. "
"Please install it with `pip install playwright`."
)
async def ascrape_playwright(self, url: str) -> str:
"""
Asynchronously scrape the content of a given URL using Playwright's async API.
Args:
url (str): The URL to scrape.
Returns:
str: The scraped HTML content or an error message if an exception occurs.
"""
from playwright.async_api import async_playwright
logger.info("Starting scraping...")
results = ""
async with async_playwright() as p:
browser = await p.chromium.launch(headless=self.headless)
try:
page = await browser.new_page(user_agent=self.user_agent)
await page.goto(url)
results = await page.content() # Simply get the HTML content
logger.info("Content scraped")
except Exception as e:
results = f"Error: {e}"
await browser.close()
return results
def lazy_load(self) -> Iterator[Document]:
"""
Lazily load text content from the provided URLs.
This method yields Documents one at a time as they're scraped,
instead of waiting to scrape all URLs before returning.
Yields:
Document: The scraped content encapsulated within a Document object.
"""
for url in self.urls:
html_content = asyncio.run(self.ascrape_playwright(url))
metadata = {"source": url}
yield Document(page_content=html_content, metadata=metadata)
async def alazy_load(self) -> AsyncIterator[Document]:
"""
Asynchronously load text content from the provided URLs.
This method leverages asyncio to initiate the scraping of all provided URLs
simultaneously. It improves performance by utilizing concurrent asynchronous
requests. Each Document is yielded as soon as its content is available,
encapsulating the scraped content.
Yields:
Document: A Document object containing the scraped content, along with its
source URL as metadata.
"""
tasks = [self.ascrape_playwright(url) for url in self.urls]
results = await asyncio.gather(*tasks)
for url, content in zip(self.urls, results):
metadata = {"source": url}
yield Document(page_content=content, metadata=metadata)
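
# A minimal usage sketch (not part of the original module), assuming
# playwright and its Chromium browser are installed
# (`pip install playwright && playwright install chromium`):
if __name__ == "__main__":
    loader = AsyncChromiumLoader(["https://www.example.com"])
    for doc in loader.lazy_load():
        print(doc.metadata["source"], len(doc.page_content))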
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/odt.py | from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredODTLoader(UnstructuredFileLoader):
"""Load `OpenOffice ODT` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredODTLoader
loader = UnstructuredODTLoader(
"example.odt", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-odt
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the file to load.
mode: The mode to use when loading the file. Can be one of "single",
"multi", or "all". Default is "single".
**unstructured_kwargs: Any kwargs to pass to the unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.3")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.odt import partition_odt
return partition_odt(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/brave_search.py | from typing import Iterator, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearchLoader(BaseLoader):
"""Load with `Brave Search` engine."""
def __init__(self, query: str, api_key: str, search_kwargs: Optional[dict] = None):
"""Initializes the BraveLoader.
Args:
query: The query to search for.
api_key: The API key to use.
search_kwargs: The search kwargs to use.
"""
self.query = query
self.api_key = api_key
self.search_kwargs = search_kwargs or {}
def load(self) -> List[Document]:
brave_client = BraveSearchWrapper(
api_key=self.api_key,
search_kwargs=self.search_kwargs,
)
return brave_client.download_documents(self.query)
def lazy_load(self) -> Iterator[Document]:
for doc in self.load():
yield doc
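
# A minimal usage sketch (not part of the original module); the API key is a
# placeholder for a real Brave Search API key, and search_kwargs is passed
# through to the Brave Search API:
if __name__ == "__main__":
    loader = BraveSearchLoader(
        query="langchain document loaders",
        api_key="BRAVE_API_KEY",
        search_kwargs={"count": 3},
    )
    docs = loader.load()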
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/docugami.py | import hashlib
import io
import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.documents import Document
from pydantic import BaseModel, model_validator
from langchain_community.document_loaders.base import BaseLoader
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"
XPATH_KEY = "xpath"
ID_KEY = "id"
DOCUMENT_SOURCE_KEY = "source"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"
DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"
logger = logging.getLogger(__name__)
@deprecated(
since="0.0.24",
removal="1.0",
alternative_import="docugami_langchain.DocugamiLoader",
)
class DocugamiLoader(BaseLoader, BaseModel):
"""Load from `Docugami`.
To use, you should have the ``dgml-utils`` python package installed.
"""
api: str = DEFAULT_API_ENDPOINT
"""The Docugami API endpoint to use."""
access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
"""The Docugami API access token to use."""
max_text_length: int = 4096
"""Max length of chunk text returned."""
min_text_length: int = 32
"""Threshold under which chunks are appended to next to avoid over-chunking."""
max_metadata_length: int = 512
"""Max length of metadata text returned."""
include_xml_tags: bool = False
"""Set to true for XML tags in chunk output text."""
parent_hierarchy_levels: int = 0
"""Set appropriately to get parent chunks using the chunk hierarchy."""
parent_id_key: str = "doc_id"
"""Metadata key for parent doc ID."""
sub_chunk_tables: bool = False
"""Set to True to return sub-chunks within tables."""
whitespace_normalize_text: bool = True
"""Set to False if you want to full whitespace formatting in the original
XML doc, including indentation."""
docset_id: Optional[str] = None
"""The Docugami API docset ID to use."""
document_ids: Optional[Sequence[str]] = None
"""The Docugami API document IDs to use."""
    file_paths: Optional[Sequence[Union[Path, str]]] = None
"""The local file paths to use."""
include_project_metadata_in_doc_metadata: bool = True
"""Set to True if you want to include the project metadata in the doc metadata."""
@model_validator(mode="before")
@classmethod
def validate_local_or_remote(cls, values: Dict[str, Any]) -> Any:
"""Validate that either local file paths are given, or remote API docset ID.
Args:
values: The values to validate.
Returns:
The validated values.
"""
if values.get("file_paths") and values.get("docset_id"):
raise ValueError("Cannot specify both file_paths and remote API docset_id")
if not values.get("file_paths") and not values.get("docset_id"):
raise ValueError("Must specify either file_paths or remote API docset_id")
if values.get("docset_id") and not values.get("access_token"):
raise ValueError("Must specify access token if using remote API docset_id")
return values
def _parse_dgml(
self,
content: bytes,
document_name: Optional[str] = None,
additional_doc_metadata: Optional[Mapping] = None,
) -> List[Document]:
"""Parse a single DGML document into a list of Documents."""
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
try:
from dgml_utils.models import Chunk
from dgml_utils.segmentation import get_chunks
except ImportError:
raise ImportError(
"Could not import from dgml-utils python package. "
"Please install it with `pip install dgml-utils`."
)
def _build_framework_chunk(dg_chunk: Chunk) -> Document:
# Stable IDs for chunks with the same text.
_hashed_id = hashlib.md5(dg_chunk.text.encode()).hexdigest()
metadata = {
XPATH_KEY: dg_chunk.xpath,
ID_KEY: _hashed_id,
DOCUMENT_NAME_KEY: document_name,
DOCUMENT_SOURCE_KEY: document_name,
STRUCTURE_KEY: dg_chunk.structure,
TAG_KEY: dg_chunk.tag,
}
text = dg_chunk.text
if additional_doc_metadata:
if self.include_project_metadata_in_doc_metadata:
metadata.update(additional_doc_metadata)
return Document(
page_content=text[: self.max_text_length],
metadata=metadata,
)
# Parse the tree and return chunks
tree = etree.parse(io.BytesIO(content))
root = tree.getroot()
dg_chunks = get_chunks(
root,
min_text_length=self.min_text_length,
max_text_length=self.max_text_length,
whitespace_normalize_text=self.whitespace_normalize_text,
sub_chunk_tables=self.sub_chunk_tables,
include_xml_tags=self.include_xml_tags,
parent_hierarchy_levels=self.parent_hierarchy_levels,
)
framework_chunks: Dict[str, Document] = {}
for dg_chunk in dg_chunks:
framework_chunk = _build_framework_chunk(dg_chunk)
chunk_id = framework_chunk.metadata.get(ID_KEY)
if chunk_id:
framework_chunks[chunk_id] = framework_chunk
if dg_chunk.parent:
framework_parent_chunk = _build_framework_chunk(dg_chunk.parent)
parent_id = framework_parent_chunk.metadata.get(ID_KEY)
if parent_id and framework_parent_chunk.page_content:
framework_chunk.metadata[self.parent_id_key] = parent_id
framework_chunks[parent_id] = framework_parent_chunk
return list(framework_chunks.values())
def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all document details for the given docset ID"""
url = f"{self.api}/docsets/{docset_id}/documents"
all_documents = []
while url:
response = requests.get(
url,
headers={"Authorization": f"Bearer {self.access_token}"},
)
if response.ok:
data = response.json()
all_documents.extend(data["documents"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_documents
def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
"""Gets all project details for the given docset ID"""
url = f"{self.api}/projects?docset.id={docset_id}"
all_projects = []
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_projects.extend(data["projects"])
url = data.get("next", None)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
return all_projects
def _metadata_for_project(self, project: Dict) -> Dict:
"""Gets project metadata for all files"""
project_id = project.get(ID_KEY)
url = f"{self.api}/projects/{project_id}/artifacts/latest"
all_artifacts = []
per_file_metadata: Dict = {}
while url:
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
data = response.json()
all_artifacts.extend(data["artifacts"])
url = data.get("next", None)
elif response.status_code == 404:
# Not found is ok, just means no published projects
return per_file_metadata
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
for artifact in all_artifacts:
artifact_name = artifact.get("name")
artifact_url = artifact.get("url")
artifact_doc = artifact.get("document")
if artifact_name == "report-values.xml" and artifact_url and artifact_doc:
doc_id = artifact_doc[ID_KEY]
metadata: Dict = {}
# The evaluated XML for each document is named after the project
response = requests.request(
"GET",
f"{artifact_url}/content",
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
try:
from lxml import etree
except ImportError:
raise ImportError(
"Could not import lxml python package. "
"Please install it with `pip install lxml`."
)
artifact_tree = etree.parse(io.BytesIO(response.content))
artifact_root = artifact_tree.getroot()
ns = artifact_root.nsmap
entries = artifact_root.xpath("//pr:Entry", namespaces=ns)
for entry in entries:
heading = entry.xpath("./pr:Heading", namespaces=ns)[0].text
value = " ".join(
entry.xpath("./pr:Value", namespaces=ns)[0].itertext()
).strip()
metadata[heading] = value[: self.max_metadata_length]
per_file_metadata[doc_id] = metadata
else:
raise Exception(
f"Failed to download {artifact_url}/content "
+ "(status: {response.status_code})"
)
return per_file_metadata
def _load_chunks_for_document(
self,
document_id: str,
docset_id: str,
document_name: Optional[str] = None,
additional_metadata: Optional[Mapping] = None,
) -> List[Document]:
"""Load chunks for a document."""
url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"
response = requests.request(
"GET",
url,
headers={"Authorization": f"Bearer {self.access_token}"},
data={},
)
if response.ok:
return self._parse_dgml(
content=response.content,
document_name=document_name,
additional_doc_metadata=additional_metadata,
)
else:
raise Exception(
f"Failed to download {url} (status: {response.status_code})"
)
def load(self) -> List[Document]:
"""Load documents."""
chunks: List[Document] = []
if self.access_token and self.docset_id:
# Remote mode
_document_details = self._document_details_for_docset_id(self.docset_id)
if self.document_ids:
_document_details = [
d for d in _document_details if d[ID_KEY] in self.document_ids
]
_project_details = self._project_details_for_docset_id(self.docset_id)
combined_project_metadata: Dict[str, Dict] = {}
if _project_details and self.include_project_metadata_in_doc_metadata:
# If there are any projects for this docset and the caller requested
# project metadata, load it.
for project in _project_details:
metadata = self._metadata_for_project(project)
for file_id in metadata:
if file_id not in combined_project_metadata:
combined_project_metadata[file_id] = metadata[file_id]
else:
combined_project_metadata[file_id].update(metadata[file_id])
for doc in _document_details:
doc_id = doc[ID_KEY]
doc_name = doc.get(DOCUMENT_NAME_KEY)
doc_metadata = combined_project_metadata.get(doc_id)
chunks += self._load_chunks_for_document(
document_id=doc_id,
docset_id=self.docset_id,
document_name=doc_name,
additional_metadata=doc_metadata,
)
elif self.file_paths:
# Local mode (for integration testing, or pre-downloaded XML)
for path in self.file_paths:
path = Path(path)
with open(path, "rb") as file:
chunks += self._parse_dgml(
content=file.read(),
document_name=path.name,
)
return chunks
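
# A minimal usage sketch (not part of the original module) using local mode;
# the DGML path is a hypothetical placeholder, and dgml-utils is assumed to
# be installed:
if __name__ == "__main__":
    loader = DocugamiLoader(file_paths=["report.xml"])
    chunks = loader.load()  # one Document per chunk, with XPath metadata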
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/evernote.py | """Load documents from Evernote.
https://gist.github.com/foxmask/7b29c43a161e001ff04afdb2f181e31c
"""
import hashlib
import logging
from base64 import b64decode
from pathlib import Path
from time import strptime
from typing import Any, Dict, Iterator, List, Optional, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class EverNoteLoader(BaseLoader):
"""Load from `EverNote`.
    Loads an EverNote notebook export file, e.g. my_notebook.enex, into Documents.
    Instructions on producing this file can be found at
    https://help.evernote.com/hc/en-us/articles/209005557-Export-notes-and-notebooks-as-ENEX-or-HTML
    Currently only the plain text in the note is extracted and stored as the
    contents of the Document. Any non-content metadata tags on the note (e.g.
    'author', 'created', 'updated', but not 'content-raw' or 'resource') are
    extracted and stored as metadata on the Document.
Args:
file_path (str): The path to the notebook export with a .enex extension
load_single_document (bool): Whether or not to concatenate the content of all
notes into a single long Document.
            If this is set to True (default), the only metadata on the document
            will be 'source', which contains the file name of the export.
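    Example (a minimal usage sketch; the file path is a placeholder):
        .. code-block:: python
            from langchain_community.document_loaders import EverNoteLoader
            loader = EverNoteLoader("my_notebook.enex")
            docs = loader.load()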
"""
def __init__(self, file_path: Union[str, Path], load_single_document: bool = True):
"""Initialize with file path."""
self.file_path = str(file_path)
self.load_single_document = load_single_document
def _lazy_load(self) -> Iterator[Document]:
for note in self._parse_note_xml(self.file_path):
if note.get("content") is not None:
yield Document(
page_content=note["content"],
metadata={
**{
key: value
for key, value in note.items()
if key not in ["content", "content-raw", "resource"]
},
**{"source": self.file_path},
},
)
def lazy_load(self) -> Iterator[Document]:
"""Load documents from EverNote export file."""
if not self.load_single_document:
yield from self._lazy_load()
else:
yield Document(
page_content="".join(
[document.page_content for document in self._lazy_load()]
),
metadata={"source": self.file_path},
)
@staticmethod
def _parse_content(content: str) -> str:
try:
import html2text
return html2text.html2text(content).strip()
except ImportError as e:
raise ImportError(
"Could not import `html2text`. Although it is not a required package "
"to use Langchain, using the EverNote loader requires `html2text`. "
"Please install `html2text` via `pip install html2text` and try again."
) from e
@staticmethod
def _parse_resource(resource: list) -> dict:
rsc_dict: Dict[str, Any] = {}
for elem in resource:
if elem.tag == "data":
# Sometimes elem.text is None
rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b""
rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest()
else:
rsc_dict[elem.tag] = elem.text
return rsc_dict
@staticmethod
def _parse_note(note: List, prefix: Optional[str] = None) -> dict:
note_dict: Dict[str, Any] = {}
resources = []
def add_prefix(element_tag: str) -> str:
if prefix is None:
return element_tag
return f"{prefix}.{element_tag}"
for elem in note:
if elem.tag == "content":
note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text)
# A copy of original content
note_dict["content-raw"] = elem.text
elif elem.tag == "resource":
resources.append(EverNoteLoader._parse_resource(elem))
elif elem.tag == "created" or elem.tag == "updated":
note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ")
elif elem.tag == "note-attributes":
additional_attributes = EverNoteLoader._parse_note(
elem, elem.tag
) # Recursively enter the note-attributes tag
note_dict.update(additional_attributes)
else:
note_dict[elem.tag] = elem.text
if len(resources) > 0:
note_dict["resource"] = resources
return {add_prefix(key): value for key, value in note_dict.items()}
@staticmethod
def _parse_note_xml(xml_file: str) -> Iterator[Dict[str, Any]]:
"""Parse Evernote xml."""
# Without huge_tree set to True, parser may complain about huge text node
        # Try to recover, because there may be "&nbsp;", which will cause
# "XMLSyntaxError: Entity 'nbsp' not defined"
try:
from lxml import etree
except ImportError as e:
logger.error(
"Could not import `lxml`. Although it is not a required package to use "
"Langchain, using the EverNote loader requires `lxml`. Please install "
"`lxml` via `pip install lxml` and try again."
)
raise e
context = etree.iterparse(
xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True
)
for action, elem in context:
if elem.tag == "note":
yield EverNoteLoader._parse_note(elem)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/baiducloud_bos_directory.py | from typing import Any, Iterator
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class BaiduBOSDirectoryLoader(BaseLoader):
"""Load from `Baidu BOS directory`."""
def __init__(self, conf: Any, bucket: str, prefix: str = ""):
"""Initialize with BOS config, bucket and prefix.
:param conf(BosConfig): BOS config.
:param bucket(str): BOS bucket.
:param prefix(str): prefix.
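        Example (a minimal sketch; the credentials, endpoint, and bucket names
        below are placeholders):
            .. code-block:: python
                from baidubce.auth.bce_credentials import BceCredentials
                from baidubce.bce_client_configuration import BceClientConfiguration
                conf = BceClientConfiguration(
                    credentials=BceCredentials("your-ak", "your-sk"),
                    endpoint="https://bj.bcebos.com",
                )
                loader = BaiduBOSDirectoryLoader(conf, "my-bucket", prefix="docs/")
                docs = loader.load()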
"""
self.conf = conf
self.bucket = bucket
self.prefix = prefix
def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from baidubce.services.bos.bos_client import BosClient
except ImportError:
raise ImportError(
"Please install bce-python-sdk with `pip install bce-python-sdk`."
)
client = BosClient(self.conf)
contents = []
marker = ""
while True:
response = client.list_objects(
bucket_name=self.bucket,
prefix=self.prefix,
marker=marker,
max_keys=1000,
)
            contents.extend(response.contents)
            # Keep paging until BOS reports that the listing is complete.
            if not response.is_truncated:
                break
            marker = response.next_marker
from langchain_community.document_loaders.baiducloud_bos_file import (
BaiduBOSFileLoader,
)
for content in contents:
if str(content.key).endswith("/"):
continue
loader = BaiduBOSFileLoader(self.conf, self.bucket, str(content.key))
yield loader.load()[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/csv_loader.py | import csv
from io import TextIOWrapper
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.helpers import detect_file_encodings
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class CSVLoader(BaseLoader):
"""Load a `CSV` file into a list of Documents.
Each document represents one row of the CSV file. Every row is converted
into a key/value pair and outputted to a new line in the document's
page_content.
The source for each document loaded from csv is set to the value of the
`file_path` argument for all documents by default.
You can override this by setting the `source_column` argument to the
name of a column in the CSV file.
The source of each document will then be set to the value of the column
with the name specified in `source_column`.
Output Example:
.. code-block:: txt
column1: value1
column2: value2
column3: value3
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import CSVLoader
loader = CSVLoader(file_path='./hw_200.csv',
csv_args={
'delimiter': ',',
'quotechar': '"',
'fieldnames': ['Index', 'Height', 'Weight']
})
Load:
.. code-block:: python
docs = loader.load()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Index: Index
Height: Height(Inches)"
Weight: "Weight(Pounds)"
{'source': './hw_200.csv', 'row': 0}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Index: Index
Height: Height(Inches)"
Weight: "Weight(Pounds)"
{'source': './hw_200.csv', 'row': 0}
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Index: Index
Height: Height(Inches)"
Weight: "Weight(Pounds)"
{'source': './hw_200.csv', 'row': 0}
"""
def __init__(
self,
file_path: Union[str, Path],
source_column: Optional[str] = None,
metadata_columns: Sequence[str] = (),
csv_args: Optional[Dict] = None,
encoding: Optional[str] = None,
autodetect_encoding: bool = False,
*,
content_columns: Sequence[str] = (),
):
"""
Args:
file_path: The path to the CSV file.
source_column: The name of the column in the CSV file to use as the source.
Optional. Defaults to None.
metadata_columns: A sequence of column names to use as metadata. Optional.
csv_args: A dictionary of arguments to pass to the csv.DictReader.
Optional. Defaults to None.
encoding: The encoding of the CSV file. Optional. Defaults to None.
autodetect_encoding: Whether to try to autodetect the file encoding.
content_columns: A sequence of column names to use for the document content.
If not present, use all columns that are not part of the metadata.
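        Example (a sketch with hypothetical column names, showing how
        ``content_columns`` and ``metadata_columns`` interact):
            .. code-block:: python
                loader = CSVLoader(
                    file_path="players.csv",
                    metadata_columns=["Team"],
                    content_columns=["Player", "Stats"],
                )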
"""
self.file_path = file_path
self.source_column = source_column
self.metadata_columns = metadata_columns
self.encoding = encoding
self.csv_args = csv_args or {}
self.autodetect_encoding = autodetect_encoding
self.content_columns = content_columns
def lazy_load(self) -> Iterator[Document]:
try:
with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
yield from self.__read_file(csvfile)
except UnicodeDecodeError as e:
if self.autodetect_encoding:
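                # Retry with each detected encoding until one decodes cleanly;
                # re-raise the original error if none of them do.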
detected_encodings = detect_file_encodings(self.file_path)
for encoding in detected_encodings:
try:
with open(
self.file_path, newline="", encoding=encoding.encoding
) as csvfile:
yield from self.__read_file(csvfile)
break
except UnicodeDecodeError:
continue
else:
raise RuntimeError(f"Error loading {self.file_path}") from e
except Exception as e:
raise RuntimeError(f"Error loading {self.file_path}") from e
def __read_file(self, csvfile: TextIOWrapper) -> Iterator[Document]:
csv_reader = csv.DictReader(csvfile, **self.csv_args)
for i, row in enumerate(csv_reader):
try:
source = (
row[self.source_column]
if self.source_column is not None
else str(self.file_path)
)
except KeyError:
raise ValueError(
f"Source column '{self.source_column}' not found in CSV file."
)
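            # Render each selected column as a "key: value" line; list values
            # are comma-joined after stripping, other non-strings are kept as-is.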
content = "\n".join(
f"""{k.strip() if k is not None else k}: {v.strip()
if isinstance(v, str) else ','.join(map(str.strip, v))
if isinstance(v, list) else v}"""
for k, v in row.items()
if (
k in self.content_columns
if self.content_columns
else k not in self.metadata_columns
)
)
metadata = {"source": source, "row": i}
for col in self.metadata_columns:
try:
metadata[col] = row[col]
except KeyError:
raise ValueError(f"Metadata column '{col}' not found in CSV file.")
yield Document(page_content=content, metadata=metadata)
class UnstructuredCSVLoader(UnstructuredFileLoader):
"""Load `CSV` files using `Unstructured`.
    Like other Unstructured loaders, UnstructuredCSVLoader can be used in
    both "single" and "elements" mode. If you use the loader in "elements"
    mode, the CSV file will be a single Unstructured Table element, and an
    HTML representation of the table will be available in the "text_as_html"
    key in the document metadata.
Examples
--------
from langchain_community.document_loaders.csv_loader import UnstructuredCSVLoader
loader = UnstructuredCSVLoader("stanley-cups.csv", mode="elements")
docs = loader.load()
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Args:
file_path: The path to the CSV file.
mode: The mode to use when loading the CSV file.
Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.8")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.csv import partition_csv
return partition_csv(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/reddit.py | from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import praw
def _dependable_praw_import() -> praw:
try:
import praw
except ImportError:
raise ImportError(
"praw package not found, please install it with `pip install praw`"
)
return praw
class RedditPostsLoader(BaseLoader):
"""Load `Reddit` posts.
Read posts on a subreddit.
First, you need to go to
https://www.reddit.com/prefs/apps/
and create your application
"""
def __init__(
self,
client_id: str,
client_secret: str,
user_agent: str,
search_queries: Sequence[str],
mode: str,
categories: Sequence[str] = ["new"],
number_posts: Optional[int] = 10,
):
"""
Initialize with client_id, client_secret, user_agent, search_queries, mode,
categories, number_posts.
Example: https://www.reddit.com/r/learnpython/
Args:
client_id: Reddit client id.
client_secret: Reddit client secret.
user_agent: Reddit user agent.
search_queries: The search queries.
mode: The mode.
categories: The categories. Default: ["new"]
number_posts: The number of posts. Default: 10
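        Example (a minimal sketch; all credential values are placeholders):
            .. code-block:: python
                loader = RedditPostsLoader(
                    client_id="your-client-id",
                    client_secret="your-client-secret",
                    user_agent="extractor by u/your-username",
                    search_queries=["langchain"],
                    mode="subreddit",
                    categories=["hot"],
                    number_posts=5,
                )
                docs = loader.load()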
"""
self.client_id = client_id
self.client_secret = client_secret
self.user_agent = user_agent
self.search_queries = search_queries
self.mode = mode
self.categories = categories
self.number_posts = number_posts
def load(self) -> List[Document]:
"""Load reddits."""
praw = _dependable_praw_import()
reddit = praw.Reddit(
client_id=self.client_id,
client_secret=self.client_secret,
user_agent=self.user_agent,
)
results: List[Document] = []
if self.mode == "subreddit":
for search_query in self.search_queries:
for category in self.categories:
docs = self._subreddit_posts_loader(
search_query=search_query, category=category, reddit=reddit
)
results.extend(docs)
elif self.mode == "username":
for search_query in self.search_queries:
for category in self.categories:
docs = self._user_posts_loader(
search_query=search_query, category=category, reddit=reddit
)
results.extend(docs)
else:
raise ValueError(
"mode not correct, please enter 'username' or 'subreddit' as mode"
)
return results
def _subreddit_posts_loader(
self, search_query: str, category: str, reddit: praw.reddit.Reddit
) -> Iterable[Document]:
subreddit = reddit.subreddit(search_query)
method = getattr(subreddit, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {
"post_subreddit": post.subreddit_name_prefixed,
"post_category": category,
"post_title": post.title,
"post_score": post.score,
"post_id": post.id,
"post_url": post.url,
"post_author": post.author,
}
yield Document(
page_content=post.selftext,
metadata=metadata,
)
def _user_posts_loader(
self, search_query: str, category: str, reddit: praw.reddit.Reddit
) -> Iterable[Document]:
user = reddit.redditor(search_query)
method = getattr(user.submissions, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {
"post_subreddit": post.subreddit_name_prefixed,
"post_category": category,
"post_title": post.title,
"post_score": post.score,
"post_id": post.id,
"post_url": post.url,
"post_author": post.author,
}
yield Document(
page_content=post.selftext,
metadata=metadata,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/__init__.py | """**Document Loaders** are classes to load Documents.
**Document Loaders** are usually used to load a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseLoader --> <name>Loader # Examples: TextLoader, UnstructuredFileLoader
**Main helpers:**
.. code-block::
Document, <name>TextSplitter
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.document_loaders.acreom import (
AcreomLoader,
)
from langchain_community.document_loaders.airbyte import (
AirbyteCDKLoader,
AirbyteGongLoader,
AirbyteHubspotLoader,
AirbyteSalesforceLoader,
AirbyteShopifyLoader,
AirbyteStripeLoader,
AirbyteTypeformLoader,
AirbyteZendeskSupportLoader,
)
from langchain_community.document_loaders.airbyte_json import (
AirbyteJSONLoader,
)
from langchain_community.document_loaders.airtable import (
AirtableLoader,
)
from langchain_community.document_loaders.apify_dataset import (
ApifyDatasetLoader,
)
from langchain_community.document_loaders.arcgis_loader import (
ArcGISLoader,
)
from langchain_community.document_loaders.arxiv import (
ArxivLoader,
)
from langchain_community.document_loaders.assemblyai import (
AssemblyAIAudioLoaderById,
AssemblyAIAudioTranscriptLoader,
)
from langchain_community.document_loaders.astradb import (
AstraDBLoader,
)
from langchain_community.document_loaders.async_html import (
AsyncHtmlLoader,
)
from langchain_community.document_loaders.athena import (
AthenaLoader,
)
from langchain_community.document_loaders.azlyrics import (
AZLyricsLoader,
)
from langchain_community.document_loaders.azure_ai_data import (
AzureAIDataLoader,
)
from langchain_community.document_loaders.azure_blob_storage_container import (
AzureBlobStorageContainerLoader,
)
from langchain_community.document_loaders.azure_blob_storage_file import (
AzureBlobStorageFileLoader,
)
from langchain_community.document_loaders.bibtex import (
BibtexLoader,
)
from langchain_community.document_loaders.bigquery import (
BigQueryLoader,
)
from langchain_community.document_loaders.bilibili import (
BiliBiliLoader,
)
from langchain_community.document_loaders.blackboard import (
BlackboardLoader,
)
from langchain_community.document_loaders.blob_loaders import (
Blob,
BlobLoader,
FileSystemBlobLoader,
YoutubeAudioLoader,
)
from langchain_community.document_loaders.blockchain import (
BlockchainDocumentLoader,
)
from langchain_community.document_loaders.brave_search import (
BraveSearchLoader,
)
from langchain_community.document_loaders.browserbase import (
BrowserbaseLoader,
)
from langchain_community.document_loaders.browserless import (
BrowserlessLoader,
)
from langchain_community.document_loaders.cassandra import (
CassandraLoader,
)
from langchain_community.document_loaders.chatgpt import (
ChatGPTLoader,
)
from langchain_community.document_loaders.chm import (
UnstructuredCHMLoader,
)
from langchain_community.document_loaders.chromium import (
AsyncChromiumLoader,
)
from langchain_community.document_loaders.college_confidential import (
CollegeConfidentialLoader,
)
from langchain_community.document_loaders.concurrent import (
ConcurrentLoader,
)
from langchain_community.document_loaders.confluence import (
ConfluenceLoader,
)
from langchain_community.document_loaders.conllu import (
CoNLLULoader,
)
from langchain_community.document_loaders.couchbase import (
CouchbaseLoader,
)
from langchain_community.document_loaders.csv_loader import (
CSVLoader,
UnstructuredCSVLoader,
)
from langchain_community.document_loaders.cube_semantic import (
CubeSemanticLoader,
)
from langchain_community.document_loaders.datadog_logs import (
DatadogLogsLoader,
)
from langchain_community.document_loaders.dataframe import (
DataFrameLoader,
)
from langchain_community.document_loaders.dedoc import (
DedocAPIFileLoader,
DedocFileLoader,
)
from langchain_community.document_loaders.diffbot import (
DiffbotLoader,
)
from langchain_community.document_loaders.directory import (
DirectoryLoader,
)
from langchain_community.document_loaders.discord import (
DiscordChatLoader,
)
from langchain_community.document_loaders.doc_intelligence import (
AzureAIDocumentIntelligenceLoader,
)
from langchain_community.document_loaders.docugami import (
DocugamiLoader,
)
from langchain_community.document_loaders.docusaurus import (
DocusaurusLoader,
)
from langchain_community.document_loaders.dropbox import (
DropboxLoader,
)
from langchain_community.document_loaders.duckdb_loader import (
DuckDBLoader,
)
from langchain_community.document_loaders.email import (
OutlookMessageLoader,
UnstructuredEmailLoader,
)
from langchain_community.document_loaders.epub import (
UnstructuredEPubLoader,
)
from langchain_community.document_loaders.etherscan import (
EtherscanLoader,
)
from langchain_community.document_loaders.evernote import (
EverNoteLoader,
)
from langchain_community.document_loaders.excel import (
UnstructuredExcelLoader,
)
from langchain_community.document_loaders.facebook_chat import (
FacebookChatLoader,
)
from langchain_community.document_loaders.fauna import (
FaunaLoader,
)
from langchain_community.document_loaders.figma import (
FigmaFileLoader,
)
from langchain_community.document_loaders.firecrawl import (
FireCrawlLoader,
)
from langchain_community.document_loaders.gcs_directory import (
GCSDirectoryLoader,
)
from langchain_community.document_loaders.gcs_file import (
GCSFileLoader,
)
from langchain_community.document_loaders.geodataframe import (
GeoDataFrameLoader,
)
from langchain_community.document_loaders.git import (
GitLoader,
)
from langchain_community.document_loaders.gitbook import (
GitbookLoader,
)
from langchain_community.document_loaders.github import (
GithubFileLoader,
GitHubIssuesLoader,
)
from langchain_community.document_loaders.glue_catalog import (
GlueCatalogLoader,
)
from langchain_community.document_loaders.google_speech_to_text import (
GoogleSpeechToTextLoader,
)
from langchain_community.document_loaders.googledrive import (
GoogleDriveLoader,
)
from langchain_community.document_loaders.gutenberg import (
GutenbergLoader,
)
from langchain_community.document_loaders.hn import (
HNLoader,
)
from langchain_community.document_loaders.html import (
UnstructuredHTMLLoader,
)
from langchain_community.document_loaders.html_bs import (
BSHTMLLoader,
)
from langchain_community.document_loaders.hugging_face_dataset import (
HuggingFaceDatasetLoader,
)
from langchain_community.document_loaders.hugging_face_model import (
HuggingFaceModelLoader,
)
from langchain_community.document_loaders.ifixit import (
IFixitLoader,
)
from langchain_community.document_loaders.image import (
UnstructuredImageLoader,
)
from langchain_community.document_loaders.image_captions import (
ImageCaptionLoader,
)
from langchain_community.document_loaders.imsdb import (
IMSDbLoader,
)
from langchain_community.document_loaders.iugu import (
IuguLoader,
)
from langchain_community.document_loaders.joplin import (
JoplinLoader,
)
from langchain_community.document_loaders.json_loader import (
JSONLoader,
)
from langchain_community.document_loaders.kinetica_loader import KineticaLoader
from langchain_community.document_loaders.lakefs import (
LakeFSLoader,
)
from langchain_community.document_loaders.larksuite import (
LarkSuiteDocLoader,
)
from langchain_community.document_loaders.llmsherpa import (
LLMSherpaFileLoader,
)
from langchain_community.document_loaders.markdown import (
UnstructuredMarkdownLoader,
)
from langchain_community.document_loaders.mastodon import (
MastodonTootsLoader,
)
from langchain_community.document_loaders.max_compute import (
MaxComputeLoader,
)
from langchain_community.document_loaders.mediawikidump import (
MWDumpLoader,
)
from langchain_community.document_loaders.merge import (
MergedDataLoader,
)
from langchain_community.document_loaders.mhtml import (
MHTMLLoader,
)
from langchain_community.document_loaders.modern_treasury import (
ModernTreasuryLoader,
)
from langchain_community.document_loaders.mongodb import (
MongodbLoader,
)
from langchain_community.document_loaders.needle import (
NeedleLoader,
)
from langchain_community.document_loaders.news import (
NewsURLLoader,
)
from langchain_community.document_loaders.notebook import (
NotebookLoader,
)
from langchain_community.document_loaders.notion import (
NotionDirectoryLoader,
)
from langchain_community.document_loaders.notiondb import (
NotionDBLoader,
)
from langchain_community.document_loaders.obs_directory import (
OBSDirectoryLoader,
)
from langchain_community.document_loaders.obs_file import (
OBSFileLoader,
)
from langchain_community.document_loaders.obsidian import (
ObsidianLoader,
)
from langchain_community.document_loaders.odt import (
UnstructuredODTLoader,
)
from langchain_community.document_loaders.onedrive import (
OneDriveLoader,
)
from langchain_community.document_loaders.onedrive_file import (
OneDriveFileLoader,
)
from langchain_community.document_loaders.open_city_data import (
OpenCityDataLoader,
)
from langchain_community.document_loaders.oracleadb_loader import (
OracleAutonomousDatabaseLoader,
)
from langchain_community.document_loaders.oracleai import (
OracleDocLoader,
OracleTextSplitter,
)
from langchain_community.document_loaders.org_mode import (
UnstructuredOrgModeLoader,
)
from langchain_community.document_loaders.pdf import (
AmazonTextractPDFLoader,
DedocPDFLoader,
MathpixPDFLoader,
OnlinePDFLoader,
PagedPDFSplitter,
PDFMinerLoader,
PDFMinerPDFasHTMLLoader,
PDFPlumberLoader,
PyMuPDFLoader,
PyPDFDirectoryLoader,
PyPDFium2Loader,
PyPDFLoader,
UnstructuredPDFLoader,
)
from langchain_community.document_loaders.pebblo import (
PebbloSafeLoader,
PebbloTextLoader,
)
from langchain_community.document_loaders.polars_dataframe import (
PolarsDataFrameLoader,
)
from langchain_community.document_loaders.powerpoint import (
UnstructuredPowerPointLoader,
)
from langchain_community.document_loaders.psychic import (
PsychicLoader,
)
from langchain_community.document_loaders.pubmed import (
PubMedLoader,
)
from langchain_community.document_loaders.pyspark_dataframe import (
PySparkDataFrameLoader,
)
from langchain_community.document_loaders.python import (
PythonLoader,
)
from langchain_community.document_loaders.readthedocs import (
ReadTheDocsLoader,
)
from langchain_community.document_loaders.recursive_url_loader import (
RecursiveUrlLoader,
)
from langchain_community.document_loaders.reddit import (
RedditPostsLoader,
)
from langchain_community.document_loaders.roam import (
RoamLoader,
)
from langchain_community.document_loaders.rocksetdb import (
RocksetLoader,
)
from langchain_community.document_loaders.rss import (
RSSFeedLoader,
)
from langchain_community.document_loaders.rst import (
UnstructuredRSTLoader,
)
from langchain_community.document_loaders.rtf import (
UnstructuredRTFLoader,
)
from langchain_community.document_loaders.s3_directory import (
S3DirectoryLoader,
)
from langchain_community.document_loaders.s3_file import (
S3FileLoader,
)
from langchain_community.document_loaders.scrapfly import (
ScrapflyLoader,
)
from langchain_community.document_loaders.scrapingant import (
ScrapingAntLoader,
)
from langchain_community.document_loaders.sharepoint import (
SharePointLoader,
)
from langchain_community.document_loaders.sitemap import (
SitemapLoader,
)
from langchain_community.document_loaders.slack_directory import (
SlackDirectoryLoader,
)
from langchain_community.document_loaders.snowflake_loader import (
SnowflakeLoader,
)
from langchain_community.document_loaders.spider import (
SpiderLoader,
)
from langchain_community.document_loaders.spreedly import (
SpreedlyLoader,
)
from langchain_community.document_loaders.sql_database import (
SQLDatabaseLoader,
)
from langchain_community.document_loaders.srt import (
SRTLoader,
)
from langchain_community.document_loaders.stripe import (
StripeLoader,
)
from langchain_community.document_loaders.surrealdb import (
SurrealDBLoader,
)
from langchain_community.document_loaders.telegram import (
TelegramChatApiLoader,
TelegramChatFileLoader,
TelegramChatLoader,
)
from langchain_community.document_loaders.tencent_cos_directory import (
TencentCOSDirectoryLoader,
)
from langchain_community.document_loaders.tencent_cos_file import (
TencentCOSFileLoader,
)
from langchain_community.document_loaders.tensorflow_datasets import (
TensorflowDatasetLoader,
)
from langchain_community.document_loaders.text import (
TextLoader,
)
from langchain_community.document_loaders.tidb import (
TiDBLoader,
)
from langchain_community.document_loaders.tomarkdown import (
ToMarkdownLoader,
)
from langchain_community.document_loaders.toml import (
TomlLoader,
)
from langchain_community.document_loaders.trello import (
TrelloLoader,
)
from langchain_community.document_loaders.tsv import (
UnstructuredTSVLoader,
)
from langchain_community.document_loaders.twitter import (
TwitterTweetLoader,
)
from langchain_community.document_loaders.unstructured import (
UnstructuredAPIFileIOLoader,
UnstructuredAPIFileLoader,
UnstructuredFileIOLoader,
UnstructuredFileLoader,
)
from langchain_community.document_loaders.url import (
UnstructuredURLLoader,
)
from langchain_community.document_loaders.url_playwright import (
PlaywrightURLLoader,
)
from langchain_community.document_loaders.url_selenium import (
SeleniumURLLoader,
)
from langchain_community.document_loaders.vsdx import (
VsdxLoader,
)
from langchain_community.document_loaders.weather import (
WeatherDataLoader,
)
from langchain_community.document_loaders.web_base import (
WebBaseLoader,
)
from langchain_community.document_loaders.whatsapp_chat import (
WhatsAppChatLoader,
)
from langchain_community.document_loaders.wikipedia import (
WikipediaLoader,
)
from langchain_community.document_loaders.word_document import (
Docx2txtLoader,
UnstructuredWordDocumentLoader,
)
from langchain_community.document_loaders.xml import (
UnstructuredXMLLoader,
)
from langchain_community.document_loaders.xorbits import (
XorbitsLoader,
)
from langchain_community.document_loaders.youtube import (
GoogleApiClient,
GoogleApiYoutubeLoader,
YoutubeLoader,
)
from langchain_community.document_loaders.yuque import (
YuqueLoader,
)
_module_lookup = {
"AZLyricsLoader": "langchain_community.document_loaders.azlyrics",
"AcreomLoader": "langchain_community.document_loaders.acreom",
"AirbyteCDKLoader": "langchain_community.document_loaders.airbyte",
"AirbyteGongLoader": "langchain_community.document_loaders.airbyte",
"AirbyteHubspotLoader": "langchain_community.document_loaders.airbyte",
"AirbyteJSONLoader": "langchain_community.document_loaders.airbyte_json",
"AirbyteSalesforceLoader": "langchain_community.document_loaders.airbyte",
"AirbyteShopifyLoader": "langchain_community.document_loaders.airbyte",
"AirbyteStripeLoader": "langchain_community.document_loaders.airbyte",
"AirbyteTypeformLoader": "langchain_community.document_loaders.airbyte",
"AirbyteZendeskSupportLoader": "langchain_community.document_loaders.airbyte",
"AirtableLoader": "langchain_community.document_loaders.airtable",
"AmazonTextractPDFLoader": "langchain_community.document_loaders.pdf",
"ApifyDatasetLoader": "langchain_community.document_loaders.apify_dataset",
"ArcGISLoader": "langchain_community.document_loaders.arcgis_loader",
"ArxivLoader": "langchain_community.document_loaders.arxiv",
"AssemblyAIAudioLoaderById": "langchain_community.document_loaders.assemblyai",
"AssemblyAIAudioTranscriptLoader": "langchain_community.document_loaders.assemblyai", # noqa: E501
"AstraDBLoader": "langchain_community.document_loaders.astradb",
"AsyncChromiumLoader": "langchain_community.document_loaders.chromium",
"AsyncHtmlLoader": "langchain_community.document_loaders.async_html",
"AthenaLoader": "langchain_community.document_loaders.athena",
"AzureAIDataLoader": "langchain_community.document_loaders.azure_ai_data",
"AzureAIDocumentIntelligenceLoader": "langchain_community.document_loaders.doc_intelligence", # noqa: E501
"AzureBlobStorageContainerLoader": "langchain_community.document_loaders.azure_blob_storage_container", # noqa: E501
"AzureBlobStorageFileLoader": "langchain_community.document_loaders.azure_blob_storage_file", # noqa: E501
"BSHTMLLoader": "langchain_community.document_loaders.html_bs",
"BibtexLoader": "langchain_community.document_loaders.bibtex",
"BigQueryLoader": "langchain_community.document_loaders.bigquery",
"BiliBiliLoader": "langchain_community.document_loaders.bilibili",
"BlackboardLoader": "langchain_community.document_loaders.blackboard",
"Blob": "langchain_community.document_loaders.blob_loaders",
"BlobLoader": "langchain_community.document_loaders.blob_loaders",
"BlockchainDocumentLoader": "langchain_community.document_loaders.blockchain",
"BraveSearchLoader": "langchain_community.document_loaders.brave_search",
"BrowserbaseLoader": "langchain_community.document_loaders.browserbase",
"BrowserlessLoader": "langchain_community.document_loaders.browserless",
"CSVLoader": "langchain_community.document_loaders.csv_loader",
"CassandraLoader": "langchain_community.document_loaders.cassandra",
"ChatGPTLoader": "langchain_community.document_loaders.chatgpt",
"CoNLLULoader": "langchain_community.document_loaders.conllu",
"CollegeConfidentialLoader": "langchain_community.document_loaders.college_confidential", # noqa: E501
"ConcurrentLoader": "langchain_community.document_loaders.concurrent",
"ConfluenceLoader": "langchain_community.document_loaders.confluence",
"CouchbaseLoader": "langchain_community.document_loaders.couchbase",
"CubeSemanticLoader": "langchain_community.document_loaders.cube_semantic",
"DataFrameLoader": "langchain_community.document_loaders.dataframe",
"DatadogLogsLoader": "langchain_community.document_loaders.datadog_logs",
"DedocAPIFileLoader": "langchain_community.document_loaders.dedoc",
"DedocFileLoader": "langchain_community.document_loaders.dedoc",
"DedocPDFLoader": "langchain_community.document_loaders.pdf",
"DiffbotLoader": "langchain_community.document_loaders.diffbot",
"DirectoryLoader": "langchain_community.document_loaders.directory",
"DiscordChatLoader": "langchain_community.document_loaders.discord",
"DocugamiLoader": "langchain_community.document_loaders.docugami",
"DocusaurusLoader": "langchain_community.document_loaders.docusaurus",
"Docx2txtLoader": "langchain_community.document_loaders.word_document",
"DropboxLoader": "langchain_community.document_loaders.dropbox",
"DuckDBLoader": "langchain_community.document_loaders.duckdb_loader",
"EtherscanLoader": "langchain_community.document_loaders.etherscan",
"EverNoteLoader": "langchain_community.document_loaders.evernote",
"FacebookChatLoader": "langchain_community.document_loaders.facebook_chat",
"FaunaLoader": "langchain_community.document_loaders.fauna",
"FigmaFileLoader": "langchain_community.document_loaders.figma",
"FireCrawlLoader": "langchain_community.document_loaders.firecrawl",
"FileSystemBlobLoader": "langchain_community.document_loaders.blob_loaders",
"GCSDirectoryLoader": "langchain_community.document_loaders.gcs_directory",
"GCSFileLoader": "langchain_community.document_loaders.gcs_file",
"GeoDataFrameLoader": "langchain_community.document_loaders.geodataframe",
"GitHubIssuesLoader": "langchain_community.document_loaders.github",
"GitLoader": "langchain_community.document_loaders.git",
"GitbookLoader": "langchain_community.document_loaders.gitbook",
"GithubFileLoader": "langchain_community.document_loaders.github",
"GlueCatalogLoader": "langchain_community.document_loaders.glue_catalog",
"GoogleApiClient": "langchain_community.document_loaders.youtube",
"GoogleApiYoutubeLoader": "langchain_community.document_loaders.youtube",
"GoogleDriveLoader": "langchain_community.document_loaders.googledrive",
"GoogleSpeechToTextLoader": "langchain_community.document_loaders.google_speech_to_text", # noqa: E501
"GutenbergLoader": "langchain_community.document_loaders.gutenberg",
"HNLoader": "langchain_community.document_loaders.hn",
"HuggingFaceDatasetLoader": "langchain_community.document_loaders.hugging_face_dataset", # noqa: E501
"HuggingFaceModelLoader": "langchain_community.document_loaders.hugging_face_model",
"IFixitLoader": "langchain_community.document_loaders.ifixit",
"IMSDbLoader": "langchain_community.document_loaders.imsdb",
"ImageCaptionLoader": "langchain_community.document_loaders.image_captions",
"IuguLoader": "langchain_community.document_loaders.iugu",
"JSONLoader": "langchain_community.document_loaders.json_loader",
"JoplinLoader": "langchain_community.document_loaders.joplin",
"KineticaLoader": "langchain_community.document_loaders.kinetica_loader",
"LakeFSLoader": "langchain_community.document_loaders.lakefs",
"LarkSuiteDocLoader": "langchain_community.document_loaders.larksuite",
"LLMSherpaFileLoader": "langchain_community.document_loaders.llmsherpa",
"MHTMLLoader": "langchain_community.document_loaders.mhtml",
"MWDumpLoader": "langchain_community.document_loaders.mediawikidump",
"MastodonTootsLoader": "langchain_community.document_loaders.mastodon",
"MathpixPDFLoader": "langchain_community.document_loaders.pdf",
"MaxComputeLoader": "langchain_community.document_loaders.max_compute",
"MergedDataLoader": "langchain_community.document_loaders.merge",
"ModernTreasuryLoader": "langchain_community.document_loaders.modern_treasury",
"MongodbLoader": "langchain_community.document_loaders.mongodb",
"NeedleLoader": "langchain_community.document_loaders.needle",
"NewsURLLoader": "langchain_community.document_loaders.news",
"NotebookLoader": "langchain_community.document_loaders.notebook",
"NotionDBLoader": "langchain_community.document_loaders.notiondb",
"NotionDirectoryLoader": "langchain_community.document_loaders.notion",
"OBSDirectoryLoader": "langchain_community.document_loaders.obs_directory",
"OBSFileLoader": "langchain_community.document_loaders.obs_file",
"ObsidianLoader": "langchain_community.document_loaders.obsidian",
"OneDriveFileLoader": "langchain_community.document_loaders.onedrive_file",
"OneDriveLoader": "langchain_community.document_loaders.onedrive",
"OnlinePDFLoader": "langchain_community.document_loaders.pdf",
"OpenCityDataLoader": "langchain_community.document_loaders.open_city_data",
"OracleAutonomousDatabaseLoader": "langchain_community.document_loaders.oracleadb_loader", # noqa: E501
"OracleDocLoader": "langchain_community.document_loaders.oracleai",
"OracleTextSplitter": "langchain_community.document_loaders.oracleai",
"OutlookMessageLoader": "langchain_community.document_loaders.email",
"PDFMinerLoader": "langchain_community.document_loaders.pdf",
"PDFMinerPDFasHTMLLoader": "langchain_community.document_loaders.pdf",
"PDFPlumberLoader": "langchain_community.document_loaders.pdf",
"PagedPDFSplitter": "langchain_community.document_loaders.pdf",
"PebbloSafeLoader": "langchain_community.document_loaders.pebblo",
"PebbloTextLoader": "langchain_community.document_loaders.pebblo",
"PlaywrightURLLoader": "langchain_community.document_loaders.url_playwright",
"PolarsDataFrameLoader": "langchain_community.document_loaders.polars_dataframe",
"PsychicLoader": "langchain_community.document_loaders.psychic",
"PubMedLoader": "langchain_community.document_loaders.pubmed",
"PyMuPDFLoader": "langchain_community.document_loaders.pdf",
"PyPDFDirectoryLoader": "langchain_community.document_loaders.pdf",
"PyPDFLoader": "langchain_community.document_loaders.pdf",
"PyPDFium2Loader": "langchain_community.document_loaders.pdf",
"PySparkDataFrameLoader": "langchain_community.document_loaders.pyspark_dataframe",
"PythonLoader": "langchain_community.document_loaders.python",
"RSSFeedLoader": "langchain_community.document_loaders.rss",
"ReadTheDocsLoader": "langchain_community.document_loaders.readthedocs",
"RecursiveUrlLoader": "langchain_community.document_loaders.recursive_url_loader",
"RedditPostsLoader": "langchain_community.document_loaders.reddit",
"RoamLoader": "langchain_community.document_loaders.roam",
"RocksetLoader": "langchain_community.document_loaders.rocksetdb",
"S3DirectoryLoader": "langchain_community.document_loaders.s3_directory",
"S3FileLoader": "langchain_community.document_loaders.s3_file",
"ScrapflyLoader": "langchain_community.document_loaders.scrapfly",
"ScrapingAntLoader": "langchain_community.document_loaders.scrapingant",
"SQLDatabaseLoader": "langchain_community.document_loaders.sql_database",
"SRTLoader": "langchain_community.document_loaders.srt",
"SeleniumURLLoader": "langchain_community.document_loaders.url_selenium",
"SharePointLoader": "langchain_community.document_loaders.sharepoint",
"SitemapLoader": "langchain_community.document_loaders.sitemap",
"SlackDirectoryLoader": "langchain_community.document_loaders.slack_directory",
"SnowflakeLoader": "langchain_community.document_loaders.snowflake_loader",
"SpiderLoader": "langchain_community.document_loaders.spider",
"SpreedlyLoader": "langchain_community.document_loaders.spreedly",
"StripeLoader": "langchain_community.document_loaders.stripe",
"SurrealDBLoader": "langchain_community.document_loaders.surrealdb",
"TelegramChatApiLoader": "langchain_community.document_loaders.telegram",
"TelegramChatFileLoader": "langchain_community.document_loaders.telegram",
"TelegramChatLoader": "langchain_community.document_loaders.telegram",
"TencentCOSDirectoryLoader": "langchain_community.document_loaders.tencent_cos_directory", # noqa: E501
"TencentCOSFileLoader": "langchain_community.document_loaders.tencent_cos_file",
"TensorflowDatasetLoader": "langchain_community.document_loaders.tensorflow_datasets", # noqa: E501
"TextLoader": "langchain_community.document_loaders.text",
"TiDBLoader": "langchain_community.document_loaders.tidb",
"ToMarkdownLoader": "langchain_community.document_loaders.tomarkdown",
"TomlLoader": "langchain_community.document_loaders.toml",
"TrelloLoader": "langchain_community.document_loaders.trello",
"TwitterTweetLoader": "langchain_community.document_loaders.twitter",
"UnstructuredAPIFileIOLoader": "langchain_community.document_loaders.unstructured",
"UnstructuredAPIFileLoader": "langchain_community.document_loaders.unstructured",
"UnstructuredCHMLoader": "langchain_community.document_loaders.chm",
"UnstructuredCSVLoader": "langchain_community.document_loaders.csv_loader",
"UnstructuredEPubLoader": "langchain_community.document_loaders.epub",
"UnstructuredEmailLoader": "langchain_community.document_loaders.email",
"UnstructuredExcelLoader": "langchain_community.document_loaders.excel",
"UnstructuredFileIOLoader": "langchain_community.document_loaders.unstructured",
"UnstructuredFileLoader": "langchain_community.document_loaders.unstructured",
"UnstructuredHTMLLoader": "langchain_community.document_loaders.html",
"UnstructuredImageLoader": "langchain_community.document_loaders.image",
"UnstructuredMarkdownLoader": "langchain_community.document_loaders.markdown",
"UnstructuredODTLoader": "langchain_community.document_loaders.odt",
"UnstructuredOrgModeLoader": "langchain_community.document_loaders.org_mode",
"UnstructuredPDFLoader": "langchain_community.document_loaders.pdf",
"UnstructuredPowerPointLoader": "langchain_community.document_loaders.powerpoint",
"UnstructuredRSTLoader": "langchain_community.document_loaders.rst",
"UnstructuredRTFLoader": "langchain_community.document_loaders.rtf",
"UnstructuredTSVLoader": "langchain_community.document_loaders.tsv",
"UnstructuredURLLoader": "langchain_community.document_loaders.url",
"UnstructuredWordDocumentLoader": "langchain_community.document_loaders.word_document", # noqa: E501
"UnstructuredXMLLoader": "langchain_community.document_loaders.xml",
"VsdxLoader": "langchain_community.document_loaders.vsdx",
"WeatherDataLoader": "langchain_community.document_loaders.weather",
"WebBaseLoader": "langchain_community.document_loaders.web_base",
"WhatsAppChatLoader": "langchain_community.document_loaders.whatsapp_chat",
"WikipediaLoader": "langchain_community.document_loaders.wikipedia",
"XorbitsLoader": "langchain_community.document_loaders.xorbits",
"YoutubeAudioLoader": "langchain_community.document_loaders.blob_loaders",
"YoutubeLoader": "langchain_community.document_loaders.youtube",
"YuqueLoader": "langchain_community.document_loaders.yuque",
}
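# Lazily resolve attribute access via ``_module_lookup`` so that importing
# this package does not import every loader module (PEP 562 module
# ``__getattr__``).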
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
__all__ = [
"AZLyricsLoader",
"AcreomLoader",
"AirbyteCDKLoader",
"AirbyteGongLoader",
"AirbyteHubspotLoader",
"AirbyteJSONLoader",
"AirbyteSalesforceLoader",
"AirbyteShopifyLoader",
"AirbyteStripeLoader",
"AirbyteTypeformLoader",
"AirbyteZendeskSupportLoader",
"AirtableLoader",
"AmazonTextractPDFLoader",
"ApifyDatasetLoader",
"ArcGISLoader",
"ArxivLoader",
"AssemblyAIAudioLoaderById",
"AssemblyAIAudioTranscriptLoader",
"AstraDBLoader",
"AsyncChromiumLoader",
"AsyncHtmlLoader",
"AthenaLoader",
"AzureAIDataLoader",
"AzureAIDocumentIntelligenceLoader",
"AzureBlobStorageContainerLoader",
"AzureBlobStorageFileLoader",
"BSHTMLLoader",
"BibtexLoader",
"BigQueryLoader",
"BiliBiliLoader",
"BlackboardLoader",
"Blob",
"BlobLoader",
"BlockchainDocumentLoader",
"BraveSearchLoader",
"BrowserbaseLoader",
"BrowserlessLoader",
"CSVLoader",
"CassandraLoader",
"ChatGPTLoader",
"CoNLLULoader",
"CollegeConfidentialLoader",
"ConcurrentLoader",
"ConfluenceLoader",
"CouchbaseLoader",
"CubeSemanticLoader",
"DataFrameLoader",
"DatadogLogsLoader",
"DedocAPIFileLoader",
"DedocFileLoader",
"DedocPDFLoader",
"DiffbotLoader",
"DirectoryLoader",
"DiscordChatLoader",
"DocugamiLoader",
"DocusaurusLoader",
"Docx2txtLoader",
"DropboxLoader",
"DuckDBLoader",
"EtherscanLoader",
"EverNoteLoader",
"FacebookChatLoader",
"FaunaLoader",
"FigmaFileLoader",
"FireCrawlLoader",
"FileSystemBlobLoader",
"GCSDirectoryLoader",
"GlueCatalogLoader",
"GCSFileLoader",
"GeoDataFrameLoader",
"GitHubIssuesLoader",
"GitLoader",
"GitbookLoader",
"GithubFileLoader",
"GoogleApiClient",
"GoogleApiYoutubeLoader",
"GoogleDriveLoader",
"GoogleSpeechToTextLoader",
"GutenbergLoader",
"HNLoader",
"HuggingFaceDatasetLoader",
"HuggingFaceModelLoader",
"IFixitLoader",
"ImageCaptionLoader",
"IMSDbLoader",
"IuguLoader",
"JoplinLoader",
"JSONLoader",
"KineticaLoader",
"LakeFSLoader",
"LarkSuiteDocLoader",
"LLMSherpaFileLoader",
"MastodonTootsLoader",
"MHTMLLoader",
"MWDumpLoader",
"MathpixPDFLoader",
"MaxComputeLoader",
"MergedDataLoader",
"ModernTreasuryLoader",
"MongodbLoader",
"NeedleLoader",
"NewsURLLoader",
"NotebookLoader",
"NotionDBLoader",
"NotionDirectoryLoader",
"OBSDirectoryLoader",
"OBSFileLoader",
"ObsidianLoader",
"OneDriveFileLoader",
"OneDriveLoader",
"OnlinePDFLoader",
"OpenCityDataLoader",
"OracleAutonomousDatabaseLoader",
"OracleDocLoader",
"OracleTextSplitter",
"OutlookMessageLoader",
"PDFMinerLoader",
"PDFMinerPDFasHTMLLoader",
"PDFPlumberLoader",
"PagedPDFSplitter",
"PebbloSafeLoader",
"PebbloTextLoader",
"PlaywrightURLLoader",
"PolarsDataFrameLoader",
"PsychicLoader",
"PubMedLoader",
"PyMuPDFLoader",
"PyPDFDirectoryLoader",
"PyPDFLoader",
"PyPDFium2Loader",
"PySparkDataFrameLoader",
"PythonLoader",
"RSSFeedLoader",
"ReadTheDocsLoader",
"RecursiveUrlLoader",
"RedditPostsLoader",
"RoamLoader",
"RocksetLoader",
"S3DirectoryLoader",
"S3FileLoader",
"ScrapflyLoader",
"ScrapingAntLoader",
"SQLDatabaseLoader",
"SRTLoader",
"SeleniumURLLoader",
"SharePointLoader",
"SitemapLoader",
"SlackDirectoryLoader",
"SnowflakeLoader",
"SpiderLoader",
"SpreedlyLoader",
"StripeLoader",
"SurrealDBLoader",
"TelegramChatApiLoader",
"TelegramChatFileLoader",
"TelegramChatLoader",
"TencentCOSDirectoryLoader",
"TencentCOSFileLoader",
"TensorflowDatasetLoader",
"TextLoader",
"TiDBLoader",
"ToMarkdownLoader",
"TomlLoader",
"TrelloLoader",
"TwitterTweetLoader",
"UnstructuredAPIFileIOLoader",
"UnstructuredAPIFileLoader",
"UnstructuredCHMLoader",
"UnstructuredCSVLoader",
"UnstructuredEPubLoader",
"UnstructuredEmailLoader",
"UnstructuredExcelLoader",
"UnstructuredFileIOLoader",
"UnstructuredFileLoader",
"UnstructuredHTMLLoader",
"UnstructuredImageLoader",
"UnstructuredMarkdownLoader",
"UnstructuredODTLoader",
"UnstructuredOrgModeLoader",
"UnstructuredPDFLoader",
"UnstructuredPowerPointLoader",
"UnstructuredRSTLoader",
"UnstructuredRTFLoader",
"UnstructuredTSVLoader",
"UnstructuredURLLoader",
"UnstructuredWordDocumentLoader",
"UnstructuredXMLLoader",
"VsdxLoader",
"WeatherDataLoader",
"WebBaseLoader",
"WhatsAppChatLoader",
"WikipediaLoader",
"XorbitsLoader",
"YoutubeAudioLoader",
"YoutubeLoader",
"YuqueLoader",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/azure_blob_storage_file.py | import os
import tempfile
from typing import List
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class AzureBlobStorageFileLoader(BaseLoader):
"""Load from `Azure Blob Storage` files."""
def __init__(self, conn_str: str, container: str, blob_name: str):
"""Initialize with connection string, container and blob name."""
self.conn_str = conn_str
"""Connection string for Azure Blob Storage."""
self.container = container
"""Container name."""
self.blob = blob_name
"""Blob name."""
def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import BlobClient
except ImportError as exc:
raise ImportError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
client = BlobClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container, blob_name=self.blob
)
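        # Download the blob into a temporary local file, then delegate parsing
        # to UnstructuredFileLoader.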
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.container}/{self.blob}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(f"{file_path}", "wb") as file:
blob_data = client.download_blob()
blob_data.readinto(file)
loader = UnstructuredFileLoader(file_path)
return loader.load()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/college_confidential.py | from typing import List
from langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
class CollegeConfidentialLoader(WebBaseLoader):
"""Load `College Confidential` webpages."""
def load(self) -> List[Document]:
"""Load webpages as Documents."""
soup = self.scrape()
text = soup.select_one("main[class='skin-handler']").text
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/yuque.py | import re
from typing import Dict, Iterator, List
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class YuqueLoader(BaseLoader):
"""Load documents from `Yuque`."""
def __init__(self, access_token: str, api_url: str = "https://www.yuque.com"):
"""Initialize with Yuque access_token and api_url.
Args:
access_token: Personal access token - see https://www.yuque.com/settings/tokens.
api_url: Yuque API url.
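        Example (a minimal sketch; the token value is a placeholder):
            .. code-block:: python
                loader = YuqueLoader(access_token="your-token")
                docs = loader.load()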
"""
self.access_token = access_token
self.api_url = api_url
@property
def headers(self) -> Dict[str, str]:
return {
"Content-Type": "application/json",
"X-Auth-Token": self.access_token,
}
def get_user_id(self) -> int:
url = f"{self.api_url}/api/v2/user"
response = self.http_get(url=url)
return response["data"]["id"]
def get_books(self, user_id: int) -> List[Dict]:
url = f"{self.api_url}/api/v2/users/{user_id}/repos"
response = self.http_get(url=url)
return response["data"]
def get_document_ids(self, book_id: int) -> List[int]:
url = f"{self.api_url}/api/v2/repos/{book_id}/docs"
response = self.http_get(url=url)
return [document["id"] for document in response["data"]]
def get_document(self, book_id: int, document_id: int) -> Dict:
url = f"{self.api_url}/api/v2/repos/{book_id}/docs/{document_id}"
response = self.http_get(url=url)
return response["data"]
def parse_document(self, document: Dict) -> Document:
content = self.parse_document_body(document["body"])
metadata = {
"title": document["title"],
"description": document["description"],
"created_at": document["created_at"],
"updated_at": document["updated_at"],
}
return Document(page_content=content, metadata=metadata)
@staticmethod
def parse_document_body(body: str) -> str:
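        # Strip Yuque anchor tags and <br> line breaks from the HTML body.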
result = re.sub(r'<a name="(.*)"></a>', "", body)
result = re.sub(r"<br\s*/?>", "", result)
return result
def http_get(self, url: str) -> Dict:
response = requests.get(url, headers=self.headers)
response.raise_for_status()
return response.json()
def get_documents(self) -> Iterator[Document]:
user_id = self.get_user_id()
books = self.get_books(user_id)
for book in books:
book_id = book["id"]
document_ids = self.get_document_ids(book_id)
for document_id in document_ids:
document = self.get_document(book_id, document_id)
parsed_document = self.parse_document(document)
yield parsed_document
def load(self) -> List[Document]:
"""Load documents from `Yuque`."""
return list(self.get_documents())
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/concurrent.py | from __future__ import annotations
import concurrent.futures
from pathlib import Path
from typing import Iterator, Literal, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseBlobParser
from langchain_community.document_loaders.blob_loaders import (
BlobLoader,
FileSystemBlobLoader,
)
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers.registry import get_parser
_PathLike = Union[str, Path]
DEFAULT = Literal["default"]
class ConcurrentLoader(GenericLoader):
"""Load and pars Documents concurrently."""
def __init__(
self,
blob_loader: BlobLoader, # type: ignore[valid-type]
blob_parser: BaseBlobParser,
num_workers: int = 4, # type: ignore[valid-type]
) -> None:
super().__init__(blob_loader, blob_parser)
self.num_workers = num_workers
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily with concurrent parsing."""
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.num_workers
) as executor:
futures = {
executor.submit(self.blob_parser.lazy_parse, blob)
for blob in self.blob_loader.yield_blobs() # type: ignore[attr-defined]
}
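            # Yield documents as each worker finishes; output order is
            # nondeterministic.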
for future in concurrent.futures.as_completed(futures):
yield from future.result()
@classmethod
def from_filesystem(
cls,
path: _PathLike,
*,
glob: str = "**/[!.]*",
exclude: Sequence[str] = (),
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
parser: Union[DEFAULT, BaseBlobParser] = "default",
num_workers: int = 4,
parser_kwargs: Optional[dict] = None,
) -> ConcurrentLoader:
"""Create a concurrent generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
exclude: A list of patterns to exclude from the loader.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
num_workers: Max number of concurrent workers to use.
parser_kwargs: Keyword arguments to pass to the parser.
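        Example (a minimal sketch; the path and glob pattern are illustrative):
            .. code-block:: python
                loader = ConcurrentLoader.from_filesystem(
                    "docs/", glob="**/*.txt", num_workers=8
                )
                docs = list(loader.lazy_load())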
"""
blob_loader = FileSystemBlobLoader( # type: ignore[attr-defined, misc]
path,
glob=glob,
exclude=exclude,
suffixes=suffixes,
show_progress=show_progress,
)
if isinstance(parser, str):
if parser == "default" and cls.get_parser != GenericLoader.get_parser:
# There is an implementation of get_parser on the class, use it.
blob_parser = cls.get_parser(**(parser_kwargs or {}))
else:
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser, num_workers=num_workers)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/weather.py | """Simple reader that reads weather data from OpenWeatherMap API"""
from __future__ import annotations
from datetime import datetime
from typing import Iterator, Optional, Sequence
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
class WeatherDataLoader(BaseLoader):
"""Load weather data with `Open Weather Map` API.
Reads the forecast & current weather of any location using OpenWeatherMap's free
    API. Check out https://openweathermap.org/appid for more on how to generate
    a free OpenWeatherMap API key.
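    Example (a minimal sketch; the API key is a placeholder):
        .. code-block:: python
            loader = WeatherDataLoader.from_params(
                ["London", "Tokyo"], openweathermap_api_key="your-api-key"
            )
            docs = loader.load()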
"""
def __init__(
self,
client: OpenWeatherMapAPIWrapper,
places: Sequence[str],
) -> None:
"""Initialize with parameters."""
super().__init__()
self.client = client
self.places = places
@classmethod
def from_params(
cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
) -> WeatherDataLoader:
client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key) # type: ignore[call-arg]
return cls(client, places)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load weather data for the given locations."""
for place in self.places:
metadata = {"queried_at": datetime.now()}
content = self.client.run(place)
yield Document(page_content=content, metadata=metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/html.py | from typing import List
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
class UnstructuredHTMLLoader(UnstructuredFileLoader):
"""Load `HTML` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredHTMLLoader
loader = UnstructuredHTMLLoader(
"example.html", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-html
"""
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html
return partition_html(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/json_loader.py | import json
from os import PathLike
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, Optional, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class JSONLoader(BaseLoader):
"""
Load a `JSON` file using a `jq` schema.
Setup:
.. code-block:: bash
pip install -U jq
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import JSONLoader
import json
from pathlib import Path
file_path='./sample_quiz.json'
data = json.loads(Path(file_path).read_text())
loader = JSONLoader(
file_path=file_path,
jq_schema='.quiz',
text_content=False)
Load:
.. code-block:: python
docs = loader.load()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
{"sport": {"q1": {"question": "Which one is correct team name in
NBA?", "options": ["New York Bulls"
{'source': '/sample_quiz
.json', 'seq_num': 1}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
{"sport": {"q1": {"question": "Which one is correct team name in
NBA?", "options": ["New York Bulls"
            {'source': '/sample_quiz
.json', 'seq_num': 1}
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
{"sport": {"q1": {"question": "Which one is correct team name in
NBA?", "options": ["New York Bulls"
{'source': '/sample_quiz
.json', 'seq_num': 1}
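    Extracting metadata with ``metadata_func`` (a sketch; reuses the
    ``sample_quiz.json`` structure shown above):
    .. code-block:: python
        def metadata_func(record: dict, metadata: dict) -> dict:
            # copy a field from each extracted JSON object into metadata
            metadata["num_options"] = len(record.get("options", []))
            return metadata
        loader = JSONLoader(
            file_path=file_path,
            jq_schema='.quiz.sport[]',
            content_key='question',
            metadata_func=metadata_func,
        )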
"""
def __init__(
self,
file_path: Union[str, PathLike],
jq_schema: str,
content_key: Optional[str] = None,
is_content_key_jq_parsable: Optional[bool] = False,
metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None,
text_content: bool = True,
json_lines: bool = False,
):
"""Initialize the JSONLoader.
Args:
file_path (Union[str, PathLike]): The path to the JSON or JSON Lines file.
jq_schema (str): The jq schema to use to extract the data or text from
the JSON.
content_key (str): The key to use to extract the content from
                the JSON if the jq_schema results in a list of objects (dict).
If is_content_key_jq_parsable is True, this has to be a jq compatible
schema. If is_content_key_jq_parsable is False, this should be a simple
string key.
is_content_key_jq_parsable (bool): A flag to determine if
content_key is parsable by jq or not. If True, content_key is
treated as a jq schema and compiled accordingly. If False or if
content_key is None, content_key is used as a simple string.
Default is False.
metadata_func (Callable[Dict, Dict]): A function that takes in the JSON
object extracted by the jq_schema and the default metadata and returns
a dict of the updated metadata.
text_content (bool): Boolean flag to indicate whether the content is in
string format, default to True.
json_lines (bool): Boolean flag to indicate whether the input is in
JSON Lines format.
"""
try:
import jq
self.jq = jq
except ImportError:
raise ImportError(
"jq package not found, please install it with `pip install jq`"
)
self.file_path = Path(file_path).resolve()
self._jq_schema = jq.compile(jq_schema)
self._is_content_key_jq_parsable = is_content_key_jq_parsable
self._content_key = content_key
self._metadata_func = metadata_func
self._text_content = text_content
self._json_lines = json_lines
def lazy_load(self) -> Iterator[Document]:
"""Load and return documents from the JSON file."""
index = 0
if self._json_lines:
with self.file_path.open(encoding="utf-8") as f:
for line in f:
line = line.strip()
if line:
for doc in self._parse(line, index):
yield doc
index += 1
else:
for doc in self._parse(self.file_path.read_text(encoding="utf-8"), index):
yield doc
index += 1
def _parse(self, content: str, index: int) -> Iterator[Document]:
"""Convert given content to documents."""
data = self._jq_schema.input(json.loads(content))
# Perform some validation
# This is not a perfect validation, but it should catch most cases
# and prevent the user from getting a cryptic error later on.
if self._content_key is not None:
self._validate_content_key(data)
if self._metadata_func is not None:
self._validate_metadata_func(data)
for i, sample in enumerate(data, index + 1):
text = self._get_text(sample=sample)
metadata = self._get_metadata(
sample=sample, source=str(self.file_path), seq_num=i
)
yield Document(page_content=text, metadata=metadata)
def _get_text(self, sample: Any) -> str:
"""Convert sample to string format"""
if self._content_key is not None:
if self._is_content_key_jq_parsable:
compiled_content_key = self.jq.compile(self._content_key)
content = compiled_content_key.input(sample).first()
else:
content = sample[self._content_key]
else:
content = sample
if self._text_content and not isinstance(content, str):
raise ValueError(
f"Expected page_content is string, got {type(content)} instead. \
Set `text_content=False` if the desired input for \
`page_content` is not a string"
)
# In case the text is None, set it to an empty string
elif isinstance(content, str):
return content
elif isinstance(content, dict):
return json.dumps(content) if content else ""
else:
return str(content) if content is not None else ""
def _get_metadata(
self, sample: Dict[str, Any], **additional_fields: Any
) -> Dict[str, Any]:
"""
        Return a metadata dictionary based on the existence of metadata_func.
        :param sample: single data payload
        :param additional_fields: key-word arguments to be added as metadata values
        :return: the output of metadata_func if provided, otherwise the
            additional fields
        """
if self._metadata_func is not None:
return self._metadata_func(sample, additional_fields)
else:
return additional_fields
def _validate_content_key(self, data: Any) -> None:
"""Check if a content key is valid"""
sample = data.first()
if not isinstance(sample, dict):
raise ValueError(
f"Expected the jq schema to result in a list of objects (dict), \
so sample must be a dict but got `{type(sample)}`"
)
if (
not self._is_content_key_jq_parsable
and sample.get(self._content_key) is None
):
raise ValueError(
f"Expected the jq schema to result in a list of objects (dict) \
with the key `{self._content_key}`"
)
if (
self._is_content_key_jq_parsable
and self.jq.compile(self._content_key).input(sample).text() is None
):
raise ValueError(
f"Expected the jq schema to result in a list of objects (dict) \
with the key `{self._content_key}` which should be parsable by jq"
)
def _validate_metadata_func(self, data: Any) -> None:
"""Check if the metadata_func output is valid"""
sample = data.first()
if self._metadata_func is not None:
sample_metadata = self._metadata_func(sample, {})
if not isinstance(sample_metadata, dict):
raise ValueError(
f"Expected the metadata_func to return a dict but got \
`{type(sample_metadata)}`"
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/obsidian.py | import functools
import logging
import re
from pathlib import Path
from typing import Any, Dict, Iterator, Pattern, Union
import yaml
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class ObsidianLoader(BaseLoader):
"""Load `Obsidian` files from directory."""
FRONT_MATTER_REGEX: Pattern = re.compile(r"^---\n(.*?)\n---\n", re.DOTALL)
TEMPLATE_VARIABLE_REGEX: Pattern = re.compile(r"{{(.*?)}}", re.DOTALL)
TAG_REGEX: Pattern = re.compile(r"[^\S\/]#([a-zA-Z_]+[-_/\w]*)")
DATAVIEW_LINE_REGEX: Pattern = re.compile(r"^\s*(\w+)::\s*(.*)$", re.MULTILINE)
DATAVIEW_INLINE_BRACKET_REGEX: Pattern = re.compile(
r"\[(\w+)::\s*(.*)\]", re.MULTILINE
)
DATAVIEW_INLINE_PAREN_REGEX: Pattern = re.compile(
r"\((\w+)::\s*(.*)\)", re.MULTILINE
)
def __init__(
self,
path: Union[str, Path],
encoding: str = "UTF-8",
collect_metadata: bool = True,
):
"""Initialize with a path.
Args:
path: Path to the directory containing the Obsidian files.
encoding: Charset encoding, defaults to "UTF-8"
collect_metadata: Whether to collect metadata from the front matter.
Defaults to True.
"""
self.file_path = path
self.encoding = encoding
self.collect_metadata = collect_metadata
def _replace_template_var(
self, placeholders: Dict[str, str], match: re.Match
) -> str:
"""Replace a template variable with a placeholder."""
placeholder = f"__TEMPLATE_VAR_{len(placeholders)}__"
placeholders[placeholder] = match.group(1)
return placeholder
def _restore_template_vars(self, obj: Any, placeholders: Dict[str, str]) -> Any:
"""Restore template variables replaced with placeholders to original values."""
if isinstance(obj, str):
for placeholder, value in placeholders.items():
obj = obj.replace(placeholder, f"{{{{{value}}}}}")
elif isinstance(obj, dict):
for key, value in obj.items():
obj[key] = self._restore_template_vars(value, placeholders)
elif isinstance(obj, list):
for i, item in enumerate(obj):
obj[i] = self._restore_template_vars(item, placeholders)
return obj
def _parse_front_matter(self, content: str) -> dict:
"""Parse front matter metadata from the content and return it as a dict."""
if not self.collect_metadata:
return {}
match = self.FRONT_MATTER_REGEX.search(content)
if not match:
return {}
placeholders: Dict[str, str] = {}
replace_template_var = functools.partial(
self._replace_template_var, placeholders
)
front_matter_text = self.TEMPLATE_VARIABLE_REGEX.sub(
replace_template_var, match.group(1)
)
try:
front_matter = yaml.safe_load(front_matter_text)
front_matter = self._restore_template_vars(front_matter, placeholders)
# If tags are a string, split them into a list
if "tags" in front_matter and isinstance(front_matter["tags"], str):
front_matter["tags"] = front_matter["tags"].split(", ")
return front_matter
except yaml.parser.ParserError:
logger.warning("Encountered non-yaml frontmatter")
return {}
def _to_langchain_compatible_metadata(self, metadata: dict) -> dict:
"""Convert a dictionary to a compatible with langchain."""
result = {}
for key, value in metadata.items():
if type(value) in {str, int, float}:
result[key] = value
else:
result[key] = str(value)
return result
def _parse_document_tags(self, content: str) -> set:
"""Return a set of all tags in within the document."""
if not self.collect_metadata:
return set()
match = self.TAG_REGEX.findall(content)
if not match:
return set()
return {tag for tag in match}
def _parse_dataview_fields(self, content: str) -> dict:
"""Parse obsidian dataview plugin fields from the content and return it
as a dict."""
if not self.collect_metadata:
return {}
return {
**{
match[0]: match[1]
for match in self.DATAVIEW_LINE_REGEX.findall(content)
},
**{
match[0]: match[1]
for match in self.DATAVIEW_INLINE_PAREN_REGEX.findall(content)
},
**{
match[0]: match[1]
for match in self.DATAVIEW_INLINE_BRACKET_REGEX.findall(content)
},
}
def _remove_front_matter(self, content: str) -> str:
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub("", content)
def lazy_load(self) -> Iterator[Document]:
paths = list(Path(self.file_path).glob("**/*.md"))
for path in paths:
with open(path, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
tags = self._parse_document_tags(text)
dataview_fields = self._parse_dataview_fields(text)
text = self._remove_front_matter(text)
metadata = {
"source": str(path.name),
"path": str(path),
"created": path.stat().st_ctime,
"last_modified": path.stat().st_mtime,
"last_accessed": path.stat().st_atime,
**self._to_langchain_compatible_metadata(front_matter),
**dataview_fields,
}
if tags or front_matter.get("tags"):
metadata["tags"] = ",".join(
tags | set(front_matter.get("tags", []) or [])
)
yield Document(page_content=text, metadata=metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/s3_directory.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.s3_file import S3FileLoader
if TYPE_CHECKING:
import botocore
class S3DirectoryLoader(BaseLoader):
"""Load from `Amazon AWS S3` directory."""
def __init__(
self,
bucket: str,
prefix: str = "",
*,
region_name: Optional[str] = None,
api_version: Optional[str] = None,
use_ssl: Optional[bool] = True,
verify: Union[str, bool, None] = None,
endpoint_url: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
boto_config: Optional[botocore.client.Config] = None,
):
"""Initialize with bucket and key name.
:param bucket: The name of the S3 bucket.
:param prefix: The prefix of the S3 key. Defaults to "".
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:param use_ssl: Whether to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:param verify: Whether to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
                    use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type boto_config: botocore.client.Config
:param boto_config: Advanced boto3 client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
"""
self.bucket = bucket
self.prefix = prefix
self.region_name = region_name
self.api_version = api_version
self.use_ssl = use_ssl
self.verify = verify
self.endpoint_url = endpoint_url
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
self.boto_config = boto_config
def load(self) -> List[Document]:
"""Load documents."""
try:
import boto3
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
s3 = boto3.resource(
"s3",
region_name=self.region_name,
api_version=self.api_version,
use_ssl=self.use_ssl,
verify=self.verify,
endpoint_url=self.endpoint_url,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
config=self.boto_config,
)
bucket = s3.Bucket(self.bucket)
docs = []
for obj in bucket.objects.filter(Prefix=self.prefix):
# Skip directories
if obj.size == 0 and obj.key.endswith("/"):
continue
loader = S3FileLoader(
self.bucket,
obj.key,
region_name=self.region_name,
api_version=self.api_version,
use_ssl=self.use_ssl,
verify=self.verify,
endpoint_url=self.endpoint_url,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
boto_config=self.boto_config,
)
docs.extend(loader.load())
return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/max_compute.py | from __future__ import annotations
from typing import Any, Iterator, Optional, Sequence
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.max_compute import MaxComputeAPIWrapper
class MaxComputeLoader(BaseLoader):
"""Load from `Alibaba Cloud MaxCompute` table."""
def __init__(
self,
query: str,
api_wrapper: MaxComputeAPIWrapper,
*,
page_content_columns: Optional[Sequence[str]] = None,
metadata_columns: Optional[Sequence[str]] = None,
):
"""Initialize Alibaba Cloud MaxCompute document loader.
Args:
query: SQL query to execute.
api_wrapper: MaxCompute API wrapper.
page_content_columns: The columns to write into the `page_content` of the
Document. If unspecified, all columns will be written to `page_content`.
metadata_columns: The columns to write into the `metadata` of the Document.
If unspecified, all columns not added to `page_content` will be written.
"""
self.query = query
self.api_wrapper = api_wrapper
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
@classmethod
def from_params(
cls,
query: str,
endpoint: str,
project: str,
*,
access_id: Optional[str] = None,
secret_access_key: Optional[str] = None,
**kwargs: Any,
) -> MaxComputeLoader:
"""Convenience constructor that builds the MaxCompute API wrapper from
given parameters.
Args:
query: SQL query to execute.
endpoint: MaxCompute endpoint.
project: A project is a basic organizational unit of MaxCompute, which is
similar to a database.
access_id: MaxCompute access ID. Should be passed in directly or set as the
environment variable `MAX_COMPUTE_ACCESS_ID`.
secret_access_key: MaxCompute secret access key. Should be passed in
directly or set as the environment variable
`MAX_COMPUTE_SECRET_ACCESS_KEY`.
"""
api_wrapper = MaxComputeAPIWrapper.from_params(
endpoint, project, access_id=access_id, secret_access_key=secret_access_key
)
return cls(query, api_wrapper, **kwargs)
def lazy_load(self) -> Iterator[Document]:
for row in self.api_wrapper.query(self.query):
if self.page_content_columns:
page_content_data = {
k: v for k, v in row.items() if k in self.page_content_columns
}
else:
page_content_data = row
page_content = "\n".join(f"{k}: {v}" for k, v in page_content_data.items())
if self.metadata_columns:
metadata = {k: v for k, v in row.items() if k in self.metadata_columns}
else:
metadata = {k: v for k, v in row.items() if k not in page_content_data}
yield Document(page_content=page_content, metadata=metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/scrapingant.py | """ScrapingAnt Web Extractor."""
import logging
from typing import Iterator, List, Optional
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_core.utils import get_from_env
logger = logging.getLogger(__file__)
class ScrapingAntLoader(BaseLoader):
"""Turn an url to LLM accessible markdown with `ScrapingAnt`.
For further details, visit: https://docs.scrapingant.com/python-client
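    Example:
        .. code-block:: python
            # a minimal sketch; assumes scrapingant-client is installed and
            # SCRAPINGANT_API_KEY is set in the environment
            from langchain_community.document_loaders import ScrapingAntLoader
            loader = ScrapingAntLoader(["https://example.com"])
            docs = loader.load()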
"""
def __init__(
self,
urls: List[str],
*,
api_key: Optional[str] = None,
scrape_config: Optional[dict] = None,
continue_on_failure: bool = True,
) -> None:
"""Initialize client.
Args:
urls: List of urls to scrape.
api_key: The ScrapingAnt API key. If not specified must have env var
SCRAPINGANT_API_KEY set.
scrape_config: The scraping config from ScrapingAntClient.markdown_request
            continue_on_failure: Whether to continue if scraping a URL fails.
"""
try:
from scrapingant_client import ScrapingAntClient
except ImportError:
raise ImportError(
"`scrapingant-client` package not found,"
" run `pip install scrapingant-client`"
)
if not urls:
raise ValueError("URLs must be provided.")
api_key = api_key or get_from_env("api_key", "SCRAPINGANT_API_KEY")
self.client = ScrapingAntClient(token=api_key)
self.urls = urls
self.scrape_config = scrape_config
self.continue_on_failure = continue_on_failure
def lazy_load(self) -> Iterator[Document]:
"""Fetch data from ScrapingAnt."""
scrape_config = self.scrape_config if self.scrape_config is not None else {}
for url in self.urls:
try:
result = self.client.markdown_request(url=url, **scrape_config)
yield Document(
page_content=result.markdown,
metadata={"url": result.url},
)
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching data from {url}, exception: {e}")
else:
raise e
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/url_selenium.py | """Loader that uses Selenium to load a page, then uses unstructured to load the html."""
import logging
from typing import TYPE_CHECKING, List, Literal, Optional, Union
if TYPE_CHECKING:
from selenium.webdriver import Chrome, Firefox
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class SeleniumURLLoader(BaseLoader):
"""Load `HTML` pages with `Selenium` and parse with `Unstructured`.
This is useful for loading pages that require javascript to render.
Attributes:
urls (List[str]): List of URLs to load.
continue_on_failure (bool): If True, continue loading other URLs on failure.
browser (str): The browser to use, either 'chrome' or 'firefox'.
binary_location (Optional[str]): The location of the browser binary.
executable_path (Optional[str]): The path to the browser executable.
headless (bool): If True, the browser will run in headless mode.
        arguments (List[str]): List of arguments to pass to the browser.
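    Example:
        .. code-block:: python
            # a minimal sketch; assumes selenium and unstructured are installed
            # and a matching browser driver is available on PATH
            from langchain_community.document_loaders import SeleniumURLLoader
            loader = SeleniumURLLoader(urls=["https://example.com"])
            docs = loader.load()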
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
browser: Literal["chrome", "firefox"] = "chrome",
binary_location: Optional[str] = None,
executable_path: Optional[str] = None,
headless: bool = True,
arguments: List[str] = [],
):
"""Load a list of URLs using Selenium and unstructured."""
try:
import selenium # noqa:F401
except ImportError:
raise ImportError(
"selenium package not found, please install it with "
"`pip install selenium`"
)
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.browser = browser
self.binary_location = binary_location
self.executable_path = executable_path
self.headless = headless
self.arguments = arguments
def _get_driver(self) -> Union["Chrome", "Firefox"]:
"""Create and return a WebDriver instance based on the specified browser.
Raises:
ValueError: If an invalid browser is specified.
Returns:
Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
"""
if self.browser.lower() == "chrome":
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service
chrome_options = ChromeOptions()
for arg in self.arguments:
chrome_options.add_argument(arg)
if self.headless:
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
if self.binary_location is not None:
chrome_options.binary_location = self.binary_location
if self.executable_path is None:
return Chrome(options=chrome_options)
return Chrome(
options=chrome_options,
service=Service(executable_path=self.executable_path),
)
elif self.browser.lower() == "firefox":
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.service import Service
firefox_options = FirefoxOptions()
for arg in self.arguments:
firefox_options.add_argument(arg)
if self.headless:
firefox_options.add_argument("--headless")
if self.binary_location is not None:
firefox_options.binary_location = self.binary_location
if self.executable_path is None:
return Firefox(options=firefox_options)
return Firefox(
options=firefox_options,
service=Service(executable_path=self.executable_path),
)
else:
raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.")
    def _build_metadata(self, url: str, driver: Union["Chrome", "Firefox"]) -> dict:
        """Build metadata based on the contents of the webpage."""
        from selenium.common.exceptions import NoSuchElementException
        from selenium.webdriver.common.by import By
metadata = {
"source": url,
"title": "No title found.",
"description": "No description found.",
"language": "No language found.",
}
if title := driver.title:
metadata["title"] = title
try:
if description := driver.find_element(
By.XPATH, '//meta[@name="description"]'
):
metadata["description"] = (
description.get_attribute("content") or "No description found."
)
except NoSuchElementException:
pass
try:
if html_tag := driver.find_element(By.TAG_NAME, "html"):
metadata["language"] = (
html_tag.get_attribute("lang") or "No language found."
)
except NoSuchElementException:
pass
return metadata
def load(self) -> List[Document]:
"""Load the specified URLs using Selenium and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from unstructured.partition.html import partition_html
docs: List[Document] = list()
driver = self._get_driver()
for url in self.urls:
try:
driver.get(url)
page_content = driver.page_source
elements = partition_html(text=page_content)
text = "\n\n".join([str(el) for el in elements])
metadata = self._build_metadata(url, driver)
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
driver.quit()
return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/mintbase.py | import json
import os
import re
import time
from typing import Iterator, List, Literal, Optional
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class MintbaseDocumentLoader(BaseLoader):
"""Load elements from a blockchain smart contract.
The supported blockchains are: Near mainnet, Near testnet.
If no BlockchainType is specified, the default is Near mainnet.
The Loader uses the Mintbase API to interact with the blockchain.
MB_API_KEY environment variable must be set to use this loader.
The API returns 100 NFTs per request and can be paginated using the
startToken parameter.
If get_all_tokens is set to True, the loader will get all tokens
on the contract. Note that for contracts with a large number of tokens,
this may take a long time (e.g. 10k tokens is 100 requests).
Default value is false for this reason.
The max_execution_time (sec) can be set to limit the execution time
of the loader.
Future versions of this loader can:
- Support additional Mintbase APIs (e.g. getTokens, etc.)
Example:
.. code-block:: python
contractAddress = "nft.yearofchef.near" # Year of chef contract address
blockchainLoader = MintbaseDocumentLoader(
contract_address=contractAddress, blockchain_type="mainnet",api_key="omni-site"
)
""" # noqa: E501
def __init__(
self,
contract_address: str,
*,
blockchain_type: Literal["mainnet", "testnet"],
api_key: str = "",
table: str = "",
select: str = "",
fields: Optional[List[str]] = None,
get_all_tokens: bool = False,
max_execution_time: Optional[int] = None,
):
"""
Args:
contract_address: The address of the smart contract.
            blockchain_type: The blockchain type.
api_key: The Mintbase API key.
table: name of the table to query
select: Conditions for querying
fields: Information to display after query
get_all_tokens: Whether to get all tokens on the contract.
max_execution_time: The maximum execution time (sec).
"""
self.contract_address = contract_address
self.blockchainType = blockchain_type
self.api_key = os.environ.get("MB_API_KEY") or api_key
self.table = "mb_views_nft_tokens" or table
self.select = 'where: {nft_contract_id: {_eq: "contract_address"}}' or select
self.fields = fields or [
"base_uri",
"burned_receipt_id",
"burned_timestamp",
"copies",
"description",
"expires_at",
"extra",
"issued_at",
"last_transfer_receipt_id",
"last_transfer_timestamp",
"media",
"media_hash",
"metadata_content_flag",
"metadata_id",
"mint_memo",
"minted_receipt_id",
"minted_timestamp",
"minter",
"nft_contract_content_flag",
"nft_contract_created_at",
"nft_contract_icon",
"nft_contract_id",
"nft_contract_is_mintbase",
"nft_contract_name",
"nft_contract_owner_id",
"nft_contract_reference",
"nft_contract_spec",
"nft_contract_symbol",
"owner",
"reference",
"reference_blob",
"reference_hash",
"royalties",
"royalties_percent",
"splits",
"starts_at",
"title",
"token_id",
"updated_at",
]
self.get_all_tokens = get_all_tokens
self.max_execution_time = max_execution_time
if not self.api_key:
raise ValueError("Mintbase API key not provided.")
if not re.match(
r"^(([a-z\d]+[\-_])*[a-z\d]+\.)*([a-z\d]+[\-_])*[a-z\d]+$",
self.contract_address,
):
raise ValueError(f"Invalid contract address {self.contract_address}")
def load(self) -> List[Document]:
result = []
start_time = time.time()
while True:
# Define the GraphQL query as a multi-line string
operations_doc = """
query MyQuery {
table(select) {
fields
}
}
"""
# Replace the placeholder with the actual contract address
operations_doc = operations_doc.replace("select", self.select)
operations_doc = operations_doc.replace(
"contract_address", self.contract_address
)
operations_doc = operations_doc.replace("table", self.table)
operations_doc = operations_doc.replace("fields", "\n".join(self.fields))
# Define the headers
headers = {"mb-api-key": self.api_key, "Content-Type": "application/json"}
# Define the POST data
data = {
"query": operations_doc,
"variables": {},
"operationName": "MyQuery",
}
url = f"https://graph.mintbase.xyz/{self.blockchainType}"
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code != 200:
raise ValueError(
f"Request failed with status code {response.status_code}"
)
items = response.json()["data"]["mb_views_nft_tokens"]
if not items:
break
for item in items:
content = str(item)
token_id = item["token_id"]
metadata = {
"source": self.contract_address,
"blockchain": self.blockchainType,
"tokenId": token_id,
}
result.append(Document(page_content=content, metadata=metadata))
# exit after the first API call if get_all_tokens is False
if not self.get_all_tokens:
break
if (
self.max_execution_time is not None
and (time.time() - start_time) > self.max_execution_time
):
raise RuntimeError("Execution time exceeded the allowed time limit.")
if not result:
raise ValueError(
f"No NFTs found for contract address {self.contract_address}"
)
return result
def lazy_load(self) -> Iterator[Document]:
start_time = time.time()
while True:
# Define the GraphQL query as a multi-line string
operations_doc = """
query MyQuery {
table(select) {
fields
}
}
"""
# Replace the placeholder with the actual contract address
operations_doc = operations_doc.replace("select", self.select)
operations_doc = operations_doc.replace(
"contract_address", self.contract_address
)
operations_doc = operations_doc.replace("table", self.table)
operations_doc = operations_doc.replace("fields", "\n".join(self.fields))
# Define the headers
headers = {"mb-api-key": self.api_key, "Content-Type": "application/json"}
# Define the POST data
data = {
"query": operations_doc,
"variables": {},
"operationName": "MyQuery",
}
url = f"https://graph.mintbase.xyz/{self.blockchainType}"
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code != 200:
raise ValueError(
f"Request failed with status code {response.status_code}"
)
items = response.json()["data"]["mb_views_nft_tokens"]
if not items:
break
for item in items:
content = str(item)
tokenId = item["token_id"]
metadata = {
"source": self.contract_address,
"blockchain": self.blockchainType,
"tokenId": tokenId,
}
yield Document(page_content=content, metadata=metadata)
# exit after the first API call if get_all_tokens is False
if not self.get_all_tokens:
break
if (
self.max_execution_time is not None
and (time.time() - start_time) > self.max_execution_time
):
raise RuntimeError("Execution time exceeded the allowed time limit.")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/figma.py | import json
import urllib.request
from typing import Any, List
from langchain_core.documents import Document
from langchain_core.utils import stringify_dict
from langchain_community.document_loaders.base import BaseLoader
class FigmaFileLoader(BaseLoader):
"""Load `Figma` file."""
def __init__(self, access_token: str, ids: str, key: str):
"""Initialize with access token, ids, and key.
Args:
access_token: The access token for the Figma REST API.
ids: The ids of the Figma file.
key: The key for the Figma file
"""
self.access_token = access_token
self.ids = ids
self.key = key
def _construct_figma_api_url(self) -> str:
api_url = "https://api.figma.com/v1/files/%s/nodes?ids=%s" % (
self.key,
self.ids,
)
return api_url
def _get_figma_file(self) -> Any:
"""Get Figma file from Figma REST API."""
headers = {"X-Figma-Token": self.access_token}
request = urllib.request.Request(
self._construct_figma_api_url(), headers=headers
)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data
def load(self) -> List[Document]:
"""Load file"""
data = self._get_figma_file()
text = stringify_dict(data)
metadata = {"source": self._construct_figma_api_url()}
return [Document(page_content=text, metadata=metadata)]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/excel.py | """Loads Microsoft Excel files."""
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredExcelLoader(UnstructuredFileLoader):
"""Load Microsoft Excel files using `Unstructured`.
Like other
Unstructured loaders, UnstructuredExcelLoader can be used in both
"single" and "elements" mode. If you use the loader in "elements"
mode, each sheet in the Excel file will be an Unstructured Table
element. If you use the loader in "single" mode, an
HTML representation of the table will be available in the
"text_as_html" key in the document metadata.
Examples
--------
from langchain_community.document_loaders.excel import UnstructuredExcelLoader
loader = UnstructuredExcelLoader("stanley-cups.xlsx", mode="elements")
docs = loader.load()
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the Microsoft Excel file.
mode: The mode to use when partitioning the file. See unstructured docs
for more info. Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.7")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.xlsx import partition_xlsx
return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/fauna.py | from typing import Iterator, Optional, Sequence
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class FaunaLoader(BaseLoader):
"""Load from `FaunaDB`.
Attributes:
query (str): The FQL query string to execute.
page_content_field (str): The field that contains the content of each page.
secret (str): The secret key for authenticating to FaunaDB.
metadata_fields (Optional[Sequence[str]]):
Optional list of field names to include in metadata.
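    Example:
        .. code-block:: python
            # a minimal sketch; the FQL query, field name and secret are
            # placeholders
            from langchain_community.document_loaders import FaunaLoader
            loader = FaunaLoader(
                query="Item.all()",
                page_content_field="text",
                secret="<fauna-secret-key>",
            )
            docs = loader.load()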
"""
def __init__(
self,
query: str,
page_content_field: str,
secret: str,
metadata_fields: Optional[Sequence[str]] = None,
):
self.query = query
self.page_content_field = page_content_field
self.secret = secret
self.metadata_fields = metadata_fields
def lazy_load(self) -> Iterator[Document]:
try:
from fauna import Page, fql
from fauna.client import Client
from fauna.encoding import QuerySuccess
except ImportError:
raise ImportError(
"Could not import fauna python package. "
"Please install it with `pip install fauna`."
)
# Create Fauna Client
client = Client(secret=self.secret)
# Run FQL Query
response: QuerySuccess = client.query(fql(self.query))
page: Page = response.data
for result in page:
if result is not None:
document_dict = dict(result.items())
page_content = ""
for key, value in document_dict.items():
if key == self.page_content_field:
page_content = value
document: Document = Document(
page_content=page_content,
metadata={"id": result.id, "ts": result.ts},
)
yield document
if page.after is not None:
yield Document(
page_content="Next Page Exists",
metadata={"after": page.after},
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/facebook_chat.py | import datetime
import json
from pathlib import Path
from typing import Iterator, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used.
Args:
row: dictionary containing message information.
"""
sender = row["sender_name"]
text = row["content"]
date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime(
"%Y-%m-%d %H:%M:%S"
)
return f"{sender} on {date}: {text}\n\n"
class FacebookChatLoader(BaseLoader):
"""Load `Facebook Chat` messages directory dump."""
def __init__(self, path: Union[str, Path]):
"""Initialize with a path."""
self.file_path = path
def lazy_load(self) -> Iterator[Document]:
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message.get("content") and isinstance(message["content"], str)
)
metadata = {"source": str(p)}
yield Document(page_content=text, metadata=metadata)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/llmsherpa.py | from pathlib import Path
from typing import Iterator, Union
from urllib.parse import urlparse
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
DEFAULT_API = "https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all"
class LLMSherpaFileLoader(BaseLoader):
"""Load Documents using `LLMSherpa`.
    LLMSherpaFileLoader uses LayoutPDFReader, which is part of the LLMSherpa
    library. This tool is designed to parse PDFs while preserving their layout
    information, which is often lost when using most PDF-to-text parsers.
Examples
--------
from langchain_community.document_loaders.llmsherpa import LLMSherpaFileLoader
loader = LLMSherpaFileLoader(
"example.pdf",
strategy="chunks",
llmsherpa_api_url="http://localhost:5010/api/parseDocument?renderFormat=all",
)
docs = loader.load()
"""
def __init__(
self,
file_path: Union[str, Path],
new_indent_parser: bool = True,
apply_ocr: bool = True,
strategy: str = "chunks",
llmsherpa_api_url: str = DEFAULT_API,
):
"""Initialize with a file path."""
try:
import llmsherpa # noqa:F401
except ImportError:
raise ImportError(
"llmsherpa package not found, please install it with "
"`pip install llmsherpa`"
)
_valid_strategies = ["sections", "chunks", "html", "text"]
if strategy not in _valid_strategies:
raise ValueError(
f"Got {strategy} for `strategy`, "
f"but should be one of `{_valid_strategies}`"
)
# validate llmsherpa url
if not self._is_valid_url(llmsherpa_api_url):
raise ValueError(f"Invalid URL: {llmsherpa_api_url}")
self.url = self._validate_llmsherpa_url(
url=llmsherpa_api_url,
new_indent_parser=new_indent_parser,
apply_ocr=apply_ocr,
)
self.strategy = strategy
self.file_path = str(file_path)
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
@staticmethod
def _validate_llmsherpa_url(
url: str, new_indent_parser: bool = True, apply_ocr: bool = True
) -> str:
"""Check if the llmsherpa url is valid."""
parsed = urlparse(url)
valid_url = url
if ("/api/parseDocument" not in parsed.path) and (
"/api/document/developer/parseDocument" not in parsed.path
):
raise ValueError(f"Invalid LLMSherpa URL: {url}")
if "renderFormat=all" not in parsed.query:
valid_url = valid_url + "?renderFormat=all"
if new_indent_parser and "useNewIndentParser=true" not in parsed.query:
valid_url = valid_url + "&useNewIndentParser=true"
if apply_ocr and "applyOcr=yes" not in parsed.query:
valid_url = valid_url + "&applyOcr=yes"
return valid_url
def lazy_load(
self,
) -> Iterator[Document]:
"""Load file."""
from llmsherpa.readers import LayoutPDFReader
docs_reader = LayoutPDFReader(self.url)
doc = docs_reader.read_pdf(self.file_path)
if self.strategy == "sections":
yield from [
Document(
page_content=section.to_text(include_children=True, recurse=True),
metadata={
"source": self.file_path,
"section_number": section_num,
"section_title": section.title,
},
)
for section_num, section in enumerate(doc.sections())
]
if self.strategy == "chunks":
yield from [
Document(
page_content=chunk.to_context_text(),
metadata={
"source": self.file_path,
"chunk_number": chunk_num,
"chunk_type": chunk.tag,
},
)
for chunk_num, chunk in enumerate(doc.chunks())
]
if self.strategy == "html":
yield from [
Document(
page_content=doc.to_html(),
metadata={
"source": self.file_path,
},
)
]
if self.strategy == "text":
yield from [
Document(
page_content=doc.to_text(),
metadata={
"source": self.file_path,
},
)
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/pdf.py | import json
import logging
import os
import re
import tempfile
import time
from abc import ABC
from io import StringIO
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Union,
)
from urllib.parse import urlparse
import requests
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.dedoc import DedocBaseLoader
from langchain_community.document_loaders.parsers.pdf import (
AmazonTextractPDFParser,
DocumentIntelligenceParser,
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
)
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
if TYPE_CHECKING:
from textractor.data.text_linearization_config import TextLinearizationConfig
logger = logging.getLogger(__file__)
class UnstructuredPDFLoader(UnstructuredFileLoader):
"""Load `PDF` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredPDFLoader
loader = UnstructuredPDFLoader(
"example.pdf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-pdf
"""
def _get_elements(self) -> List:
from unstructured.partition.pdf import partition_pdf
return partition_pdf(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
class BasePDFLoader(BaseLoader, ABC):
"""Base Loader class for `PDF` files.
If the file is a web path, it will download it to a temporary file, use it, then
clean up the temporary file after completion.
"""
def __init__(self, file_path: Union[str, Path], *, headers: Optional[Dict] = None):
"""Initialize with a file path.
Args:
file_path: Either a local, S3 or web path to a PDF file.
headers: Headers to use for GET request to download a file from a web path.
"""
self.file_path = str(file_path)
self.web_path = None
self.headers = headers
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path or S3, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
self.temp_dir = tempfile.TemporaryDirectory()
_, suffix = os.path.splitext(self.file_path)
if self._is_s3_presigned_url(self.file_path):
suffix = urlparse(self.file_path).path.split("/")[-1]
temp_pdf = os.path.join(self.temp_dir.name, f"tmp{suffix}")
self.web_path = self.file_path
if not self._is_s3_url(self.file_path):
r = requests.get(self.file_path, headers=self.headers)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
with open(temp_pdf, mode="wb") as f:
f.write(r.content)
self.file_path = str(temp_pdf)
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_dir"):
self.temp_dir.cleanup()
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
@staticmethod
def _is_s3_url(url: str) -> bool:
"""check if the url is S3"""
try:
result = urlparse(url)
if result.scheme == "s3" and result.netloc:
return True
return False
except ValueError:
return False
@staticmethod
def _is_s3_presigned_url(url: str) -> bool:
"""Check if the url is a presigned S3 url."""
try:
result = urlparse(url)
return bool(re.search(r"\.s3\.amazonaws\.com$", result.netloc))
except ValueError:
return False
@property
def source(self) -> str:
return self.web_path if self.web_path is not None else self.file_path
class OnlinePDFLoader(BasePDFLoader):
"""Load online `PDF`."""
def load(self) -> List[Document]:
"""Load documents."""
loader = UnstructuredPDFLoader(str(self.file_path))
return loader.load()
class PyPDFLoader(BasePDFLoader):
"""
PyPDFLoader document loader integration
Setup:
Install ``langchain-community``.
.. code-block:: bash
pip install -U langchain-community
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(
file_path = "./example_data/layout-parser-paper.pdf",
password = "my-password",
extract_images = True,
# headers = None
# extraction_mode = "plain",
# extraction_kwargs = None,
)
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
LayoutParser : A Unified Toolkit for Deep
Learning Based Document Image Analysis
Zejiang Shen1( ), R
{'source': './example_data/layout-parser-paper.pdf', 'page': 0}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
LayoutParser : A Unified Toolkit for Deep
Learning Based Document Image Analysis
Zejiang Shen1( ), R
{'source': './example_data/layout-parser-paper.pdf', 'page': 0}
""" # noqa: E501
def __init__(
self,
file_path: str,
password: Optional[Union[str, bytes]] = None,
headers: Optional[Dict] = None,
extract_images: bool = False,
*,
extraction_mode: str = "plain",
extraction_kwargs: Optional[Dict] = None,
) -> None:
"""Initialize with a file path."""
try:
import pypdf # noqa:F401
except ImportError:
raise ImportError(
"pypdf package not found, please install it with `pip install pypdf`"
)
super().__init__(file_path, headers=headers)
self.parser = PyPDFParser(
password=password,
extract_images=extract_images,
extraction_mode=extraction_mode,
extraction_kwargs=extraction_kwargs,
)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
if self.web_path:
blob = Blob.from_data(open(self.file_path, "rb").read(), path=self.web_path) # type: ignore[attr-defined]
else:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
yield from self.parser.parse(blob)
class PyPDFium2Loader(BasePDFLoader):
"""Load `PDF` using `pypdfium2` and chunks at character level."""
def __init__(
self,
file_path: str,
*,
headers: Optional[Dict] = None,
extract_images: bool = False,
):
"""Initialize with a file path."""
super().__init__(file_path, headers=headers)
self.parser = PyPDFium2Parser(extract_images=extract_images)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
if self.web_path:
blob = Blob.from_data(open(self.file_path, "rb").read(), path=self.web_path) # type: ignore[attr-defined]
else:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
yield from self.parser.parse(blob)
class PyPDFDirectoryLoader(BaseLoader):
"""Load a directory with `PDF` files using `pypdf` and chunks at character level.
Loader also stores page numbers in metadata.
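    Example:
        .. code-block:: python
            # a minimal sketch; assumes pypdf is installed and a local
            # "./pdfs" directory containing PDF files
            from langchain_community.document_loaders import PyPDFDirectoryLoader
            loader = PyPDFDirectoryLoader("./pdfs", recursive=True)
            docs = loader.load()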
"""
def __init__(
self,
path: Union[str, Path],
glob: str = "**/[!.]*.pdf",
silent_errors: bool = False,
load_hidden: bool = False,
recursive: bool = False,
extract_images: bool = False,
):
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.recursive = recursive
self.silent_errors = silent_errors
self.extract_images = extract_images
@staticmethod
def _is_visible(path: Path) -> bool:
return not any(part.startswith(".") for part in path.parts)
def load(self) -> List[Document]:
p = Path(self.path)
docs = []
items = p.rglob(self.glob) if self.recursive else p.glob(self.glob)
for i in items:
if i.is_file():
if self._is_visible(i.relative_to(p)) or self.load_hidden:
try:
loader = PyPDFLoader(str(i), extract_images=self.extract_images)
sub_docs = loader.load()
for doc in sub_docs:
doc.metadata["source"] = str(i)
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(e)
else:
raise e
return docs
class PDFMinerLoader(BasePDFLoader):
"""Load `PDF` files using `PDFMiner`."""
def __init__(
self,
file_path: str,
*,
headers: Optional[Dict] = None,
extract_images: bool = False,
concatenate_pages: bool = True,
) -> None:
"""Initialize with file path.
Args:
extract_images: Whether to extract images from PDF.
            concatenate_pages: If True, concatenate all PDF pages into a single
                document. Otherwise, return one document per page.
"""
try:
from pdfminer.high_level import extract_text # noqa:F401
except ImportError:
raise ImportError(
"`pdfminer` package not found, please install it with "
"`pip install pdfminer.six`"
)
super().__init__(file_path, headers=headers)
self.parser = PDFMinerParser(
extract_images=extract_images, concatenate_pages=concatenate_pages
)
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load documents."""
if self.web_path:
blob = Blob.from_data(open(self.file_path, "rb").read(), path=self.web_path) # type: ignore[attr-defined]
else:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
yield from self.parser.parse(blob)
class PDFMinerPDFasHTMLLoader(BasePDFLoader):
"""Load `PDF` files as HTML content using `PDFMiner`."""
def __init__(self, file_path: str, *, headers: Optional[Dict] = None):
"""Initialize with a file path."""
try:
from pdfminer.high_level import extract_text_to_fp # noqa:F401
except ImportError:
raise ImportError(
"`pdfminer` package not found, please install it with "
"`pip install pdfminer.six`"
)
super().__init__(file_path, headers=headers)
def lazy_load(self) -> Iterator[Document]:
"""Load file."""
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams
from pdfminer.utils import open_filename
output_string = StringIO()
with open_filename(self.file_path, "rb") as fp:
extract_text_to_fp(
fp,
output_string,
codec="",
laparams=LAParams(),
output_type="html",
)
metadata = {
"source": self.file_path if self.web_path is None else self.web_path
}
yield Document(page_content=output_string.getvalue(), metadata=metadata)
class PyMuPDFLoader(BasePDFLoader):
"""Load `PDF` files using `PyMuPDF`."""
def __init__(
self,
file_path: str,
*,
headers: Optional[Dict] = None,
extract_images: bool = False,
**kwargs: Any,
) -> None:
"""Initialize with a file path."""
try:
import fitz # noqa:F401
except ImportError:
raise ImportError(
"`PyMuPDF` package not found, please install it with "
"`pip install pymupdf`"
)
super().__init__(file_path, headers=headers)
self.extract_images = extract_images
self.text_kwargs = kwargs
def _lazy_load(self, **kwargs: Any) -> Iterator[Document]:
if kwargs:
logger.warning(
f"Received runtime arguments {kwargs}. Passing runtime args to `load`"
f" is deprecated. Please pass arguments during initialization instead."
)
text_kwargs = {**self.text_kwargs, **kwargs}
parser = PyMuPDFParser(
text_kwargs=text_kwargs, extract_images=self.extract_images
)
if self.web_path:
blob = Blob.from_data(open(self.file_path, "rb").read(), path=self.web_path) # type: ignore[attr-defined]
else:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
yield from parser.lazy_parse(blob)
def load(self, **kwargs: Any) -> List[Document]:
return list(self._lazy_load(**kwargs))
def lazy_load(self) -> Iterator[Document]:
yield from self._lazy_load()
# MathpixPDFLoader implementation taken largely from Daniel Gross's:
# https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21
class MathpixPDFLoader(BasePDFLoader):
"""Load `PDF` files using `Mathpix` service."""
def __init__(
self,
file_path: str,
processed_file_format: str = "md",
max_wait_time_seconds: int = 500,
should_clean_pdf: bool = False,
extra_request_data: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Initialize with a file path.
Args:
file_path: path of the file to load.
processed_file_format: format of the processed file. Default is "md".
max_wait_time_seconds: maximum time to wait for a response from
the server. Default is 500.
should_clean_pdf: whether to clean the PDF contents. Default is False.
extra_request_data: additional request data.
**kwargs: additional keyword arguments.
"""
self.mathpix_api_key = get_from_dict_or_env(
kwargs, "mathpix_api_key", "MATHPIX_API_KEY"
)
self.mathpix_api_id = get_from_dict_or_env(
kwargs, "mathpix_api_id", "MATHPIX_API_ID"
)
# The base class isn't expecting these and doesn't collect **kwargs
kwargs.pop("mathpix_api_key", None)
kwargs.pop("mathpix_api_id", None)
super().__init__(file_path, **kwargs)
self.processed_file_format = processed_file_format
self.extra_request_data = (
extra_request_data if extra_request_data is not None else {}
)
self.max_wait_time_seconds = max_wait_time_seconds
self.should_clean_pdf = should_clean_pdf
@property
def _mathpix_headers(self) -> Dict[str, str]:
return {"app_id": self.mathpix_api_id, "app_key": self.mathpix_api_key}
@property
def url(self) -> str:
return "https://api.mathpix.com/v3/pdf"
@property
def data(self) -> dict:
options = {
"conversion_formats": {self.processed_file_format: True},
**self.extra_request_data,
}
return {"options_json": json.dumps(options)}
def send_pdf(self) -> str:
with open(self.file_path, "rb") as f:
files = {"file": f}
response = requests.post(
self.url, headers=self._mathpix_headers, files=files, data=self.data
)
response_data = response.json()
if "error" in response_data:
raise ValueError(f"Mathpix request failed: {response_data['error']}")
if "pdf_id" in response_data:
pdf_id = response_data["pdf_id"]
return pdf_id
else:
raise ValueError("Unable to send PDF to Mathpix.")
def wait_for_processing(self, pdf_id: str) -> None:
"""Wait for processing to complete.
Args:
pdf_id: a PDF id.
Returns: None
"""
url = self.url + "/" + pdf_id
for _ in range(0, self.max_wait_time_seconds, 5):
response = requests.get(url, headers=self._mathpix_headers)
response_data = response.json()
# This indicates an error with the request (e.g. auth problems)
error = response_data.get("error", None)
error_info = response_data.get("error_info", None)
if error is not None:
error_msg = f"Unable to retrieve PDF from Mathpix: {error}"
if error_info is not None:
error_msg += f" ({error_info['id']})"
raise ValueError(error_msg)
status = response_data.get("status", None)
if status == "completed":
return
elif status == "error":
# This indicates an error with the PDF processing
raise ValueError("Unable to retrieve PDF from Mathpix")
else:
print(f"Status: {status}, waiting for processing to complete") # noqa: T201
time.sleep(5)
raise TimeoutError(
f"Mathpix processing did not complete within {self.max_wait_time_seconds} seconds."
)
def get_processed_pdf(self, pdf_id: str) -> str:
self.wait_for_processing(pdf_id)
url = f"{self.url}/{pdf_id}.{self.processed_file_format}"
response = requests.get(url, headers=self._mathpix_headers)
return response.content.decode("utf-8")
@staticmethod
def clean_pdf(contents: str) -> str:
"""Clean the PDF file.
Args:
contents: a PDF file contents.
Returns:
"""
contents = "\n".join(
[line for line in contents.split("\n") if not line.startswith("![]")]
)
# replace \section{Title} with # Title
contents = contents.replace("\\section{", "# ").replace("}", "")
# replace the "\" slash that Mathpix adds to escape $, %, (, etc.
contents = (
contents.replace(r"\$", "$")
.replace(r"\%", "%")
.replace(r"\(", "(")
.replace(r"\)", ")")
)
return contents
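# Illustration of clean_pdf on made-up input (not from the library's tests):
#
#     raw = "![](fig.png)\n\\section{Intro}\nCost: \\$5"
#     MathpixPDFLoader.clean_pdf(raw)
#     # -> "# Intro\nCost: $5"  (image line dropped, \section -> heading,
#     #     escaped characters unescaped)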
def load(self) -> List[Document]:
pdf_id = self.send_pdf()
contents = self.get_processed_pdf(pdf_id)
if self.should_clean_pdf:
contents = self.clean_pdf(contents)
metadata = {"source": self.source, "file_path": self.source, "pdf_id": pdf_id}
return [Document(page_content=contents, metadata=metadata)]
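# Usage sketch (assumes MATHPIX_API_ID and MATHPIX_API_KEY are set in the
# environment or passed as kwargs; the file name is hypothetical):
#
#     loader = MathpixPDFLoader("example.pdf", should_clean_pdf=True)
#     docs = loader.load()  # one Document containing the processed markdown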
class PDFPlumberLoader(BasePDFLoader):
"""Load `PDF` files using `pdfplumber`."""
def __init__(
self,
file_path: str,
text_kwargs: Optional[Mapping[str, Any]] = None,
dedupe: bool = False,
headers: Optional[Dict] = None,
extract_images: bool = False,
) -> None:
"""Initialize with a file path."""
try:
import pdfplumber # noqa:F401
except ImportError:
raise ImportError(
"pdfplumber package not found, please install it with "
"`pip install pdfplumber`"
)
super().__init__(file_path, headers=headers)
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
self.extract_images = extract_images
def load(self) -> List[Document]:
"""Load file."""
parser = PDFPlumberParser(
text_kwargs=self.text_kwargs,
dedupe=self.dedupe,
extract_images=self.extract_images,
)
if self.web_path:
with open(self.file_path, "rb") as f:
blob = Blob.from_data(f.read(), path=self.web_path)  # type: ignore[attr-defined]
else:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
return parser.parse(blob)
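# Usage sketch (hypothetical path; dedupe=True drops duplicated characters
# that pdfplumber occasionally extracts from overlapping glyphs):
#
#     loader = PDFPlumberLoader("example.pdf", dedupe=True)
#     docs = loader.load()  # one Document per page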
class AmazonTextractPDFLoader(BasePDFLoader):
"""Load `PDF` files from a local file system, HTTP or S3.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Amazon Textract service.
Example:
.. code-block:: python
from langchain_community.document_loaders import AmazonTextractPDFLoader
loader = AmazonTextractPDFLoader(
file_path="s3://pdfs/myfile.pdf"
)
document = loader.load()
"""
def __init__(
self,
file_path: str,
textract_features: Optional[Sequence[str]] = None,
client: Optional[Any] = None,
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
endpoint_url: Optional[str] = None,
headers: Optional[Dict] = None,
*,
linearization_config: Optional["TextLinearizationConfig"] = None,
) -> None:
"""Initialize the loader.
Args:
file_path: A file, url or s3 path for input file
textract_features: Features to be used for extraction, each feature
should be passed as a str that conforms to the enum
`Textract_Features`, see `amazon-textract-caller` pkg
client: boto3 textract client (Optional)
credentials_profile_name: AWS profile name, if not default (Optional)
region_name: AWS region, e.g. us-east-1 (Optional)
endpoint_url: endpoint url for the textract service (Optional)
linearization_config: Config to be used for linearization of the output
should be an instance of TextLinearizationConfig from
the `textractor` pkg
"""
super().__init__(file_path, headers=headers)
try:
import textractcaller as tc
except ImportError:
raise ImportError(
"Could not import amazon-textract-caller python package. "
"Please install it with `pip install amazon-textract-caller`."
)
if textract_features:
features = [tc.Textract_Features[x] for x in textract_features]
else:
features = []
if credentials_profile_name or region_name or endpoint_url:
try:
import boto3
if credentials_profile_name is not None:
session = boto3.Session(profile_name=credentials_profile_name)
else:
# use default credentials
session = boto3.Session()
client_params = {}
if region_name:
client_params["region_name"] = region_name
if endpoint_url:
client_params["endpoint_url"] = endpoint_url
client = session.client("textract", **client_params)
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
f"profile name are valid. {e}"
) from e
self.parser = AmazonTextractPDFParser(
textract_features=features,
client=client,
linearization_config=linearization_config,
)
def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load documents"""
# the self.file_path is local, but the blob has to include
# the S3 location if the file originated from S3 for multi-page documents
# raises ValueError when multi-page and not on S3"""
if self.web_path and self._is_s3_url(self.web_path):
blob = Blob(path=self.web_path) # type: ignore[call-arg] # type: ignore[misc]
else:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
if AmazonTextractPDFLoader._get_number_of_pages(blob) > 1:
raise ValueError(
f"the file {blob.path} is a multi-page document, "
"but not stored on S3. "
"Textract requires multi-page documents to be on S3."
)
yield from self.parser.parse(blob)
@staticmethod
def _get_number_of_pages(blob: Blob) -> int: # type: ignore[valid-type]
try:
import pypdf
from PIL import Image, ImageSequence
except ImportError:
raise ImportError(
"Could not import pypdf or Pilloe python package. "
"Please install it with `pip install pypdf Pillow`."
)
if blob.mimetype == "application/pdf": # type: ignore[attr-defined]
with blob.as_bytes_io() as input_pdf_file: # type: ignore[attr-defined]
pdf_reader = pypdf.PdfReader(input_pdf_file)
return len(pdf_reader.pages)
elif blob.mimetype == "image/tiff": # type: ignore[attr-defined]
num_pages = 0
img = Image.open(blob.as_bytes()) # type: ignore[attr-defined]
for _, _ in enumerate(ImageSequence.Iterator(img)):
num_pages += 1
return num_pages
elif blob.mimetype in ["image/png", "image/jpeg"]: # type: ignore[attr-defined]
return 1
else:
raise ValueError(f"unsupported mime type: {blob.mimetype}") # type: ignore[attr-defined]
class DedocPDFLoader(DedocBaseLoader):
"""
DedocPDFLoader document loader integration to load PDF files using `dedoc`.
The file loader can automatically detect the correctness of a textual layer in the
PDF document.
Note that the `__init__` method supports parameters that differ from those of
DedocBaseLoader.
Setup:
Install ``dedoc`` package.
.. code-block:: bash
pip install -U dedoc
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import DedocPDFLoader
loader = DedocPDFLoader(
file_path="example.pdf",
# split=...,
# with_tables=...,
# pdf_with_text_layer=...,
# pages=...,
# ...
)
Load:
.. code-block:: python
docs = loader.load()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Some text
{
'file_name': 'example.pdf',
'file_type': 'application/pdf',
# ...
}
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Some text
{
'file_name': 'example.pdf',
'file_type': 'application/pdf',
# ...
}
Parameters used for document parsing via `dedoc`
(https://dedoc.readthedocs.io/en/latest/parameters/pdf_handling.html):
with_attachments: enable attached files extraction
recursion_deep_attachments: recursion level for attached files extraction,
works only when with_attachments==True
pdf_with_text_layer: type of handler for parsing, available options
["true", "false", "tabby", "auto", "auto_tabby" (default)]
language: language of the document for PDF without a textual layer,
available options ["eng", "rus", "rus+eng" (default)], the list of
languages can be extended, please see
https://dedoc.readthedocs.io/en/latest/tutorials/add_new_language.html
pages: page slice to define the reading range for parsing
is_one_column_document: detect number of columns for PDF without a textual
layer, available options ["true", "false", "auto" (default)]
document_orientation: fix document orientation (90, 180, 270 degrees) for PDF
without a textual layer, available options ["auto" (default), "no_change"]
need_header_footer_analysis: remove headers and footers from the output result
need_binarization: clean pages background (binarize) for PDF without a textual
layer
need_pdf_table_analysis: parse tables for PDF without a textual layer
"""
def _make_config(self) -> dict:
from dedoc.utils.langchain import make_manager_pdf_config
return make_manager_pdf_config(
file_path=self.file_path,
parsing_params=self.parsing_parameters,
split=self.split,
)
class DocumentIntelligenceLoader(BasePDFLoader):
"""Load a PDF with Azure Document Intelligence"""
def __init__(
self,
file_path: str,
client: Any,
model: str = "prebuilt-document",
headers: Optional[Dict] = None,
) -> None:
"""
Initialize the object for file processing with Azure Document Intelligence
(formerly Form Recognizer).
This constructor initializes a DocumentIntelligenceParser object to be used
for parsing files using the Azure Document Intelligence API. The load method
generates a Document node including metadata (source blob and page number)
for each page.
Parameters:
-----------
file_path : str
The path to the file that needs to be parsed.
client: Any
A DocumentAnalysisClient to perform the analysis of the blob
model : str
The model name or ID to be used for form recognition in Azure.
Examples:
---------
>>> obj = DocumentIntelligenceLoader(
... file_path="path/to/file",
... client=client,
... model="prebuilt-document"
... )
"""
self.parser = DocumentIntelligenceParser(client=client, model=model)
super().__init__(file_path, headers=headers)
def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
yield from self.parser.parse(blob)
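# Usage sketch (endpoint and key are placeholders; DocumentAnalysisClient is
# provided by the azure-ai-formrecognizer package):
#
#     from azure.ai.formrecognizer import DocumentAnalysisClient
#     from azure.core.credentials import AzureKeyCredential
#
#     client = DocumentAnalysisClient(
#         endpoint="<endpoint>", credential=AzureKeyCredential("<key>")
#     )
#     loader = DocumentIntelligenceLoader("example.pdf", client=client)
#     docs = loader.load()  # one Document per page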
class ZeroxPDFLoader(BasePDFLoader):
"""
Document loader utilizing Zerox library:
https://github.com/getomni-ai/zerox
Zerox converts a PDF document to a series of images (page-wise) and
uses a vision-capable LLM to generate a Markdown representation.
Zerox relies on async operations. Therefore, when using this loader
inside a Jupyter notebook (or any environment with a running event loop)
you will need to:
```python
import nest_asyncio
nest_asyncio.apply()
```
"""
def __init__(
self,
file_path: Union[str, Path],
model: str = "gpt-4o-mini",
**zerox_kwargs: Any,
) -> None:
"""
Initialize the parser with arguments to be passed to the zerox function.
Make sure to set necessary environment variables such as API key, endpoint, etc.
Check the zerox documentation for the list of necessary environment variables for
any given model.
Args:
file_path:
Path or URL of the PDF file.
model:
Vision-capable model to use. Defaults to "gpt-4o-mini".
Hosted models are passed in the format "<provider>/<model>".
Examples: "azure/gpt-4o-mini", "vertex_ai/gemini-1.5-flash-001"
See more details in the zerox documentation.
**zerox_kwargs:
Arguments specific to the zerox function.
See the detailed list of arguments in the zerox repository:
https://github.com/getomni-ai/zerox/blob/main/py_zerox/pyzerox/core/zerox.py#L25
"""  # noqa: E501
super().__init__(file_path=file_path)
self.zerox_kwargs = zerox_kwargs
self.model = model
def lazy_load(self) -> Iterator[Document]:
"""
Load documents from a PDF using the zerox library:
https://github.com/getomni-ai/zerox
Returns:
Iterator[Document]: An iterator over parsed Document instances.
"""
import asyncio
from pyzerox import zerox
# Directly call asyncio.run to execute zerox synchronously
zerox_output = asyncio.run(
zerox(file_path=self.file_path, model=self.model, **self.zerox_kwargs)
)
# Convert zerox output to Document instances and yield them
if len(zerox_output.pages) > 0:
num_pages = zerox_output.pages[-1].page
for page in zerox_output.pages:
yield Document(
page_content=page.content,
metadata={
"source": self.source,
"page": page.page,
"num_pages": num_pages,
},
)
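# Usage sketch (the default model requires an OPENAI_API_KEY; inside Jupyter,
# apply nest_asyncio first, as the class docstring notes):
#
#     # import nest_asyncio; nest_asyncio.apply()
#     loader = ZeroxPDFLoader("example.pdf", model="gpt-4o-mini")
#     docs = loader.load()  # one markdown Document per page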
# Legacy: only for backwards compatibility. Use PyPDFLoader instead
PagedPDFSplitter = PyPDFLoader
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/blackboard.py | import contextlib
import re
from pathlib import Path
from typing import Any, List, Optional, Tuple
from urllib.parse import unquote
from langchain_core.documents import Document
from langchain_community.document_loaders.directory import DirectoryLoader
from langchain_community.document_loaders.pdf import PyPDFLoader
from langchain_community.document_loaders.web_base import WebBaseLoader
class BlackboardLoader(WebBaseLoader):
"""Load a `Blackboard` course.
This loader is not compatible with all Blackboard courses. It is only
compatible with courses that use the new Blackboard interface.
To use this loader, you must have the BbRouter cookie. You can get this
cookie by logging into the course and then copying the value of the
BbRouter cookie from the browser's developer tools.
Example:
.. code-block:: python
from langchain_community.document_loaders import BlackboardLoader
loader = BlackboardLoader(
blackboard_course_url="https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1",
bbrouter="expires:12345...",
)
documents = loader.load()
"""
def __init__(
self,
blackboard_course_url: str,
bbrouter: str,
load_all_recursively: bool = True,
basic_auth: Optional[Tuple[str, str]] = None,
cookies: Optional[dict] = None,
continue_on_failure: bool = False,
show_progress: bool = True,
):
"""Initialize with blackboard course url.
The BbRouter cookie is required for most blackboard courses.
Args:
blackboard_course_url: Blackboard course url.
bbrouter: BbRouter cookie.
load_all_recursively: If True, load all documents recursively.
basic_auth: Basic auth credentials.
cookies: Cookies.
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
show_progress: whether to show a progress bar while loading. Default: True
Raises:
ValueError: If blackboard course url is invalid.
"""
super().__init__(
web_paths=(blackboard_course_url,),
continue_on_failure=continue_on_failure,
show_progress=show_progress,
)
# Get base url
try:
self.base_url = blackboard_course_url.split("/webapps/blackboard")[0]
except IndexError:
raise IndexError(
"Invalid blackboard course url. "
"Please provide a url that starts with "
"https://<blackboard_url>/webapps/blackboard"
)
if basic_auth is not None:
self.session.auth = basic_auth
# Combine cookies
if cookies is None:
cookies = {}
cookies.update({"BbRouter": bbrouter})
self.session.cookies.update(cookies)
self.load_all_recursively = load_all_recursively
self.check_bs4()
def check_bs4(self) -> None:
"""Check if BeautifulSoup4 is installed.
Raises:
ImportError: If BeautifulSoup4 is not installed.
"""
try:
import bs4 # noqa: F401
except ImportError:
raise ImportError(
"BeautifulSoup4 is required for BlackboardLoader. "
"Please install it with `pip install beautifulsoup4`."
)
def load(self) -> List[Document]:
"""Load data into Document objects.
Returns:
List of Documents.
"""
if self.load_all_recursively:
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
relative_paths = self._get_paths(soup_info)
documents = []
for path in relative_paths:
url = self.base_url + path
print(f"Fetching documents from {url}") # noqa: T201
soup_info = self._scrape(url)
with contextlib.suppress(ValueError):
documents.extend(self._get_documents(soup_info))
return documents
else:
print(f"Fetching documents from {self.web_path}") # noqa: T201
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
return self._get_documents(soup_info)
def _get_folder_path(self, soup: Any) -> str:
"""Get the folder path to save the Documents in.
Args:
soup: BeautifulSoup4 soup object.
Returns:
Folder path.
"""
# Get the course name
course_name = soup.find("span", {"id": "crumb_1"})
if course_name is None:
raise ValueError("No course name found.")
course_name = course_name.text.strip()
# Prepare the folder path
course_name_clean = (
unquote(course_name)
.replace(" ", "_")
.replace("/", "_")
.replace(":", "_")
.replace(",", "_")
.replace("?", "_")
.replace("'", "_")
.replace("!", "_")
.replace('"', "_")
)
# Get the folder path
folder_path = Path(".") / course_name_clean
return str(folder_path)
def _get_documents(self, soup: Any) -> List[Document]:
"""Fetch content from page and return Documents.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of documents.
"""
attachments = self._get_attachments(soup)
self._download_attachments(attachments)
documents = self._load_documents()
return documents
def _get_attachments(self, soup: Any) -> List[str]:
"""Get all attachments from a page.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of attachments.
"""
from bs4 import BeautifulSoup, Tag
# Get content list
content_list: BeautifulSoup
content_list = soup.find("ul", {"class": "contentList"})
if content_list is None:
raise ValueError("No content list found.")
# Get all attachments
attachments = []
attachment: Tag
for attachment in content_list.find_all("ul", {"class": "attachments"}):
link: Tag
for link in attachment.find_all("a"):
href = link.get("href")
# Only add if href is not None and does not start with #
if href is not None and not href.startswith("#"):
attachments.append(href)
return attachments
def _download_attachments(self, attachments: List[str]) -> None:
"""Download all attachments.
Args:
attachments: List of attachments.
"""
# Make sure the folder exists
Path(self.folder_path).mkdir(parents=True, exist_ok=True)
# Download all attachments
for attachment in attachments:
self.download(attachment)
def _load_documents(self) -> List[Document]:
"""Load all documents in the folder.
Returns:
List of documents.
"""
# Create the document loader
loader = DirectoryLoader(
path=self.folder_path,
glob="*.pdf",
loader_cls=PyPDFLoader, # type: ignore
)
# Load the documents
documents = loader.load()
# Return all documents
return documents
def _get_paths(self, soup: Any) -> List[str]:
"""Get all relative paths in the navbar."""
relative_paths = []
course_menu = soup.find("ul", {"class": "courseMenu"})
if course_menu is None:
raise ValueError("No course menu found.")
for link in course_menu.find_all("a"):
href = link.get("href")
if href is not None and href.startswith("/"):
relative_paths.append(href)
return relative_paths
def download(self, path: str) -> None:
"""Download a file from an url.
Args:
path: Path to the file.
"""
# Get the file content
response = self.session.get(self.base_url + path, allow_redirects=True)
# Get the filename
filename = self.parse_filename(response.url)
# Write the file to disk
with open(Path(self.folder_path) / filename, "wb") as f:
f.write(response.content)
def parse_filename(self, url: str) -> str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
"""
if (url_path := Path(url)) and url_path.suffix == ".pdf":
return url_path.name
else:
return self._parse_filename_from_url(url)
def _parse_filename_from_url(self, url: str) -> str:
"""Parse the filename from an url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
Raises:
ValueError: If the filename could not be parsed.
"""
filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url)
if filename_matches:
filename = filename_matches.group(1)
else:
raise ValueError(f"Could not parse filename from {url}")
if ".pdf" not in filename:
raise ValueError(f"Incorrect file type: {filename}")
filename = filename.split(".pdf")[0] + ".pdf"
filename = unquote(filename)
filename = filename.replace("%20", " ")
return filename
if __name__ == "__main__":
loader = BlackboardLoader(
"https://<YOUR BLACKBOARD URL"
" HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID"
" HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset",
"<YOUR BBROUTER COOKIE HERE>",
load_all_recursively=True,
)
documents = loader.load()
print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}") # noqa: T201
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/azlyrics.py | from typing import List
from langchain_core.documents import Document
from langchain_community.document_loaders.web_base import WebBaseLoader
class AZLyricsLoader(WebBaseLoader):
"""Load `AZLyrics` webpages."""
def load(self) -> List[Document]:
"""Load webpages into Documents."""
soup = self.scrape()
title = soup.title.text
lyrics = soup.find_all("div", {"class": ""})[2].text
text = title + lyrics
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]
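# Usage sketch (hypothetical song URL):
#
#     loader = AZLyricsLoader("https://www.azlyrics.com/lyrics/artist/song.html")
#     docs = loader.load()  # one Document: page title followed by the lyrics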
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/needle.py | from typing import Dict, Iterator, List, Optional
from langchain_core.document_loaders.base import BaseLoader
from langchain_core.documents import Document
class NeedleLoader(BaseLoader):
"""
NeedleLoader is a document loader for managing documents stored in a collection.
Setup:
Install the `needle-python` library and set your Needle API key.
.. code-block:: bash
pip install needle-python
export NEEDLE_API_KEY="your-api-key"
Key init args:
- `needle_api_key` (Optional[str]): API key for authenticating with Needle.
- `collection_id` (str): Needle collection to load documents from.
Usage:
.. code-block:: python
from langchain_community.document_loaders.needle import NeedleLoader
loader = NeedleLoader(
needle_api_key="your-api-key",
collection_id="your-collection-id"
)
# Load documents
documents = loader.load()
for doc in documents:
print(doc.metadata)
# Lazy load documents
for doc in loader.lazy_load():
print(doc.metadata)
"""
def __init__(
self,
needle_api_key: Optional[str] = None,
collection_id: Optional[str] = None,
) -> None:
"""
Initializes the NeedleLoader with API key and collection ID.
Args:
needle_api_key (Optional[str]): API key for authenticating with Needle.
collection_id (Optional[str]): Identifier for the Needle collection.
Raises:
ImportError: If the `needle-python` library is not installed.
ValueError: If the collection ID is not provided.
"""
try:
from needle.v1 import NeedleClient
except ImportError:
raise ImportError(
"Please install with `pip install needle-python` to use NeedleLoader."
)
super().__init__()
self.needle_api_key = needle_api_key
self.collection_id = collection_id
self.client: Optional[NeedleClient] = None
if self.needle_api_key:
self.client = NeedleClient(api_key=self.needle_api_key)
if not self.collection_id:
raise ValueError("Collection ID must be provided.")
def _get_collection(self) -> None:
"""
Ensures the Needle collection is set and the client is initialized.
Raises:
ValueError: If the Needle client is not initialized or
if the collection ID is missing.
"""
if self.client is None:
raise ValueError(
"NeedleClient is not initialized. Provide a valid API key."
)
if not self.collection_id:
raise ValueError("Collection ID must be provided.")
def add_files(self, files: Dict[str, str]) -> None:
"""
Adds files to the Needle collection.
Args:
files (Dict[str, str]): Dictionary where keys are file names and values
are file URLs.
Raises:
ImportError: If the `needle-python` library is not installed.
ValueError: If the collection is not properly initialized.
"""
try:
from needle.v1.models import FileToAdd
except ImportError:
raise ImportError(
"Please install with `pip install needle-python` to add files."
)
self._get_collection()
assert self.client is not None, "NeedleClient must be initialized."
files_to_add = [FileToAdd(name=name, url=url) for name, url in files.items()]
self.client.collections.files.add(
collection_id=self.collection_id, files=files_to_add
)
def _fetch_documents(self) -> List[Document]:
"""
Fetches metadata for documents from the Needle collection.
Returns:
List[Document]: A list of documents with metadata. Content is excluded.
Raises:
ValueError: If the collection is not properly initialized.
"""
self._get_collection()
assert self.client is not None, "NeedleClient must be initialized."
files = self.client.collections.files.list(self.collection_id)
docs = [
Document(
page_content="", # Needle doesn't provide file content fetching
metadata={
"source": file.url,
"title": file.name,
"size": getattr(file, "size", None),
},
)
for file in files
if file.status == "indexed"
]
return docs
def load(self) -> List[Document]:
"""
Loads all documents from the Needle collection.
Returns:
List[Document]: A list of documents from the collection.
"""
return self._fetch_documents()
def lazy_load(self) -> Iterator[Document]:
"""
Lazily loads documents from the Needle collection.
Yields:
Iterator[Document]: An iterator over the documents.
"""
yield from self._fetch_documents()
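# Usage sketch for add_files (API key, collection id and URL are placeholders;
# keys are file names, values are file URLs):
#
#     loader = NeedleLoader(needle_api_key="...", collection_id="...")
#     loader.add_files({"tech-radar.pdf": "https://example.com/tech-radar.pdf"})
#     docs = loader.load()  # metadata-only Documents for indexed files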
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/onedrive_file.py | from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, List
from langchain_core.documents import Document
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader
if TYPE_CHECKING:
from O365.drive import File
CHUNK_SIZE = 1024 * 1024 * 5
class OneDriveFileLoader(BaseLoader, BaseModel):
"""Load a file from `Microsoft OneDrive`."""
file: File = Field(...)
"""The file to load."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def load(self) -> List[Document]:
"""Load Documents"""
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.file.name}"
self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
loader = UnstructuredFileLoader(file_path)
return loader.load()
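# Usage sketch (assumes an authenticated O365 drive `File` object obtained
# elsewhere, e.g. via the O365 package's drive API):
#
#     loader = OneDriveFileLoader(file=o365_file)
#     docs = loader.load()  # downloads to a temp dir, parses via unstructured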
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/notion.py | from pathlib import Path
from typing import List, Union
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class NotionDirectoryLoader(BaseLoader):
"""Load `Notion directory` dump."""
def __init__(self, path: Union[str, Path], *, encoding: str = "utf-8") -> None:
"""Initialize with a file path."""
self.file_path = path
self.encoding = encoding
def load(self) -> List[Document]:
"""Load documents."""
paths = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in paths:
with open(p, encoding=self.encoding) as f:
text = f.read()
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
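# Usage sketch ("Notion_DB" stands in for an exported Notion workspace dump):
#
#     loader = NotionDirectoryLoader("Notion_DB")
#     docs = loader.load()  # one Document per markdown file, found recursively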
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/mongodb.py | import asyncio
import logging
from typing import Dict, List, Optional, Sequence
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class MongodbLoader(BaseLoader):
"""Load MongoDB documents."""
def __init__(
self,
connection_string: str,
db_name: str,
collection_name: str,
*,
filter_criteria: Optional[Dict] = None,
field_names: Optional[Sequence[str]] = None,
metadata_names: Optional[Sequence[str]] = None,
include_db_collection_in_metadata: bool = True,
) -> None:
"""
Initializes the MongoDB loader with necessary database connection
details and configurations.
Args:
connection_string (str): MongoDB connection URI.
db_name (str): Name of the database to connect to.
collection_name (str): Name of the collection to fetch documents from.
filter_criteria (Optional[Dict]): MongoDB filter criteria for querying
documents.
field_names (Optional[Sequence[str]]): List of field names to retrieve
from documents.
metadata_names (Optional[Sequence[str]]): Additional metadata fields to
extract from documents.
include_db_collection_in_metadata (bool): Flag to include database and
collection names in metadata.
Raises:
ImportError: If the motor library is not installed.
ValueError: If any necessary argument is missing.
"""
try:
from motor.motor_asyncio import AsyncIOMotorClient
except ImportError as e:
raise ImportError(
"Cannot import from motor, please install with `pip install motor`."
) from e
if not connection_string:
raise ValueError("connection_string must be provided.")
if not db_name:
raise ValueError("db_name must be provided.")
if not collection_name:
raise ValueError("collection_name must be provided.")
self.client = AsyncIOMotorClient(connection_string)
self.db_name = db_name
self.collection_name = collection_name
self.field_names = field_names or []
self.filter_criteria = filter_criteria or {}
self.metadata_names = metadata_names or []
self.include_db_collection_in_metadata = include_db_collection_in_metadata
self.db = self.client.get_database(db_name)
self.collection = self.db.get_collection(collection_name)
def load(self) -> List[Document]:
"""Load data into Document objects.
Attention:
This implementation starts an asyncio event loop which
will only work if running in a sync env. In an async env, it should
fail since there is already an event loop running.
This code should be updated to kick off the event loop from a separate
thread if running within an async context.
"""
return asyncio.run(self.aload())
async def aload(self) -> List[Document]:
"""Asynchronously loads data into Document objects."""
result = []
total_docs = await self.collection.count_documents(self.filter_criteria)
projection = self._construct_projection()
async for doc in self.collection.find(self.filter_criteria, projection):
metadata = self._extract_fields(doc, self.metadata_names, default="")
# Optionally add database and collection names to metadata
if self.include_db_collection_in_metadata:
metadata.update(
{"database": self.db_name, "collection": self.collection_name}
)
# Extract text content from filtered fields or use the entire document
if self.field_names:
fields = self._extract_fields(doc, self.field_names, default="")
texts = [str(value) for value in fields.values()]
text = " ".join(texts)
else:
text = str(doc)
result.append(Document(page_content=text, metadata=metadata))
if len(result) != total_docs:
logger.warning(
f"Only partial collection of documents returned. "
f"Loaded {len(result)} docs, expected {total_docs}."
)
return result
def _construct_projection(self) -> Optional[Dict]:
"""Constructs the projection dictionary for MongoDB query based
on the specified field names and metadata names."""
field_names = list(self.field_names) or []
metadata_names = list(self.metadata_names) or []
all_fields = field_names + metadata_names
return {field: 1 for field in all_fields} if all_fields else None
def _extract_fields(
self,
document: Dict,
fields: Sequence[str],
default: str = "",
) -> Dict:
"""Extracts and returns values for specified fields from a document."""
extracted = {}
for field in fields or []:
value = document
for key in field.split("."):
value = value.get(key, default)
if value == default:
break
new_field_name = field.replace(".", "_")
extracted[new_field_name] = value
return extracted
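# Usage sketch (connection string, database and collection names are
# placeholders; call load() from a sync context, since it starts its own
# event loop as the docstring warns):
#
#     loader = MongodbLoader(
#         connection_string="mongodb://localhost:27017/",
#         db_name="sample_db",
#         collection_name="articles",
#         filter_criteria={"status": "published"},
#         field_names=["title", "body"],
#     )
#     docs = loader.load()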
|
0 | lc_public_repos/langchain/libs/community/langchain_community/document_loaders | lc_public_repos/langchain/libs/community/langchain_community/document_loaders/blob_loaders/file_system.py | """Use to load blobs from the local file system."""
from pathlib import Path
from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union
from langchain_community.document_loaders.blob_loaders.schema import Blob, BlobLoader
T = TypeVar("T")
def _make_iterator(
length_func: Callable[[], int], show_progress: bool = False
) -> Callable[[Iterable[T]], Iterator[T]]:
"""Create a function that optionally wraps an iterable in tqdm."""
iterator: Callable[[Iterable[T]], Iterator[T]]
if show_progress:
try:
from tqdm.auto import tqdm
except ImportError:
raise ImportError(
"You must install tqdm to use show_progress=True."
"You can install tqdm with `pip install tqdm`."
)
# Make sure to provide `total` here so that tqdm can show
# a progress bar that takes into account the total number of files.
def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]:
"""Wrap an iterable in a tqdm progress bar."""
return tqdm(iterable, total=length_func())
iterator = _with_tqdm
else:
iterator = iter
return iterator
# PUBLIC API
class FileSystemBlobLoader(BlobLoader):
"""Load blobs in the local file system.
Example:
.. code-block:: python
from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader
loader = FileSystemBlobLoader("/path/to/directory")
for blob in loader.yield_blobs():
print(blob) # noqa: T201
""" # noqa: E501
def __init__(
self,
path: Union[str, Path],
*,
glob: str = "**/[!.]*",
exclude: Sequence[str] = (),
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
) -> None:
"""Initialize with a path to directory and how to glob over it.
Args:
path: Path to directory to load from or path to file to load.
If a path to a file is provided, glob/exclude/suffixes are ignored.
glob: Glob pattern relative to the specified path
by default set to pick up all non-hidden files
exclude: patterns to exclude from results, use glob syntax
suffixes: Provide to keep only files with these suffixes
Useful when wanting to keep files with different suffixes
Suffixes must include the dot, e.g. ".txt"
show_progress: If true, will show a progress bar as the files are loaded.
This forces an iteration through all matching files
to count them prior to loading them.
Examples:
.. code-block:: python
from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader
# Load a single file.
loader = FileSystemBlobLoader("/path/to/file.txt")
# Recursively load all text files in a directory.
loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt")
# Recursively load all non-hidden files in a directory.
loader = FileSystemBlobLoader("/path/to/directory", glob="**/[!.]*")
# Load all files in a directory without recursion.
loader = FileSystemBlobLoader("/path/to/directory", glob="*")
# Recursively load all files in a directory, except for py or pyc files.
loader = FileSystemBlobLoader(
"/path/to/directory",
glob="**/*.txt",
exclude=["**/*.py", "**/*.pyc"]
)
""" # noqa: E501
if isinstance(path, Path):
_path = path
elif isinstance(path, str):
_path = Path(path)
else:
raise TypeError(f"Expected str or Path, got {type(path)}")
self.path = _path.expanduser() # Expand user to handle ~
self.glob = glob
self.suffixes = set(suffixes or [])
self.show_progress = show_progress
self.exclude = exclude
def yield_blobs(
self,
) -> Iterable[Blob]:
"""Yield blobs that match the requested pattern."""
iterator = _make_iterator(
length_func=self.count_matching_files, show_progress=self.show_progress
)
for path in iterator(self._yield_paths()):
yield Blob.from_path(path)
def _yield_paths(self) -> Iterable[Path]:
"""Yield paths that match the requested pattern."""
if self.path.is_file():
yield self.path
return
paths = self.path.glob(self.glob)
for path in paths:
if self.exclude:
if any(path.match(glob) for glob in self.exclude):
continue
if path.is_file():
if self.suffixes and path.suffix not in self.suffixes:
continue
yield path
def count_matching_files(self) -> int:
"""Count files that match the pattern without loading them."""
# Carry out a full iteration to count the files without
# materializing anything expensive in memory.
num = 0
for _ in self._yield_paths():
num += 1
return num
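# Usage sketch (the directory path is hypothetical; tqdm is only required when
# show_progress=True, and suffixes must include the leading dot):
#
#     loader = FileSystemBlobLoader(
#         "/path/to/repo",
#         suffixes=[".py"],
#         exclude=["**/.venv/**"],
#         show_progress=True,
#     )
#     blobs = list(loader.yield_blobs())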
|