Source code for langchain.document_loaders.googledrive
"""Loads data from Google Drive."""
# Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
# https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
# https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501
# 4. For service accounts visit
# https://cloud.google.com/iam/docs/service-accounts-create
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from pydantic import BaseModel, root_validator, validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]
class GoogleDriveLoader(BaseLoader, BaseModel):
"""Loads Google Docs from Google Drive."""
service_account_key: Path = Path.home() / ".credentials" / "keys.json"
"""Path to the service account key file."""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
"""Path to the credentials file."""
token_path: Path = Path.home() / ".credentials" / "token.json"
"""Path to the token file."""
folder_id: Optional[str] = None
"""The folder id to load from."""
document_ids: Optional[List[str]] = None
"""The document ids to load from."""
file_ids: Optional[List[str]] = None
"""The file ids to load from."""
recursive: bool = False
"""Whether to load recursively. Only applies when folder_id is given."""
file_types: Optional[Sequence[str]] = None
"""The file types to load. Only applies when folder_id is given."""
load_trashed_files: bool = False
"""Whether to load trashed files. Only applies when folder_id is given."""
# NOTE(MthwRobinson) - changing the file_loader_cls to type here currently
# results in pydantic validation errors
file_loader_cls: Any = None
"""The file loader class to use."""
file_loader_kwargs: Dict["str", Any] = {}
"""The file loader kwargs to use."""
@root_validator
def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if values.get("folder_id") and (
values.get("document_ids") or values.get("file_ids")
):
raise ValueError(
"Cannot specify both folder_id and document_ids nor "
"folder_id and file_ids"
)
if (
not values.get("folder_id")
and not values.get("document_ids")
and not values.get("file_ids")
):
raise ValueError("Must specify either folder_id, document_ids, or file_ids")
file_types = values.get("file_types")
if file_types:
if values.get("document_ids") or values.get("file_ids"):
raise ValueError(
"file_types can only be given when folder_id is given,"
" (not when document_ids or file_ids are given)."
)
type_mapping = {
"document": "application/vnd.google-apps.document",
"sheet": "application/vnd.google-apps.spreadsheet",
"pdf": "application/pdf",
}
allowed_types = list(type_mapping.keys()) + list(type_mapping.values())
short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()])
full_names = ", ".join([f"'{x}'" for x in type_mapping.values()])
for file_type in file_types:
if file_type not in allowed_types:
raise ValueError(
f"Given file type {file_type} is not supported. "
f"Supported values are: {short_names}; and "
f"their full-form names: {full_names}"
)
# replace short-form file types by full-form file types
def full_form(x: str) -> str:
return type_mapping[x] if x in type_mapping else x
values["file_types"] = [full_form(file_type) for file_type in file_types]
return values
@validator("credentials_path")
def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
"""Validate that credentials_path exists."""
if not v.exists():
raise ValueError(f"credentials_path {v} does not exist")
return v
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth import default
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
"You must run "
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib` "
"to use the Google Drive loader."
)
creds = None
if self.service_account_key.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_key), scopes=SCOPES
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
elif "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ:
creds, project = default()
creds = creds.with_scopes(SCOPES)
# no need to write to file
if creds:
return creds
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
def _load_sheet_from_id(self, id: str) -> List[Document]:
"""Load a sheet and all tabs from an ID."""
from googleapiclient.discovery import build
creds = self._load_credentials()
sheets_service = build("sheets", "v4", credentials=creds)
spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
sheets = spreadsheet.get("sheets", [])
documents = []
for sheet in sheets:
sheet_name = sheet["properties"]["title"]
result = (
sheets_service.spreadsheets()
.values()
.get(spreadsheetId=id, range=sheet_name)
.execute()
)
values = result.get("values", [])
header = values[0]
for i, row in enumerate(values[1:], start=1):
metadata = {
"source": (
f"https://docs.google.com/spreadsheets/d/{id}/"
f"edit?gid={sheet['properties']['sheetId']}"
),
"title": f"{spreadsheet['properties']['title']} - {sheet_name}",
"row": i,
}
content = []
for j, v in enumerate(row):
title = header[j].strip() if len(header) > j else ""
content.append(f"{title}: {v.strip()}")
page_content = "\n".join(content)
documents.append(Document(page_content=page_content, metadata=metadata))
return documents
def _load_document_from_id(self, id: str) -> Document:
"""Load a document from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id, supportsAllDrives=True).execute()
request = service.files().export_media(fileId=id, mimeType="text/plain")
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
try:
while done is False:
status, done = downloader.next_chunk()
except HttpError as e:
if e.resp.status == 404:
print("File not found: {}".format(id))
else:
print("An error occurred: {}".format(e))
text = fh.getvalue().decode("utf-8")
metadata = {
"source": f"https://docs.google.com/document/d/{id}/edit",
"title": f"{file.get('name')}",
}
return Document(page_content=text, metadata=metadata)
def _load_documents_from_folder(
self, folder_id: str, *, file_types: Optional[Sequence[str]] = None
) -> List[Document]:
"""Load documents from a folder."""
from googleapiclient.discovery import build
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
files = self._fetch_files_recursive(service, folder_id)
# If file types filter is provided, we'll filter by the file type.
if file_types:
_files = [f for f in files if f["mimeType"] in file_types] # type: ignore
else:
_files = files
returns = []
for file in _files:
if file["trashed"] and not self.load_trashed_files:
continue
elif file["mimeType"] == "application/vnd.google-apps.document":
returns.append(self._load_document_from_id(file["id"])) # type: ignore
elif file["mimeType"] == "application/vnd.google-apps.spreadsheet":
returns.extend(self._load_sheet_from_id(file["id"])) # type: ignore
elif (
file["mimeType"] == "application/pdf"
or self.file_loader_cls is not None
):
returns.extend(self._load_file_from_id(file["id"])) # type: ignore
else:
pass
return returns
def _fetch_files_recursive(
self, service: Any, folder_id: str
) -> List[Dict[str, Union[str, List[str]]]]:
"""Fetch all files and subfolders recursively."""
results = (
service.files()
.list(
q=f"'{folder_id}' in parents",
pageSize=1000,
includeItemsFromAllDrives=True,
supportsAllDrives=True,
fields="nextPageToken, files(id, name, mimeType, parents, trashed)",
)
.execute()
)
files = results.get("files", [])
returns = []
for file in files:
if file["mimeType"] == "application/vnd.google-apps.folder":
if self.recursive:
returns.extend(self._fetch_files_recursive(service, file["id"]))
else:
returns.append(file)
return returns
def _load_documents_from_ids(self) -> List[Document]:
"""Load documents from a list of IDs."""
if not self.document_ids:
raise ValueError("document_ids must be set")
return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]
def _load_file_from_id(self, id: str) -> List[Document]:
"""Load a file from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id, supportsAllDrives=True).execute()
request = service.files().get_media(fileId=id)
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
if self.file_loader_cls is not None:
fh.seek(0)
loader = self.file_loader_cls(file=fh, **self.file_loader_kwargs)
docs = loader.load()
for doc in docs:
doc.metadata["source"] = f"https://drive.google.com/file/d/{id}/view"
return docs
else:
from PyPDF2 import PdfReader
content = fh.getvalue()
pdf_reader = PdfReader(BytesIO(content))
return [
Document(
page_content=page.extract_text(),
metadata={
"source": f"https://drive.google.com/file/d/{id}/view",
"title": f"{file.get('name')}",
"page": i,
},
)
for i, page in enumerate(pdf_reader.pages)
]
def _load_file_from_ids(self) -> List[Document]:
"""Load files from a list of IDs."""
if not self.file_ids:
raise ValueError("file_ids must be set")
docs = []
for file_id in self.file_ids:
docs.extend(self._load_file_from_id(file_id))
return docs
def load(self) -> List[Document]:
"""Load documents."""
if self.folder_id:
return self._load_documents_from_folder(
self.folder_id, file_types=self.file_types
)
elif self.document_ids:
return self._load_documents_from_ids()
else:
return self._load_file_from_ids()
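A minimal usage sketch for the loader above, assuming OAuth credentials already exist at the default paths; the folder ID and file types are placeholders, not values from this module:

from langchain.document_loaders import GoogleDriveLoader

# Load Google Docs and Sheets from one folder (placeholder ID), descending into subfolders.
loader = GoogleDriveLoader(
    folder_id="<your-folder-id>",
    file_types=["document", "sheet"],  # short forms are expanded by validate_inputs
    recursive=True,
)
docs = loader.load()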
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/googledrive.html
Source code for langchain.document_loaders.blockchain
import os
import re
import time
from enum import Enum
from typing import List, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BlockchainType(Enum):
"""Enumerator of the supported blockchains."""
ETH_MAINNET = "eth-mainnet"
ETH_GOERLI = "eth-goerli"
POLYGON_MAINNET = "polygon-mainnet"
POLYGON_MUMBAI = "polygon-mumbai"
class BlockchainDocumentLoader(BaseLoader):
"""Loads elements from a blockchain smart contract into Langchain documents.
The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet,
Polygon mainnet, and Polygon Mumbai testnet.
If no BlockchainType is specified, the default is Ethereum mainnet.
The Loader uses the Alchemy API to interact with the blockchain.
ALCHEMY_API_KEY environment variable must be set to use this loader.
The API returns 100 NFTs per request and can be paginated using the
startToken parameter.
If get_all_tokens is set to True, the loader will get all tokens
on the contract. Note that for contracts with a large number of tokens,
this may take a long time (e.g. 10k tokens is 100 requests).
Default value is false for this reason.
The max_execution_time (sec) can be set to limit the execution time
of the loader.
Future versions of this loader can:
- Support additional Alchemy APIs (e.g. getTransactions, etc.)
- Support additional blockchain APIs (e.g. Infura, Opensea, etc.)
"""
def __init__(
self,
contract_address: str,
blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
api_key: str = "docs-demo",
startToken: str = "",
get_all_tokens: bool = False,
max_execution_time: Optional[int] = None,
):
"""
Args:
contract_address: The address of the smart contract.
blockchainType: The blockchain type.
api_key: The Alchemy API key.
startToken: The start token for pagination.
get_all_tokens: Whether to get all tokens on the contract.
max_execution_time: The maximum execution time (sec).
"""
self.contract_address = contract_address
self.blockchainType = blockchainType.value
self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
self.startToken = startToken
self.get_all_tokens = get_all_tokens
self.max_execution_time = max_execution_time
if not self.api_key:
raise ValueError("Alchemy API key not provided.")
if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
raise ValueError(f"Invalid contract address {self.contract_address}")
def load(self) -> List[Document]:
result = []
current_start_token = self.startToken
start_time = time.time()
while True:
url = (
f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
f"{self.api_key}/getNFTsForCollection?withMetadata="
f"True&contractAddress={self.contract_address}"
f"&startToken={current_start_token}"
)
response = requests.get(url)
if response.status_code != 200:
raise ValueError(
f"Request failed with status code {response.status_code}"
)
items = response.json()["nfts"]
if not items:
break
for item in items:
content = str(item)
tokenId = item["id"]["tokenId"]
metadata = {
"source": self.contract_address,
"blockchain": self.blockchainType,
"tokenId": tokenId,
}
result.append(Document(page_content=content, metadata=metadata))
# exit after the first API call if get_all_tokens is False
if not self.get_all_tokens:
break
# get the start token for the next API call from the last item in array
current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"])
if (
self.max_execution_time is not None
and (time.time() - start_time) > self.max_execution_time
):
raise RuntimeError("Execution time exceeded the allowed time limit.")
if not result:
raise ValueError(
f"No NFTs found for contract address {self.contract_address}"
)
return result
# add one to the tokenId, ensuring the correct tokenId format is used
def _get_next_tokenId(self, tokenId: str) -> str:
value_type = self._detect_value_type(tokenId)
if value_type == "hex_0x":
value_int = int(tokenId, 16)
elif value_type == "hex_0xbf":
value_int = int(tokenId[2:], 16)
else:
value_int = int(tokenId)
result = value_int + 1
if value_type == "hex_0x":
return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x")
elif value_type == "hex_0xbf":
return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x")
else:
return str(result)
# A smart contract can use different formats for the tokenId
@staticmethod
def _detect_value_type(tokenId: str) -> str:
if isinstance(tokenId, int):
return "int"
elif tokenId.startswith("0x"):
return "hex_0x"
elif tokenId.startswith("0xbf"):
return "hex_0xbf"
else:
return "hex_0xbf"
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blockchain.html
Source code for langchain.document_loaders.news
"""Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class NewsURLLoader(BaseLoader):
"""Loader that uses newspaper to load news articles from URLs.
Args:
urls: URLs to load. Each is loaded into its own document.
text_mode: If True, extract text from URL and use that for page content.
Otherwise, extract raw HTML.
nlp: If True, perform NLP on the extracted contents, like providing a summary
and extracting keywords.
continue_on_failure: If True, continue loading documents even if
loading fails for a particular URL.
show_progress_bar: If True, use tqdm to show a loading progress bar. Requires
tqdm to be installed, ``pip install tqdm``.
**newspaper_kwargs: Any additional named arguments to pass to
newspaper.Article().
Example:
.. code-block:: python
from langchain.document_loaders import NewsURLLoader
loader = NewsURLLoader(
urls=["<url-1>", "<url-2>"],
)
docs = loader.load()
Newspaper reference:
https://newspaper.readthedocs.io/en/latest/
"""
def __init__(
self,
urls: List[str],
text_mode: bool = True,
nlp: bool = False,
continue_on_failure: bool = True,
show_progress_bar: bool = False,
**newspaper_kwargs: Any,
) -> None:
"""Initialize with file path."""
try:
import newspaper # noqa:F401
self.__version = newspaper.__version__
except ImportError:
raise ImportError(
"newspaper package not found, please install it with "
"`pip install newspaper3k`"
)
self.urls = urls
self.text_mode = text_mode
self.nlp = nlp
self.continue_on_failure = continue_on_failure
self.newspaper_kwargs = newspaper_kwargs
self.show_progress_bar = show_progress_bar
def load(self) -> List[Document]:
iter = self.lazy_load()
if self.show_progress_bar:
try:
from tqdm import tqdm
except ImportError as e:
raise ImportError(
"Package tqdm must be installed if show_progress_bar=True. "
"Please install with 'pip install tqdm' or set "
"show_progress_bar=False."
) from e
iter = tqdm(iter)
return list(iter)
def lazy_load(self) -> Iterator[Document]:
try:
from newspaper import Article
except ImportError as e:
raise ImportError(
"Cannot import newspaper, please install with `pip install newspaper3k`"
) from e
for url in self.urls:
try:
article = Article(url, **self.newspaper_kwargs)
article.download()
article.parse()
if self.nlp:
article.nlp()
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
continue
else:
raise e
metadata = {
"title": getattr(article, "title", ""),
"link": getattr(article, "url", getattr(article, "canonical_link", "")),
"authors": getattr(article, "authors", []),
"language": getattr(article, "meta_lang", ""),
"description": getattr(article, "meta_description", ""),
"publish_date": getattr(article, "publish_date", ""),
}
if self.text_mode:
content = article.text
else:
content = article.html
if self.nlp:
metadata["keywords"] = getattr(article, "keywords", [])
metadata["summary"] = getattr(article, "summary", "")
yield Document(page_content=content, metadata=metadata)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/news.html
Source code for langchain.document_loaders.pyspark_dataframe
"""Load from a Spark Dataframe object"""
import itertools
import logging
import sys
from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__file__)
if TYPE_CHECKING:
from pyspark.sql import SparkSession
class PySparkDataFrameLoader(BaseLoader):
"""Load PySpark DataFrames"""
def __init__(
self,
spark_session: Optional["SparkSession"] = None,
df: Optional[Any] = None,
page_content_column: str = "text",
fraction_of_memory: float = 0.1,
):
"""Initialize with a Spark DataFrame object.
Args:
spark_session: The SparkSession object.
df: The Spark DataFrame object.
page_content_column: The name of the column containing the page content.
Defaults to "text".
fraction_of_memory: The fraction of memory to use. Defaults to 0.1.
"""
try:
from pyspark.sql import DataFrame, SparkSession
except ImportError:
raise ImportError(
"pyspark is not installed. "
"Please install it with `pip install pyspark`"
)
self.spark = (
spark_session if spark_session else SparkSession.builder.getOrCreate()
)
if not isinstance(df, DataFrame):
raise ValueError(
f"Expected data_frame to be a PySpark DataFrame, got {type(df)}"
)
self.df = df
self.page_content_column = page_content_column
self.fraction_of_memory = fraction_of_memory
self.num_rows, self.max_num_rows = self.get_num_rows()
self.rdd_df = self.df.rdd.map(list)
self.column_names = self.df.columns
def get_num_rows(self) -> Tuple[int, int]:
"""Gets the number of "feasible" rows for the DataFrame"""
try:
import psutil
except ImportError as e:
raise ImportError(
"psutil not installed. Please install it with `pip install psutil`."
) from e
row = self.df.limit(1).collect()[0]
estimated_row_size = sys.getsizeof(row)
mem_info = psutil.virtual_memory()
available_memory = mem_info.available
max_num_rows = int(
(available_memory / estimated_row_size) * self.fraction_of_memory
)
return min(max_num_rows, self.df.count()), max_num_rows
def lazy_load(self) -> Iterator[Document]:
"""A lazy loader for document content."""
for row in self.rdd_df.toLocalIterator():
metadata = {self.column_names[i]: row[i] for i in range(len(row))}
text = metadata[self.page_content_column]
metadata.pop(self.page_content_column)
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""Load from the dataframe."""
if self.df.count() > self.max_num_rows:
logger.warning(
f"The number of DataFrame rows is {self.df.count()}, "
f"but we will only include the amount "
f"of rows that can reasonably fit in memory: {self.num_rows}."
)
lazy_load_iterator = self.lazy_load()
return list(itertools.islice(lazy_load_iterator, self.num_rows))
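A minimal usage sketch for the loader above, using a small in-memory DataFrame (the column names and rows are made up for illustration):

from pyspark.sql import SparkSession
from langchain.document_loaders.pyspark_dataframe import PySparkDataFrameLoader

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("hello world", "a.txt"), ("goodbye world", "b.txt")],
    schema=["text", "source"],
)
loader = PySparkDataFrameLoader(spark_session=spark, df=df, page_content_column="text")
docs = loader.load()  # remaining columns (here "source") become Document metadata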
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/pyspark_dataframe.html
Source code for langchain.document_loaders.rocksetdb
from typing import Any, Callable, Iterator, List, Optional, Tuple
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
def default_joiner(docs: List[Tuple[str, Any]]) -> str:
"""Default joiner for content columns."""
return "\n".join([doc[1] for doc in docs])
class ColumnNotFoundError(Exception):
"""Column not found error."""
def __init__(self, missing_key: str, query: str):
super().__init__(f'Column "{missing_key}" not selected in query:\n{query}')
class RocksetLoader(BaseLoader):
"""Wrapper around Rockset db
To use, you should have the `rockset` python package installed.
Example:
.. code-block:: python
# This code will load 3 records from the "langchain_demo"
# collection as Documents, with the `text` column used as
# the content
from langchain.document_loaders import RocksetLoader
from rockset import RocksetClient, Regions, models
loader = RocksetLoader(
RocksetClient(Regions.usw2a1, "<api key>"),
models.QueryRequestSql(
query="select * from langchain_demo limit 3"
),
["text"]
)
"""
def __init__(
self,
client: Any,
query: Any,
content_keys: List[str],
metadata_keys: Optional[List[str]] = None,
content_columns_joiner: Callable[[List[Tuple[str, Any]]], str] = default_joiner,
):
"""Initialize with Rockset client.
Args:
client: Rockset client object.
query: Rockset query object.
content_keys: The collection columns to be written into the `page_content`
of the Documents.
metadata_keys: The collection columns to be written into the `metadata` of
the Documents. By default, this is all the keys in the document.
content_columns_joiner: Method that joins content_keys and their values into a
string. It is a method that takes in a List[Tuple[str, Any]],
representing a list of tuples of (column name, column value).
By default, this is a method that joins each column value with a new
line. This method is only relevant if there are multiple content_keys.
"""
try:
from rockset import QueryPaginator, RocksetClient
from rockset.models import QueryRequestSql
except ImportError:
raise ImportError(
"Could not import rockset client python package. "
"Please install it with `pip install rockset`."
)
if not isinstance(client, RocksetClient):
raise ValueError(
f"client should be an instance of rockset.RocksetClient, "
f"got {type(client)}"
)
if not isinstance(query, QueryRequestSql):
raise ValueError(
f"query should be an instance of rockset.model.QueryRequestSql, "
f"got {type(query)}"
)
self.client = client
self.query = query
self.content_keys = content_keys
self.content_columns_joiner = content_columns_joiner
self.metadata_keys = metadata_keys
self.paginator = QueryPaginator
self.request_model = QueryRequestSql
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
query_results = self.client.Queries.query(
sql=self.query
).results # execute the SQL query
for doc in query_results: # for each doc in the response
try:
yield Document(
page_content=self.content_columns_joiner(
[(col, doc[col]) for col in self.content_keys]
),
metadata={col: doc[col] for col in self.metadata_keys}
if self.metadata_keys is not None
else doc,
) # try to yield the Document
except (
KeyError
) as e: # either content_columns or metadata_columns is invalid
raise ColumnNotFoundError(
e.args[0], self.query
) # raise that the column isn't in the db schema
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/rocksetdb.html
Source code for langchain.document_loaders.odt
"""Loads OpenOffice ODT files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredODTLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load OpenOffice ODT files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredODTLoader
loader = UnstructuredODTLoader(
"example.odt", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-odt
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Args:
file_path: The path to the file to load.
mode: The mode to use when loading the file. Can be one of "single"
or "elements". Default is "single".
**unstructured_kwargs: Any kwargs to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.3")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.odt import partition_odt
return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/odt.html
Source code for langchain.document_loaders.spreedly
"""Loader that fetches data from Spreedly API."""
import json
import urllib.request
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict
SPREEDLY_ENDPOINTS = {
"gateways_options": "https://core.spreedly.com/v1/gateways_options.json",
"gateways": "https://core.spreedly.com/v1/gateways.json",
"receivers_options": "https://core.spreedly.com/v1/receivers_options.json",
"receivers": "https://core.spreedly.com/v1/receivers.json",
"payment_methods": "https://core.spreedly.com/v1/payment_methods.json",
"certificates": "https://core.spreedly.com/v1/certificates.json",
"transactions": "https://core.spreedly.com/v1/transactions.json",
"environments": "https://core.spreedly.com/v1/environments.json",
}
class SpreedlyLoader(BaseLoader):
"""Loader that fetches data from Spreedly API."""
def __init__(self, access_token: str, resource: str) -> None:
"""Initialize with an access token and a resource.
Args:
access_token: The access token.
resource: The resource.
"""
self.access_token = access_token
self.resource = resource
self.headers = {
"Authorization": f"Bearer {self.access_token}",
"Accept": "application/json",
}
def _make_request(self, url: str) -> List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = SPREEDLY_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
def load(self) -> List[Document]:
return self._get_resource()
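A minimal usage sketch for the loader above; the access token is a placeholder and the resource must be one of the SPREEDLY_ENDPOINTS keys:

from langchain.document_loaders.spreedly import SpreedlyLoader

loader = SpreedlyLoader(
    access_token="<spreedly-access-token>",
    resource="gateways_options",
)
docs = loader.load()  # one Document containing the stringified JSON response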
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/spreedly.html
Source code for langchain.document_loaders.concurrent
from __future__ import annotations
import concurrent.futures
from pathlib import Path
from typing import Iterator, Literal, Optional, Sequence, Union
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import BlobLoader, FileSystemBlobLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers.registry import get_parser
from langchain.schema import Document
_PathLike = Union[str, Path]
DEFAULT = Literal["default"]
class ConcurrentLoader(GenericLoader):
"""
A generic document loader that loads and parses documents concurrently.
"""
def __init__(
self, blob_loader: BlobLoader, blob_parser: BaseBlobParser, num_workers: int = 4
) -> None:
super().__init__(blob_loader, blob_parser)
self.num_workers = num_workers
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily with concurrent parsing."""
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.num_workers
) as executor:
futures = {
executor.submit(self.blob_parser.lazy_parse, blob)
for blob in self.blob_loader.yield_blobs()
}
for future in concurrent.futures.as_completed(futures):
yield from future.result()
@classmethod
def from_filesystem(
cls,
path: _PathLike,
*,
glob: str = "**/[!.]*",
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
parser: Union[DEFAULT, BaseBlobParser] = "default",
num_workers: int = 4,
) -> ConcurrentLoader:
"""
Create a concurrent generic document loader using a
filesystem blob loader.
"""
blob_loader = FileSystemBlobLoader(
path, glob=glob, suffixes=suffixes, show_progress=show_progress
)
if isinstance(parser, str):
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser, num_workers)
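A minimal usage sketch for the class method above; the directory path and suffix filter are placeholders:

from langchain.document_loaders.concurrent import ConcurrentLoader

loader = ConcurrentLoader.from_filesystem(
    "docs/",            # directory to scan
    suffixes=[".txt"],  # restrict to plain-text files
    num_workers=8,      # threads used for concurrent parsing
)
docs = list(loader.lazy_load())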
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/concurrent.html
Source code for langchain.document_loaders.html_bs
"""Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Dict, List, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class BSHTMLLoader(BaseLoader):
"""Loader that uses beautiful soup to parse HTML files."""
def __init__(
self,
file_path: str,
open_encoding: Union[str, None] = None,
bs_kwargs: Union[dict, None] = None,
get_text_separator: str = "",
) -> None:
"""Initialise with path, and optionally, file encoding to use, and any kwargs
to pass to the BeautifulSoup object.
Args:
file_path: The path to the file to load.
open_encoding: The encoding to use when opening the file.
bs_kwargs: Any kwargs to pass to the BeautifulSoup object.
get_text_separator: The separator to use when calling get_text on the soup.
"""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.file_path = file_path
self.open_encoding = open_encoding
if bs_kwargs is None:
bs_kwargs = {"features": "lxml"}
self.bs_kwargs = bs_kwargs
self.get_text_separator = get_text_separator
def load(self) -> List[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with open(self.file_path, "r", encoding=self.open_encoding) as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": self.file_path,
"title": title,
}
return [Document(page_content=text, metadata=metadata)]
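A minimal usage sketch for the loader above; "page.html" is a placeholder path, and lxml (the default parser configured here) must be installed:

from langchain.document_loaders.html_bs import BSHTMLLoader

loader = BSHTMLLoader("page.html", open_encoding="utf-8", get_text_separator="\n")
docs = loader.load()  # docs[0].metadata carries the source path and the page <title>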
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/html_bs.html
Source code for langchain.document_loaders.rst
"""Loads RST files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredRSTLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load RST files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredRSTLoader
loader = UnstructuredRSTLoader(
"example.rst", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-rst
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Initialize with a file path.
Args:
file_path: The path to the file to load.
mode: The mode to use for partitioning. See unstructured for details.
Defaults to "single".
**unstructured_kwargs: Additional keyword arguments to pass
to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.7.5")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.rst import partition_rst
return partition_rst(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/rst.html
Source code for langchain.document_loaders.acreom
"""Loads acreom vault from a directory."""
import re
from pathlib import Path
from typing import Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class AcreomLoader(BaseLoader):
"""Loader that loads acreom vault from a directory."""
FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)
"""Regex to match front matter metadata in markdown files."""
def __init__(
self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
):
self.file_path = path
"""Path to the directory containing the markdown files."""
self.encoding = encoding
"""Encoding to use when reading the files."""
self.collect_metadata = collect_metadata
"""Whether to collect metadata from the front matter."""
def _parse_front_matter(self, content: str) -> dict:
"""Parse front matter metadata from the content and return it as a dict."""
if not self.collect_metadata:
return {}
match = self.FRONT_MATTER_REGEX.search(content)
front_matter = {}
if match:
lines = match.group(1).split("\n")
for line in lines:
if ":" in line:
key, value = line.split(":", 1)
front_matter[key.strip()] = value.strip()
else:
# Skip lines without a colon
continue
return front_matter
def _remove_front_matter(self, content: str) -> str:
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub("", content)
def _process_acreom_content(self, content: str) -> str:
# remove acreom specific elements from content that
# do not contribute to the context of current document
content = re.sub(r"\s*-\s\[\s\]\s.*|\s*\[\s\]\s.*", "", content)  # rm tasks
content = re.sub(r"#", "", content)  # rm hashtags
content = re.sub(r"\[\[.*?\]\]", "", content)  # rm doclinks
return content
def lazy_load(self) -> Iterator[Document]:
ps = list(Path(self.file_path).glob("**/*.md"))
for p in ps:
with open(p, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
text = self._remove_front_matter(text)
text = self._process_acreom_content(text)
metadata = {
"source": str(p.name),
"path": str(p),
**front_matter,
}
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
return list(self.lazy_load())
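A minimal usage sketch for the loader above; the vault directory is a placeholder:

from langchain.document_loaders.acreom import AcreomLoader

loader = AcreomLoader("acreom-vault/", collect_metadata=True)
docs = loader.load()  # front matter keys are merged into each Document's metadata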
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/acreom.html
Source code for langchain.document_loaders.word_document
"""Loads word documents."""
import os
import tempfile
from abc import ABC
from typing import List
from urllib.parse import urlparse
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class Docx2txtLoader(BaseLoader, ABC):
"""Loads a DOCX with docx2txt and chunks at character level.
Defaults to checking for a local file, but if the file is a web path, it will download
it to a temporary file, use that, and then clean up the temporary file after completion.
"""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
def load(self) -> List[Document]:
"""Load given path as single page."""
import docx2txt
return [
Document(
page_content=docx2txt.process(self.file_path),
metadata={"source": self.file_path},
)
]
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
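A minimal usage sketch for Docx2txtLoader above; the file path is a placeholder and may also be an http(s) URL, which is downloaded to a temporary file first:

from langchain.document_loaders.word_document import Docx2txtLoader

loader = Docx2txtLoader("report.docx")
docs = loader.load()  # a single Document holding the full extracted text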
class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load word documents.
Works with both .docx and .doc files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(
"example.docx", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-docx
"""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_doc = extension == ".doc"
if is_doc and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .doc files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/word_document.html
Source code for langchain.document_loaders.github
from abc import ABC
from datetime import datetime
from typing import Dict, Iterator, List, Literal, Optional, Union
import requests
from pydantic import BaseModel, root_validator, validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_dict_or_env
class BaseGitHubLoader(BaseLoader, BaseModel, ABC):
"""Load issues of a GitHub repository."""
repo: str
"""Name of repository"""
access_token: str
"""Personal access token - see https://github.com/settings/tokens?type=beta"""
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that access token exists in environment."""
values["access_token"] = get_from_dict_or_env(
values, "access_token", "GITHUB_PERSONAL_ACCESS_TOKEN"
)
return values
@property
def headers(self) -> Dict[str, str]:
return {
"Accept": "application/vnd.github+json",
"Authorization": f"Bearer {self.access_token}",
}
class GitHubIssuesLoader(BaseGitHubLoader):
"""Load issues of a GitHub repository."""
include_prs: bool = True
"""If True include Pull Requests in results, otherwise ignore them."""
milestone: Union[int, Literal["*", "none"], None] = None
"""If integer is passed, it should be a milestone's number field.
If the string '*' is passed, issues with any milestone are accepted.
If the string 'none' is passed, issues without milestones are returned.
"""
state: Optional[Literal["open", "closed", "all"]] = None
"""Filter on issue state. Can be one of: 'open', 'closed', 'all'."""
assignee: Optional[str] = None
"""Filter on assigned user. Pass 'none' for no user and '*' for any user."""
creator: Optional[str] = None
"""Filter on the user that created the issue."""
mentioned: Optional[str] = None
"""Filter on a user that's mentioned in the issue."""
labels: Optional[List[str]] = None
"""Label names to filter one. Example: bug,ui,@high."""
sort: Optional[Literal["created", "updated", "comments"]] = None
"""What to sort results by. Can be one of: 'created', 'updated', 'comments'.
Default is 'created'."""
direction: Optional[Literal["asc", "desc"]] = None
"""The direction to sort the results by. Can be one of: 'asc', 'desc'."""
since: Optional[str] = None
"""Only show notifications updated after the given time.
This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ."""
@validator("since")
def validate_since(cls, v: Optional[str]) -> Optional[str]:
if v:
try:
datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ")
except ValueError:
raise ValueError(
"Invalid value for 'since'. Expected a date string in "
f"YYYY-MM-DDTHH:MM:SSZ format. Received: {v}"
)
return v
def lazy_load(self) -> Iterator[Document]:
"""
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
"""
url: Optional[str] = self.url
while url:
response = requests.get(url, headers=self.headers)
response.raise_for_status()
issues = response.json()
for issue in issues:
doc = self.parse_issue(issue)
if not self.include_prs and doc.metadata["is_pull_request"]:
continue
yield doc
if response.links and response.links.get("next"):
url = response.links["next"]["url"]
else:
url = None
def load(self) -> List[Document]:
"""
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
"""
return list(self.lazy_load())
def parse_issue(self, issue: dict) -> Document:
"""Create Document objects from a list of GitHub issues."""
metadata = {
"url": issue["html_url"],
"title": issue["title"],
"creator": issue["user"]["login"],
"created_at": issue["created_at"],
"comments": issue["comments"],
"state": issue["state"],
"labels": [label["name"] for label in issue["labels"]],
"assignee": issue["assignee"]["login"] if issue["assignee"] else None,
"milestone": issue["milestone"]["title"] if issue["milestone"] else None,
"locked": issue["locked"],
"number": issue["number"],
"is_pull_request": "pull_request" in issue,
}
content = issue["body"] if issue["body"] is not None else ""
return Document(page_content=content, metadata=metadata)
@property
def query_params(self) -> str:
"""Create query parameters for GitHub API."""
labels = ",".join(self.labels) if self.labels else self.labels
query_params_dict = {
"milestone": self.milestone,
"state": self.state,
"assignee": self.assignee,
"creator": self.creator,
"mentioned": self.mentioned,
"labels": labels,
"sort": self.sort,
"direction": self.direction,
"since": self.since,
}
query_params_list = [
f"{k}={v}" for k, v in query_params_dict.items() if v is not None
]
query_params = "&".join(query_params_list)
return query_params
@property
def url(self) -> str:
"""Create URL for GitHub API."""
return f"https://api.github.com/repos/{self.repo}/issues?{self.query_params}"
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/github.html
Source code for langchain.document_loaders.browserless
from typing import Iterator, List, Union
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BrowserlessLoader(BaseLoader):
"""Loads the content of webpages using Browserless' /content endpoint"""
def __init__(
self, api_token: str, urls: Union[str, List[str]], text_content: bool = True
):
"""Initialize with API token and the URLs to scrape"""
self.api_token = api_token
"""Browserless API token."""
self.urls = urls
"""List of URLs to scrape."""
self.text_content = text_content
def lazy_load(self) -> Iterator[Document]:
"""Lazy load Documents from URLs."""
for url in self.urls:
if self.text_content:
response = requests.post(
"https://chrome.browserless.io/scrape",
params={
"token": self.api_token,
},
json={
"url": url,
"elements": [
{
"selector": "body",
}
],
},
)
yield Document(
page_content=response.json()["data"][0]["results"][0]["text"],
metadata={
"source": url,
},
)
else:
response = requests.post(
"https://chrome.browserless.io/content",
params={
"token": self.api_token,
},
json={
"url": url,
},
)
yield Document(
page_content=response.text,
metadata={
"source": url,
},
)
def load(self) -> List[Document]:
"""Load Documents from URLs."""
return list(self.lazy_load())
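A minimal usage sketch for the loader above; the API token and URL are placeholders:

from langchain.document_loaders.browserless import BrowserlessLoader

loader = BrowserlessLoader(
    api_token="<browserless-api-token>",
    urls=["https://example.com"],
    text_content=True,  # use the /scrape endpoint and return extracted text
)
docs = loader.load()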
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/browserless.html
Source code for langchain.document_loaders.notion
"""Loads Notion directory dump."""
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class NotionDirectoryLoader(BaseLoader):
"""Loads Notion directory dump."""
def __init__(self, path: str):
"""Initialize with a file path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
ps = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in ps:
with open(p) as f:
text = f.read()
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
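A minimal usage sketch for the loader above; "Notion_DB/" is a placeholder export directory:

from langchain.document_loaders.notion import NotionDirectoryLoader

loader = NotionDirectoryLoader("Notion_DB/")
docs = loader.load()  # one Document per markdown file in the export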
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/notion.html
Source code for langchain.document_loaders.tensorflow_datasets
from typing import Callable, Dict, Iterator, List, Optional
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.utilities.tensorflow_datasets import TensorflowDatasets
class TensorflowDatasetLoader(BaseLoader):
"""Loads from TensorFlow Datasets into a list of Documents.
Attributes:
dataset_name: the name of the dataset to load
split_name: the name of the split to load.
load_max_docs: a limit to the number of loaded documents. Defaults to 100.
sample_to_document_function: a function that converts a dataset sample
into a Document
Example:
.. code-block:: python
from langchain.document_loaders import TensorflowDatasetLoader
def mlqaen_example_to_document(example: dict) -> Document:
return Document(
page_content=decode_to_str(example["context"]),
metadata={
"id": decode_to_str(example["id"]),
"title": decode_to_str(example["title"]),
"question": decode_to_str(example["question"]),
"answer": decode_to_str(example["answers"]["text"][0]),
},
)
tsds_client = TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="test",
load_max_docs=100,
sample_to_document_function=mlqaen_example_to_document,
)
"""
def __init__(
self,
dataset_name: str,
split_name: str,
load_max_docs: Optional[int] = 100,
sample_to_document_function: Optional[Callable[[Dict], Document]] = None,
):
"""Initialize the TensorflowDatasetLoader.
Args:
dataset_name: the name of the dataset to load
split_name: the name of the split to load.
load_max_docs: a limit to the number of loaded documents. Defaults to 100.
sample_to_document_function: a function that converts a dataset sample
into a Document.
"""
self.dataset_name: str = dataset_name
self.split_name: str = split_name
self.load_max_docs = load_max_docs
"""The maximum number of documents to load."""
self.sample_to_document_function: Optional[
Callable[[Dict], Document]
] = sample_to_document_function
"""Custom function that transform a dataset sample into a Document."""
self._tfds_client = TensorflowDatasets(
dataset_name=self.dataset_name,
split_name=self.split_name,
load_max_docs=self.load_max_docs,
sample_to_document_function=self.sample_to_document_function,
)
def lazy_load(self) -> Iterator[Document]:
yield from self._tfds_client.lazy_load()
def load(self) -> List[Document]:
return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tensorflow_datasets.html
Source code for langchain.document_loaders.slack_directory
"""Loader for documents from a Slack export."""
import json
import zipfile
from pathlib import Path
from typing import Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class SlackDirectoryLoader(BaseLoader):
"""Loads documents from a Slack directory dump."""
def __init__(self, zip_path: str, workspace_url: Optional[str] = None):
"""Initialize the SlackDirectoryLoader.
Args:
zip_path (str): The path to the Slack directory dump zip file.
workspace_url (Optional[str]): The Slack workspace URL.
Including the URL will turn
sources into links. Defaults to None.
"""
self.zip_path = Path(zip_path)
self.workspace_url = workspace_url
self.channel_id_map = self._get_channel_id_map(self.zip_path)
@staticmethod
def _get_channel_id_map(zip_path: Path) -> Dict[str, str]:
"""Get a dictionary mapping channel names to their respective IDs."""
with zipfile.ZipFile(zip_path, "r") as zip_file:
try:
with zip_file.open("channels.json", "r") as f:
channels = json.load(f)
return {channel["name"]: channel["id"] for channel in channels}
except KeyError:
return {}
[docs] def load(self) -> List[Document]:
"""Load and return documents from the Slack directory dump."""
docs = []
with zipfile.ZipFile(self.zip_path, "r") as zip_file:
for channel_path in zip_file.namelist():
channel_name = Path(channel_path).parent.name
if not channel_name:
continue
if channel_path.endswith(".json"):
messages = self._read_json(zip_file, channel_path)
for message in messages:
document = self._convert_message_to_document(
message, channel_name
)
docs.append(document)
return docs
def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
"""Read JSON data from a zip subfile."""
with zip_file.open(file_path, "r") as f:
data = json.load(f)
return data
def _convert_message_to_document(
self, message: dict, channel_name: str
) -> Document:
"""
Convert a message to a Document object.
Args:
message (dict): A message in the form of a dictionary.
channel_name (str): The name of the channel the message belongs to.
Returns:
Document: A Document object representing the message.
"""
text = message.get("text", "")
metadata = self._get_message_metadata(message, channel_name)
return Document(
page_content=text,
metadata=metadata,
)
def _get_message_metadata(self, message: dict, channel_name: str) -> dict:
"""Create and return metadata for a given message and channel."""
timestamp = message.get("ts", "")
user = message.get("user", "")
source = self._get_message_source(channel_name, user, timestamp)
return {
"source": source,
"channel": channel_name,
"timestamp": timestamp,
"user": user,
}
def _get_message_source(self, channel_name: str, user: str, timestamp: str) -> str:
"""
Get the message source as a string.
Args:
channel_name (str): The name of the channel the message belongs to.
user (str): The user ID who sent the message.
timestamp (str): The timestamp of the message.
Returns:
str: The message source.
"""
if self.workspace_url:
channel_id = self.channel_id_map.get(channel_name, "")
return (
f"{self.workspace_url}/archives/{channel_id}"
+ f"/p{timestamp.replace('.', '')}"
)
else:
return f"{channel_name} - {user} - {timestamp}"
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/slack_directory.html
|
ce275bcdecac-0
|
Source code for langchain.document_loaders.hugging_face_dataset
"""Loads HuggingFace datasets."""
from typing import Iterator, List, Mapping, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class HuggingFaceDatasetLoader(BaseLoader):
"""Load Documents from the Hugging Face Hub."""
[docs] def __init__(
self,
path: str,
page_content_column: str = "text",
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[
Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]
] = None,
cache_dir: Optional[str] = None,
keep_in_memory: Optional[bool] = None,
save_infos: bool = False,
use_auth_token: Optional[Union[bool, str]] = None,
num_proc: Optional[int] = None,
):
"""Initialize the HuggingFaceDatasetLoader.
Args:
path: Path or name of the dataset.
page_content_column: Page content column name. Default is "text".
name: Name of the dataset configuration.
data_dir: Data directory of the dataset configuration.
data_files: Path(s) to source data file(s).
cache_dir: Directory to read/write data.
keep_in_memory: Whether to copy the dataset in-memory.
save_infos: Save the dataset information (checksums/size/splits/...).
Default is False.
use_auth_token: Bearer token for remote files on the Dataset Hub.
num_proc: Number of processes.
"""
self.path = path
self.page_content_column = page_content_column
self.name = name
self.data_dir = data_dir
self.data_files = data_files
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.save_infos = save_infos
self.use_auth_token = use_auth_token
self.num_proc = num_proc
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily."""
try:
from datasets import load_dataset
except ImportError:
raise ImportError(
"Could not import datasets python package. "
"Please install it with `pip install datasets`."
)
dataset = load_dataset(
path=self.path,
name=self.name,
data_dir=self.data_dir,
data_files=self.data_files,
cache_dir=self.cache_dir,
keep_in_memory=self.keep_in_memory,
save_infos=self.save_infos,
use_auth_token=self.use_auth_token,
num_proc=self.num_proc,
)
yield from (
Document(
page_content=row.pop(self.page_content_column),
metadata=row,
)
for key in dataset.keys()
for row in dataset[key]
)
[docs] def load(self) -> List[Document]:
"""Load documents."""
return list(self.lazy_load())
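A minimal usage sketch for HuggingFaceDatasetLoader; the public "imdb" dataset is an illustrative choice with a "text" column:
loader = HuggingFaceDatasetLoader(path="imdb", page_content_column="text")
docs = loader.load()  # or iterate with loader.lazy_load() to avoid materializing everything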
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/hugging_face_dataset.html
|
f2cc23a810a0-0
|
Source code for langchain.document_loaders.tsv
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
[docs]class UnstructuredTSVLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load TSV files. Like other
Unstructured loaders, UnstructuredTSVLoader can be used in both
"single" and "elements" mode. If you use the loader in "elements"
mode, the TSV file will be a single Unstructured Table element.
If you use the loader in "elements" mode, an HTML representation
of the table will be available in the "text_as_html" key in the
document metadata.
Examples
--------
from langchain.document_loaders.tsv import UnstructuredTSVLoader
loader = UnstructuredTSVLoader("stanley-cups.tsv", mode="elements")
docs = loader.load()
"""
[docs] def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
validate_unstructured_version(min_unstructured_version="0.7.6")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.tsv import partition_tsv
return partition_tsv(filename=self.file_path, **self.unstructured_kwargs)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tsv.html
|
ce0b10d669af-0
|
Source code for langchain.document_loaders.geodataframe
"""Load from Dataframe object"""
from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class GeoDataFrameLoader(BaseLoader):
"""Load geopandas Dataframe."""
[docs] def __init__(self, data_frame: Any, page_content_column: str = "geometry"):
"""Initialize with geopandas Dataframe.
Args:
data_frame: geopandas DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "geometry".
"""
try:
import geopandas as gpd
except ImportError:
raise ImportError(
"geopandas package not found, please install it with "
"`pip install geopandas`"
)
if not isinstance(data_frame, gpd.GeoDataFrame):
raise ValueError(
f"Expected data_frame to be a gpd.GeoDataFrame, got {type(data_frame)}"
)
self.data_frame = data_frame
self.page_content_column = page_content_column
[docs] def lazy_load(self) -> Iterator[Document]:
"""Lazy load records from dataframe."""
for _, row in self.data_frame.iterrows():
text = row[self.page_content_column]
metadata = row.to_dict()
metadata.pop(self.page_content_column)
# Enforce str since shapely Point objects
# (the geometry type used in GeoPandas) are not strings
yield Document(page_content=str(text), metadata=metadata)
[docs] def load(self) -> List[Document]:
"""Load full dataframe."""
return list(self.lazy_load())
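A minimal usage sketch for GeoDataFrameLoader, assuming a local GeoJSON file (the filename is hypothetical):
import geopandas as gpd
gdf = gpd.read_file("parks.geojson")
loader = GeoDataFrameLoader(data_frame=gdf, page_content_column="geometry")
docs = loader.load()  # one Document per row; the remaining columns become metadata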
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/geodataframe.html
|
f03acb6952b1-0
|
Source code for langchain.document_loaders.html
"""Loader that uses unstructured to load HTML files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredHTMLLoader(UnstructuredFileLoader):
"""Loader that uses Unstructured to load HTML files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredHTMLLoader
loader = UnstructuredHTMLLoader(
"example.html", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-html
"""
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html
return partition_html(filename=self.file_path, **self.unstructured_kwargs)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/html.html
|
b599a17a3f8c-0
|
Source code for langchain.document_loaders.college_confidential
"""Loads College Confidential."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
[docs]class CollegeConfidentialLoader(WebBaseLoader):
"""Loads College Confidential webpages."""
[docs] def load(self) -> List[Document]:
"""Load webpages as Documents."""
soup = self.scrape()
text = soup.select_one("main[class='skin-handler']").text
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]
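A minimal usage sketch (the page URL is a hypothetical example):
loader = CollegeConfidentialLoader("https://www.collegeconfidential.com/colleges/brown-university/")
docs = loader.load()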
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/college_confidential.html
|
ded42ce1fdfe-0
|
Source code for langchain.document_loaders.mastodon
"""Mastodon document loader."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import mastodon
def _dependable_mastodon_import() -> mastodon:
try:
import mastodon
except ImportError:
raise ImportError(
"Mastodon.py package not found, "
"please install it with `pip install Mastodon.py`"
)
return mastodon
[docs]class MastodonTootsLoader(BaseLoader):
"""Mastodon toots loader."""
[docs] def __init__(
self,
mastodon_accounts: Sequence[str],
number_toots: Optional[int] = 100,
exclude_replies: bool = False,
access_token: Optional[str] = None,
api_base_url: str = "https://mastodon.social",
):
"""Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account. Defaults to 100.
exclude_replies: Whether to exclude reply toots from the load.
Defaults to False.
access_token: An access token if toots are loaded as a Mastodon app. Can
also be specified via the environment variable "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
Defaults to "https://mastodon.social".
"""
mastodon = _dependable_mastodon_import()
access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN")
self.api = mastodon.Mastodon(
access_token=access_token, api_base_url=api_base_url
)
self.mastodon_accounts = mastodon_accounts
self.number_toots = number_toots
self.exclude_replies = exclude_replies
[docs] def load(self) -> List[Document]:
"""Load toots into documents."""
results: List[Document] = []
for account in self.mastodon_accounts:
user = self.api.account_lookup(account)
toots = self.api.account_statuses(
user.id,
only_media=False,
pinned=False,
exclude_replies=self.exclude_replies,
exclude_reblogs=True,
limit=self.number_toots,
)
docs = self._format_toots(toots, user)
results.extend(docs)
return results
def _format_toots(
self, toots: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format toots into documents.
Adding user info, and selected toot fields into the metadata.
"""
for toot in toots:
metadata = {
"created_at": toot["created_at"],
"user_info": user_info,
"is_reply": toot["in_reply_to_id"] is not None,
}
yield Document(
page_content=toot["content"],
metadata=metadata,
)
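A minimal usage sketch for MastodonTootsLoader (the account handle is a hypothetical example; public toots generally require no access token):
loader = MastodonTootsLoader(
    mastodon_accounts=["@Gargron@mastodon.social"],
    number_toots=50,
)
docs = loader.load()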
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mastodon.html
|
542244ea6e20-0
|
Source code for langchain.document_loaders.image_captions
"""Loads image captions.
By default, the loader utilizes the pre-trained BLIP image captioning model.
https://huggingface.co/Salesforce/blip-image-captioning-base
"""
from typing import Any, List, Tuple, Union
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ImageCaptionLoader(BaseLoader):
"""Loads the captions of an image"""
[docs] def __init__(
self,
path_images: Union[str, List[str]],
blip_processor: str = "Salesforce/blip-image-captioning-base",
blip_model: str = "Salesforce/blip-image-captioning-base",
):
"""
Initialize with a list of image paths
Args:
path_images: A list of image paths.
blip_processor: The name of the pre-trained BLIP processor.
blip_model: The name of the pre-trained BLIP model.
"""
if isinstance(path_images, str):
self.image_paths = [path_images]
else:
self.image_paths = path_images
self.blip_processor = blip_processor
self.blip_model = blip_model
[docs] def load(self) -> List[Document]:
"""
Load from a list of image files
"""
try:
from transformers import BlipForConditionalGeneration, BlipProcessor
except ImportError:
raise ImportError(
"`transformers` package not found, please install with "
"`pip install transformers`."
)
processor = BlipProcessor.from_pretrained(self.blip_processor)
model = BlipForConditionalGeneration.from_pretrained(self.blip_model)
results = []
for path_image in self.image_paths:
caption, metadata = self._get_captions_and_metadata(
model=model, processor=processor, path_image=path_image
)
doc = Document(page_content=caption, metadata=metadata)
results.append(doc)
return results
def _get_captions_and_metadata(
self, model: Any, processor: Any, path_image: str
) -> Tuple[str, dict]:
"""
Helper function for getting the captions and metadata of an image
"""
try:
from PIL import Image
except ImportError:
raise ImportError(
"`PIL` package not found, please install with `pip install pillow`"
)
try:
if path_image.startswith("http://") or path_image.startswith("https://"):
image = Image.open(requests.get(path_image, stream=True).raw).convert(
"RGB"
)
else:
image = Image.open(path_image).convert("RGB")
except Exception:
raise ValueError(f"Could not get image data for {path_image}")
inputs = processor(image, "an image of", return_tensors="pt")
output = model.generate(**inputs)
caption: str = processor.decode(output[0])
metadata: dict = {"image_path": path_image}
return caption, metadata
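A minimal usage sketch for ImageCaptionLoader (the image path is hypothetical; the BLIP weights are downloaded on first use):
loader = ImageCaptionLoader(path_images=["photo.jpg"])
docs = loader.load()
print(docs[0].page_content)  # a caption such as "an image of ..."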
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/image_captions.html
|
dc1e1802cab0-0
|
Source code for langchain.document_loaders.bibtex
import logging
import re
from pathlib import Path
from typing import Any, Iterator, List, Mapping, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.bibtex import BibtexparserWrapper
logger = logging.getLogger(__name__)
[docs]class BibtexLoader(BaseLoader):
"""Loads a bibtex file into a list of Documents.
Each document represents one entry from the bibtex file.
If a PDF file is present in the `file` bibtex field, the original PDF
is loaded into the document text. If no such file entry is present,
the `abstract` field is used instead.
"""
[docs] def __init__(
self,
file_path: str,
*,
parser: Optional[BibtexparserWrapper] = None,
max_docs: Optional[int] = None,
max_content_chars: Optional[int] = 4_000,
load_extra_metadata: bool = False,
file_pattern: str = r"[^:]+\.pdf",
):
"""Initialize the BibtexLoader.
Args:
file_path: Path to the bibtex file.
parser: The parser to use. If None, a default parser is used.
max_docs: Max number of associated documents to load. Use -1 for
no limit.
max_content_chars: Maximum number of characters to load from the PDF.
load_extra_metadata: Whether to load extra metadata from the PDF.
file_pattern: Regex pattern to match the file name in the bibtex.
"""
self.file_path = file_path
self.parser = parser or BibtexparserWrapper()
self.max_docs = max_docs
self.max_content_chars = max_content_chars
self.load_extra_metadata = load_extra_metadata
self.file_regex = re.compile(file_pattern)
def _load_entry(self, entry: Mapping[str, Any]) -> Optional[Document]:
import fitz
parent_dir = Path(self.file_path).parent
# regex is useful for Zotero flavor bibtex files
file_names = self.file_regex.findall(entry.get("file", ""))
if not file_names:
return None
texts: List[str] = []
for file_name in file_names:
try:
with fitz.open(parent_dir / file_name) as f:
texts.extend(page.get_text() for page in f)
except FileNotFoundError as e:
logger.debug(e)
content = "\n".join(texts) or entry.get("abstract", "")
if self.max_content_chars:
content = content[: self.max_content_chars]
metadata = self.parser.get_metadata(entry, load_extra=self.load_extra_metadata)
return Document(
page_content=content,
metadata=metadata,
)
[docs] def lazy_load(self) -> Iterator[Document]:
"""Load bibtex file using bibtexparser and get the article texts plus the
article metadata.
See https://bibtexparser.readthedocs.io/en/master/
Returns:
a list of documents with the document.page_content in text format
"""
try:
import fitz # noqa: F401
except ImportError:
raise ImportError(
"PyMuPDF package not found, please install it with "
"`pip install pymupdf`"
)
entries = self.parser.load_bibtex_entries(self.file_path)
if self.max_docs:
entries = entries[: self.max_docs]
for entry in entries:
doc = self._load_entry(entry)
if doc:
yield doc
[docs] def load(self) -> List[Document]:
"""Load bibtex file documents from the given bibtex file path.
See https://bibtexparser.readthedocs.io/en/master/
Args:
file_path: the path to the bibtex file
Returns:
a list of documents with the document.page_content in text format
"""
return list(self.lazy_load())
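A minimal usage sketch for BibtexLoader (the .bib filename is hypothetical):
loader = BibtexLoader("references.bib")
docs = loader.load()  # PDF text where an entry's `file` field resolves, otherwise the abstract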
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/bibtex.html
|
94c89bb862a4-0
|
Source code for langchain.document_loaders.blob_loaders.file_system
"""Use to load blobs from the local file system."""
from pathlib import Path
from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union
from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader
T = TypeVar("T")
def _make_iterator(
length_func: Callable[[], int], show_progress: bool = False
) -> Callable[[Iterable[T]], Iterator[T]]:
"""Create a function that optionally wraps an iterable in tqdm."""
if show_progress:
try:
from tqdm.auto import tqdm
except ImportError:
raise ImportError(
"You must install tqdm to use show_progress=True."
"You can install tqdm with `pip install tqdm`."
)
# Make sure to provide `total` here so that tqdm can show
# a progress bar that takes into account the total number of files.
def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]:
"""Wrap an iterable in a tqdm progress bar."""
return tqdm(iterable, total=length_func())
iterator = _with_tqdm
else:
iterator = iter # type: ignore
return iterator
# PUBLIC API
[docs]class FileSystemBlobLoader(BlobLoader):
"""Blob loader for the local file system.
Example:
.. code-block:: python
from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
loader = FileSystemBlobLoader("/path/to/directory")
for blob in loader.yield_blobs():
print(blob)
"""
[docs] def __init__(
self,
path: Union[str, Path],
*,
glob: str = "**/[!.]*",
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
) -> None:
"""Initialize with path to directory and how to glob over it.
Args:
path: Path to directory to load from
glob: Glob pattern relative to the specified path
by default set to pick up all non-hidden files
suffixes: Provide to keep only files with these suffixes
Useful when wanting to keep files with different suffixes
Suffixes must include the dot, e.g. ".txt"
show_progress: If true, will show a progress bar as the files are loaded.
This forces an iteration through all matching files
to count them prior to loading them.
Examples:
.. code-block:: python
# Recursively load all text files in a directory.
loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt")
# Recursively load all non-hidden files in a directory.
loader = FileSystemBlobLoader("/path/to/directory", glob="**/[!.]*")
# Load all files in a directory without recursion.
loader = FileSystemBlobLoader("/path/to/directory", glob="*")
"""
if isinstance(path, Path):
_path = path
elif isinstance(path, str):
_path = Path(path)
else:
raise TypeError(f"Expected str or Path, got {type(path)}")
self.path = _path
self.glob = glob
self.suffixes = set(suffixes or [])
self.show_progress = show_progress
[docs] def yield_blobs(
self,
) -> Iterable[Blob]:
"""Yield blobs that match the requested pattern."""
iterator = _make_iterator(
length_func=self.count_matching_files, show_progress=self.show_progress
)
for path in iterator(self._yield_paths()):
yield Blob.from_path(path)
def _yield_paths(self) -> Iterable[Path]:
"""Yield paths that match the requested pattern."""
paths = self.path.glob(self.glob)
for path in paths:
if path.is_file():
if self.suffixes and path.suffix not in self.suffixes:
continue
yield path
[docs] def count_matching_files(self) -> int:
"""Count files that match the pattern without loading them."""
# Carry out a full iteration to count the files without
# materializing anything expensive in memory.
num = 0
for _ in self._yield_paths():
num += 1
return num
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/file_system.html
|
bb25e63f4842-0
|
Source code for langchain.document_loaders.blob_loaders.youtube_audio
from typing import Iterable, List
from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader
[docs]class YoutubeAudioLoader(BlobLoader):
"""Load YouTube urls as audio file(s)."""
[docs] def __init__(self, urls: List[str], save_dir: str):
if not isinstance(urls, list):
raise TypeError("urls must be a list")
self.urls = urls
self.save_dir = save_dir
[docs] def yield_blobs(self) -> Iterable[Blob]:
"""Yield audio blobs for each url."""
try:
import yt_dlp
except ImportError:
raise ImportError(
"yt_dlp package not found, please install it with "
"`pip install yt_dlp`"
)
# Use yt_dlp to download audio given a YouTube url
ydl_opts = {
"format": "m4a/bestaudio/best",
"noplaylist": True,
"outtmpl": self.save_dir + "/%(title)s.%(ext)s",
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "m4a",
}
],
}
for url in self.urls:
# Download file
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
ydl.download(url)
# Yield the written blobs
loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a")
for blob in loader.yield_blobs():
yield blob
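A minimal usage sketch for YoutubeAudioLoader (the video URL and save directory are hypothetical):
urls = ["https://www.youtube.com/watch?v=VIDEO_ID"]  # hypothetical video id
loader = YoutubeAudioLoader(urls, save_dir="audio_downloads")
for blob in loader.yield_blobs():  # downloads audio via yt_dlp, then yields .m4a blobs
    print(blob.source)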
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/youtube_audio.html
|
6c1960077bda-0
|
Source code for langchain.document_loaders.blob_loaders.schema
"""Schema for Blobs and Blob Loaders.
The goal is to facilitate decoupling of content loading from content parsing code.
In addition, content loading code should provide a lazy loading interface by default.
"""
from __future__ import annotations
import contextlib
import mimetypes
from abc import ABC, abstractmethod
from io import BufferedReader, BytesIO
from pathlib import PurePath
from typing import Any, Generator, Iterable, Mapping, Optional, Union
from pydantic import BaseModel, root_validator
PathLike = Union[str, PurePath]
[docs]class Blob(BaseModel):
"""A blob is used to represent raw data by either reference or value.
Provides an interface to materialize the blob in different representations, and
helps to decouple the development of data loaders from the downstream parsing of
the raw data.
Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob
"""
data: Union[bytes, str, None] # Raw data
mimetype: Optional[str] = None # Not to be confused with a file extension
encoding: str = "utf-8" # Use utf-8 as default encoding, if decoding to string
# Location where the original content was found
# Represent location on the local file system
# Useful for situations where downstream code assumes it must work with file paths
# rather than in-memory content.
path: Optional[PathLike] = None
class Config:
arbitrary_types_allowed = True
frozen = True
@property
def source(self) -> Optional[str]:
"""The source location of the blob as string if known otherwise none."""
return str(self.path) if self.path else None
@root_validator(pre=True)
def check_blob_is_valid(cls, values: Mapping[str, Any]) -> Mapping[str, Any]:
"""Verify that either data or path is provided."""
if "data" not in values and "path" not in values:
raise ValueError("Either data or path must be provided")
return values
[docs] def as_string(self) -> str:
"""Read data as a string."""
if self.data is None and self.path:
with open(str(self.path), "r", encoding=self.encoding) as f:
return f.read()
elif isinstance(self.data, bytes):
return self.data.decode(self.encoding)
elif isinstance(self.data, str):
return self.data
else:
raise ValueError(f"Unable to get string for blob {self}")
[docs] def as_bytes(self) -> bytes:
"""Read data as bytes."""
if isinstance(self.data, bytes):
return self.data
elif isinstance(self.data, str):
return self.data.encode(self.encoding)
elif self.data is None and self.path:
with open(str(self.path), "rb") as f:
return f.read()
else:
raise ValueError(f"Unable to get bytes for blob {self}")
[docs] @contextlib.contextmanager
def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]:
"""Read data as a byte stream."""
if isinstance(self.data, bytes):
yield BytesIO(self.data)
elif self.data is None and self.path:
with open(str(self.path), "rb") as f:
yield f
else:
raise NotImplementedError(f"Unable to convert blob {self}")
[docs] @classmethod
def from_path(
cls,
path: PathLike,
*,
encoding: str = "utf-8",
mime_type: Optional[str] = None,
guess_type: bool = True,
) -> Blob:
"""Load the blob from a path like object.
Args:
path: path like object to file to be read
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
guess_type: If True, the mimetype will be guessed from the file extension,
if a mime-type was not provided
Returns:
Blob instance
"""
if mime_type is None and guess_type:
_mimetype = mimetypes.guess_type(path)[0] if guess_type else None
else:
_mimetype = mime_type
# We do not load the data immediately, instead we treat the blob as a
# reference to the underlying data.
return cls(data=None, mimetype=_mimetype, encoding=encoding, path=path)
[docs] @classmethod
def from_data(
cls,
data: Union[str, bytes],
*,
encoding: str = "utf-8",
mime_type: Optional[str] = None,
path: Optional[str] = None,
) -> Blob:
"""Initialize the blob from in-memory data.
Args:
data: the in-memory data associated with the blob
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
path: if provided, will be set as the source from which the data came
Returns:
Blob instance
"""
return cls(data=data, mimetype=mime_type, encoding=encoding, path=path)
def __repr__(self) -> str:
"""Define the blob representation."""
str_repr = f"Blob {id(self)}"
if self.source:
str_repr += f" {self.source}"
return str_repr
[docs]class BlobLoader(ABC):
"""Abstract interface for blob loaders implementation.
Implementer should be able to load raw content from a storage system according
to some criteria and return the raw content lazily as a stream of blobs.
"""
[docs] @abstractmethod
def yield_blobs(
self,
) -> Iterable[Blob]:
"""A lazy loader for raw data represented by LangChain's Blob object.
Returns:
A generator over blobs
"""
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/schema.html
|
ee72afc3877b-0
|
Source code for langchain.document_loaders.parsers.pdf
"""Module contains common parsers for PDFs."""
from typing import Any, Iterator, Mapping, Optional, Sequence, Union
from urllib.parse import urlparse
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.schema import Document
[docs]class PyPDFParser(BaseBlobParser):
"""Loads a PDF with pypdf and chunks at character level."""
[docs] def __init__(self, password: Optional[Union[str, bytes]] = None):
self.password = password
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import pypdf
with blob.as_bytes_io() as pdf_file_obj:
pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password)
yield from [
Document(
page_content=page.extract_text(),
metadata={"source": blob.source, "page": page_number},
)
for page_number, page in enumerate(pdf_reader.pages)
]
[docs]class PDFMinerParser(BaseBlobParser):
"""Parse PDFs with PDFMiner."""
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
from pdfminer.high_level import extract_text
with blob.as_bytes_io() as pdf_file_obj:
text = extract_text(pdf_file_obj)
metadata = {"source": blob.source}
yield Document(page_content=text, metadata=metadata)
[docs]class PyMuPDFParser(BaseBlobParser):
"""Parse PDFs with PyMuPDF."""
[docs] def __init__(self, text_kwargs: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``fitz.Page.get_text()``.
"""
self.text_kwargs = text_kwargs or {}
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import fitz
with blob.as_bytes_io() as file_path:
doc = fitz.open(file_path) # open document
yield from [
Document(
page_content=page.get_text(**self.text_kwargs),
metadata=dict(
{
"source": blob.source,
"file_path": blob.source,
"page": page.number,
"total_pages": len(doc),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc
]
[docs]class PyPDFium2Parser(BaseBlobParser):
"""Parse PDFs with PyPDFium2."""
[docs] def __init__(self) -> None:
"""Initialize the parser."""
try:
import pypdfium2 # noqa:F401
except ImportError:
raise ImportError(
"pypdfium2 package not found, please install it with"
" `pip install pypdfium2`"
)
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import pypdfium2
# pypdfium2 is really finicky with respect to closing things;
# if done incorrectly it creates segfaults.
with blob.as_bytes_io() as file_path:
pdf_reader = pypdfium2.PdfDocument(file_path, autoclose=True)
try:
for page_number, page in enumerate(pdf_reader):
text_page = page.get_textpage()
content = text_page.get_text_range()
text_page.close()
page.close()
metadata = {"source": blob.source, "page": page_number}
yield Document(page_content=content, metadata=metadata)
finally:
pdf_reader.close()
[docs]class PDFPlumberParser(BaseBlobParser):
"""Parse PDFs with PDFPlumber."""
[docs] def __init__(self, text_kwargs: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``pdfplumber.Page.extract_text()``
"""
self.text_kwargs = text_kwargs or {}
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import pdfplumber
with blob.as_bytes_io() as file_path:
doc = pdfplumber.open(file_path) # open document
yield from [
Document(
page_content=page.extract_text(**self.text_kwargs),
metadata=dict(
{
"source": blob.source,
"file_path": blob.source,
"page": page.page_number,
"total_pages": len(doc.pages),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc.pages
]
[docs]class AmazonTextractPDFParser(BaseBlobParser):
"""Sends PDF files to Amazon Textract and parses them to generate Documents.
For parsing multi-page PDFs, they have to reside on S3.
"""
[docs] def __init__(
self,
textract_features: Optional[Sequence[int]] = None,
client: Optional[Any] = None,
) -> None:
"""Initializes the parser.
Args:
textract_features: Features to be used for extraction, each feature
should be passed as an int that conforms to the enum
`Textract_Features`, see `amazon-textract-caller` pkg
client: boto3 textract client
"""
try:
import textractcaller as tc
self.tc = tc
if textract_features is not None:
self.textract_features = [
tc.Textract_Features(f) for f in textract_features
]
else:
self.textract_features = []
except ImportError:
raise ImportError(
"Could not import amazon-textract-caller python package. "
"Please install it with `pip install amazon-textract-caller`."
)
if not client:
try:
import boto3
self.boto3_textract_client = boto3.client("textract")
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
else:
self.boto3_textract_client = client
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Iterates over the Blob pages and returns an Iterator with a Document
for each page, like the other parsers If multi-page document, blob.path
has to be set to the S3 URI and for single page docs the blob.data is taken
"""
url_parse_result = urlparse(str(blob.path)) if blob.path else None
# Either call with S3 path (multi-page) or with bytes (single-page)
if (
url_parse_result
and url_parse_result.scheme == "s3"
and url_parse_result.netloc
):
textract_response_json = self.tc.call_textract(
input_document=str(blob.path),
features=self.textract_features,
boto3_textract_client=self.boto3_textract_client,
)
else:
textract_response_json = self.tc.call_textract(
input_document=blob.as_bytes(),
features=self.textract_features,
call_mode=self.tc.Textract_Call_Mode.FORCE_SYNC,
boto3_textract_client=self.boto3_textract_client,
)
current_text = ""
current_page = 1
for block in textract_response_json["Blocks"]:
if "Page" in block and not (int(block["Page"]) == current_page):
yield Document(
page_content=current_text,
metadata={"source": blob.source, "page": current_page},
)
current_text = ""
current_page = int(block["Page"])
if "Text" in block:
current_text += block["Text"] + " "
yield Document(
page_content=current_text,
metadata={"source": blob.source, "page": current_page},
)
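A minimal sketch showing how these parsers pair with Blob, using PyPDFParser (the file path is hypothetical; requires `pip install pypdf`):
from langchain.document_loaders.blob_loaders import Blob
blob = Blob.from_path("paper.pdf")
for doc in PyPDFParser().lazy_parse(blob):
    print(doc.metadata["page"], len(doc.page_content))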
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/pdf.html
|
76ad9265f680-0
|
Source code for langchain.document_loaders.parsers.registry
"""Module includes a registry of default parser configurations."""
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.parsers.generic import MimeTypeBasedParser
from langchain.document_loaders.parsers.pdf import PyMuPDFParser
from langchain.document_loaders.parsers.txt import TextParser
def _get_default_parser() -> BaseBlobParser:
"""Get default mime-type based parser."""
return MimeTypeBasedParser(
handlers={
"application/pdf": PyMuPDFParser(),
"text/plain": TextParser(),
},
fallback_parser=None,
)
_REGISTRY = {
"default": _get_default_parser,
}
# PUBLIC API
[docs]def get_parser(parser_name: str) -> BaseBlobParser:
"""Get a parser by parser name."""
if parser_name not in _REGISTRY:
raise ValueError(f"Unknown parser combination: {parser_name}")
return _REGISTRY[parser_name]()
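A minimal sketch of the registry in use (the text file path is hypothetical):
from langchain.document_loaders.blob_loaders import Blob
parser = get_parser("default")
docs = list(parser.lazy_parse(Blob.from_path("notes.txt")))  # routed to TextParser by mime-type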
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/registry.html
|
85583c300aba-0
|
Source code for langchain.document_loaders.parsers.txt
"""Module for parsing text files.."""
from typing import Iterator
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.schema import Document
[docs]class TextParser(BaseBlobParser):
"""Parser for text blobs."""
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
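A minimal sketch of TextParser (the in-memory data and path are hypothetical):
from langchain.document_loaders.blob_loaders import Blob
parser = TextParser()
docs = list(parser.lazy_parse(Blob.from_data("hello", path="inline.txt")))
assert docs[0].metadata["source"] == "inline.txt"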
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/txt.html
|
386cc420576e-0
|
Source code for langchain.document_loaders.parsers.generic
"""Code for generic / auxiliary parsers.
This module contains some logic to help assemble more sophisticated parsers.
"""
from typing import Iterator, Mapping, Optional
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders.schema import Blob
from langchain.schema import Document
[docs]class MimeTypeBasedParser(BaseBlobParser):
"""A parser that uses mime-types to determine how to parse a blob.
This parser is useful for simple pipelines where the mime-type is sufficient
to determine how to parse a blob.
To use, configure handlers based on mime-types and pass them to the initializer.
Example:
.. code-block:: python
from langchain.document_loaders.parsers.generic import MimeTypeBasedParser
parser = MimeTypeBasedParser(
handlers={
"application/pdf": ...,
},
fallback_parser=...,
)
"""
[docs] def __init__(
self,
handlers: Mapping[str, BaseBlobParser],
*,
fallback_parser: Optional[BaseBlobParser] = None,
) -> None:
"""Define a parser that uses mime-types to determine how to parse a blob.
Args:
handlers: A mapping from mime-types to functions that take a blob, parse it
and return a document.
fallback_parser: A fallback_parser parser to use if the mime-type is not
found in the handlers. If provided, this parser will be
used to parse blobs with all mime-types not found in
the handlers.
If not provided, a ValueError will be raised if the
mime-type is not found in the handlers.
"""
self.handlers = handlers
self.fallback_parser = fallback_parser
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Load documents from a blob."""
mimetype = blob.mimetype
if mimetype is None:
raise ValueError(f"{blob} does not have a mimetype.")
if mimetype in self.handlers:
handler = self.handlers[mimetype]
yield from handler.lazy_parse(blob)
else:
if self.fallback_parser is not None:
yield from self.fallback_parser.lazy_parse(blob)
else:
raise ValueError(f"Unsupported mime type: {mimetype}")
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/generic.html
|
7d9267c44d13-0
|
Source code for langchain.document_loaders.parsers.grobid
import logging
from typing import Dict, Iterator, List, Union
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
[docs]class ServerUnavailableException(Exception):
"""Exception raised when the GROBID server is unavailable."""
pass
[docs]class GrobidParser(BaseBlobParser):
"""Loader that uses Grobid to load article PDF files."""
[docs] def __init__(
self,
segment_sentences: bool,
grobid_server: str = "http://localhost:8070/api/processFulltextDocument",
) -> None:
self.segment_sentences = segment_sentences
self.grobid_server = grobid_server
try:
requests.get(grobid_server)
except requests.exceptions.RequestException:
logger.error(
"GROBID server does not appear up and running, \
please ensure Grobid is installed and the server is running"
)
raise ServerUnavailableException
[docs] def process_xml(
self, file_path: str, xml_data: str, segment_sentences: bool
) -> Iterator[Document]:
"""Process the XML file from Grobin."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"`bs4` package not found, please install it with " "`pip install bs4`"
)
soup = BeautifulSoup(xml_data, "xml")
sections = soup.find_all("div")
title = soup.find_all("title")[0].text
chunks = []
for section in sections:
sect = section.find("head")
if sect is not None:
for i, paragraph in enumerate(section.find_all("p")):
chunk_bboxes = []
paragraph_text = []
for sentence in paragraph.find_all("s"):
paragraph_text.append(sentence.text)
sbboxes = []
for bbox in sentence.get("coords").split(";"):
box = bbox.split(",")
sbboxes.append(
{
"page": box[0],
"x": box[1],
"y": box[2],
"h": box[3],
"w": box[4],
}
)
chunk_bboxes.append(sbboxes)
if segment_sentences is True:
fpage, lpage = sbboxes[0]["page"], sbboxes[-1]["page"]
sentence_dict = {
"text": sentence.text,
"para": str(i),
"bboxes": [sbboxes],
"section_title": sect.text,
"section_number": sect.get("n"),
"pages": (fpage, lpage),
}
chunks.append(sentence_dict)
if segment_sentences is not True:
fpage, lpage = (
chunk_bboxes[0][0]["page"],
chunk_bboxes[-1][-1]["page"],
)
paragraph_dict = {
"text": "".join(paragraph_text),
"para": str(i),
"bboxes": chunk_bboxes,
"section_title": sect.text,
"section_number": sect.get("n"),
"pages": (fpage, lpage),
}
chunks.append(paragraph_dict)
yield from [
Document(
page_content=chunk["text"],
metadata=dict(
{
"text": str(chunk["text"]),
"para": str(chunk["para"]),
"bboxes": str(chunk["bboxes"]),
"pages": str(chunk["pages"]),
"section_title": str(chunk["section_title"]),
"section_number": str(chunk["section_number"]),
"paper_title": str(title),
"file_path": str(file_path),
}
),
)
for chunk in chunks
]
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
file_path = blob.source
if file_path is None:
raise ValueError("blob.source cannot be None.")
pdf = open(file_path, "rb")
files = {"input": (file_path, pdf, "application/pdf", {"Expires": "0"})}
try:
data: Dict[str, Union[str, List[str]]] = {}
for param in ["generateIDs", "consolidateHeader", "segmentSentences"]:
data[param] = "1"
data["teiCoordinates"] = ["head", "s"]
files = files or {}
r = requests.request(
"POST",
self.grobid_server,
headers=None,
params=None,
files=files,
data=data,
timeout=60,
)
xml_data = r.text
except requests.exceptions.ReadTimeout:
logger.error("GROBID server timed out. Return None.")
xml_data = None
if xml_data is None:
return iter([])
else:
return self.process_xml(file_path, xml_data, self.segment_sentences)
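A minimal usage sketch pairing GrobidParser with GenericLoader (assumes a Grobid server is already running at localhost:8070; the directory is hypothetical):
from langchain.document_loaders.generic import GenericLoader
loader = GenericLoader.from_filesystem(
    "papers/",
    suffixes=[".pdf"],
    parser=GrobidParser(segment_sentences=False),
)
docs = loader.load()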
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/grobid.html
|
91271c883130-0
|
Source code for langchain.document_loaders.parsers.audio
import logging
import time
from typing import Dict, Iterator, Optional, Tuple
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.schema import Document
logger = logging.getLogger(__name__)
[docs]class OpenAIWhisperParser(BaseBlobParser):
"""Transcribe and parse audio files.
Audio transcription uses the OpenAI Whisper model."""
[docs] def __init__(self, api_key: Optional[str] = None):
self.api_key = api_key
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import io
try:
import openai
except ImportError:
raise ImportError(
"openai package not found, please install it with "
"`pip install openai`"
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with " "`pip install pydub`"
)
# Set the API key if provided
if self.api_key:
openai.api_key = self.api_key
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
# Define the duration of each chunk in minutes
# Need to meet 25MB size limit for Whisper API
chunk_duration = 20
chunk_duration_ms = chunk_duration * 60 * 1000
# Split the audio into chunk_duration_ms chunks
for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)):
# Audio chunk
chunk = audio[i : i + chunk_duration_ms]
file_obj = io.BytesIO(chunk.export(format="mp3").read())
if blob.source is not None:
file_obj.name = blob.source + f"_part_{split_number}.mp3"
else:
file_obj.name = f"part_{split_number}.mp3"
# Transcribe
print(f"Transcribing part {split_number+1}!")
attempts = 0
while attempts < 3:
try:
transcript = openai.Audio.transcribe("whisper-1", file_obj)
break
except Exception as e:
attempts += 1
print(f"Attempt {attempts} failed. Exception: {str(e)}")
time.sleep(5)
else:
print("Failed to transcribe after 3 attempts.")
continue
yield Document(
page_content=transcript.text,
metadata={"source": blob.source, "chunk": split_number},
)
[docs]class OpenAIWhisperParserLocal(BaseBlobParser):
"""Transcribe and parse audio files.
Audio transcription with an OpenAI Whisper model, run locally via transformers.
Parameters:
device - device to use
NOTE: by default the GPU is used if available;
to use the CPU instead, set device = "cpu"
lang_model - whisper model to use, for example "openai/whisper-medium"
forced_decoder_ids - id states for decoder in multilanguage model,
usage example:
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-medium")
forced_decoder_ids = WhisperProcessor.get_decoder_prompt_ids(language="french",
task="transcribe")
forced_decoder_ids = WhisperProcessor.get_decoder_prompt_ids(language="french",
task="translate")
"""
[docs] def __init__(
self,
device: str = "0",
lang_model: Optional[str] = None,
forced_decoder_ids: Optional[Tuple[Dict]] = None,
):
try:
from transformers import pipeline
except ImportError:
raise ImportError(
"transformers package not found, please install it with "
"`pip install transformers`"
)
try:
import torch
except ImportError:
raise ImportError(
"torch package not found, please install it with " "`pip install torch`"
)
# set device: cpu by default; otherwise check if there is a GPU available
if device == "cpu":
self.device = "cpu"
if lang_model is not None:
self.lang_model = lang_model
print("WARNING! Model override. Using model: ", self.lang_model)
else:
# unless overridden, use the small base model on cpu
self.lang_model = "openai/whisper-base"
else:
if torch.cuda.is_available():
self.device = "cuda:0"
# check GPU memory and select automatically the model
mem = torch.cuda.get_device_properties(self.device).total_memory / (
1024**2
)
if mem < 5000:
rec_model = "openai/whisper-base"
elif mem < 7000:
rec_model = "openai/whisper-small"
elif mem < 12000:
rec_model = "openai/whisper-medium"
else:
rec_model = "openai/whisper-large"
# check if model is overridden
if lang_model is not None:
self.lang_model = lang_model
print("WARNING! Model override. Might not fit in your GPU")
else:
self.lang_model = rec_model
else:
    # no GPU available; fall back to cpu with the default base model
    self.device = "cpu"
    self.lang_model = lang_model if lang_model is not None else "openai/whisper-base"
print("Using the following model: ", self.lang_model)
# load model for inference
self.pipe = pipeline(
"automatic-speech-recognition",
model=self.lang_model,
chunk_length_s=30,
device=self.device,
)
if forced_decoder_ids is not None:
try:
self.pipe.model.config.forced_decoder_ids = forced_decoder_ids
except Exception as exception_text:
logger.info(
"Unable to set forced_decoder_ids parameter for whisper model"
f"Text of exception: {exception_text}"
"Therefore whisper model will use default mode for decoder"
)
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import io
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with `pip install pydub`"
)
try:
import librosa
except ImportError:
raise ImportError(
"librosa package not found, please install it with "
"`pip install librosa`"
)
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
file_obj = io.BytesIO(audio.export(format="mp3").read())
# Transcribe
print(f"Transcribing part {blob.path}!")
y, sr = librosa.load(file_obj, sr=16000)
prediction = self.pipe(y.copy(), batch_size=8)["text"]
yield Document(
page_content=prediction,
metadata={"source": blob.source},
)
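A minimal end-to-end sketch combining YoutubeAudioLoader (shown earlier) with OpenAIWhisperParser (the video URL and save directory are hypothetical; the API-backed parser needs an OpenAI key):
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
loader = GenericLoader(
    YoutubeAudioLoader(["https://www.youtube.com/watch?v=VIDEO_ID"], save_dir="audio_downloads"),
    OpenAIWhisperParser(),
)
docs = loader.load()  # one Document per transcribed audio chunk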
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/audio.html
|
4af1401b8c78-0
|
Source code for langchain.document_loaders.parsers.language.javascript
from typing import Any, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
[docs]class JavaScriptSegmenter(CodeSegmenter):
"""The code segmenter for JavaScript."""
[docs] def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
try:
import esprima # noqa: F401
except ImportError:
raise ImportError(
"Could not import esprima Python package. "
"Please install it with `pip install esprima`."
)
[docs] def is_valid(self) -> bool:
import esprima
try:
esprima.parseScript(self.code)
return True
except esprima.Error:
return False
def _extract_code(self, node: Any) -> str:
start = node.loc.start.line - 1
end = node.loc.end.line
return "\n".join(self.source_lines[start:end])
[docs] def extract_functions_classes(self) -> List[str]:
import esprima
tree = esprima.parseScript(self.code, loc=True)
functions_classes = []
for node in tree.body:
if isinstance(
node,
(esprima.nodes.FunctionDeclaration, esprima.nodes.ClassDeclaration),
):
functions_classes.append(self._extract_code(node))
return functions_classes
[docs] def simplify_code(self) -> str:
import esprima
tree = esprima.parseScript(self.code, loc=True)
simplified_lines = self.source_lines[:]
for node in tree.body:
if isinstance(
node,
(esprima.nodes.FunctionDeclaration, esprima.nodes.ClassDeclaration),
):
start = node.loc.start.line - 1
simplified_lines[start] = f"// Code for: {simplified_lines[start]}"
for line_num in range(start + 1, node.loc.end.line):
simplified_lines[line_num] = None # type: ignore
return "\n".join(line for line in simplified_lines if line is not None)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/javascript.html
|
9ac418e3a027-0
|
Source code for langchain.document_loaders.parsers.language.language_parser
from typing import Any, Dict, Iterator, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.language.javascript import JavaScriptSegmenter
from langchain.document_loaders.parsers.language.python import PythonSegmenter
from langchain.text_splitter import Language
LANGUAGE_EXTENSIONS: Dict[str, str] = {
"py": Language.PYTHON,
"js": Language.JS,
}
LANGUAGE_SEGMENTERS: Dict[str, Any] = {
Language.PYTHON: PythonSegmenter,
Language.JS: JavaScriptSegmenter,
}
[docs]class LanguageParser(BaseBlobParser):
"""
Language parser that splits code using the respective language syntax.
Each top-level function and class in the code is loaded into separate documents.
Furthermore, an extra document is generated, containing the remaining top-level code
that excludes the already segmented functions and classes.
This approach can potentially improve the accuracy of QA models over source code.
Currently, the supported languages for code parsing are Python and JavaScript.
The language used for parsing can be configured, along with the minimum number of
lines required to activate the splitting based on syntax.
Examples:
.. code-block:: python
from langchain.text_splitter import Language
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import LanguageParser
loader = GenericLoader.from_filesystem(
"./code",
glob="**/*",
suffixes=[".py", ".js"],
parser=LanguageParser()
)
docs = loader.load()
Example instantiations to manually select the language:
.. code-block:: python
from langchain.text_splitter import Language
loader = GenericLoader.from_filesystem(
"./code",
glob="**/*",
suffixes=[".py"],
parser=LanguageParser(language=Language.PYTHON)
)
Example instantiations to set number of lines threshold:
.. code-block:: python
loader = GenericLoader.from_filesystem(
"./code",
glob="**/*",
suffixes=[".py"],
parser=LanguageParser(parser_threshold=200)
)
"""
[docs] def __init__(self, language: Optional[Language] = None, parser_threshold: int = 0):
"""
Language parser that splits code using the respective language syntax.
Args:
language: If None (default), it will try to infer language from source.
parser_threshold: Minimum lines needed to activate parsing (0 by default).
"""
self.language = language
self.parser_threshold = parser_threshold
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
code = blob.as_string()
language = self.language or (
LANGUAGE_EXTENSIONS.get(blob.source.rsplit(".", 1)[-1])
if isinstance(blob.source, str)
else None
)
if language is None:
yield Document(
page_content=code,
metadata={
"source": blob.source,
},
)
return
if self.parser_threshold >= len(code.splitlines()):
yield Document(
page_content=code,
metadata={
"source": blob.source,
"language": language,
},
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/language_parser.html
|
9ac418e3a027-2
|
"source": blob.source,
"language": language,
},
)
return
self.Segmenter = LANGUAGE_SEGMENTERS[language]
segmenter = self.Segmenter(blob.as_string())
if not segmenter.is_valid():
yield Document(
page_content=code,
metadata={
"source": blob.source,
},
)
return
for functions_classes in segmenter.extract_functions_classes():
yield Document(
page_content=functions_classes,
metadata={
"source": blob.source,
"content_type": "functions_classes",
"language": language,
},
)
yield Document(
page_content=segmenter.simplify_code(),
metadata={
"source": blob.source,
"content_type": "simplified_code",
"language": language,
},
)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/language_parser.html
|
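A minimal sketch of lazy_parse used in isolation, assuming a local file ./example.py exists (the path is hypothetical); it illustrates the per-document metadata produced by the method above.

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers import LanguageParser

blob = Blob.from_path("./example.py")  # hypothetical file
parser = LanguageParser(parser_threshold=0)

for doc in parser.lazy_parse(blob):
    # Functions/classes are emitted first (content_type="functions_classes"),
    # followed by one document containing the simplified remainder
    # (content_type="simplified_code").
    print(doc.metadata.get("content_type"), doc.metadata.get("language"))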
586f10c5273d-0
|
Source code for langchain.document_loaders.parsers.language.python
import ast
from typing import Any, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
[docs]class PythonSegmenter(CodeSegmenter):
"""The code segmenter for Python."""
[docs] def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
[docs] def is_valid(self) -> bool:
try:
ast.parse(self.code)
return True
except SyntaxError:
return False
def _extract_code(self, node: Any) -> str:
start = node.lineno - 1
end = node.end_lineno
return "\n".join(self.source_lines[start:end])
[docs] def extract_functions_classes(self) -> List[str]:
tree = ast.parse(self.code)
functions_classes = []
for node in ast.iter_child_nodes(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
functions_classes.append(self._extract_code(node))
return functions_classes
[docs] def simplify_code(self) -> str:
tree = ast.parse(self.code)
simplified_lines = self.source_lines[:]
for node in ast.iter_child_nodes(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
start = node.lineno - 1
simplified_lines[start] = f"# Code for: {simplified_lines[start]}"
assert isinstance(node.end_lineno, int)
for line_num in range(start + 1, node.end_lineno):
simplified_lines[line_num] = None # type: ignore
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/python.html
|
586f10c5273d-1
|
return "\n".join(line for line in simplified_lines if line is not None)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/python.html
|
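A short usage sketch for PythonSegmenter; the code string below is a made-up example used only to illustrate the two public methods.

from langchain.document_loaders.parsers.language.python import PythonSegmenter

code = "def greet(name):\n    return f'hello {name}'\n\nVERSION = '1.0'\n"
seg = PythonSegmenter(code)

assert seg.is_valid()
# One string per top-level function/class definition.
print(seg.extract_functions_classes())
# The original lines with each definition collapsed to a "# Code for: ..." stub.
print(seg.simplify_code())
# Roughly:
# # Code for: def greet(name):
#
# VERSION = '1.0'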
1466f91bfb91-0
|
Source code for langchain.document_loaders.parsers.language.code_segmenter
from abc import ABC, abstractmethod
from typing import List
[docs]class CodeSegmenter(ABC):
"""The abstract class for the code segmenter."""
[docs] def __init__(self, code: str):
self.code = code
[docs] def is_valid(self) -> bool:
return True
[docs] @abstractmethod
def simplify_code(self) -> str:
raise NotImplementedError() # pragma: no cover
[docs] @abstractmethod
def extract_functions_classes(self) -> List[str]:
raise NotImplementedError() # pragma: no cover
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/code_segmenter.html
|
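New languages are supported by subclassing this interface. The class below is a deliberately trivial, hypothetical example showing the methods a concrete segmenter has to provide; it is not part of LangChain.

from typing import List

from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter


class LineSegmenter(CodeSegmenter):
    """Toy segmenter that treats every non-empty line as a segment."""

    def extract_functions_classes(self) -> List[str]:
        return [line for line in self.code.splitlines() if line.strip()]

    def simplify_code(self) -> str:
        # Nothing worth collapsing in this toy example.
        return self.code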
5d0c53f7f09d-0
|
Source code for langchain.document_loaders.parsers.html.bs4
"""Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Any, Dict, Iterator, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
[docs]class BS4HTMLParser(BaseBlobParser):
"""Parser that uses beautiful soup to parse HTML files."""
[docs] def __init__(
self,
*,
features: str = "lxml",
get_text_separator: str = "",
**kwargs: Any,
) -> None:
"""Initialize a bs4 based HTML parser."""
try:
import bs4 # noqa:F401
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.bs_kwargs = {"features": features, **kwargs}
self.get_text_separator = get_text_separator
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Load HTML document into document objects."""
from bs4 import BeautifulSoup
with blob.as_bytes_io() as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": blob.source,
"title": title,
}
yield Document(page_content=text, metadata=metadata)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/html/bs4.html
|
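A hedged usage sketch for BS4HTMLParser, assuming beautifulsoup4 and lxml are installed; the HTML string is invented for illustration.

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.html.bs4 import BS4HTMLParser

html = "<html><head><title>Example</title></head><body><p>Hi there</p></body></html>"
blob = Blob.from_data(html, mime_type="text/html")

parser = BS4HTMLParser(get_text_separator=" ")
doc = next(parser.lazy_parse(blob))
print(doc.metadata["title"])   # "Example"
print(doc.page_content)        # text content extracted by bs4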
1a31b8ea94ee-0
|
Source code for langchain._api.deprecation
"""Helper functions for deprecating parts of the LangChain API.
This module was adapted from matplotlib's _api/deprecation.py module:
https://github.com/matplotlib/matplotlib/blob/main/lib/matplotlib/_api/deprecation.py
.. warning::
This module is for internal use only. Do not use it in your own code.
We may change the API at any time with no warning.
"""
import contextlib
import functools
import inspect
import warnings
from typing import Any, Callable, Generator, Type, TypeVar
[docs]class LangChainDeprecationWarning(DeprecationWarning):
"""A class for issuing deprecation warnings for LangChain users."""
def _warn_deprecated(
since: str,
*,
message: str = "",
name: str = "",
alternative: str = "",
pending: bool = False,
obj_type: str = "",
addendum: str = "",
removal: str = "",
) -> None:
"""Display a standardized deprecation.
Arguments:
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The %(since)s,
%(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
and %(removal)s format specifiers will be replaced by the
values of the respective arguments passed to this function.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative API that the user may use in place of the
deprecated API. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
|
https://api.python.langchain.com/en/latest/_modules/langchain/_api/deprecation.html
|