Source code for langchain.output_parsers.rail_parser

from __future__ import annotations

from typing import Any, Callable, Dict, Optional

from langchain.schema import BaseOutputParser


class GuardrailsOutputParser(BaseOutputParser):
    guard: Any
    api: Optional[Callable]
    args: Any
    kwargs: Any

    @property
    def _type(self) -> str:
        return "guardrails"

    @classmethod
    def from_rail(
        cls,
        rail_file: str,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        try:
            from guardrails import Guard
        except ImportError:
            raise ValueError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return cls(
            guard=Guard.from_rail(rail_file, num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    @classmethod
    def from_rail_string(
        cls,
        rail_str: str,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        try:
            from guardrails import Guard
        except ImportError:
            raise ValueError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return cls(
            guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    @classmethod
    def from_pydantic(
        cls,
        output_class: Any,
        num_reasks: int = 1,
        api: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> GuardrailsOutputParser:
        try:
            from guardrails import Guard
        except ImportError:
            raise ValueError(
                "guardrails-ai package not installed. "
                "Install it by running `pip install guardrails-ai`."
            )
        return cls(
            guard=Guard.from_pydantic(output_class, "", num_reasks=num_reasks),
            api=api,
            args=args,
            kwargs=kwargs,
        )

    def get_format_instructions(self) -> str:
        return self.guard.raw_prompt.format_instructions

    def parse(self, text: str) -> Dict:
        return self.guard.parse(text, llm_api=self.api, *self.args, **self.kwargs)

Source: https://api.python.langchain.com/en/latest/_modules/langchain/output_parsers/rail_parser.html
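
A minimal usage sketch (the RAIL spec below is a hypothetical example; guardrails-ai must be installed, and the exact validated output shape depends on the guardrails version):

from langchain.output_parsers.rail_parser import GuardrailsOutputParser

# Hypothetical RAIL spec describing a single string field.
rail_spec = """
<rail version="0.1">
<output>
    <string name="answer" description="A short answer." />
</output>
<prompt>Return a JSON object matching the schema.</prompt>
</rail>
"""

parser = GuardrailsOutputParser.from_rail_string(rail_spec)
print(parser.get_format_instructions())   # schema text to embed in the LLM prompt
parsed = parser.parse('{"answer": "42"}')  # returns the validated output as a dict
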
Source code for langchain.document_loaders.blockchain

import os
import re
import time
from enum import Enum
from typing import List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class BlockchainType(Enum):
    """Enumerator of the supported blockchains."""

    ETH_MAINNET = "eth-mainnet"
    ETH_GOERLI = "eth-goerli"
    POLYGON_MAINNET = "polygon-mainnet"
    POLYGON_MUMBAI = "polygon-mumbai"


class BlockchainDocumentLoader(BaseLoader):
    """Loads elements from a blockchain smart contract into Langchain documents.

    The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet,
    Polygon mainnet, and Polygon Mumbai testnet.

    If no BlockchainType is specified, the default is Ethereum mainnet.

    The Loader uses the Alchemy API to interact with the blockchain.
    The ALCHEMY_API_KEY environment variable must be set to use this loader.

    The API returns 100 NFTs per request and can be paginated using the
    startToken parameter.

    If get_all_tokens is set to True, the loader will get all tokens
    on the contract. Note that for contracts with a large number of tokens,
    this may take a long time (e.g. 10k tokens is 100 requests).
    The default value is False for this reason.

    The max_execution_time (sec) can be set to limit the execution time
    of the loader.

    Future versions of this loader can:
    - Support additional Alchemy APIs (e.g. getTransactions, etc.)
    - Support additional blockchain APIs (e.g. Infura, Opensea, etc.)
    """

    def __init__(
        self,
        contract_address: str,
        blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
        api_key: str = "docs-demo",
        startToken: str = "",
        get_all_tokens: bool = False,
        max_execution_time: Optional[int] = None,
    ):
        self.contract_address = contract_address
        self.blockchainType = blockchainType.value
        self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
        self.startToken = startToken
        self.get_all_tokens = get_all_tokens
        self.max_execution_time = max_execution_time

        if not self.api_key:
            raise ValueError("Alchemy API key not provided.")

        if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
            raise ValueError(f"Invalid contract address {self.contract_address}")

    def load(self) -> List[Document]:
        result = []

        current_start_token = self.startToken
        start_time = time.time()

        while True:
            url = (
                f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
                f"{self.api_key}/getNFTsForCollection?withMetadata="
                f"True&contractAddress={self.contract_address}"
                f"&startToken={current_start_token}"
            )

            response = requests.get(url)

            if response.status_code != 200:
                raise ValueError(
                    f"Request failed with status code {response.status_code}"
                )

            items = response.json()["nfts"]
            if not items:
                break

            for item in items:
                content = str(item)
                tokenId = item["id"]["tokenId"]
                metadata = {
                    "source": self.contract_address,
                    "blockchain": self.blockchainType,
                    "tokenId": tokenId,
                }
                result.append(Document(page_content=content, metadata=metadata))

            # exit after the first API call if get_all_tokens is False
            if not self.get_all_tokens:
                break

            # get the start token for the next API call from the last item in array
            current_start_token = self._get_next_tokenId(result[-1].metadata["tokenId"])

            if (
                self.max_execution_time is not None
                and (time.time() - start_time) > self.max_execution_time
            ):
                raise RuntimeError("Execution time exceeded the allowed time limit.")

        if not result:
            raise ValueError(
                f"No NFTs found for contract address {self.contract_address}"
            )

        return result

    # add one to the tokenId, ensuring the correct tokenId format is used
    def _get_next_tokenId(self, tokenId: str) -> str:
        value_type = self._detect_value_type(tokenId)

        if value_type == "hex_0x":
            value_int = int(tokenId, 16)
        elif value_type == "hex_0xbf":
            # strip the "0xbf" prefix before parsing, to match the
            # "0xbf" + hex reconstruction below
            value_int = int(tokenId[4:], 16)
        else:
            value_int = int(tokenId)

        result = value_int + 1

        if value_type == "hex_0x":
            return "0x" + format(result, "0" + str(len(tokenId) - 2) + "x")
        elif value_type == "hex_0xbf":
            return "0xbf" + format(result, "0" + str(len(tokenId) - 4) + "x")
        else:
            return str(result)

    # A smart contract can use different formats for the tokenId
    @staticmethod
    def _detect_value_type(tokenId: str) -> str:
        if isinstance(tokenId, int):
            return "int"
        # check the more specific "0xbf" prefix before the generic "0x" prefix,
        # otherwise the "hex_0xbf" branch can never be reached
        elif tokenId.startswith("0xbf"):
            return "hex_0xbf"
        elif tokenId.startswith("0x"):
            return "hex_0x"
        else:
            return "int"

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blockchain.html
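
A usage sketch (the contract address below is a placeholder; set ALCHEMY_API_KEY in the environment or pass api_key explicitly):

from langchain.document_loaders.blockchain import (
    BlockchainDocumentLoader,
    BlockchainType,
)

# Placeholder address: 40 hex digits, as required by the validation regex.
contract = "0x" + "00" * 20

loader = BlockchainDocumentLoader(
    contract_address=contract,
    blockchainType=BlockchainType.ETH_MAINNET,
    api_key="docs-demo",      # Alchemy demo key; heavily rate limited
    get_all_tokens=False,     # only the first page of up to 100 NFTs
    max_execution_time=30,    # abort pagination after 30 seconds
)
docs = loader.load()  # one Document per NFT; metadata carries the tokenId
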
Source code for langchain.document_loaders.airtable

from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class AirtableLoader(BaseLoader):
    """Loader for Airtable tables."""

    def __init__(self, api_token: str, table_id: str, base_id: str):
        """Initialize with API token and the IDs for the table and base."""
        self.api_token = api_token
        self.table_id = table_id
        self.base_id = base_id

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load records from the table."""
        from pyairtable import Table

        table = Table(self.api_token, self.base_id, self.table_id)
        records = table.all()
        for record in records:
            # Need to convert record from dict to str
            yield Document(
                page_content=str(record),
                metadata={
                    "source": self.base_id + "_" + self.table_id,
                    "base_id": self.base_id,
                    "table_id": self.table_id,
                },
            )

    def load(self) -> List[Document]:
        """Load the table."""
        return list(self.lazy_load())

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airtable.html
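
A usage sketch (the token and IDs are placeholders; requires `pip install pyairtable`):

from langchain.document_loaders.airtable import AirtableLoader

loader = AirtableLoader(
    api_token="patXXXXXXXX",  # placeholder personal access token
    table_id="tblXXXXXXXX",   # placeholder table ID
    base_id="appXXXXXXXX",    # placeholder base ID
)
for doc in loader.lazy_load():  # one Document per record, fetched lazily
    print(doc.metadata["source"])
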
Source code for langchain.document_loaders.sitemap

"""Loader that fetches a sitemap and loads those URLs."""
import itertools
import re
from typing import Any, Callable, Generator, Iterable, List, Optional

from langchain.document_loaders.web_base import WebBaseLoader
from langchain.schema import Document


def _default_parsing_function(content: Any) -> str:
    return str(content.get_text())


def _default_meta_function(meta: dict, _content: Any) -> dict:
    return {"source": meta["loc"], **meta}


def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]:
    it = iter(iterable)
    while item := list(itertools.islice(it, size)):
        yield item


class SitemapLoader(WebBaseLoader):
    """Loader that fetches a sitemap and loads those URLs."""

    def __init__(
        self,
        web_path: str,
        filter_urls: Optional[List[str]] = None,
        parsing_function: Optional[Callable] = None,
        blocksize: Optional[int] = None,
        blocknum: int = 0,
        meta_function: Optional[Callable] = None,
        is_local: bool = False,
    ):
        """Initialize with webpage path and optional filter URLs.

        Args:
            web_path: url of the sitemap. can also be a local path
            filter_urls: list of strings or regexes that will be applied to filter
                the urls that are parsed and loaded
            parsing_function: Function to parse bs4.Soup output
            blocksize: number of sitemap locations per block
            blocknum: the number of the block that should be loaded - zero indexed
            meta_function: Function to parse bs4.Soup output for metadata;
                remember when setting this method to also copy metadata["loc"]
                to metadata["source"] if you are using this field
            is_local: whether the sitemap is a local file
        """
        if blocksize is not None and blocksize < 1:
            raise ValueError("Sitemap blocksize should be at least 1")

        if blocknum < 0:
            raise ValueError("Sitemap blocknum cannot be lower than 0")

        try:
            import lxml  # noqa:F401
        except ImportError:
            raise ImportError(
                "lxml package not found, please install it with `pip install lxml`"
            )

        super().__init__(web_path)

        self.filter_urls = filter_urls
        self.parsing_function = parsing_function or _default_parsing_function
        self.meta_function = meta_function or _default_meta_function
        self.blocksize = blocksize
        self.blocknum = blocknum
        self.is_local = is_local

    def parse_sitemap(self, soup: Any) -> List[dict]:
        """Parse sitemap xml and load into a list of dicts."""
        els = []
        for url in soup.find_all("url"):
            loc = url.find("loc")
            if not loc:
                continue

            # Strip leading and trailing whitespace and newlines
            loc_text = loc.text.strip()

            if self.filter_urls and not any(
                re.match(r, loc_text) for r in self.filter_urls
            ):
                continue

            els.append(
                {
                    tag: prop.text
                    for tag in ["loc", "lastmod", "changefreq", "priority"]
                    if (prop := url.find(tag))
                }
            )

        for sitemap in soup.find_all("sitemap"):
            loc = sitemap.find("loc")
            if not loc:
                continue
            soup_child = self.scrape_all([loc.text], "xml")[0]
            els.extend(self.parse_sitemap(soup_child))

        return els

    def load(self) -> List[Document]:
        """Load sitemap."""
        if self.is_local:
            try:
                import bs4
            except ImportError:
                raise ImportError(
                    "beautifulsoup4 package not found, please install it"
                    " with `pip install beautifulsoup4`"
                )
            fp = open(self.web_path)
            soup = bs4.BeautifulSoup(fp, "xml")
        else:
            soup = self.scrape("xml")

        els = self.parse_sitemap(soup)

        if self.blocksize is not None:
            elblocks = list(_batch_block(els, self.blocksize))
            blockcount = len(elblocks)
            if blockcount - 1 < self.blocknum:
                raise ValueError(
                    "Selected sitemap does not contain enough blocks for given blocknum"
                )
            else:
                els = elblocks[self.blocknum]

        results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el])

        return [
            Document(
                page_content=self.parsing_function(results[i]),
                metadata=self.meta_function(els[i], results[i]),
            )
            for i in range(len(results))
        ]

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/sitemap.html
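
A usage sketch (the sitemap URL and filter pattern are placeholders; requires `pip install lxml`):

from langchain.document_loaders.sitemap import SitemapLoader

loader = SitemapLoader(
    "https://example.com/sitemap.xml",            # placeholder sitemap URL
    filter_urls=["https://example.com/blog/.*"],  # regexes matched against each <loc>
    blocksize=10,  # split matched URLs into blocks of 10 ...
    blocknum=0,    # ... and load only the first block
)
docs = loader.load()  # each Document's metadata includes loc, lastmod, etc.
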
Source code for langchain.document_loaders.notiondb

"""Notion DB loader for langchain"""
from typing import Any, Dict, List, Optional

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"


class NotionDBLoader(BaseLoader):
    """Notion DB Loader.

    Reads content from pages within a Notion Database.

    Args:
        integration_token (str): Notion integration token.
        database_id (str): Notion database id.
        request_timeout_sec (int): Timeout for Notion requests in seconds.
    """

    def __init__(
        self,
        integration_token: str,
        database_id: str,
        request_timeout_sec: Optional[int] = 10,
    ) -> None:
        """Initialize with parameters."""
        if not integration_token:
            raise ValueError("integration_token must be provided")
        if not database_id:
            raise ValueError("database_id must be provided")

        self.token = integration_token
        self.database_id = database_id
        self.headers = {
            "Authorization": "Bearer " + self.token,
            "Content-Type": "application/json",
            "Notion-Version": "2022-06-28",
        }
        self.request_timeout_sec = request_timeout_sec

    def load(self) -> List[Document]:
        """Load documents from the Notion database.

        Returns:
            List[Document]: List of documents.
        """
        page_summaries = self._retrieve_page_summaries()
        return list(self.load_page(page_summary) for page_summary in page_summaries)

    def _retrieve_page_summaries(
        self, query_dict: Dict[str, Any] = {"page_size": 100}
    ) -> List[Dict[str, Any]]:
        """Get all the pages from a Notion database."""
        pages: List[Dict[str, Any]] = []

        while True:
            data = self._request(
                DATABASE_URL.format(database_id=self.database_id),
                method="POST",
                query_dict=query_dict,
            )

            pages.extend(data.get("results"))

            if not data.get("has_more"):
                break

            query_dict["start_cursor"] = data.get("next_cursor")

        return pages

    def load_page(self, page_summary: Dict[str, Any]) -> Document:
        """Read a page."""
        page_id = page_summary["id"]

        # load properties as metadata
        metadata: Dict[str, Any] = {}

        for prop_name, prop_data in page_summary["properties"].items():
            prop_type = prop_data["type"]

            if prop_type == "rich_text":
                value = (
                    prop_data["rich_text"][0]["plain_text"]
                    if prop_data["rich_text"]
                    else None
                )
            elif prop_type == "title":
                value = (
                    prop_data["title"][0]["plain_text"] if prop_data["title"] else None
                )
            elif prop_type == "multi_select":
                value = (
                    [item["name"] for item in prop_data["multi_select"]]
                    if prop_data["multi_select"]
                    else []
                )
            elif prop_type == "url":
                value = prop_data["url"]
            else:
                value = None

            metadata[prop_name.lower()] = value

        metadata["id"] = page_id

        return Document(page_content=self._load_blocks(page_id), metadata=metadata)

    def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
        """Read a block and its children."""
        result_lines_arr: List[str] = []
        cur_block_id: str = block_id

        while cur_block_id:
            data = self._request(BLOCK_URL.format(block_id=cur_block_id))

            for result in data["results"]:
                result_obj = result[result["type"]]

                if "rich_text" not in result_obj:
                    continue

                cur_result_text_arr: List[str] = []

                for rich_text in result_obj["rich_text"]:
                    if "text" in rich_text:
                        cur_result_text_arr.append(
                            "\t" * num_tabs + rich_text["text"]["content"]
                        )

                if result["has_children"]:
                    children_text = self._load_blocks(
                        result["id"], num_tabs=num_tabs + 1
                    )
                    cur_result_text_arr.append(children_text)

                result_lines_arr.append("\n".join(cur_result_text_arr))

            cur_block_id = data.get("next_cursor")

        return "\n".join(result_lines_arr)

    def _request(
        self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
    ) -> Any:
        res = requests.request(
            method,
            url,
            headers=self.headers,
            json=query_dict,
            timeout=self.request_timeout_sec,
        )
        res.raise_for_status()
        return res.json()

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/notiondb.html
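
A usage sketch (token and database id are placeholders):

from langchain.document_loaders.notiondb import NotionDBLoader

loader = NotionDBLoader(
    integration_token="secret_XXXX",  # placeholder Notion integration token
    database_id="XXXXXXXXXXXXXXXX",   # placeholder database id
    request_timeout_sec=30,
)
docs = loader.load()  # page properties become lower-cased metadata keys
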
Source code for langchain.document_loaders.larksuite

"""Loader that loads LarkSuite (FeiShu) document json dump."""
import json
import urllib.request
from typing import Any, Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class LarkSuiteDocLoader(BaseLoader):
    """Loader that loads LarkSuite (FeiShu) document."""

    def __init__(self, domain: str, access_token: str, document_id: str):
        """Initialize with domain, access_token (tenant / user), and document_id."""
        self.domain = domain
        self.access_token = access_token
        self.document_id = document_id

    def _get_larksuite_api_json_data(self, api_url: str) -> Any:
        """Get LarkSuite (FeiShu) API response json data."""
        headers = {"Authorization": f"Bearer {self.access_token}"}
        request = urllib.request.Request(api_url, headers=headers)
        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            return json_data

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load LarkSuite (FeiShu) document."""
        api_url_prefix = f"{self.domain}/open-apis/docx/v1/documents"
        metadata_json = self._get_larksuite_api_json_data(
            f"{api_url_prefix}/{self.document_id}"
        )
        raw_content_json = self._get_larksuite_api_json_data(
            f"{api_url_prefix}/{self.document_id}/raw_content"
        )
        text = raw_content_json["data"]["content"]
        metadata = {
            "document_id": self.document_id,
            "revision_id": metadata_json["data"]["document"]["revision_id"],
            "title": metadata_json["data"]["document"]["title"],
        }
        yield Document(page_content=text, metadata=metadata)

    def load(self) -> List[Document]:
        """Load LarkSuite (FeiShu) document."""
        return list(self.lazy_load())

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/larksuite.html
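
A usage sketch (the domain, token, and document id are placeholders):

from langchain.document_loaders.larksuite import LarkSuiteDocLoader

loader = LarkSuiteDocLoader(
    domain="https://open.feishu.cn",  # or your tenant's LarkSuite domain
    access_token="t-XXXX",            # placeholder tenant / user access token
    document_id="doccnXXXX",          # placeholder document id
)
docs = loader.load()  # one Document with title and revision_id in metadata
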
Source code for langchain.document_loaders.trello

"""Loader that loads cards from Trello"""
from __future__ import annotations

from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env

if TYPE_CHECKING:
    from trello import Board, Card, TrelloClient


class TrelloLoader(BaseLoader):
    """Trello loader. Reads all cards from a Trello board."""

    def __init__(
        self,
        client: TrelloClient,
        board_name: str,
        *,
        include_card_name: bool = True,
        include_comments: bool = True,
        include_checklist: bool = True,
        card_filter: Literal["closed", "open", "all"] = "all",
        extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"),
    ):
        """Initialize Trello loader.

        Args:
            client: Trello API client.
            board_name: The name of the Trello board.
            include_card_name: Whether to include the name of the card in the
                document.
            include_comments: Whether to include the comments on the card in the
                document.
            include_checklist: Whether to include the checklist on the card in the
                document.
            card_filter: Filter on card status. Valid values are "closed", "open",
                "all".
            extra_metadata: List of additional metadata fields to include as document
                metadata. Valid values are "due_date", "labels", "list", "closed".
        """
        self.client = client
        self.board_name = board_name
        self.include_card_name = include_card_name
        self.include_comments = include_comments
        self.include_checklist = include_checklist
        self.extra_metadata = extra_metadata
        self.card_filter = card_filter

    @classmethod
    def from_credentials(
        cls,
        board_name: str,
        *,
        api_key: Optional[str] = None,
        token: Optional[str] = None,
        **kwargs: Any,
    ) -> TrelloLoader:
        """Convenience constructor that builds the TrelloClient init param for you.

        Args:
            board_name: The name of the Trello board.
            api_key: Trello API key. Can also be specified as environment variable
                TRELLO_API_KEY.
            token: Trello token. Can also be specified as environment variable
                TRELLO_TOKEN.
            include_card_name: Whether to include the name of the card in the
                document.
            include_comments: Whether to include the comments on the card in the
                document.
            include_checklist: Whether to include the checklist on the card in the
                document.
            card_filter: Filter on card status. Valid values are "closed", "open",
                "all".
            extra_metadata: List of additional metadata fields to include as document
                metadata. Valid values are "due_date", "labels", "list", "closed".
        """
        try:
            from trello import TrelloClient  # type: ignore
        except ImportError as ex:
            raise ImportError(
                "Could not import trello python package. "
                "Please install it with `pip install py-trello`."
            ) from ex

        api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY")
        token = token or get_from_env("token", "TRELLO_TOKEN")
        client = TrelloClient(api_key=api_key, token=token)
        return cls(client, board_name, **kwargs)

    def load(self) -> List[Document]:
        """Loads all cards from the specified Trello board.

        You can filter the cards, metadata and text included by using the optional
        parameters.

        Returns:
            A list of documents, one for each card in the board.
        """
        try:
            from bs4 import BeautifulSoup  # noqa: F401
        except ImportError as ex:
            raise ImportError(
                "`beautifulsoup4` package not found, please run"
                " `pip install beautifulsoup4`"
            ) from ex

        board = self._get_board()
        # Create a dictionary with the list IDs as keys and the list names as values
        list_dict = {list_item.id: list_item.name for list_item in board.list_lists()}
        # Get Cards on the board
        cards = board.get_cards(card_filter=self.card_filter)
        return [self._card_to_doc(card, list_dict) for card in cards]

    def _get_board(self) -> Board:
        # Find the first board with a matching name
        board = next(
            (b for b in self.client.list_boards() if b.name == self.board_name), None
        )
        if not board:
            raise ValueError(f"Board `{self.board_name}` not found.")
        return board

    def _card_to_doc(self, card: Card, list_dict: dict) -> Document:
        from bs4 import BeautifulSoup  # type: ignore

        text_content = ""
        if self.include_card_name:
            text_content = card.name + "\n"
        if card.description.strip():
            text_content += BeautifulSoup(card.description, "lxml").get_text()
        if self.include_checklist:
            # Get all the checklist items on the card
            for checklist in card.checklists:
                if checklist.items:
                    items = [
                        f"{item['name']}:{item['state']}" for item in checklist.items
                    ]
                    text_content += f"\n{checklist.name}\n" + "\n".join(items)

        if self.include_comments:
            # Get all the comments on the card
            comments = [
                BeautifulSoup(comment["data"]["text"], "lxml").get_text()
                for comment in card.comments
            ]
            text_content += "Comments:" + "\n".join(comments)

        # Default metadata fields
        metadata = {
            "title": card.name,
            "id": card.id,
            "url": card.url,
        }

        # Extra metadata fields. Card object is not subscriptable.
        if "labels" in self.extra_metadata:
            metadata["labels"] = [label.name for label in card.labels]
        if "list" in self.extra_metadata:
            if card.list_id in list_dict:
                metadata["list"] = list_dict[card.list_id]
        if "closed" in self.extra_metadata:
            metadata["closed"] = card.closed
        if "due_date" in self.extra_metadata:
            metadata["due_date"] = card.due_date

        return Document(page_content=text_content, metadata=metadata)

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/trello.html
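
A usage sketch (the board name is a placeholder; credentials are read from TRELLO_API_KEY and TRELLO_TOKEN when not passed explicitly):

from langchain.document_loaders.trello import TrelloLoader

loader = TrelloLoader.from_credentials(
    "Personal tasks",          # placeholder board name
    card_filter="open",        # skip archived cards
    extra_metadata=("list",),  # record only the list name beyond the defaults
)
docs = loader.load()  # one Document per card
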
Source code for langchain.document_loaders.gcs_directory

"""Loading logic for loading documents from a GCS directory."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.gcs_file import GCSFileLoader


class GCSDirectoryLoader(BaseLoader):
    """Loading logic for loading documents from GCS."""

    def __init__(self, project_name: str, bucket: str, prefix: str = ""):
        """Initialize with bucket and key name."""
        self.project_name = project_name
        self.bucket = bucket
        self.prefix = prefix

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from google.cloud import storage
        except ImportError:
            raise ValueError(
                "Could not import google-cloud-storage python package. "
                "Please install it with `pip install google-cloud-storage`."
            )
        client = storage.Client(project=self.project_name)
        docs = []
        for blob in client.list_blobs(self.bucket, prefix=self.prefix):
            # we shall just skip directories since GCSFileLoader creates
            # intermediate directories on the fly
            if blob.name.endswith("/"):
                continue
            loader = GCSFileLoader(self.project_name, self.bucket, blob.name)
            docs.extend(loader.load())
        return docs

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gcs_directory.html
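
A usage sketch (project and bucket names are placeholders; requires google-cloud-storage and application default credentials):

from langchain.document_loaders.gcs_directory import GCSDirectoryLoader

loader = GCSDirectoryLoader(
    project_name="my-project",  # placeholder GCP project
    bucket="my-bucket",         # placeholder bucket name
    prefix="reports/",          # only blobs under this prefix
)
docs = loader.load()  # each blob is delegated to GCSFileLoader
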
Source code for langchain.document_loaders.acreom

"""Loader that loads an acreom vault from a directory."""
import re
from pathlib import Path
from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class AcreomLoader(BaseLoader):
    FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)

    def __init__(
        self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
    ):
        """Initialize with path."""
        self.file_path = path
        self.encoding = encoding
        self.collect_metadata = collect_metadata

    def _parse_front_matter(self, content: str) -> dict:
        """Parse front matter metadata from the content and return it as a dict."""
        if not self.collect_metadata:
            return {}
        match = self.FRONT_MATTER_REGEX.search(content)
        front_matter = {}
        if match:
            lines = match.group(1).split("\n")
            for line in lines:
                if ":" in line:
                    key, value = line.split(":", 1)
                    front_matter[key.strip()] = value.strip()
                else:
                    # Skip lines without a colon
                    continue
        return front_matter

    def _remove_front_matter(self, content: str) -> str:
        """Remove front matter metadata from the given content."""
        if not self.collect_metadata:
            return content
        return self.FRONT_MATTER_REGEX.sub("", content)

    def _process_acreom_content(self, content: str) -> str:
        # remove acreom specific elements from content that
        # do not contribute to the context of the current document
        content = re.sub(r"\s*-\s\[\s\]\s.*|\s*\[\s\]\s.*", "", content)  # rm tasks
        content = re.sub(r"#", "", content)  # rm hashtags
        content = re.sub(r"\[\[.*?\]\]", "", content)  # rm doclinks
        return content

    def lazy_load(self) -> Iterator[Document]:
        ps = list(Path(self.file_path).glob("**/*.md"))

        for p in ps:
            with open(p, encoding=self.encoding) as f:
                text = f.read()

            front_matter = self._parse_front_matter(text)
            text = self._remove_front_matter(text)
            text = self._process_acreom_content(text)

            metadata = {
                "source": str(p.name),
                "path": str(p),
                **front_matter,
            }

            yield Document(page_content=text, metadata=metadata)

    def load(self) -> List[Document]:
        return list(self.lazy_load())

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/acreom.html
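
A usage sketch (the vault path is a placeholder):

from langchain.document_loaders.acreom import AcreomLoader

loader = AcreomLoader("/path/to/acreom/vault", collect_metadata=True)
for doc in loader.lazy_load():  # one Document per .md file; front matter as metadata
    print(doc.metadata["path"])
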
Source code for langchain.document_loaders.embaas

import base64
import warnings
from typing import Any, Dict, Iterator, List, Optional

import requests
from pydantic import BaseModel, root_validator, validator
from typing_extensions import NotRequired, TypedDict

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseBlobParser, BaseLoader
from langchain.document_loaders.blob_loaders import Blob
from langchain.text_splitter import TextSplitter
from langchain.utils import get_from_dict_or_env

EMBAAS_DOC_API_URL = "https://api.embaas.io/v1/document/extract-text/bytes/"


class EmbaasDocumentExtractionParameters(TypedDict):
    """Parameters for the embaas document extraction API."""

    mime_type: NotRequired[str]
    """The mime type of the document."""
    file_extension: NotRequired[str]
    """The file extension of the document."""
    file_name: NotRequired[str]
    """The file name of the document."""

    should_chunk: NotRequired[bool]
    """Whether to chunk the document into pages."""
    chunk_size: NotRequired[int]
    """The maximum size of the text chunks."""
    chunk_overlap: NotRequired[int]
    """The maximum overlap allowed between chunks."""
    chunk_splitter: NotRequired[str]
    """The text splitter class name for creating chunks."""
    separators: NotRequired[List[str]]
    """The separators for chunks."""

    should_embed: NotRequired[bool]
    """Whether to create embeddings for the document in the response."""
    model: NotRequired[str]
    """The model to pass to the Embaas document extraction API."""
    instruction: NotRequired[str]
    """The instruction to pass to the Embaas document extraction API."""


class EmbaasDocumentExtractionPayload(EmbaasDocumentExtractionParameters):
    """Payload for the Embaas document extraction API."""

    bytes: str
    """The base64 encoded bytes of the document to extract text from."""


class BaseEmbaasLoader(BaseModel):
    embaas_api_key: Optional[str] = None
    api_url: str = EMBAAS_DOC_API_URL
    """The URL of the embaas document extraction API."""
    params: EmbaasDocumentExtractionParameters = EmbaasDocumentExtractionParameters()
    """Additional parameters to pass to the embaas document extraction API."""

    @root_validator(pre=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        embaas_api_key = get_from_dict_or_env(
            values, "embaas_api_key", "EMBAAS_API_KEY"
        )
        values["embaas_api_key"] = embaas_api_key
        return values


class EmbaasBlobLoader(BaseEmbaasLoader, BaseBlobParser):
    """Wrapper around embaas's document byte loader service.

    To use, you should have the
    environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            # Default parsing
            from langchain.document_loaders.embaas import EmbaasBlobLoader

            loader = EmbaasBlobLoader()
            blob = Blob.from_path(path="example.mp3")
            documents = loader.parse(blob=blob)

            # Custom api parameters (create embeddings automatically)
            loader = EmbaasBlobLoader(
                params={
                    "should_embed": True,
                    "model": "e5-large-v2",
                    "chunk_size": 256,
                    "chunk_splitter": "CharacterTextSplitter"
                }
            )
            blob = Blob.from_path(path="example.pdf")
            documents = loader.parse(blob=blob)
    """

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        yield from self._get_documents(blob=blob)

    @staticmethod
    def _api_response_to_documents(chunks: List[Dict[str, Any]]) -> List[Document]:
        """Convert the API response to a list of documents."""
        docs = []
        for chunk in chunks:
            metadata = chunk["metadata"]
            if chunk.get("embedding", None) is not None:
                metadata["embedding"] = chunk["embedding"]
            doc = Document(page_content=chunk["text"], metadata=metadata)
            docs.append(doc)
        return docs

    def _generate_payload(self, blob: Blob) -> EmbaasDocumentExtractionPayload:
        """Generates payload for the API request."""
        base64_byte_str = base64.b64encode(blob.as_bytes()).decode()
        payload: EmbaasDocumentExtractionPayload = EmbaasDocumentExtractionPayload(
            bytes=base64_byte_str,
            # Workaround for mypy issue: https://github.com/python/mypy/issues/9408
            # type: ignore
            **self.params,
        )

        if blob.mimetype is not None and payload.get("mime_type", None) is None:
            payload["mime_type"] = blob.mimetype

        return payload

    def _handle_request(
        self, payload: EmbaasDocumentExtractionPayload
    ) -> List[Document]:
        """Sends a request to the embaas API and handles the response."""
        headers = {
            "Authorization": f"Bearer {self.embaas_api_key}",
            "Content-Type": "application/json",
        }

        response = requests.post(self.api_url, headers=headers, json=payload)
        response.raise_for_status()

        parsed_response = response.json()
        return EmbaasBlobLoader._api_response_to_documents(
            chunks=parsed_response["data"]["chunks"]
        )

    def _get_documents(self, blob: Blob) -> Iterator[Document]:
        """Get the documents from the blob."""
        payload = self._generate_payload(blob=blob)

        try:
            documents = self._handle_request(payload=payload)
        except requests.exceptions.RequestException as e:
            if e.response is None or not e.response.text:
                raise ValueError(
                    f"Error raised by embaas document text extraction API: {e}"
                )

            parsed_response = e.response.json()
            if "message" in parsed_response:
                raise ValueError(
                    f"Validation Error raised by embaas document text extraction API:"
                    f" {parsed_response['message']}"
                )
            raise

        yield from documents


class EmbaasLoader(BaseEmbaasLoader, BaseLoader):
    """Wrapper around embaas's document loader service.

    To use, you should have the
    environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            # Default parsing
            from langchain.document_loaders.embaas import EmbaasLoader

            loader = EmbaasLoader(file_path="example.mp3")
            documents = loader.load()

            # Custom api parameters (create embeddings automatically)
            loader = EmbaasLoader(
                file_path="example.pdf",
                params={
                    "should_embed": True,
                    "model": "e5-large-v2",
                    "chunk_size": 256,
                    "chunk_splitter": "CharacterTextSplitter"
                }
            )
            documents = loader.load()
    """

    file_path: str
    """The path to the file to load."""
    blob_loader: Optional[EmbaasBlobLoader]
    """The blob loader to use. If not provided, a default one will be created."""

    @validator("blob_loader", always=True)
    def validate_blob_loader(
        cls, v: EmbaasBlobLoader, values: Dict
    ) -> EmbaasBlobLoader:
        return v or EmbaasBlobLoader(
            embaas_api_key=values["embaas_api_key"],
            api_url=values["api_url"],
            params=values["params"],
        )

    def lazy_load(self) -> Iterator[Document]:
        """Load the documents from the file path lazily."""
        blob = Blob.from_path(path=self.file_path)

        assert self.blob_loader is not None
        # Should never be None, but mypy doesn't know that.
        yield from self.blob_loader.lazy_parse(blob=blob)

    def load(self) -> List[Document]:
        return list(self.lazy_load())

    def load_and_split(
        self, text_splitter: Optional[TextSplitter] = None
    ) -> List[Document]:
        if self.params.get("should_embed", False):
            warnings.warn(
                "Embeddings are not supported with load_and_split."
                " Use the API splitter to properly generate embeddings."
                " For more information see embaas.io docs."
            )
        return super().load_and_split(text_splitter=text_splitter)

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/embaas.html
Source code for langchain.document_loaders.college_confidential

"""Loader that loads College Confidential."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class CollegeConfidentialLoader(WebBaseLoader):
    """Loader that loads College Confidential webpages."""

    def load(self) -> List[Document]:
        """Load webpage."""
        soup = self.scrape()
        text = soup.select_one("main[class='skin-handler']").text
        metadata = {"source": self.web_path}
        return [Document(page_content=text, metadata=metadata)]

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/college_confidential.html
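
A usage sketch (any College Confidential page URL works here):

from langchain.document_loaders.college_confidential import CollegeConfidentialLoader

loader = CollegeConfidentialLoader(
    "https://www.collegeconfidential.com/colleges/brown-university/"
)
docs = loader.load()  # one Document containing the page's main text
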
Source code for langchain.document_loaders.googledrive

"""Loader that loads data from Google Drive."""

# Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
#    https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
#    https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501
# 4. For service accounts visit
#    https://cloud.google.com/iam/docs/service-accounts-create

import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union

from pydantic import BaseModel, root_validator, validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]


class GoogleDriveLoader(BaseLoader, BaseModel):
    """Loader that loads Google Docs from Google Drive."""

    service_account_key: Path = Path.home() / ".credentials" / "keys.json"
    credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
    token_path: Path = Path.home() / ".credentials" / "token.json"
    folder_id: Optional[str] = None
    document_ids: Optional[List[str]] = None
    file_ids: Optional[List[str]] = None
    recursive: bool = False
    file_types: Optional[Sequence[str]] = None
    load_trashed_files: bool = False
    # NOTE(MthwRobinson) - changing the file_loader_cls to type here currently
    # results in pydantic validation errors
    file_loader_cls: Any = None
    file_loader_kwargs: Dict["str", Any] = {}

    @root_validator
    def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that exactly one of folder_id, document_ids, or file_ids is set."""
        if values.get("folder_id") and (
            values.get("document_ids") or values.get("file_ids")
        ):
            raise ValueError(
                "Cannot specify both folder_id and document_ids nor "
                "folder_id and file_ids"
            )
        if (
            not values.get("folder_id")
            and not values.get("document_ids")
            and not values.get("file_ids")
        ):
            raise ValueError("Must specify either folder_id, document_ids, or file_ids")

        file_types = values.get("file_types")
        if file_types:
            if values.get("document_ids") or values.get("file_ids"):
                raise ValueError(
                    "file_types can only be given when folder_id is given,"
                    " (not when document_ids or file_ids are given)."
                )
            type_mapping = {
                "document": "application/vnd.google-apps.document",
                "sheet": "application/vnd.google-apps.spreadsheet",
                "pdf": "application/pdf",
            }
            allowed_types = list(type_mapping.keys()) + list(type_mapping.values())
            short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()])
            full_names = ", ".join([f"'{x}'" for x in type_mapping.values()])
            for file_type in file_types:
                if file_type not in allowed_types:
                    raise ValueError(
                        f"Given file type {file_type} is not supported. "
                        f"Supported values are: {short_names}; and "
                        f"their full-form names: {full_names}"
                    )

            # replace short-form file types by full-form file types
            def full_form(x: str) -> str:
                return type_mapping[x] if x in type_mapping else x

            values["file_types"] = [full_form(file_type) for file_type in file_types]
        return values

    @validator("credentials_path")
    def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
        """Validate that credentials_path exists."""
        if not v.exists():
            raise ValueError(f"credentials_path {v} does not exist")
        return v

    def _load_credentials(self) -> Any:
        """Load credentials."""
        # Adapted from https://developers.google.com/drive/api/v3/quickstart/python
        try:
            from google.auth import default
            from google.auth.transport.requests import Request
            from google.oauth2 import service_account
            from google.oauth2.credentials import Credentials
            from google_auth_oauthlib.flow import InstalledAppFlow
        except ImportError:
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "google-api-python-client google-auth-httplib2 "
                "google-auth-oauthlib` "
                "to use the Google Drive loader."
            )

        creds = None
        if self.service_account_key.exists():
            return service_account.Credentials.from_service_account_file(
                str(self.service_account_key), scopes=SCOPES
            )

        if self.token_path.exists():
            creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)

        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            elif "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ:
                creds, project = default()
                creds = creds.with_scopes(SCOPES)
                # no need to write to file
                if creds:
                    return creds
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    str(self.credentials_path), SCOPES
                )
                creds = flow.run_local_server(port=0)
            with open(self.token_path, "w") as token:
                token.write(creds.to_json())

        return creds

    def _load_sheet_from_id(self, id: str) -> List[Document]:
        """Load a sheet and all tabs from an ID."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        sheets_service = build("sheets", "v4", credentials=creds)
        spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
        sheets = spreadsheet.get("sheets", [])

        documents = []
        for sheet in sheets:
            sheet_name = sheet["properties"]["title"]
            result = (
                sheets_service.spreadsheets()
                .values()
                .get(spreadsheetId=id, range=sheet_name)
                .execute()
            )
            values = result.get("values", [])

            header = values[0]
            for i, row in enumerate(values[1:], start=1):
                metadata = {
                    "source": (
                        f"https://docs.google.com/spreadsheets/d/{id}/"
                        f"edit?gid={sheet['properties']['sheetId']}"
                    ),
                    "title": f"{spreadsheet['properties']['title']} - {sheet_name}",
                    "row": i,
                }
                content = []
                for j, v in enumerate(row):
                    title = header[j].strip() if len(header) > j else ""
                    content.append(f"{title}: {v.strip()}")

                page_content = "\n".join(content)
                documents.append(Document(page_content=page_content, metadata=metadata))

        return documents

    def _load_document_from_id(self, id: str) -> Document:
        """Load a document from an ID."""
        from io import BytesIO

        from googleapiclient.discovery import build
        from googleapiclient.errors import HttpError
        from googleapiclient.http import MediaIoBaseDownload

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)

        file = service.files().get(fileId=id, supportsAllDrives=True).execute()
        request = service.files().export_media(fileId=id, mimeType="text/plain")
        fh = BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        try:
            while done is False:
                status, done = downloader.next_chunk()
        except HttpError as e:
            if e.resp.status == 404:
                print("File not found: {}".format(id))
            else:
                print("An error occurred: {}".format(e))

        text = fh.getvalue().decode("utf-8")
        metadata = {
            "source": f"https://docs.google.com/document/d/{id}/edit",
            "title": f"{file.get('name')}",
        }
        return Document(page_content=text, metadata=metadata)

    def _load_documents_from_folder(
        self, folder_id: str, *, file_types: Optional[Sequence[str]] = None
    ) -> List[Document]:
        """Load documents from a folder."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)
        files = self._fetch_files_recursive(service, folder_id)
        # If file types filter is provided, we'll filter by the file type.
        if file_types:
            _files = [f for f in files if f["mimeType"] in file_types]  # type: ignore
        else:
            _files = files

        returns = []
        for file in _files:
            if file["trashed"] and not self.load_trashed_files:
                continue
            elif file["mimeType"] == "application/vnd.google-apps.document":
                returns.append(self._load_document_from_id(file["id"]))  # type: ignore
            elif file["mimeType"] == "application/vnd.google-apps.spreadsheet":
                returns.extend(self._load_sheet_from_id(file["id"]))  # type: ignore
            elif (
                file["mimeType"] == "application/pdf"
                or self.file_loader_cls is not None
            ):
                returns.extend(self._load_file_from_id(file["id"]))  # type: ignore
            else:
                pass
        return returns

    def _fetch_files_recursive(
        self, service: Any, folder_id: str
    ) -> List[Dict[str, Union[str, List[str]]]]:
        """Fetch all files and subfolders recursively."""
        results = (
            service.files()
            .list(
                q=f"'{folder_id}' in parents",
                pageSize=1000,
                includeItemsFromAllDrives=True,
                supportsAllDrives=True,
                fields="nextPageToken, files(id, name, mimeType, parents, trashed)",
            )
            .execute()
        )
        files = results.get("files", [])
        returns = []
        for file in files:
            if file["mimeType"] == "application/vnd.google-apps.folder":
                if self.recursive:
                    returns.extend(self._fetch_files_recursive(service, file["id"]))
            else:
                returns.append(file)

        return returns

    def _load_documents_from_ids(self) -> List[Document]:
        """Load documents from a list of IDs."""
        if not self.document_ids:
            raise ValueError("document_ids must be set")

        return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]

    def _load_file_from_id(self, id: str) -> List[Document]:
        """Load a file from an ID."""
        from io import BytesIO

        from googleapiclient.discovery import build
        from googleapiclient.http import MediaIoBaseDownload

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)

        file = service.files().get(fileId=id, supportsAllDrives=True).execute()
        request = service.files().get_media(fileId=id)
        fh = BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while done is False:
            status, done = downloader.next_chunk()

        if self.file_loader_cls is not None:
            fh.seek(0)
            loader = self.file_loader_cls(file=fh, **self.file_loader_kwargs)
            docs = loader.load()
            for doc in docs:
                doc.metadata["source"] = f"https://drive.google.com/file/d/{id}/view"
            return docs

        else:
            from PyPDF2 import PdfReader

            content = fh.getvalue()
            pdf_reader = PdfReader(BytesIO(content))

            return [
                Document(
                    page_content=page.extract_text(),
                    metadata={
                        "source": f"https://drive.google.com/file/d/{id}/view",
                        "title": f"{file.get('name')}",
                        "page": i,
                    },
                )
                for i, page in enumerate(pdf_reader.pages)
            ]

    def _load_file_from_ids(self) -> List[Document]:
        """Load files from a list of IDs."""
        if not self.file_ids:
            raise ValueError("file_ids must be set")
        docs = []
        for file_id in self.file_ids:
            docs.extend(self._load_file_from_id(file_id))
        return docs

    def load(self) -> List[Document]:
        """Load documents."""
        if self.folder_id:
            return self._load_documents_from_folder(
                self.folder_id, file_types=self.file_types
            )
        elif self.document_ids:
            return self._load_documents_from_ids()
        else:
            return self._load_file_from_ids()

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/googledrive.html
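
A usage sketch (the folder id is a placeholder; credentials are expected under ~/.credentials as in the field defaults above):

from langchain.document_loaders.googledrive import GoogleDriveLoader

loader = GoogleDriveLoader(
    folder_id="1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5",  # placeholder folder id
    file_types=["document", "sheet"],  # short forms expanded by the validator
    recursive=False,
)
docs = loader.load()
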
Source code for langchain.document_loaders.stripe

"""Loader that fetches data from Stripe"""
import json
import urllib.request
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_dict

STRIPE_ENDPOINTS = {
    "balance_transactions": "https://api.stripe.com/v1/balance_transactions",
    "charges": "https://api.stripe.com/v1/charges",
    "customers": "https://api.stripe.com/v1/customers",
    "events": "https://api.stripe.com/v1/events",
    "refunds": "https://api.stripe.com/v1/refunds",
    "disputes": "https://api.stripe.com/v1/disputes",
}


class StripeLoader(BaseLoader):
    """Loader that fetches data from Stripe."""

    def __init__(self, resource: str, access_token: Optional[str] = None) -> None:
        self.resource = resource
        access_token = access_token or get_from_env(
            "access_token", "STRIPE_ACCESS_TOKEN"
        )
        self.headers = {"Authorization": f"Bearer {access_token}"}

    def _make_request(self, url: str) -> List[Document]:
        request = urllib.request.Request(url, headers=self.headers)

        with urllib.request.urlopen(request) as response:
            json_data = json.loads(response.read().decode())
            text = stringify_dict(json_data)
            metadata = {"source": url}
            return [Document(page_content=text, metadata=metadata)]

    def _get_resource(self) -> List[Document]:
        endpoint = STRIPE_ENDPOINTS.get(self.resource)
        if endpoint is None:
            return []
        return self._make_request(endpoint)

    def load(self) -> List[Document]:
        return self._get_resource()

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/stripe.html
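
A usage sketch (the key is read from STRIPE_ACCESS_TOKEN when not passed explicitly):

from langchain.document_loaders.stripe import StripeLoader

loader = StripeLoader("charges")  # one of the STRIPE_ENDPOINTS keys
docs = loader.load()  # a single Document with the stringified JSON response
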
Source code for langchain.document_loaders.fauna

from typing import Iterator, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class FaunaLoader(BaseLoader):
    """FaunaDB Loader.

    Attributes:
        query (str): The FQL query string to execute.
        page_content_field (str): The field that contains the content of each page.
        secret (str): The secret key for authenticating to FaunaDB.
        metadata_fields (Optional[Sequence[str]]):
            Optional list of field names to include in metadata.
    """

    def __init__(
        self,
        query: str,
        page_content_field: str,
        secret: str,
        metadata_fields: Optional[Sequence[str]] = None,
    ):
        self.query = query
        self.page_content_field = page_content_field
        self.secret = secret
        self.metadata_fields = metadata_fields

    def load(self) -> List[Document]:
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        try:
            from fauna import Page, fql
            from fauna.client import Client
            from fauna.encoding import QuerySuccess
        except ImportError:
            raise ImportError(
                "Could not import fauna python package. "
                "Please install it with `pip install fauna`."
            )
        # Create Fauna Client
        client = Client(secret=self.secret)
        # Run FQL Query
        response: QuerySuccess = client.query(fql(self.query))
        page: Page = response.data
        for result in page:
            if result is not None:
                document_dict = dict(result.items())
                page_content = ""
                for key, value in document_dict.items():
                    if key == self.page_content_field:
                        page_content = value
                document: Document = Document(
                    page_content=page_content,
                    metadata={"id": result.id, "ts": result.ts},
                )
                yield document
        if page.after is not None:
            yield Document(
                page_content="Next Page Exists",
                metadata={"after": page.after},
            )

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/fauna.html
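
A usage sketch (the FQL query, field name, and secret are placeholders; requires `pip install fauna`):

from langchain.document_loaders.fauna import FaunaLoader

loader = FaunaLoader(
    query="Item.all()",          # placeholder FQL query
    page_content_field="text",   # document field used as page_content
    secret="fn_secret_XXXX",     # placeholder Fauna secret key
)
docs = loader.load()  # a trailing "Next Page Exists" Document marks extra pages
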
Source code for langchain.document_loaders.duckdb_loader

from typing import Dict, List, Optional, cast

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class DuckDBLoader(BaseLoader):
    """Loads a query result from DuckDB into a list of documents.

    Each document represents one row of the result. The `page_content_columns`
    are written into the `page_content` of the document. The `metadata_columns`
    are written into the `metadata` of the document. By default, all columns
    are written into the `page_content` and none into the `metadata`.
    """

    def __init__(
        self,
        query: str,
        database: str = ":memory:",
        read_only: bool = False,
        config: Optional[Dict[str, str]] = None,
        page_content_columns: Optional[List[str]] = None,
        metadata_columns: Optional[List[str]] = None,
    ):
        self.query = query
        self.database = database
        self.read_only = read_only
        self.config = config or {}
        self.page_content_columns = page_content_columns
        self.metadata_columns = metadata_columns

    def load(self) -> List[Document]:
        try:
            import duckdb
        except ImportError:
            raise ImportError(
                "Could not import duckdb python package. "
                "Please install it with `pip install duckdb`."
            )

        docs = []
        with duckdb.connect(
            database=self.database, read_only=self.read_only, config=self.config
        ) as con:
            query_result = con.execute(self.query)
            results = query_result.fetchall()
            description = cast(list, query_result.description)
            field_names = [c[0] for c in description]

            if self.page_content_columns is None:
                page_content_columns = field_names
            else:
                page_content_columns = self.page_content_columns

            if self.metadata_columns is None:
                metadata_columns = []
            else:
                metadata_columns = self.metadata_columns

            for result in results:
                page_content = "\n".join(
                    f"{column}: {result[field_names.index(column)]}"
                    for column in page_content_columns
                )
                metadata = {
                    column: result[field_names.index(column)]
                    for column in metadata_columns
                }
                doc = Document(page_content=page_content, metadata=metadata)
                docs.append(doc)

        return docs

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/duckdb_loader.html
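
A usage sketch with an in-memory database and an inline query (requires `pip install duckdb`):

from langchain.document_loaders.duckdb_loader import DuckDBLoader

loader = DuckDBLoader(
    "SELECT 1 AS id, 'hello' AS text",
    page_content_columns=["text"],  # written into page_content
    metadata_columns=["id"],        # written into metadata
)
docs = loader.load()  # [Document(page_content="text: hello", metadata={"id": 1})]
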
Source code for langchain.document_loaders.gutenberg

"""Loader that loads .txt web files."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class GutenbergLoader(BaseLoader):
    """Loader that uses urllib to load .txt web files."""

    def __init__(self, file_path: str):
        """Initialize with file path."""
        if not file_path.startswith("https://www.gutenberg.org"):
            raise ValueError("file path must start with 'https://www.gutenberg.org'")

        if not file_path.endswith(".txt"):
            raise ValueError("file path must end with '.txt'")

        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load file."""
        from urllib.request import urlopen

        elements = urlopen(self.file_path)
        text = "\n\n".join([str(el.decode("utf-8-sig")) for el in elements])
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]

Source: https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gutenberg.html
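
A usage sketch (any Project Gutenberg plain-text URL works):

from langchain.document_loaders.gutenberg import GutenbergLoader

loader = GutenbergLoader("https://www.gutenberg.org/cache/epub/69972/pg69972.txt")
docs = loader.load()  # a single Document; the URL becomes the source metadata
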
1227e3d833ee-0
|
Source code for langchain.document_loaders.youtube
"""Loader that loads YouTube transcript."""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from urllib.parse import parse_qs, urlparse
from pydantic import root_validator
from pydantic.dataclasses import dataclass
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]
@dataclass
class GoogleApiClient:
"""A Generic Google Api Client.
To use, you should have the ``google_auth_oauthlib,youtube_transcript_api,google``
python package installed.
Since the Google API expects credentials, you need to set up a Google account and
register your service: "https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
"""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
def __post_init__(self) -> None:
self.creds = self._load_credentials()
@root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("credentials_path") and not values.get(
"service_account_path"
):
raise ValueError("Must specify either channel_name or video_ids")
return values
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
creds = None
if self.service_account_path.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_path)
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
ALLOWED_SCHEMAS = {"http", "https"}
ALLOWED_NETLOCK = {
"youtu.be",
"m.youtube.com",
"youtube.com",
"www.youtube.com",
"www.youtube-nocookie.com",
"vid.plus",
}
def _parse_video_id(url: str) -> Optional[str]:
"""Parse a youtube url and return the video id if valid, otherwise None."""
parsed_url = urlparse(url)
if parsed_url.scheme not in ALLOWED_SCHEMAS:
return None
if parsed_url.netloc not in ALLOWED_NETLOCK:
return None
path = parsed_url.path
if path.endswith("/watch"):
query = parsed_url.query
parsed_query = parse_qs(query)
if "v" in parsed_query:
ids = parsed_query["v"]
video_id = ids if isinstance(ids, str) else ids[0]
else:
return None
else:
path = parsed_url.path.lstrip("/")
video_id = path.split("/")[-1]
if len(video_id) != 11: # Video IDs are 11 characters long
return None
return video_id
[docs]class YoutubeLoader(BaseLoader):
"""Loader that loads Youtube transcripts."""
def __init__(
self,
video_id: str,
add_video_info: bool = False,
language: Union[str, Sequence[str]] = "en",
translation: str = "en",
continue_on_failure: bool = False,
):
"""Initialize with YouTube video ID."""
self.video_id = video_id
self.add_video_info = add_video_info
self.language = language
if isinstance(language, str):
self.language = [language]
else:
self.language = language
self.translation = translation
self.continue_on_failure = continue_on_failure
[docs] @staticmethod
def extract_video_id(youtube_url: str) -> str:
"""Extract video id from common YT urls."""
video_id = _parse_video_id(youtube_url)
if not video_id:
raise ValueError(
f"Could not determine the video ID for the URL {youtube_url}"
)
return video_id
[docs] @classmethod
def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
"""Given youtube URL, load video."""
video_id = cls.extract_video_id(youtube_url)
return cls(video_id, **kwargs)
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
YouTubeTranscriptApi,
)
except ImportError:
raise ImportError(
"Could not import youtube_transcript_api python package. "
"Please install it with `pip install youtube-transcript-api`."
)
metadata = {"source": self.video_id}
if self.add_video_info:
# Get more video meta info
# Such as title, description, thumbnail url, publish_date
video_info = self._get_video_info()
metadata.update(video_info)
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
except TranscriptsDisabled:
return []
try:
transcript = transcript_list.find_transcript(self.language)
except NoTranscriptFound:
en_transcript = transcript_list.find_transcript(["en"])
transcript = en_transcript.translate(self.translation)
transcript_pieces = transcript.fetch()
transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces])
return [Document(page_content=transcript, metadata=metadata)]
def _get_video_info(self) -> dict:
"""Get important video information.
Components are:
- title
- description
- thumbnail url,
- publish_date
- channel_author
- and more.
"""
try:
from pytube import YouTube
except ImportError:
raise ImportError(
"Could not import pytube python package. "
"Please install it with `pip install pytube`."
)
yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
video_info = {
"title": yt.title or "Unknown",
"description": yt.description or "Unknown",
"view_count": yt.views or 0,
"thumbnail_url": yt.thumbnail_url or "Unknown",
"publish_date": yt.publish_date.strftime("%Y-%m-%d %H:%M:%S")
if yt.publish_date
else "Unknown",
"length": yt.length or 0,
"author": yt.author or "Unknown",
}
return video_info
[docs]@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
"""Loader that loads all Videos from a Channel
To use, you should have the ``googleapiclient`` and ``youtube_transcript_api``
python packages installed.
As the service needs a google_api_client, you first have to initialize
the GoogleApiClient.
Additionally, you have to provide either a channel name or a list of video ids.
"https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
from langchain.document_loaders import GoogleApiYoutubeLoader
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
google_api_client=google_api_client,
channel_name = "CodeAesthetic"
)
loader.load()
"""
google_api_client: GoogleApiClient
channel_name: Optional[str] = None
video_ids: Optional[List[str]] = None
add_video_info: bool = True
captions_language: str = "en"
continue_on_failure: bool = False
def __post_init__(self) -> None:
self.youtube_client = self._build_youtube_client(self.google_api_client.creds)
def _build_youtube_client(self, creds: Any) -> Any:
try:
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib "
"youtube-transcript-api` "
"to use the Google Drive loader"
)
return build("youtube", "v3", credentials=creds)
[docs] @root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("channel_name") and not values.get("video_ids"):
raise ValueError("Must specify either channel_name or video_ids")
return values
def _get_transcript_for_video_id(self, video_id: str) -> str:
from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
try:
transcript = transcript_list.find_transcript([self.captions_language])
except NoTranscriptFound:
# Fall back to translating whichever transcripts are available into the
# requested captions language; the last available transcript wins.
for available_transcript in transcript_list:
transcript = available_transcript.translate(self.captions_language)
transcript_pieces = transcript.fetch()
return " ".join([t["text"].strip(" ") for t in transcript_pieces])
def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
captions = self._get_transcript_for_video_id(video_id)
video_response = (
self.youtube_client.videos()
.list(
part="id,snippet",
id=video_id,
)
.execute()
)
return Document(
page_content=captions,
metadata=video_response.get("items")[0],
)
def _get_channel_id(self, channel_name: str) -> str:
request = self.youtube_client.search().list(
part="id",
q=channel_name,
type="channel",
maxResults=1, # we only need one result since channel names are unique
)
response = request.execute()
channel_id = response["items"][0]["id"]["channelId"]
return channel_id
def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]:
try:
from youtube_transcript_api import (
NoTranscriptFound,
TranscriptsDisabled,
)
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"youtube-transcript-api` "
"to use the youtube loader"
)
channel_id = self._get_channel_id(channel)
request = self.youtube_client.search().list(
part="id,snippet",
channelId=channel_id,
maxResults=50, # adjust this value to retrieve more or fewer videos
)
video_ids = []
while request is not None:
response = request.execute()
# Add each video ID to the list
for item in response["items"]:
if not item["id"].get("videoId"):
continue
meta_data = {"videoId": item["id"]["videoId"]}
if self.add_video_info:
item["snippet"].pop("thumbnails")
meta_data.update(item["snippet"])
try:
page_content = self._get_transcript_for_video_id(
item["id"]["videoId"]
)
video_ids.append(
Document(
page_content=page_content,
metadata=meta_data,
)
)
except (TranscriptsDisabled, NoTranscriptFound) as e:
if self.continue_on_failure:
logger.error(
"Error fetching transcript "
+ f"{item['id']['videoId']}, exception: {e}"
)
else:
raise e
request = self.youtube_client.search().list_next(request, response)
return video_ids
[docs] def load(self) -> List[Document]:
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend(
[
self._get_document_for_video_id(video_id)
for video_id in self.video_ids
]
)
else:
raise ValueError("Must specify either channel_name or video_ids")
return document_list
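A minimal usage sketch for YoutubeLoader (the video URL is a placeholder; add_video_info additionally requires `pip install pytube`):
from langchain.document_loaders import YoutubeLoader
loader = YoutubeLoader.from_youtube_url(
    "https://www.youtube.com/watch?v=QsYGlZkevEg",  # placeholder video
    add_video_info=True,
    language=["en"],
)
docs = loader.load()  # one Document containing the full transcript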
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
|
98c132bf4325-0
|
Source code for langchain.document_loaders.modern_treasury
"""Loader that fetches data from Modern Treasury"""
import json
import urllib.request
from base64 import b64encode
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_value
MODERN_TREASURY_ENDPOINTS = {
"payment_orders": "https://app.moderntreasury.com/api/payment_orders",
"expected_payments": "https://app.moderntreasury.com/api/expected_payments",
"returns": "https://app.moderntreasury.com/api/returns",
"incoming_payment_details": "https://app.moderntreasury.com/api/\
incoming_payment_details",
"counterparties": "https://app.moderntreasury.com/api/counterparties",
"internal_accounts": "https://app.moderntreasury.com/api/internal_accounts",
"external_accounts": "https://app.moderntreasury.com/api/external_accounts",
"transactions": "https://app.moderntreasury.com/api/transactions",
"ledgers": "https://app.moderntreasury.com/api/ledgers",
"ledger_accounts": "https://app.moderntreasury.com/api/ledger_accounts",
"ledger_transactions": "https://app.moderntreasury.com/api/ledger_transactions",
"events": "https://app.moderntreasury.com/api/events",
"invoices": "https://app.moderntreasury.com/api/invoices",
}
[docs]class ModernTreasuryLoader(BaseLoader):
"""Loader that fetches data from Modern Treasury."""
def __init__(
self,
resource: str,
organization_id: Optional[str] = None,
api_key: Optional[str] = None,
) -> None:
self.resource = resource
organization_id = organization_id or get_from_env(
"organization_id", "MODERN_TREASURY_ORGANIZATION_ID"
)
api_key = api_key or get_from_env("api_key", "MODERN_TREASURY_API_KEY")
credentials = f"{organization_id}:{api_key}".encode("utf-8")
basic_auth_token = b64encode(credentials).decode("utf-8")
self.headers = {"Authorization": f"Basic {basic_auth_token}"}
def _make_request(self, url: str) -> List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_value(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
[docs] def load(self) -> List[Document]:
return self._get_resource()
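A minimal usage sketch (the credential values are placeholders; they may also be supplied via the MODERN_TREASURY_ORGANIZATION_ID and MODERN_TREASURY_API_KEY environment variables):
from langchain.document_loaders import ModernTreasuryLoader
loader = ModernTreasuryLoader(
    "payment_orders",
    organization_id="my-org-id",  # placeholder
    api_key="my-api-key",  # placeholder
)
docs = loader.load()  # one Document with the stringified JSON response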
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/modern_treasury.html
|
2385dbb1d587-0
|
Source code for langchain.document_loaders.twitter
"""Twitter document loader."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import tweepy
from tweepy import OAuth2BearerHandler, OAuthHandler
def _dependable_tweepy_import() -> tweepy:
try:
import tweepy
except ImportError:
raise ImportError(
"tweepy package not found, please install it with `pip install tweepy`"
)
return tweepy
[docs]class TwitterTweetLoader(BaseLoader):
"""Twitter tweets loader.
Reads the tweets of a user's Twitter handle.
First you need to go to
`https://developer.twitter.com/en/docs/twitter-api
/getting-started/getting-access-to-the-twitter-api`
to get your token, and create a v2 version of the app.
"""
def __init__(
self,
auth_handler: Union[OAuthHandler, OAuth2BearerHandler],
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
):
self.auth = auth_handler
self.twitter_users = twitter_users
self.number_tweets = number_tweets
[docs] def load(self) -> List[Document]:
"""Load tweets."""
tweepy = _dependable_tweepy_import()
api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser())
results: List[Document] = []
for username in self.twitter_users:
tweets = api.user_timeline(screen_name=username, count=self.number_tweets)
user = api.get_user(screen_name=username)
docs = self._format_tweets(tweets, user)
results.extend(docs)
return results
def _format_tweets(
self, tweets: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format tweets into a string."""
for tweet in tweets:
metadata = {
"created_at": tweet["created_at"],
"user_info": user_info,
}
yield Document(
page_content=tweet["text"],
metadata=metadata,
)
[docs] @classmethod
def from_bearer_token(
cls,
oauth2_bearer_token: str,
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
) -> TwitterTweetLoader:
"""Create a TwitterTweetLoader from OAuth2 bearer token."""
tweepy = _dependable_tweepy_import()
auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token)
return cls(
auth_handler=auth,
twitter_users=twitter_users,
number_tweets=number_tweets,
)
[docs] @classmethod
def from_secrets(
cls,
access_token: str,
access_token_secret: str,
consumer_key: str,
consumer_secret: str,
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
) -> TwitterTweetLoader:
"""Create a TwitterTweetLoader from access tokens and secrets."""
tweepy = _dependable_tweepy_import()
auth = tweepy.OAuthHandler(
access_token=access_token,
access_token_secret=access_token_secret,
consumer_key=consumer_key,
consumer_secret=consumer_secret,
)
return cls(
auth_handler=auth,
twitter_users=twitter_users,
number_tweets=number_tweets,
)
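A minimal usage sketch (the bearer token and handle are placeholders):
from langchain.document_loaders import TwitterTweetLoader
loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="YOUR_BEARER_TOKEN",  # placeholder
    twitter_users=["hwchase17"],  # placeholder handle
    number_tweets=10,
)
docs = loader.load()  # one Document per tweet, user info in metadata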
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/twitter.html
|
047f0c43440f-0
|
Source code for langchain.document_loaders.toml
import json
from pathlib import Path
from typing import Iterator, List, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class TomlLoader(BaseLoader):
"""
A TOML document loader that inherits from the BaseLoader class.
This class can be initialized with either a single source file or a source
directory containing TOML files.
"""
def __init__(self, source: Union[str, Path]):
"""Initialize the TomlLoader with a source file or directory."""
self.source = Path(source)
[docs] def load(self) -> List[Document]:
"""Load and return all documents."""
return list(self.lazy_load())
[docs] def lazy_load(self) -> Iterator[Document]:
"""Lazily load the TOML documents from the source file or directory."""
import tomli
if self.source.is_file() and self.source.suffix == ".toml":
files = [self.source]
elif self.source.is_dir():
files = list(self.source.glob("**/*.toml"))
else:
raise ValueError("Invalid source path or file type")
for file_path in files:
with file_path.open("r", encoding="utf-8") as file:
content = file.read()
try:
data = tomli.loads(content)
doc = Document(
page_content=json.dumps(data),
metadata={"source": str(file_path)},
)
yield doc
except tomli.TOMLDecodeError as e:
print(f"Error parsing TOML file {file_path}: {e}")
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/toml.html
|
ca00ee739fa6-0
|
Source code for langchain.document_loaders.tencent_cos_directory
"""Loading logic for loading documents from Tencent Cloud COS directory."""
from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.tencent_cos_file import TencentCOSFileLoader
[docs]class TencentCOSDirectoryLoader(BaseLoader):
"""Loading logic for loading documents from Tencent Cloud COS."""
def __init__(self, conf: Any, bucket: str, prefix: str = ""):
"""Initialize with COS config, bucket and prefix.
:param conf(CosConfig): COS config.
:param bucket(str): COS bucket.
:param prefix(str): prefix.
"""
self.conf = conf
self.bucket = bucket
self.prefix = prefix
[docs] def load(self) -> List[Document]:
return list(self.lazy_load())
[docs] def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from qcloud_cos import CosS3Client
except ImportError:
raise ValueError(
"Could not import cos-python-sdk-v5 python package. "
"Please install it with `pip install cos-python-sdk-v5`."
)
client = CosS3Client(self.conf)
contents = []
marker = ""
while True:
response = client.list_objects(
Bucket=self.bucket, Prefix=self.prefix, Marker=marker, MaxKeys=1000
)
if "Contents" in response:
contents.extend(response["Contents"])
if response["IsTruncated"] == "false":
break
marker = response["NextMarker"]
for content in contents:
if content["Key"].endswith("/"):
continue
loader = TencentCOSFileLoader(self.conf, self.bucket, content["Key"])
yield loader.load()[0]
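A minimal usage sketch (region, credentials, bucket, and prefix are placeholders):
from qcloud_cos import CosConfig
from langchain.document_loaders import TencentCOSDirectoryLoader
conf = CosConfig(
    Region="ap-guangzhou",  # placeholder
    SecretId="YOUR_SECRET_ID",  # placeholder
    SecretKey="YOUR_SECRET_KEY",  # placeholder
)
loader = TencentCOSDirectoryLoader(conf=conf, bucket="my-bucket-1250000000", prefix="docs/")
docs = loader.load()  # one Document per object under the prefix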
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tencent_cos_directory.html
|
2d76eb4740b4-0
|
Source code for langchain.document_loaders.pdf
"""Loader that loads PDF files."""
import json
import logging
import os
import tempfile
import time
from abc import ABC
from io import StringIO
from pathlib import Path
from typing import Any, Iterator, List, Mapping, Optional, Union
from urllib.parse import urlparse
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.pdf import (
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
)
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__file__)
[docs]class UnstructuredPDFLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load PDF files."""
def _get_elements(self) -> List:
from unstructured.partition.pdf import partition_pdf
return partition_pdf(filename=self.file_path, **self.unstructured_kwargs)
[docs]class BasePDFLoader(BaseLoader, ABC):
"""Base loader class for PDF files.
Defaults to checking for a local file; if the file is a web path, it will
download it to a temporary file, use that, and then clean up the temporary
file after completion.
"""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
self.web_path = None
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_dir = tempfile.TemporaryDirectory()
temp_pdf = Path(self.temp_dir.name) / "tmp.pdf"
with open(temp_pdf, mode="wb") as f:
f.write(r.content)
self.file_path = str(temp_pdf)
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_dir"):
self.temp_dir.cleanup()
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
@property
def source(self) -> str:
return self.web_path if self.web_path is not None else self.file_path
[docs]class OnlinePDFLoader(BasePDFLoader):
"""Loader that loads online PDFs."""
[docs] def load(self) -> List[Document]:
"""Load documents."""
loader = UnstructuredPDFLoader(str(self.file_path))
return loader.load()
[docs]class PyPDFLoader(BasePDFLoader):
"""Loads a PDF with pypdf and chunks at character level.
Loader also stores page numbers in metadatas.
"""
def __init__(
self, file_path: str, password: Optional[Union[str, bytes]] = None
) -> None:
"""Initialize with file path."""
try:
import pypdf # noqa:F401
except ImportError:
raise ImportError(
"pypdf package not found, please install it with " "`pip install pypdf`"
)
self.parser = PyPDFParser(password=password)
super().__init__(file_path)
[docs] def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
blob = Blob.from_path(self.file_path)
yield from self.parser.parse(blob)
[docs]class PyPDFium2Loader(BasePDFLoader):
"""Loads a PDF with pypdfium2 and chunks at character level."""
def __init__(self, file_path: str):
"""Initialize with file path."""
super().__init__(file_path)
self.parser = PyPDFium2Parser()
[docs] def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
blob = Blob.from_path(self.file_path)
yield from self.parser.parse(blob)
[docs]class PyPDFDirectoryLoader(BaseLoader):
"""Loads a directory with PDF files with pypdf and chunks at character level.
Loader also stores page numbers in metadatas.
"""
def __init__(
self,
path: str,
glob: str = "**/[!.]*.pdf",
silent_errors: bool = False,
load_hidden: bool = False,
recursive: bool = False,
):
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.recursive = recursive
self.silent_errors = silent_errors
@staticmethod
def _is_visible(path: Path) -> bool:
return not any(part.startswith(".") for part in path.parts)
[docs] def load(self) -> List[Document]:
p = Path(self.path)
docs = []
items = p.rglob(self.glob) if self.recursive else p.glob(self.glob)
for i in items:
if i.is_file():
if self._is_visible(i.relative_to(p)) or self.load_hidden:
try:
loader = PyPDFLoader(str(i))
sub_docs = loader.load()
for doc in sub_docs:
doc.metadata["source"] = str(i)
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(e)
else:
raise e
return docs
[docs]class PDFMinerLoader(BasePDFLoader):
"""Loader that uses PDFMiner to load PDF files."""
def __init__(self, file_path: str) -> None:
"""Initialize with file path."""
try:
from pdfminer.high_level import extract_text # noqa:F401
except ImportError:
raise ImportError(
"`pdfminer` package not found, please install it with "
"`pip install pdfminer.six`"
)
super().__init__(file_path)
self.parser = PDFMinerParser()
[docs] def load(self) -> List[Document]:
"""Eagerly load the content."""
return list(self.lazy_load())
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily lod documents."""
blob = Blob.from_path(self.file_path)
yield from self.parser.parse(blob)
[docs]class PDFMinerPDFasHTMLLoader(BasePDFLoader):
"""Loader that uses PDFMiner to load PDF files as HTML content."""
def __init__(self, file_path: str):
"""Initialize with file path."""
try:
from pdfminer.high_level import extract_text_to_fp # noqa:F401
except ImportError:
raise ImportError(
"`pdfminer` package not found, please install it with "
"`pip install pdfminer.six`"
)
super().__init__(file_path)
[docs] def load(self) -> List[Document]:
"""Load file."""
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams
from pdfminer.utils import open_filename
output_string = StringIO()
with open_filename(self.file_path, "rb") as fp:
extract_text_to_fp(
fp, # type: ignore[arg-type]
output_string,
codec="",
laparams=LAParams(),
output_type="html",
)
metadata = {"source": self.file_path}
return [Document(page_content=output_string.getvalue(), metadata=metadata)]
[docs]class PyMuPDFLoader(BasePDFLoader):
"""Loader that uses PyMuPDF to load PDF files."""
def __init__(self, file_path: str) -> None:
"""Initialize with file path."""
try:
import fitz # noqa:F401
except ImportError:
raise ImportError(
"`PyMuPDF` package not found, please install it with "
"`pip install pymupdf`"
)
super().__init__(file_path)
[docs] def load(self, **kwargs: Optional[Any]) -> List[Document]:
"""Load file."""
parser = PyMuPDFParser(text_kwargs=kwargs)
blob = Blob.from_path(self.file_path)
return parser.parse(blob)
# MathpixPDFLoader implementation taken largely from Daniel Gross's:
# https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21
[docs]class MathpixPDFLoader(BasePDFLoader):
def __init__(
self,
file_path: str,
processed_file_format: str = "mmd",
max_wait_time_seconds: int = 500,
should_clean_pdf: bool = False,
**kwargs: Any,
) -> None:
super().__init__(file_path)
self.mathpix_api_key = get_from_dict_or_env(
kwargs, "mathpix_api_key", "MATHPIX_API_KEY"
)
self.mathpix_api_id = get_from_dict_or_env(
kwargs, "mathpix_api_id", "MATHPIX_API_ID"
)
self.processed_file_format = processed_file_format
self.max_wait_time_seconds = max_wait_time_seconds
self.should_clean_pdf = should_clean_pdf
@property
def headers(self) -> dict:
return {"app_id": self.mathpix_api_id, "app_key": self.mathpix_api_key}
@property
def url(self) -> str:
return "https://api.mathpix.com/v3/pdf"
@property
def data(self) -> dict:
options = {"conversion_formats": {self.processed_file_format: True}}
return {"options_json": json.dumps(options)}
[docs] def send_pdf(self) -> str:
with open(self.file_path, "rb") as f:
files = {"file": f}
response = requests.post(
self.url, headers=self.headers, files=files, data=self.data
)
response_data = response.json()
if "pdf_id" in response_data:
pdf_id = response_data["pdf_id"]
return pdf_id
else:
raise ValueError("Unable to send PDF to Mathpix.")
[docs] def wait_for_processing(self, pdf_id: str) -> None:
url = self.url + "/" + pdf_id
for _ in range(0, self.max_wait_time_seconds, 5):
response = requests.get(url, headers=self.headers)
response_data = response.json()
status = response_data.get("status", None)
if status == "completed":
return
elif status == "error":
raise ValueError("Unable to retrieve PDF from Mathpix")
else:
print(f"Status: {status}, waiting for processing to complete")
time.sleep(5)
raise TimeoutError
[docs] def get_processed_pdf(self, pdf_id: str) -> str:
self.wait_for_processing(pdf_id)
url = f"{self.url}/{pdf_id}.{self.processed_file_format}"
response = requests.get(url, headers=self.headers)
return response.content.decode("utf-8")
[docs] def clean_pdf(self, contents: str) -> str:
contents = "\n".join(
[line for line in contents.split("\n") if not line.startswith("![]")]
)
# replace \section{Title} with # Title
contents = contents.replace("\\section{", "# ").replace("}", "")
# replace the "\" slash that Mathpix adds to escape $, %, (, etc.
contents = (
contents.replace(r"\$", "$")
.replace(r"\%", "%")
.replace(r"\(", "(")
.replace(r"\)", ")")
)
return contents
[docs] def load(self) -> List[Document]:
pdf_id = self.send_pdf()
contents = self.get_processed_pdf(pdf_id)
if self.should_clean_pdf:
contents = self.clean_pdf(contents)
metadata = {"source": self.source, "file_path": self.source}
return [Document(page_content=contents, metadata=metadata)]
[docs]class PDFPlumberLoader(BasePDFLoader):
"""Loader that uses pdfplumber to load PDF files."""
def __init__(
self, file_path: str, text_kwargs: Optional[Mapping[str, Any]] = None
) -> None:
"""Initialize with file path."""
try:
import pdfplumber # noqa:F401
except ImportError:
raise ImportError(
"pdfplumber package not found, please install it with "
"`pip install pdfplumber`"
)
super().__init__(file_path)
self.text_kwargs = text_kwargs or {}
[docs] def load(self) -> List[Document]:
"""Load file."""
parser = PDFPlumberParser(text_kwargs=self.text_kwargs)
blob = Blob.from_path(self.file_path)
return parser.parse(blob)
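A minimal usage sketch for PyPDFLoader (the file name is a placeholder):
from langchain.document_loaders import PyPDFLoader
loader = PyPDFLoader("example.pdf")  # placeholder path
pages = loader.load()  # one Document per page
print(pages[0].metadata)  # e.g. {"source": "example.pdf", "page": 0}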
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/pdf.html
|
b94cbae8a157-0
|
Source code for langchain.document_loaders.markdown
"""Loader that loads Markdown files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredMarkdownLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load markdown files."""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.partition.md import partition_md
# NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
# versions of unstructured like 0.4.17-dev1
_unstructured_version = __unstructured_version__.split("-")[0]
unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
if unstructured_version < (0, 4, 16):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning markdown files is only supported in unstructured>=0.4.16."
)
return partition_md(filename=self.file_path, **self.unstructured_kwargs)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/markdown.html
|
bbd45aaca7a3-0
|
Source code for langchain.document_loaders.dataframe
"""Load from Dataframe object"""
from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class DataFrameLoader(BaseLoader):
"""Load Pandas DataFrames."""
def __init__(self, data_frame: Any, page_content_column: str = "text"):
"""Initialize with dataframe object."""
import pandas as pd
if not isinstance(data_frame, pd.DataFrame):
raise ValueError(
f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}"
)
self.data_frame = data_frame
self.page_content_column = page_content_column
[docs] def lazy_load(self) -> Iterator[Document]:
"""Lazy load records from dataframe."""
for _, row in self.data_frame.iterrows():
text = row[self.page_content_column]
metadata = row.to_dict()
metadata.pop(self.page_content_column)
yield Document(page_content=text, metadata=metadata)
[docs] def load(self) -> List[Document]:
"""Load full dataframe."""
return list(self.lazy_load())
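A minimal usage sketch:
import pandas as pd
from langchain.document_loaders import DataFrameLoader
df = pd.DataFrame({"text": ["first row", "second row"], "author": ["alice", "bob"]})
loader = DataFrameLoader(df, page_content_column="text")
docs = loader.load()
# docs[0].page_content == "first row"; docs[0].metadata == {"author": "alice"}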
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/dataframe.html
|
5a6bb948480b-0
|
Source code for langchain.document_loaders.generic
from __future__ import annotations
from pathlib import Path
from typing import Iterator, List, Literal, Optional, Sequence, Union
from langchain.document_loaders.base import BaseBlobParser, BaseLoader
from langchain.document_loaders.blob_loaders import BlobLoader, FileSystemBlobLoader
from langchain.document_loaders.parsers.registry import get_parser
from langchain.schema import Document
from langchain.text_splitter import TextSplitter
_PathLike = Union[str, Path]
DEFAULT = Literal["default"]
[docs]class GenericLoader(BaseLoader):
"""A generic document loader.
A generic document loader that allows combining an arbitrary blob loader with
a blob parser.
Examples:
.. code-block:: python
from langchain.document_loaders import GenericLoader
from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
loader = GenericLoader.from_filesystem(
path="path/to/directory",
glob="**/[!.]*",
suffixes=[".pdf"],
show_progress=True,
)
docs = loader.lazy_load()
next(docs)
Example instantiations to change which files are loaded:
.. code-block:: python
# Recursively load all text files in a directory.
loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/*.txt")
# Recursively load all non-hidden files in a directory.
loader = GenericLoader.from_filesystem("/path/to/dir", glob="**/[!.]*")
# Load all files in a directory without recursion.
loader = GenericLoader.from_filesystem("/path/to/dir", glob="*")
Example instantiations to change which parser is used:
.. code-block:: python
from langchain.document_loaders.parsers.pdf import PyPDFParser
# Recursively load all PDF files in a directory.
loader = GenericLoader.from_filesystem(
"/path/to/dir",
glob="**/*.pdf",
parser=PyPDFParser()
)
"""
def __init__(
self,
blob_loader: BlobLoader,
blob_parser: BaseBlobParser,
) -> None:
"""A generic document loader.
Args:
blob_loader: A blob loader which knows how to yield blobs
blob_parser: A blob parser which knows how to parse blobs into documents
"""
self.blob_loader = blob_loader
self.blob_parser = blob_parser
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily. Use this when working at a large scale."""
for blob in self.blob_loader.yield_blobs():
yield from self.blob_parser.lazy_parse(blob)
[docs] def load(self) -> List[Document]:
"""Load all documents."""
return list(self.lazy_load())
[docs] def load_and_split(
self, text_splitter: Optional[TextSplitter] = None
) -> List[Document]:
"""Load all documents and split them into sentences."""
raise NotImplementedError(
"Loading and splitting is not yet implemented for generic loaders. "
"When they will be implemented they will be added via the initializer. "
"This method should not be used going forward."
)
[docs] @classmethod
def from_filesystem(
cls,
path: _PathLike,
*,
glob: str = "**/[!.]*",
suffixes: Optional[Sequence[str]] = None,
show_progress: bool = False,
parser: Union[DEFAULT, BaseBlobParser] = "default",
) -> GenericLoader:
"""Create a generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
Returns:
A generic document loader.
"""
blob_loader = FileSystemBlobLoader(
path, glob=glob, suffixes=suffixes, show_progress=show_progress
)
if isinstance(parser, str):
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser)
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/generic.html
|
7655bc962825-0
|
Source code for langchain.document_loaders.bigquery
from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
from google.auth.credentials import Credentials
[docs]class BigQueryLoader(BaseLoader):
"""Loads a query result from BigQuery into a list of documents.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
project: Optional[str] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
credentials: Optional[Credentials] = None,
):
"""Initialize BigQuery document loader.
Args:
query: The query to run in BigQuery.
project: Optional. The project to run the query in.
page_content_columns: Optional. The columns to write into the `page_content`
of the document.
metadata_columns: Optional. The columns to write into the `metadata` of the
document.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
(`google.auth.compute_engine.Credentials`) or Service Account
(`google.oauth2.service_account.Credentials`) credentials directly.
"""
self.query = query
self.project = project
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
self.credentials = credentials
[docs] def load(self) -> List[Document]:
try:
from google.cloud import bigquery
except ImportError as ex:
raise ValueError(
"Could not import google-cloud-bigquery python package. "
"Please install it with `pip install google-cloud-bigquery`."
) from ex
bq_client = bigquery.Client(credentials=self.credentials, project=self.project)
query_result = bq_client.query(self.query).result()
docs: List[Document] = []
page_content_columns = self.page_content_columns
metadata_columns = self.metadata_columns
if page_content_columns is None:
page_content_columns = [column.name for column in query_result.schema]
if metadata_columns is None:
metadata_columns = []
for row in query_result:
page_content = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in page_content_columns
)
metadata = {k: v for k, v in row.items() if k in metadata_columns}
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
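A minimal usage sketch (project, dataset, and column names are placeholders; default application credentials are used when `credentials` is not passed):
from langchain.document_loaders import BigQueryLoader
loader = BigQueryLoader(
    query="SELECT title, body FROM `my-project.my_dataset.articles` LIMIT 10",
    page_content_columns=["body"],
    metadata_columns=["title"],
)
docs = loader.load()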
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/bigquery.html
|
8964f0de1c8f-0
|
Source code for langchain.document_loaders.mastodon
"""Mastodon document loader."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import mastodon
def _dependable_mastodon_import() -> mastodon:
try:
import mastodon
except ImportError:
raise ValueError(
"Mastodon.py package not found, "
"please install it with `pip install Mastodon.py`"
)
return mastodon
[docs]class MastodonTootsLoader(BaseLoader):
"""Mastodon toots loader."""
def __init__(
self,
mastodon_accounts: Sequence[str],
number_toots: Optional[int] = 100,
exclude_replies: bool = False,
access_token: Optional[str] = None,
api_base_url: str = "https://mastodon.social",
):
"""Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account.
exclude_replies: Whether to exclude reply toots from the load.
access_token: An access token if toots are loaded as a Mastodon app. Can
also be specified via the environment variable "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
"""
mastodon = _dependable_mastodon_import()
access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN")
self.api = mastodon.Mastodon(
access_token=access_token, api_base_url=api_base_url
)
self.mastodon_accounts = mastodon_accounts
self.number_toots = number_toots
self.exclude_replies = exclude_replies
[docs] def load(self) -> List[Document]:
"""Load toots into documents."""
results: List[Document] = []
for account in self.mastodon_accounts:
user = self.api.account_lookup(account)
toots = self.api.account_statuses(
user.id,
only_media=False,
pinned=False,
exclude_replies=self.exclude_replies,
exclude_reblogs=True,
limit=self.number_toots,
)
docs = self._format_toots(toots, user)
results.extend(docs)
return results
def _format_toots(
self, toots: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format toots into documents.
Adding user info, and selected toot fields into the metadata.
"""
for toot in toots:
metadata = {
"created_at": toot["created_at"],
"user_info": user_info,
"is_reply": toot["in_reply_to_id"] is not None,
}
yield Document(
page_content=toot["content"],
metadata=metadata,
)
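A minimal usage sketch (the account handle is illustrative; public toots need no access token):
from langchain.document_loaders import MastodonTootsLoader
loader = MastodonTootsLoader(
    mastodon_accounts=["@Gargron@mastodon.social"],  # illustrative handle
    number_toots=20,
    exclude_replies=True,
)
docs = loader.load()  # one Document per toot (HTML content), user info in metadata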
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mastodon.html
|
53880898c7c5-0
|
Source code for langchain.document_loaders.iugu
"""Loader that fetches data from IUGU"""
import json
import urllib.request
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env, stringify_dict
IUGU_ENDPOINTS = {
"invoices": "https://api.iugu.com/v1/invoices",
"customers": "https://api.iugu.com/v1/customers",
"charges": "https://api.iugu.com/v1/charges",
"subscriptions": "https://api.iugu.com/v1/subscriptions",
"plans": "https://api.iugu.com/v1/plans",
}
[docs]class IuguLoader(BaseLoader):
"""Loader that fetches data from IUGU."""
def __init__(self, resource: str, api_token: Optional[str] = None) -> None:
self.resource = resource
api_token = api_token or get_from_env("api_token", "IUGU_API_TOKEN")
self.headers = {"Authorization": f"Bearer {api_token}"}
def _make_request(self, url: str) -> List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {"source": url}
return [Document(page_content=text, metadata=metadata)]
def _get_resource(self) -> List[Document]:
endpoint = IUGU_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
[docs] def load(self) -> List[Document]:
return self._get_resource()
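A minimal usage sketch (the token is a placeholder; it can also come from the IUGU_API_TOKEN environment variable):
from langchain.document_loaders import IuguLoader
loader = IuguLoader("invoices", api_token="YOUR_IUGU_TOKEN")  # placeholder token
docs = loader.load()  # one Document with the stringified JSON response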
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/iugu.html
|
c003b5158f19-0
|
Source code for langchain.document_loaders.snowflake_loader
from __future__ import annotations
from typing import Any, Dict, Iterator, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class SnowflakeLoader(BaseLoader):
"""Loads a query result from Snowflake into a list of documents.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
user: str,
password: str,
account: str,
warehouse: str,
role: str,
database: str,
schema: str,
parameters: Optional[Dict[str, Any]] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
):
"""Initialize Snowflake document loader.
Args:
query: The query to run in Snowflake.
user: Snowflake user.
password: Snowflake password.
account: Snowflake account.
warehouse: Snowflake warehouse.
role: Snowflake role.
database: Snowflake database
schema: Snowflake schema
page_content_columns: Optional. Columns written to Document `page_content`.
metadata_columns: Optional. Columns written to Document `metadata`.
"""
self.query = query
self.user = user
self.password = password
self.account = account
self.warehouse = warehouse
self.role = role
self.database = database
self.schema = schema
self.parameters = parameters
self.page_content_columns = (
page_content_columns if page_content_columns is not None else ["*"]
)
self.metadata_columns = metadata_columns if metadata_columns is not None else []
def _execute_query(self) -> List[Dict[str, Any]]:
try:
import snowflake.connector
except ImportError as ex:
raise ValueError(
"Could not import snowflake-connector-python package. "
"Please install it with `pip install snowflake-connector-python`."
) from ex
conn = snowflake.connector.connect(
user=self.user,
password=self.password,
account=self.account,
warehouse=self.warehouse,
role=self.role,
database=self.database,
schema=self.schema,
parameters=self.parameters,
)
try:
cur = conn.cursor()
cur.execute("USE DATABASE " + self.database)
cur.execute("USE SCHEMA " + self.schema)
cur.execute(self.query, self.parameters)
query_result = cur.fetchall()
column_names = [column[0] for column in cur.description]
query_result = [dict(zip(column_names, row)) for row in query_result]
except Exception as e:
print(f"An error occurred: {e}")
query_result = []
finally:
cur.close()
return query_result
def _get_columns(
self, query_result: List[Dict[str, Any]]
) -> Tuple[List[str], List[str]]:
page_content_columns = (
self.page_content_columns if self.page_content_columns else []
)
metadata_columns = self.metadata_columns if self.metadata_columns else []
if page_content_columns is None and query_result:
page_content_columns = list(query_result[0].keys())
if metadata_columns is None:
metadata_columns = []
return page_content_columns or [], metadata_columns
[docs] def lazy_load(self) -> Iterator[Document]:
query_result = self._execute_query()
if isinstance(query_result, Exception):
print(f"An error occurred during the query: {query_result}")
return []
page_content_columns, metadata_columns = self._get_columns(query_result)
if "*" in page_content_columns:
page_content_columns = list(query_result[0].keys())
for row in query_result:
page_content = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in page_content_columns
)
metadata = {k: v for k, v in row.items() if k in metadata_columns}
doc = Document(page_content=page_content, metadata=metadata)
yield doc
[docs] def load(self) -> List[Document]:
"""Load data into document objects."""
return list(self.lazy_load())
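A minimal usage sketch (all connection values are placeholders; the column lists assume upper-cased names, since Snowflake typically returns column names in upper case):
from langchain.document_loaders import SnowflakeLoader
loader = SnowflakeLoader(
    query="SELECT ID, BODY FROM DOCUMENTS LIMIT 100",  # placeholder query
    user="USER",
    password="PASSWORD",
    account="ACCOUNT-IDENTIFIER",
    warehouse="COMPUTE_WH",
    role="PUBLIC",
    database="MY_DB",
    schema="PUBLIC",
    page_content_columns=["BODY"],
    metadata_columns=["ID"],
)
docs = loader.load()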
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/snowflake_loader.html
|
8a7fd2ccf1f9-0
|
Source code for langchain.document_loaders.s3_directory
"""Loading logic for loading documents from an s3 directory."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.s3_file import S3FileLoader
[docs]class S3DirectoryLoader(BaseLoader):
"""Loading logic for loading documents from s3."""
def __init__(self, bucket: str, prefix: str = ""):
"""Initialize with bucket and key name."""
self.bucket = bucket
self.prefix = prefix
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
import boto3
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
s3 = boto3.resource("s3")
bucket = s3.Bucket(self.bucket)
docs = []
for obj in bucket.objects.filter(Prefix=self.prefix):
loader = S3FileLoader(self.bucket, obj.key)
docs.extend(loader.load())
return docs
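A minimal usage sketch (bucket and prefix are placeholders; credentials come from the ambient boto3 credential chain):
from langchain.document_loaders import S3DirectoryLoader
loader = S3DirectoryLoader("my-bucket", prefix="reports/2023/")  # placeholders
docs = loader.load()  # one Document per object under the prefix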
|
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/s3_directory.html
|
7e338c5724ab-0
|
Source code for langchain.document_loaders.unstructured
"""Loader that uses unstructured to load files."""
import collections
from abc import ABC, abstractmethod
from typing import IO, Any, Dict, List, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]def satisfies_min_unstructured_version(min_version: str) -> bool:
"""Checks to see if the installed unstructured version exceeds the minimum version
for the feature in question."""
from unstructured.__version__ import __version__ as __unstructured_version__
min_version_tuple = tuple([int(x) for x in min_version.split(".")])
# NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
# versions of unstructured like 0.4.17-dev1
_unstructured_version = __unstructured_version__.split("-")[0]
unstructured_version_tuple = tuple(
[int(x) for x in _unstructured_version.split(".")]
)
return unstructured_version_tuple >= min_version_tuple
[docs]def validate_unstructured_version(min_unstructured_version: str) -> None:
"""Raises an error if the unstructured version does not exceed the
specified minimum."""
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
f"unstructured>={min_unstructured_version} is required in this loader."
)
[docs]class UnstructuredBaseLoader(BaseLoader, ABC):
"""Loader that uses unstructured to load files."""
def __init__(self, mode: str = "single", **unstructured_kwargs: Any):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
except ImportError:
raise ValueError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
_valid_modes = {"single", "elements", "paged"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
self.mode = mode
if not satisfies_min_unstructured_version("0.5.4"):
if "strategy" in unstructured_kwargs:
unstructured_kwargs.pop("strategy")
self.unstructured_kwargs = unstructured_kwargs
@abstractmethod
def _get_elements(self) -> List:
"""Get elements."""
@abstractmethod
def _get_metadata(self) -> dict:
"""Get metadata."""
[docs] def load(self) -> List[Document]:
"""Load file."""
elements = self._get_elements()
if self.mode == "elements":
docs: List[Document] = list()
for element in elements:
metadata = self._get_metadata()
# NOTE(MthwRobinson) - the attribute check is for backward compatibility
# with unstructured<0.4.9. The metadata attribute was added in 0.4.9.
if hasattr(element, "metadata"):
metadata.update(element.metadata.to_dict())
if hasattr(element, "category"):
metadata["category"] = element.category
docs.append(Document(page_content=str(element), metadata=metadata))
elif self.mode == "paged":
text_dict: Dict[int, str] = {}
meta_dict: Dict[int, Dict] = {}
for idx, element in enumerate(elements):
metadata = self._get_metadata()
if hasattr(element, "metadata"):
metadata.update(element.metadata.to_dict())
page_number = metadata.get("page_number", 1)
# Check if this page_number already exists in docs_dict
if page_number not in text_dict:
# If not, create new entry with initial text and metadata
text_dict[page_number] = str(element) + "\n\n"
meta_dict[page_number] = metadata
else:
# If exists, append to text and update the metadata
text_dict[page_number] += str(element) + "\n\n"
meta_dict[page_number].update(metadata)
# Convert the dict to a list of Document objects
docs = [
Document(page_content=text_dict[key], metadata=meta_dict[key])
for key in text_dict.keys()
]
elif self.mode == "single":
metadata = self._get_metadata()
text = "\n\n".join([str(el) for el in elements])
docs = [Document(page_content=text, metadata=metadata)]
else:
raise ValueError(f"mode of {self.mode} not supported.")
return docs
[docs]class UnstructuredFileLoader(UnstructuredBaseLoader):
"""Loader that uses unstructured to load files."""
def __init__(
self,
file_path: Union[str, List[str]],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
self.file_path = file_path
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.auto import partition
        return partition(filename=self.file_path, **self.unstructured_kwargs)

    def _get_metadata(self) -> dict:
        return {"source": self.file_path}
[docs]def get_elements_from_api(
    file_path: Union[str, List[str], None] = None,
    file: Union[IO, Sequence[IO], None] = None,
    api_url: str = "https://api.unstructured.io/general/v0/general",
    api_key: str = "",
    **unstructured_kwargs: Any,
) -> List:
    """Retrieves a list of elements from the Unstructured API."""
    if isinstance(file, collections.abc.Sequence) or isinstance(file_path, list):
        from unstructured.partition.api import partition_multiple_via_api

        _doc_elements = partition_multiple_via_api(
            filenames=file_path,
            files=file,
            api_key=api_key,
            api_url=api_url,
            **unstructured_kwargs,
        )
        elements = []
        for _elements in _doc_elements:
            elements.extend(_elements)
        return elements
    else:
        from unstructured.partition.api import partition_via_api

        return partition_via_api(
            filename=file_path,
            file=file,
            api_key=api_key,
            api_url=api_url,
            **unstructured_kwargs,
        )
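
A direct-call sketch (the API key and file names are placeholders; a real key from unstructured.io is required):

elements = get_elements_from_api(file_path="example.pdf", api_key="MY_API_KEY")

# A list of paths (or a sequence of file objects) takes the
# partition_multiple_via_api branch and flattens the per-document results:
elements = get_elements_from_api(
    file_path=["example.pdf", "other.pdf"], api_key="MY_API_KEY"
)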
[docs]class UnstructuredAPIFileLoader(UnstructuredFileLoader):
    """Loader that uses the unstructured web API to load files."""

    def __init__(
        self,
        file_path: Union[str, List[str]] = "",
        mode: str = "single",
        url: str = "https://api.unstructured.io/general/v0/general",
        api_key: str = "",
        **unstructured_kwargs: Any,
    ):
        """Initialize with file path."""
        if isinstance(file_path, str):
            validate_unstructured_version(min_unstructured_version="0.6.2")
        else:
            validate_unstructured_version(min_unstructured_version="0.6.3")
        self.url = url
        self.api_key = api_key
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_metadata(self) -> dict:
        return {"source": self.file_path}

    def _get_elements(self) -> List:
        return get_elements_from_api(
            file_path=self.file_path,
            api_key=self.api_key,
            api_url=self.url,
            **self.unstructured_kwargs,
        )
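
A usage sketch (placeholder key; per the version checks above, a single path needs unstructured>=0.6.2 and a list of paths needs >=0.6.3):

loader = UnstructuredAPIFileLoader(
    file_path="example.pdf",
    api_key="MY_API_KEY",
    mode="elements",
)
docs = loader.load()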
[docs]class UnstructuredFileIOLoader(UnstructuredBaseLoader):
    """Loader that uses unstructured to load file IO objects."""

    def __init__(
        self,
        file: Union[IO, Sequence[IO]],
        mode: str = "single",
        **unstructured_kwargs: Any,
    ):
        """Initialize with a file object."""
        self.file = file
        super().__init__(mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.auto import partition

        return partition(file=self.file, **self.unstructured_kwargs)

    def _get_metadata(self) -> dict:
        return {}
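
A usage sketch: this loader takes an already-open file object rather than a path, which is why _get_metadata has no "source" to report:

with open("example.pdf", "rb") as f:
    loader = UnstructuredFileIOLoader(f, mode="elements")
    docs = loader.load()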
[docs]class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader):
    """Loader that uses the unstructured web API to load file IO objects."""

    def __init__(
        self,
        file: Union[IO, Sequence[IO]],
        mode: str = "single",
        url: str = "https://api.unstructured.io/general/v0/general",
        api_key: str = "",
        **unstructured_kwargs: Any,
    ):
        """Initialize with a file object."""
        if isinstance(file, collections.abc.Sequence):
            validate_unstructured_version(min_unstructured_version="0.6.3")
        if file:
            validate_unstructured_version(min_unstructured_version="0.6.2")
        self.url = url
        self.api_key = api_key
        super().__init__(file=file, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        return get_elements_from_api(
            file=self.file,
            api_key=self.api_key,
            api_url=self.url,
            **self.unstructured_kwargs,
        )
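
A usage sketch (placeholder key; per the version check above, a sequence of file objects needs unstructured>=0.6.3 and is partitioned in a single API call):

with open("a.pdf", "rb") as f1, open("b.pdf", "rb") as f2:
    loader = UnstructuredAPIFileIOLoader([f1, f2], api_key="MY_API_KEY")
    docs = loader.load()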
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/unstructured.html
Source code for langchain.document_loaders.merge
from typing import Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class MergedDataLoader(BaseLoader):
    """Merge documents from a list of loaders."""

    def __init__(self, loaders: List):
        """Initialize with a list of loaders."""
        self.loaders = loaders

    [docs] def lazy_load(self) -> Iterator[Document]:
        """Lazy load docs from each individual loader."""
        for loader in self.loaders:
            # Check if lazy_load is implemented
            try:
                data = loader.lazy_load()
            except NotImplementedError:
                data = loader.load()
            for document in data:
                yield document

    [docs] def load(self) -> List[Document]:
        """Load docs."""
        return list(self.lazy_load())
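
A usage sketch combining two loaders (the file names are placeholders; any BaseLoader works, and documents are yielded in loader order):

loader_all = MergedDataLoader(
    loaders=[UnstructuredFileLoader("a.pdf"), UnstructuredFileLoader("b.pdf")]
)
docs = loader_all.load()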
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/merge.html
Source code for langchain.document_loaders.apify_dataset
"""Logic for loading documents from Apify datasets."""
from typing import Any, Callable, Dict, List
from pydantic import BaseModel, root_validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ApifyDatasetLoader(BaseLoader, BaseModel):
    """Logic for loading documents from Apify datasets."""

    apify_client: Any
    dataset_id: str
    """The ID of the dataset on the Apify platform."""
    dataset_mapping_function: Callable[[Dict], Document]
    """A custom function that takes a single dictionary (an Apify dataset item)
    and converts it to an instance of the Document class."""

    def __init__(
        self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document]
    ):
        """Initialize the loader with an Apify dataset ID and a mapping function.

        Args:
            dataset_id (str): The ID of the dataset on the Apify platform.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an instance
                of the Document class.
        """
        super().__init__(
            dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function
        )

    [docs] @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate environment."""
        try:
            from apify_client import ApifyClient

            values["apify_client"] = ApifyClient()
        except ImportError:
            raise ImportError(
                "Could not import apify-client Python package. "
                "Please install it with `pip install apify-client`."
            )
        return values
    [docs] def load(self) -> List[Document]:
        """Load documents."""
        dataset_items = (
            self.apify_client.dataset(self.dataset_id).list_items(clean=True).items
        )
        return list(map(self.dataset_mapping_function, dataset_items))
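
A usage sketch (the dataset ID and item keys are placeholders; ApifyClient authenticates via the APIFY_API_TOKEN environment variable):

loader = ApifyDatasetLoader(
    dataset_id="YOUR_DATASET_ID",
    dataset_mapping_function=lambda item: Document(
        page_content=item["text"], metadata={"source": item["url"]}
    ),
)
docs = loader.load()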
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/apify_dataset.html
Source code for langchain.document_loaders.hugging_face_dataset
"""Loader that loads HuggingFace datasets."""
from typing import Iterator, List, Mapping, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class HuggingFaceDatasetLoader(BaseLoader):
    """Load documents from Hugging Face Hub datasets."""

    def __init__(
        self,
        path: str,
        page_content_column: str = "text",
        name: Optional[str] = None,
        data_dir: Optional[str] = None,
        data_files: Optional[
            Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]
        ] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: Optional[bool] = None,
        save_infos: bool = False,
        use_auth_token: Optional[Union[bool, str]] = None,
        num_proc: Optional[int] = None,
    ):
        """Initialize the HuggingFaceDatasetLoader.

        Args:
            path: Path or name of the dataset.
            page_content_column: Page content column name.
            name: Name of the dataset configuration.
            data_dir: Data directory of the dataset configuration.
            data_files: Path(s) to source data file(s).
            cache_dir: Directory to read/write data.
            keep_in_memory: Whether to copy the dataset in-memory.
            save_infos: Save the dataset information (checksums/size/splits/...).
            use_auth_token: Bearer token for remote files on the Datasets Hub.
            num_proc: Number of processes.
        """
        self.path = path
        self.page_content_column = page_content_column
        self.name = name
        self.data_dir = data_dir
        self.data_files = data_files
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.save_infos = save_infos
        self.use_auth_token = use_auth_token
        self.num_proc = num_proc

    [docs] def lazy_load(
        self,
    ) -> Iterator[Document]:
        """Load documents lazily."""
        try:
            from datasets import load_dataset
        except ImportError:
            raise ImportError(
                "Could not import datasets python package. "
                "Please install it with `pip install datasets`."
            )
        dataset = load_dataset(
            path=self.path,
            name=self.name,
            data_dir=self.data_dir,
            data_files=self.data_files,
            cache_dir=self.cache_dir,
            keep_in_memory=self.keep_in_memory,
            save_infos=self.save_infos,
            use_auth_token=self.use_auth_token,
            num_proc=self.num_proc,
        )
        yield from (
            Document(
                page_content=row.pop(self.page_content_column),
                metadata=row,
            )
            for key in dataset.keys()
            for row in dataset[key]
        )

    [docs] def load(self) -> List[Document]:
        """Load documents."""
        return list(self.lazy_load())
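
A usage sketch ("imdb" is a real Hub dataset with a "text" column; any path/column pair works):

loader = HuggingFaceDatasetLoader(path="imdb", page_content_column="text")
docs = loader.load()
# Every remaining column ("label", ...) ends up in each Document's metadata.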
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/hugging_face_dataset.html
Source code for langchain.document_loaders.epub
"""Loader that loads EPub files."""
from typing import List
from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)
[docs]class UnstructuredEPubLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load epub files."""

    def _get_elements(self) -> List:
        min_unstructured_version = "0.5.4"
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
                "Partitioning epub files is only supported in "
                f"unstructured>={min_unstructured_version}."
            )
        from unstructured.partition.epub import partition_epub

        return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
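
A usage sketch (the file name is a placeholder; requires unstructured>=0.5.4 per the check above):

loader = UnstructuredEPubLoader("example.epub", mode="elements")
docs = loader.load()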
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/epub.html
Source code for langchain.document_loaders.discord
"""Load from Discord chat dump"""
from __future__ import annotations
from typing import TYPE_CHECKING, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
    import pandas as pd
[docs]class DiscordChatLoader(BaseLoader):
    """Load Discord chat logs."""

    def __init__(self, chat_log: pd.DataFrame, user_id_col: str = "ID"):
        """Initialize with a Pandas DataFrame containing chat logs."""
        # pandas is only imported under TYPE_CHECKING above, so import it at
        # runtime before the isinstance check to avoid a NameError.
        import pandas as pd

        if not isinstance(chat_log, pd.DataFrame):
            raise ValueError(
                f"Expected chat_log to be a pd.DataFrame, got {type(chat_log)}"
            )
        self.chat_log = chat_log
        self.user_id_col = user_id_col

    [docs] def load(self) -> List[Document]:
        """Load all chat messages."""
        result = []
        for _, row in self.chat_log.iterrows():
            user_id = row[self.user_id_col]
            metadata = row.to_dict()
            metadata.pop(self.user_id_col)
            result.append(Document(page_content=user_id, metadata=metadata))
        return result
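
A usage sketch (the DataFrame contents are placeholders; note that page_content holds the user ID while every other column lands in metadata):

import pandas as pd

chat_log = pd.DataFrame(
    {"ID": ["user#1234"], "Content": ["hello"], "Timestamp": ["2023-01-01"]}
)
loader = DiscordChatLoader(chat_log, user_id_col="ID")
docs = loader.load()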
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/discord.html