""" self.file_path = path self.include_outputs = include_outputs self.max_output_length = max_output_length self.remove_newline = remove_newline self.traceback = traceback [docs] def load( self, ) -> List[Document]: """Load documents.""" try: import pandas as pd except ImportError: raise ImportError( "pandas is needed for Notebook Loader, " "please install with `pip install pandas`" ) p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) data = pd.json_normalize(d["cells"]) filtered_data = data[["cell_type", "source", "outputs"]] if self.remove_newline: filtered_data = filtered_data.applymap(remove_newlines) text = filtered_data.apply( lambda x: concatenate_cells( x, self.include_outputs, self.max_output_length, self.traceback ), axis=1, ).str.cat(sep=" ") metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/notebook.html
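A minimal usage sketch for the loader this fragment belongs to (NotebookLoader; "example.ipynb" is a placeholder path):

from langchain.document_loaders import NotebookLoader

loader = NotebookLoader(
    "example.ipynb",
    include_outputs=True,
    max_output_length=20,
    remove_newline=True,
)
docs = loader.load()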
Source code for langchain.document_loaders.imsdb

"""Loads IMSDb."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class IMSDbLoader(WebBaseLoader):
    """Loads IMSDb webpages."""

    def load(self) -> List[Document]:
        """Load webpage."""
        soup = self.scrape()
        text = soup.select_one("td[class='scrtext']").text
        metadata = {"source": self.web_path}
        return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/imsdb.html
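A usage sketch; the script URL is illustrative, and any IMSDb script page should work:

from langchain.document_loaders import IMSDbLoader

loader = IMSDbLoader("https://imsdb.com/scripts/BlacKkKlansman.html")
docs = loader.load()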
Source code for langchain.document_loaders.markdown

"""Loads Markdown files."""
from typing import List

from langchain.document_loaders.unstructured import UnstructuredFileLoader


class UnstructuredMarkdownLoader(UnstructuredFileLoader):
    """Loader that uses Unstructured to load markdown files.

    You can run the loader in one of two modes: "single" and "elements".
    If you use "single" mode, the document will be returned as a single
    langchain Document object. If you use "elements" mode, the unstructured
    library will split the document into elements such as Title and
    NarrativeText. You can pass in additional unstructured kwargs after mode
    to apply different unstructured settings.

    Examples
    --------
    from langchain.document_loaders import UnstructuredMarkdownLoader

    loader = UnstructuredMarkdownLoader(
        "example.md", mode="elements", strategy="fast",
    )
    docs = loader.load()

    References
    ----------
    https://unstructured-io.github.io/unstructured/bricks.html#partition-md
    """

    def _get_elements(self) -> List:
        from unstructured.__version__ import __version__ as __unstructured_version__
        from unstructured.partition.md import partition_md

        # NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
        # versions of unstructured like 0.4.17-dev1
        _unstructured_version = __unstructured_version__.split("-")[0]
        unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])

        if unstructured_version < (0, 4, 16):
            raise ValueError(
                f"You are on unstructured version {__unstructured_version__}. "
                "Partitioning markdown files is only supported in unstructured>=0.4.16."
            )

        return partition_md(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/markdown.html
Source code for langchain.document_loaders.s3_directory

"""Loading logic for loading documents from an AWS S3 directory."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.s3_file import S3FileLoader


class S3DirectoryLoader(BaseLoader):
    """Loading logic for loading documents from an AWS S3 bucket."""

    def __init__(self, bucket: str, prefix: str = ""):
        """Initialize with bucket and key name.

        Args:
            bucket: The name of the S3 bucket.
            prefix: The prefix of the S3 key. Defaults to "".
        """
        self.bucket = bucket
        self.prefix = prefix

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            import boto3
        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        s3 = boto3.resource("s3")
        bucket = s3.Bucket(self.bucket)
        docs = []
        for obj in bucket.objects.filter(Prefix=self.prefix):
            loader = S3FileLoader(self.bucket, obj.key)
            docs.extend(loader.load())
        return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/s3_directory.html
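A usage sketch, assuming boto3 can resolve AWS credentials through its default chain (environment variables, shared config file, or instance profile); the bucket name and prefix are placeholders:

from langchain.document_loaders import S3DirectoryLoader

loader = S3DirectoryLoader("my-bucket", prefix="reports/")
docs = loader.load()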
Source code for langchain.document_loaders.merge

from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class MergedDataLoader(BaseLoader):
    """Merge documents from a list of loaders"""

    def __init__(self, loaders: List):
        """Initialize with a list of loaders"""
        self.loaders = loaders

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load docs from each individual loader."""
        for loader in self.loaders:
            # Check if lazy_load is implemented
            try:
                data = loader.lazy_load()
            except NotImplementedError:
                data = loader.load()
            for document in data:
                yield document

    def load(self) -> List[Document]:
        """Load docs."""
        return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/merge.html
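A usage sketch that merges two loaders defined elsewhere in langchain; the file path and URL are placeholders:

from langchain.document_loaders import TextLoader, WebBaseLoader
from langchain.document_loaders.merge import MergedDataLoader

loader_all = MergedDataLoader(
    loaders=[TextLoader("notes.txt"), WebBaseLoader("https://example.com")]
)
docs = loader_all.load()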
Source code for langchain.document_loaders.json_loader

"""Loads data from JSON."""
import json
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class JSONLoader(BaseLoader):
    """Loads a JSON file using a jq schema.

    Example:
        [{"text": ...}, {"text": ...}, {"text": ...}] -> schema = .[].text
        {"key": [{"text": ...}, {"text": ...}, {"text": ...}]} -> schema = .key[].text
        ["", "", ""] -> schema = .[]
    """

    def __init__(
        self,
        file_path: Union[str, Path],
        jq_schema: str,
        content_key: Optional[str] = None,
        metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None,
        text_content: bool = True,
        json_lines: bool = False,
    ):
        """Initialize the JSONLoader.

        Args:
            file_path (Union[str, Path]): The path to the JSON or JSON Lines file.
            jq_schema (str): The jq schema to use to extract the data or text from
                the JSON.
            content_key (str): The key to use to extract the content from the JSON
                if the jq_schema results in a list of objects (dict).
            metadata_func (Callable[Dict, Dict]): A function that takes in the JSON
                object extracted by the jq_schema and the default metadata and
                returns a dict of the updated metadata.
            text_content (bool): Boolean flag to indicate whether the content is in
                string format, defaults to True.
            json_lines (bool): Boolean flag to indicate whether the input is in
                JSON Lines format.
        """
        try:
            import jq  # noqa:F401
        except ImportError:
            raise ImportError(
                "jq package not found, please install it with `pip install jq`"
            )

        self.file_path = Path(file_path).resolve()
        self._jq_schema = jq.compile(jq_schema)
        self._content_key = content_key
        self._metadata_func = metadata_func
        self._text_content = text_content
        self._json_lines = json_lines

    def load(self) -> List[Document]:
        """Load and return documents from the JSON file."""
        docs: List[Document] = []
        if self._json_lines:
            with self.file_path.open(encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if line:
                        self._parse(line, docs)
        else:
            self._parse(self.file_path.read_text(), docs)
        return docs

    def _parse(self, content: str, docs: List[Document]) -> None:
        """Convert given content to documents."""
        data = self._jq_schema.input(json.loads(content))

        # Perform some validation
        # This is not a perfect validation, but it should catch most cases
        # and prevent the user from getting a cryptic error later on.
        if self._content_key is not None:
            self._validate_content_key(data)

        for i, sample in enumerate(data, len(docs) + 1):
            metadata = dict(
                source=str(self.file_path),
                seq_num=i,
            )
            text = self._get_text(sample=sample, metadata=metadata)
            docs.append(Document(page_content=text, metadata=metadata))

    def _get_text(self, sample: Any, metadata: dict) -> str:
        """Convert sample to string format"""
        if self._content_key is not None:
            content = sample.get(self._content_key)
            if self._metadata_func is not None:
                # We pass in the metadata dict to the metadata_func
                # so that the user can customize the default metadata
                # based on the content of the JSON object.
                metadata = self._metadata_func(sample, metadata)
        else:
            content = sample

        if self._text_content and not isinstance(content, str):
            raise ValueError(
                f"Expected page_content is string, got {type(content)} instead. \
                    Set `text_content=False` if the desired input for \
                    `page_content` is not a string"
            )

        # In case the text is None, set it to an empty string
        elif isinstance(content, str):
            return content
        elif isinstance(content, dict):
            return json.dumps(content) if content else ""
        else:
            return str(content) if content is not None else ""

    def _validate_content_key(self, data: Any) -> None:
        """Check if a content key is valid"""
        sample = data.first()
        if not isinstance(sample, dict):
            raise ValueError(
                f"Expected the jq schema to result in a list of objects (dict), \
                    so sample must be a dict but got `{type(sample)}`"
            )

        if sample.get(self._content_key) is None:
            raise ValueError(
                f"Expected the jq schema to result in a list of objects (dict) \
                    with the key `{self._content_key}`"
            )

        if self._metadata_func is not None:
            sample_metadata = self._metadata_func(sample, {})
            if not isinstance(sample_metadata, dict):
                raise ValueError(
                    f"Expected the metadata_func to return a dict but got \
                        `{type(sample_metadata)}`"
                )
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/json_loader.html
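A usage sketch tying the jq schemas in the class docstring to a concrete file; "chat.json" and its layout are assumed for illustration:

from langchain.document_loaders import JSONLoader

# Assumed layout: {"messages": [{"content": "...", "sender": "..."}, ...]}
loader = JSONLoader(
    file_path="chat.json",
    jq_schema=".messages[]",
    content_key="content",
    metadata_func=lambda record, metadata: {**metadata, "sender": record.get("sender")},
)
docs = loader.load()  # one Document per message, with source/seq_num/sender metadata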
Source code for langchain.document_loaders.git

import os
from typing import Callable, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class GitLoader(BaseLoader):
    """Loads files from a Git repository into a list of documents.

    The repository can be local on disk, available at `repo_path`, or remote at
    `clone_url`, in which case it will be cloned to `repo_path`.
    Currently, only text files are supported.

    Each document represents one file in the repository. The `repo_path` points
    to the local Git repository, and the `branch` specifies the branch to load
    files from. By default, it loads from the `main` branch.
    """

    def __init__(
        self,
        repo_path: str,
        clone_url: Optional[str] = None,
        branch: Optional[str] = "main",
        file_filter: Optional[Callable[[str], bool]] = None,
    ):
        """
        Args:
            repo_path: The path to the Git repository.
            clone_url: Optional. The URL to clone the repository from.
            branch: Optional. The branch to load files from. Defaults to `main`.
            file_filter: Optional. A function that takes a file path and returns
                a boolean indicating whether to load the file. Defaults to None.
        """
        self.repo_path = repo_path
        self.clone_url = clone_url
        self.branch = branch
        self.file_filter = file_filter

    def load(self) -> List[Document]:
        try:
            from git import Blob, Repo  # type: ignore
        except ImportError as ex:
            raise ImportError(
                "Could not import git python package. "
                "Please install it with `pip install GitPython`."
            ) from ex

        if not os.path.exists(self.repo_path) and self.clone_url is None:
            raise ValueError(f"Path {self.repo_path} does not exist")
        elif self.clone_url:
            # If the repo_path already contains a git repository, verify that it's the
            # same repository as the one we're trying to clone.
            if os.path.isdir(os.path.join(self.repo_path, ".git")):
                repo = Repo(self.repo_path)
                # If the existing repository is not the same as the one we're trying to
                # clone, raise an error.
                if repo.remotes.origin.url != self.clone_url:
                    raise ValueError(
                        "A different repository is already cloned at this path."
                    )
            else:
                repo = Repo.clone_from(self.clone_url, self.repo_path)
            repo.git.checkout(self.branch)
        else:
            repo = Repo(self.repo_path)
            repo.git.checkout(self.branch)

        docs: List[Document] = []

        for item in repo.tree().traverse():
            if not isinstance(item, Blob):
                continue

            file_path = os.path.join(self.repo_path, item.path)

            ignored_files = repo.ignored([file_path])  # type: ignore
            if len(ignored_files):
                continue

            # uses filter to skip files
            if self.file_filter and not self.file_filter(file_path):
                continue

            rel_file_path = os.path.relpath(file_path, self.repo_path)
            try:
                with open(file_path, "rb") as f:
                    content = f.read()
                    file_type = os.path.splitext(item.name)[1]

                    # loads only text files
                    try:
                        text_content = content.decode("utf-8")
                    except UnicodeDecodeError:
                        continue

                    metadata = {
                        "source": rel_file_path,
                        "file_path": rel_file_path,
                        "file_name": item.name,
                        "file_type": file_type,
                    }

                    doc = Document(page_content=text_content, metadata=metadata)
                    docs.append(doc)
            except Exception as e:
                print(f"Error reading file {file_path}: {e}")

        return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/git.html
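A usage sketch that clones a remote repository and loads only Python files; the URL, paths, and branch are illustrative:

from langchain.document_loaders import GitLoader

loader = GitLoader(
    repo_path="./example_data/test_repo",
    clone_url="https://github.com/hwchase17/langchain",
    branch="master",
    file_filter=lambda file_path: file_path.endswith(".py"),
)
docs = loader.load()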
Source code for langchain.document_loaders.wikipedia

from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.wikipedia import WikipediaAPIWrapper


class WikipediaLoader(BaseLoader):
    """Loads a query result from www.wikipedia.org into a list of Documents.

    The hard limit on the number of downloaded Documents is 300 for now.
    Each wiki page represents one Document.
    """

    def __init__(
        self,
        query: str,
        lang: str = "en",
        load_max_docs: Optional[int] = 100,
        load_all_available_meta: Optional[bool] = False,
        doc_content_chars_max: Optional[int] = 4000,
    ):
        """
        Initializes a new instance of the WikipediaLoader class.

        Args:
            query (str): The query string to search on Wikipedia.
            lang (str, optional): The language code for the Wikipedia language
                edition. Defaults to "en".
            load_max_docs (int, optional): The maximum number of documents to load.
                Defaults to 100.
            load_all_available_meta (bool, optional): Indicates whether to load all
                available metadata for each document. Defaults to False.
            doc_content_chars_max (int, optional): The maximum number of characters
                for the document content. Defaults to 4000.
        """
        self.query = query
        self.lang = lang
        self.load_max_docs = load_max_docs
        self.load_all_available_meta = load_all_available_meta
        self.doc_content_chars_max = doc_content_chars_max

    def load(self) -> List[Document]:
        """
        Loads the query result from Wikipedia into a list of Documents.

        Returns:
            List[Document]: A list of Document objects representing the loaded
                Wikipedia pages.
        """
        client = WikipediaAPIWrapper(
            lang=self.lang,
            top_k_results=self.load_max_docs,
            load_all_available_meta=self.load_all_available_meta,
            doc_content_chars_max=self.doc_content_chars_max,
        )
        docs = client.load(self.query)
        return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/wikipedia.html
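A usage sketch; the query is arbitrary, and the underlying WikipediaAPIWrapper requires `pip install wikipedia`:

from langchain.document_loaders import WikipediaLoader

docs = WikipediaLoader(query="LangChain", load_max_docs=2).load()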
Source code for langchain.document_loaders.nuclia

"""Extract text from any file type."""
import json
import uuid
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.tools.nuclia.tool import NucliaUnderstandingAPI


class NucliaLoader(BaseLoader):
    """Extract text from any file type."""

    def __init__(self, path: str, nuclia_tool: NucliaUnderstandingAPI):
        self.nua = nuclia_tool
        self.id = str(uuid.uuid4())
        self.nua.run({"action": "push", "id": self.id, "path": path, "text": None})

    def load(self) -> List[Document]:
        """Load documents."""
        data = self.nua.run(
            {"action": "pull", "id": self.id, "path": None, "text": None}
        )
        if not data:
            return []
        obj = json.loads(data)
        text = obj["extracted_text"][0]["body"]["text"]
        print(text)
        metadata = {
            "file": obj["file_extracted_data"][0],
            "metadata": obj["field_metadata"][0],
        }
        return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/nuclia.html
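A usage sketch, assuming a Nuclia Understanding API account is already configured for NucliaUnderstandingAPI; the constructor argument and file path shown here are illustrative:

from langchain.document_loaders.nuclia import NucliaLoader
from langchain.tools.nuclia.tool import NucliaUnderstandingAPI

nua = NucliaUnderstandingAPI(enable_ml=False)  # assumes Nuclia zone/key env configuration
loader = NucliaLoader("./interview.mp4", nua)
docs = loader.load()  # returns [] until Nuclia has finished processing the file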
Source code for langchain.document_loaders.airbyte_json

"""Loads local airbyte json files."""
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import stringify_dict


class AirbyteJSONLoader(BaseLoader):
    """Loads local airbyte json files."""

    def __init__(self, file_path: str):
        """Initialize with a file path. This should start with '/tmp/airbyte_local/'."""
        self.file_path = file_path
        """Path to the file containing the json lines to load."""

    def load(self) -> List[Document]:
        text = ""
        for line in open(self.file_path, "r"):
            data = json.loads(line)["_airbyte_data"]
            text += stringify_dict(data)
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airbyte_json.html
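A usage sketch; the file name is a placeholder following the `/tmp/airbyte_local/` convention mentioned in `__init__` (the exact name depends on your Airbyte sync):

from langchain.document_loaders import AirbyteJSONLoader

loader = AirbyteJSONLoader("/tmp/airbyte_local/json_data/_airbyte_raw_pokemon.jsonl")
docs = loader.load()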
Source code for langchain.document_loaders.obs_directory

# coding:utf-8
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.obs_file import OBSFileLoader


class OBSDirectoryLoader(BaseLoader):
    """Loading logic for loading documents from Huawei OBS."""

    def __init__(
        self,
        bucket: str,
        endpoint: str,
        config: Optional[dict] = None,
        prefix: str = "",
    ):
        """Initialize the OBSDirectoryLoader with the specified settings.

        Args:
            bucket (str): The name of the OBS bucket to be used.
            endpoint (str): The endpoint URL of your OBS bucket.
            config (dict): The parameters for connecting to OBS, provided as a
                dictionary. The dictionary could have the following keys:
                - "ak" (str, optional): Your OBS access key (required if
                  `get_token_from_ecs` is False and bucket policy is not public read).
                - "sk" (str, optional): Your OBS secret key (required if
                  `get_token_from_ecs` is False and bucket policy is not public read).
                - "token" (str, optional): Your security token (required if using
                  temporary credentials).
                - "get_token_from_ecs" (bool, optional): Whether to retrieve the
                  security token from ECS. Defaults to False if not provided. If set
                  to True, `ak`, `sk`, and `token` will be ignored.
            prefix (str, optional): The prefix to be added to the OBS key.
                Defaults to "".

        Note:
            Before using this class, make sure you have registered with OBS and
            have the necessary credentials. The `ak`, `sk`, and `endpoint` values
            are mandatory unless `get_token_from_ecs` is True or the bucket policy
            is public read. `token` is required when using temporary credentials.

        Example:
            To create a new OBSDirectoryLoader:
            ```
            config = {
                "ak": "your-access-key",
                "sk": "your-secret-key"
            }
            ```
            directory_loader = OBSDirectoryLoader("your-bucket-name", "your-endpoint", config, "your-prefix")
        """  # noqa: E501
        try:
            from obs import ObsClient
        except ImportError:
            raise ImportError(
                "Could not import esdk-obs-python python package. "
                "Please install it with `pip install esdk-obs-python`."
            )
        if not config:
            config = dict()
        if config.get("get_token_from_ecs"):
            self.client = ObsClient(server=endpoint, security_provider_policy="ECS")
        else:
            self.client = ObsClient(
                access_key_id=config.get("ak"),
                secret_access_key=config.get("sk"),
                security_token=config.get("token"),
                server=endpoint,
            )
        self.bucket = bucket
        self.prefix = prefix

    def load(self) -> List[Document]:
        """Load documents."""
        max_num = 1000
        mark = None
        docs = []
        while True:
            resp = self.client.listObjects(
                self.bucket, prefix=self.prefix, marker=mark, max_keys=max_num
            )
            if resp.status < 300:
                for content in resp.body.contents:
                    loader = OBSFileLoader(self.bucket, content.key, client=self.client)
                    docs.extend(loader.load())
            if resp.body.is_truncated is True:
                mark = resp.body.next_marker
            else:
                break
        return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/obs_directory.html
Source code for langchain.document_loaders.snowflake_loader

from __future__ import annotations

from typing import Any, Dict, Iterator, List, Optional, Tuple

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class SnowflakeLoader(BaseLoader):
    """Loads a query result from Snowflake into a list of documents.

    Each document represents one row of the result. The `page_content_columns`
    are written into the `page_content` of the document. The `metadata_columns`
    are written into the `metadata` of the document. By default, all columns
    are written into the `page_content` and none into the `metadata`.
    """

    def __init__(
        self,
        query: str,
        user: str,
        password: str,
        account: str,
        warehouse: str,
        role: str,
        database: str,
        schema: str,
        parameters: Optional[Dict[str, Any]] = None,
        page_content_columns: Optional[List[str]] = None,
        metadata_columns: Optional[List[str]] = None,
    ):
        """Initialize Snowflake document loader.

        Args:
            query: The query to run in Snowflake.
            user: Snowflake user.
            password: Snowflake password.
            account: Snowflake account.
            warehouse: Snowflake warehouse.
            role: Snowflake role.
            database: Snowflake database.
            schema: Snowflake schema.
            parameters: Optional. Parameters to pass to the query.
            page_content_columns: Optional. Columns written to Document `page_content`.
            metadata_columns: Optional. Columns written to Document `metadata`.
        """
        self.query = query
        self.user = user
        self.password = password
        self.account = account
        self.warehouse = warehouse
        self.role = role
        self.database = database
        self.schema = schema
        self.parameters = parameters
        self.page_content_columns = (
            page_content_columns if page_content_columns is not None else ["*"]
        )
        self.metadata_columns = metadata_columns if metadata_columns is not None else []

    def _execute_query(self) -> List[Dict[str, Any]]:
        try:
            import snowflake.connector
        except ImportError as ex:
            raise ImportError(
                "Could not import snowflake-connector-python package. "
                "Please install it with `pip install snowflake-connector-python`."
            ) from ex

        conn = snowflake.connector.connect(
            user=self.user,
            password=self.password,
            account=self.account,
            warehouse=self.warehouse,
            role=self.role,
            database=self.database,
            schema=self.schema,
            parameters=self.parameters,
        )
        try:
            cur = conn.cursor()
            cur.execute("USE DATABASE " + self.database)
            cur.execute("USE SCHEMA " + self.schema)
            cur.execute(self.query, self.parameters)
            query_result = cur.fetchall()
            column_names = [column[0] for column in cur.description]
            query_result = [dict(zip(column_names, row)) for row in query_result]
        except Exception as e:
            print(f"An error occurred: {e}")
            query_result = []
        finally:
            cur.close()
        return query_result

    def _get_columns(
        self, query_result: List[Dict[str, Any]]
    ) -> Tuple[List[str], List[str]]:
        page_content_columns = (
            self.page_content_columns if self.page_content_columns else []
        )
        metadata_columns = self.metadata_columns if self.metadata_columns else []
        if page_content_columns is None and query_result:
            page_content_columns = list(query_result[0].keys())
        if metadata_columns is None:
            metadata_columns = []
        return page_content_columns or [], metadata_columns

    def lazy_load(self) -> Iterator[Document]:
        query_result = self._execute_query()
        if isinstance(query_result, Exception):
            print(f"An error occurred during the query: {query_result}")
            return []
        page_content_columns, metadata_columns = self._get_columns(query_result)
        if "*" in page_content_columns:
            page_content_columns = list(query_result[0].keys())
        for row in query_result:
            page_content = "\n".join(
                f"{k}: {v}" for k, v in row.items() if k in page_content_columns
            )
            metadata = {k: v for k, v in row.items() if k in metadata_columns}
            doc = Document(page_content=page_content, metadata=metadata)
            yield doc

    def load(self) -> List[Document]:
        """Load data into document objects."""
        return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/snowflake_loader.html
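A usage sketch with placeholder credentials; splitting columns between `page_content` and `metadata` follows the class docstring:

from langchain.document_loaders import SnowflakeLoader

loader = SnowflakeLoader(
    query="SELECT text, survey_id FROM my_table LIMIT 10;",
    user="<user>",
    password="<password>",
    account="<account>",
    warehouse="<warehouse>",
    role="<role>",
    database="<database>",
    schema="<schema>",
    page_content_columns=["TEXT"],
    metadata_columns=["SURVEY_ID"],
)
docs = loader.load()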
Source code for langchain.document_loaders.etherscan

import os
import re
from typing import Iterator, List

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class EtherscanLoader(BaseLoader):
    """
    Load transactions from an account on Ethereum mainnet.

    The loader uses the Etherscan API to interact with Ethereum mainnet.
    The ETHERSCAN_API_KEY environment variable must be set to use this loader.
    """

    def __init__(
        self,
        account_address: str,
        api_key: str = "docs-demo",
        filter: str = "normal_transaction",
        page: int = 1,
        offset: int = 10,
        start_block: int = 0,
        end_block: int = 99999999,
        sort: str = "desc",
    ):
        self.account_address = account_address
        self.api_key = os.environ.get("ETHERSCAN_API_KEY") or api_key
        self.filter = filter
        self.page = page
        self.offset = offset
        self.start_block = start_block
        self.end_block = end_block
        self.sort = sort

        if not self.api_key:
            raise ValueError("Etherscan API key not provided")
        if not re.match(r"^0x[a-fA-F0-9]{40}$", self.account_address):
            raise ValueError(f"Invalid contract address {self.account_address}")
        if filter not in [
            "normal_transaction",
            "internal_transaction",
            "erc20_transaction",
            "eth_balance",
            "erc721_transaction",
            "erc1155_transaction",
        ]:
            raise ValueError(f"Invalid filter {filter}")

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load Documents from table."""
        result = []
        if self.filter == "normal_transaction":
            result = self.getNormTx()
        elif self.filter == "internal_transaction":
            result = self.getInternalTx()
        elif self.filter == "erc20_transaction":
            result = self.getERC20Tx()
        elif self.filter == "eth_balance":
            result = self.getEthBalance()
        elif self.filter == "erc721_transaction":
            result = self.getERC721Tx()
        elif self.filter == "erc1155_transaction":
            result = self.getERC1155Tx()
        else:
            raise ValueError(f"Invalid filter {self.filter}")
        for doc in result:
            yield doc

    def load(self) -> List[Document]:
        """Load transactions from a specific account via Etherscan."""
        return list(self.lazy_load())

    def getNormTx(self) -> List[Document]:
        url = (
            f"https://api.etherscan.io/api?module=account&action=txlist&address={self.account_address}"
            f"&startblock={self.start_block}&endblock={self.end_block}&page={self.page}"
            f"&offset={self.offset}&sort={self.sort}&apikey={self.api_key}"
        )
        try:
            response = requests.get(url)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            print("Error occurred while making the request:", e)
        items = response.json()["result"]
        result = []
        if len(items) == 0:
            return [Document(page_content="")]
        for item in items:
            content = str(item)
            metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
            result.append(Document(page_content=content, metadata=metadata))
        print(len(result))
        return result

    def getEthBalance(self) -> List[Document]:
        url = (
            f"https://api.etherscan.io/api?module=account&action=balance"
            f"&address={self.account_address}&tag=latest&apikey={self.api_key}"
        )
        try:
            response = requests.get(url)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            print("Error occurred while making the request:", e)
        return [Document(page_content=response.json()["result"])]

    def getInternalTx(self) -> List[Document]:
        url = (
            f"https://api.etherscan.io/api?module=account&action=txlistinternal"
            f"&address={self.account_address}&startblock={self.start_block}"
            f"&endblock={self.end_block}&page={self.page}&offset={self.offset}"
            f"&sort={self.sort}&apikey={self.api_key}"
        )
        try:
            response = requests.get(url)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            print("Error occurred while making the request:", e)
        items = response.json()["result"]
        result = []
        if len(items) == 0:
            return [Document(page_content="")]
        for item in items:
            content = str(item)
            metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
            result.append(Document(page_content=content, metadata=metadata))
        return result

    def getERC20Tx(self) -> List[Document]:
        url = (
            f"https://api.etherscan.io/api?module=account&action=tokentx"
            f"&address={self.account_address}&startblock={self.start_block}"
            f"&endblock={self.end_block}&page={self.page}&offset={self.offset}"
            f"&sort={self.sort}&apikey={self.api_key}"
        )
        try:
            response = requests.get(url)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            print("Error occurred while making the request:", e)
        items = response.json()["result"]
        result = []
        if len(items) == 0:
            return [Document(page_content="")]
        for item in items:
            content = str(item)
            metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
            result.append(Document(page_content=content, metadata=metadata))
        return result

    def getERC721Tx(self) -> List[Document]:
        url = (
            f"https://api.etherscan.io/api?module=account&action=tokennfttx"
            f"&address={self.account_address}&startblock={self.start_block}"
            f"&endblock={self.end_block}&page={self.page}&offset={self.offset}"
            f"&sort={self.sort}&apikey={self.api_key}"
        )
        try:
            response = requests.get(url)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            print("Error occurred while making the request:", e)
        items = response.json()["result"]
        result = []
        if len(items) == 0:
            return [Document(page_content="")]
        for item in items:
            content = str(item)
            metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
            result.append(Document(page_content=content, metadata=metadata))
        return result

    def getERC1155Tx(self) -> List[Document]:
        url = (
            f"https://api.etherscan.io/api?module=account&action=token1155tx"
            f"&address={self.account_address}&startblock={self.start_block}"
            f"&endblock={self.end_block}&page={self.page}&offset={self.offset}"
            f"&sort={self.sort}&apikey={self.api_key}"
        )
        try:
            response = requests.get(url)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            print("Error occurred while making the request:", e)
        items = response.json()["result"]
        result = []
        if len(items) == 0:
            return [Document(page_content="")]
        for item in items:
            content = str(item)
            metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]}
            result.append(Document(page_content=content, metadata=metadata))
        return result
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/etherscan.html
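A usage sketch; the address is a well-known public Ethereum address used purely for illustration, and the API key placeholder must be replaced:

import os

from langchain.document_loaders import EtherscanLoader

os.environ["ETHERSCAN_API_KEY"] = "<your-api-key>"

loader = EtherscanLoader(
    "0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045",
    filter="erc20_transaction",
)
docs = loader.load()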
Source code for langchain.document_loaders.open_city_data

from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class OpenCityDataLoader(BaseLoader):
    """Loads Open City data."""

    def __init__(self, city_id: str, dataset_id: str, limit: int):
        """Initialize with dataset_id.

        Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6
            e.g., city_id = data.sfgov.org
            e.g., dataset_id = vw6y-z8j6

        Args:
            city_id: The Open City city identifier.
            dataset_id: The Open City dataset identifier.
            limit: The maximum number of documents to load.
        """
        self.city_id = city_id
        self.dataset_id = dataset_id
        self.limit = limit

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load records."""
        from sodapy import Socrata

        client = Socrata(self.city_id, None)
        results = client.get(self.dataset_id, limit=self.limit)
        for record in results:
            yield Document(
                page_content=str(record),
                metadata={
                    "source": self.city_id + "_" + self.dataset_id,
                },
            )

    def load(self) -> List[Document]:
        """Load records."""
        return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/open_city_data.html
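A usage sketch using the San Francisco dataset referenced in the docstring (requires `pip install sodapy`):

from langchain.document_loaders import OpenCityDataLoader

loader = OpenCityDataLoader(
    city_id="data.sfgov.org", dataset_id="vw6y-z8j6", limit=100
)
docs = loader.load()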
Source code for langchain.document_loaders.dropbox

"""Loads data from Dropbox."""

# Prerequisites:
# 1. Create a Dropbox app.
# 2. Give the app these scope permissions: `files.metadata.read`
#    and `files.content.read`.
# 3. Generate access token: https://www.dropbox.com/developers/apps/create.
# 4. `pip install dropbox` (requires `pip install unstructured` for PDF filetype).

import os
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class DropboxLoader(BaseLoader, BaseModel):
    """Loads files from Dropbox.

    In addition to common files such as text and PDF files, it also supports
    *Dropbox Paper* files.
    """

    dropbox_access_token: str
    """Dropbox access token."""
    dropbox_folder_path: Optional[str] = None
    """The folder path to load from."""
    dropbox_file_paths: Optional[List[str]] = None
    """The file paths to load from."""
    recursive: bool = False
    """Flag to indicate whether to load files recursively from subfolders."""

    @root_validator
    def validate_inputs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that either folder_path or file_paths is set, but not both."""
        if (
            values.get("dropbox_folder_path") is not None
            and values.get("dropbox_file_paths") is not None
        ):
            raise ValueError("Cannot specify both folder_path and file_paths")
        if values.get("dropbox_folder_path") is None and not values.get(
            "dropbox_file_paths"
        ):
            raise ValueError("Must specify either folder_path or file_paths")
        return values

    def _create_dropbox_client(self) -> Any:
        """Create a Dropbox client."""
        try:
            from dropbox import Dropbox, exceptions
        except ImportError:
            raise ImportError("You must run `pip install dropbox`")

        try:
            dbx = Dropbox(self.dropbox_access_token)
            dbx.users_get_current_account()
        except exceptions.AuthError as ex:
            raise ValueError(
                "Invalid Dropbox access token. Please verify your token and try again."
            ) from ex
        return dbx

    def _load_documents_from_folder(self, folder_path: str) -> List[Document]:
        """Load documents from a Dropbox folder."""
        dbx = self._create_dropbox_client()

        try:
            from dropbox import exceptions
            from dropbox.files import FileMetadata
        except ImportError:
            raise ImportError("You must run `pip install dropbox`")

        try:
            results = dbx.files_list_folder(folder_path, recursive=self.recursive)
        except exceptions.ApiError as ex:
            raise ValueError(
                f"Could not list files in the folder: {folder_path}. "
                "Please verify the folder path and try again."
            ) from ex

        files = [entry for entry in results.entries if isinstance(entry, FileMetadata)]
        documents = [
            doc
            for doc in (self._load_file_from_path(file.path_display) for file in files)
            if doc is not None
        ]
        return documents

    def _load_file_from_path(self, file_path: str) -> Optional[Document]:
        """Load a file from a Dropbox path."""
        dbx = self._create_dropbox_client()

        try:
            from dropbox import exceptions
        except ImportError:
            raise ImportError("You must run `pip install dropbox`")

        try:
            file_metadata = dbx.files_get_metadata(file_path)

            if file_metadata.is_downloadable:
                _, response = dbx.files_download(file_path)

            # Some types such as Paper, need to be exported.
            elif file_metadata.export_info:
                _, response = dbx.files_export(file_path, "markdown")

        except exceptions.ApiError as ex:
            raise ValueError(
                f"Could not load file: {file_path}. Please verify the file path "
                "and try again."
            ) from ex

        try:
            text = response.content.decode("utf-8")
        except UnicodeDecodeError:
            print(f"File {file_path} could not be decoded as text. Skipping.")
            file_extension = os.path.splitext(file_path)[1].lower()

            if file_extension == ".pdf":
                from langchain.document_loaders import UnstructuredPDFLoader

                # Download it to a temporary file.
                temp_dir = tempfile.TemporaryDirectory()
                temp_pdf = Path(temp_dir.name) / "tmp.pdf"
                with open(temp_pdf, mode="wb") as f:
                    f.write(response.content)

                try:
                    loader = UnstructuredPDFLoader(str(temp_pdf))
                    docs = loader.load()
                    if docs:
                        return docs[0]
                except Exception as pdf_ex:
                    print(f"Error while trying to parse PDF {file_path}: {pdf_ex}")
                    return None

            return None

        metadata = {
            "source": f"dropbox://{file_path}",
            "title": os.path.basename(file_path),
        }
        return Document(page_content=text, metadata=metadata)

    def _load_documents_from_paths(self) -> List[Document]:
        """Load documents from a list of Dropbox file paths."""
        if not self.dropbox_file_paths:
            raise ValueError("file_paths must be set")

        return [
            doc
            for doc in (
                self._load_file_from_path(file_path)
                for file_path in self.dropbox_file_paths
            )
            if doc is not None
        ]

    def load(self) -> List[Document]:
        """Load documents."""
        if self.dropbox_folder_path is not None:
            return self._load_documents_from_folder(self.dropbox_folder_path)
        else:
            return self._load_documents_from_paths()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/dropbox.html
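A usage sketch following the prerequisites listed at the top of the module; the token is a placeholder, and an empty string denotes the Dropbox root folder:

from langchain.document_loaders import DropboxLoader

loader = DropboxLoader(
    dropbox_access_token="<your-access-token>",
    dropbox_folder_path="",  # "" is the root folder
    recursive=False,
)
docs = loader.load()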
Source code for langchain.document_loaders.onedrive_file

from __future__ import annotations

import tempfile
from typing import TYPE_CHECKING, List

from pydantic import BaseModel, Field

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader

if TYPE_CHECKING:
    from O365.drive import File

CHUNK_SIZE = 1024 * 1024 * 5


class OneDriveFileLoader(BaseLoader, BaseModel):
    """Loads a file from OneDrive."""

    file: File = Field(...)
    """The file to load."""

    class Config:
        arbitrary_types_allowed = True
        """Allow arbitrary types. This is needed for the File type. Default is True.
        See https://pydantic-docs.helpmanual.io/usage/types/#arbitrary-types-allowed"""

    def load(self) -> List[Document]:
        """Load Documents"""
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.file.name}"
            self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive_file.html
Source code for langchain.document_loaders.discord

"""Load from Discord chat dump"""
from __future__ import annotations

from typing import TYPE_CHECKING, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

if TYPE_CHECKING:
    import pandas as pd


class DiscordChatLoader(BaseLoader):
    """Load Discord chat logs."""

    def __init__(self, chat_log: pd.DataFrame, user_id_col: str = "ID"):
        """Initialize with a Pandas DataFrame containing chat logs.

        Args:
            chat_log: Pandas DataFrame containing chat logs.
            user_id_col: Name of the column containing the user ID.
                Defaults to "ID".
        """
        if not isinstance(chat_log, pd.DataFrame):
            raise ValueError(
                f"Expected chat_log to be a pd.DataFrame, got {type(chat_log)}"
            )
        self.chat_log = chat_log
        self.user_id_col = user_id_col

    def load(self) -> List[Document]:
        """Load all chat messages."""
        result = []
        for _, row in self.chat_log.iterrows():
            user_id = row[self.user_id_col]
            metadata = row.to_dict()
            metadata.pop(self.user_id_col)
            result.append(Document(page_content=user_id, metadata=metadata))
        return result
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/discord.html
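A usage sketch; the DataFrame layout (an "ID" column plus arbitrary metadata columns) is illustrative:

import pandas as pd

from langchain.document_loaders import DiscordChatLoader

chat_log = pd.DataFrame(
    {"ID": ["user_1", "user_2"], "Content": ["hello", "hi there"]}
)
docs = DiscordChatLoader(chat_log, user_id_col="ID").load()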
Source code for langchain.document_loaders.apify_dataset

from typing import Any, Callable, Dict, List

from pydantic import BaseModel, root_validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class ApifyDatasetLoader(BaseLoader, BaseModel):
    """Loads datasets from Apify, a web scraping, crawling, and data extraction
    platform.

    For details, see https://docs.apify.com/platform/integrations/langchain

    Example:
        .. code-block:: python

            from langchain.document_loaders import ApifyDatasetLoader
            from langchain.schema import Document

            loader = ApifyDatasetLoader(
                dataset_id="YOUR-DATASET-ID",
                dataset_mapping_function=lambda dataset_item: Document(
                    page_content=dataset_item["text"],
                    metadata={"source": dataset_item["url"]},
                ),
            )
            documents = loader.load()
    """  # noqa: E501

    apify_client: Any
    """An instance of the ApifyClient class from the apify-client Python package."""
    dataset_id: str
    """The ID of the dataset on the Apify platform."""
    dataset_mapping_function: Callable[[Dict], Document]
    """A custom function that takes a single dictionary (an Apify dataset item)
    and converts it to an instance of the Document class."""

    def __init__(
        self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document]
    ):
        """Initialize the loader with an Apify dataset ID and a mapping function.

        Args:
            dataset_id (str): The ID of the dataset on the Apify platform.
            dataset_mapping_function (Callable): A function that takes a single
                dictionary (an Apify dataset item) and converts it to an instance
                of the Document class.
        """
        super().__init__(
            dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function
        )

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate environment.

        Args:
            values: The values to validate.
        """
        try:
            from apify_client import ApifyClient

            values["apify_client"] = ApifyClient()
        except ImportError:
            raise ImportError(
                "Could not import apify-client Python package. "
                "Please install it with `pip install apify-client`."
            )
        return values

    def load(self) -> List[Document]:
        """Load documents."""
        dataset_items = (
            self.apify_client.dataset(self.dataset_id).list_items(clean=True).items
        )
        return list(map(self.dataset_mapping_function, dataset_items))
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/apify_dataset.html
Source code for langchain.document_loaders.email

"""Loads email files."""
import os
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


class UnstructuredEmailLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load email files. Works with both
    .eml and .msg files. You can process attachments in addition to the
    e-mail message itself by passing process_attachments=True into the
    constructor for the loader. By default, attachments will be processed
    with the unstructured partition function. If you already know the
    document types of the attachments, you can specify another partitioning
    function with the attachment partitioner kwarg.

    Example
    -------
    from langchain.document_loaders import UnstructuredEmailLoader

    loader = UnstructuredEmailLoader("example_data/fake-email.eml", mode="elements")
    loader.load()

    Example
    -------
    from langchain.document_loaders import UnstructuredEmailLoader

    loader = UnstructuredEmailLoader(
        "example_data/fake-email-attachment.eml",
        mode="elements",
        process_attachments=True,
    )
    loader.load()
    """

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        process_attachments = unstructured_kwargs.get("process_attachments")
        attachment_partitioner = unstructured_kwargs.get("attachment_partitioner")

        if process_attachments and attachment_partitioner is None:
            from unstructured.partition.auto import partition

            unstructured_kwargs["attachment_partitioner"] = partition

        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.file_utils.filetype import FileType, detect_filetype

        filetype = detect_filetype(self.file_path)

        if filetype == FileType.EML:
            from unstructured.partition.email import partition_email

            return partition_email(filename=self.file_path, **self.unstructured_kwargs)
        elif satisfies_min_unstructured_version("0.5.8") and filetype == FileType.MSG:
            from unstructured.partition.msg import partition_msg

            return partition_msg(filename=self.file_path, **self.unstructured_kwargs)
        else:
            raise ValueError(
                f"Filetype {filetype} is not supported in UnstructuredEmailLoader."
            )


class OutlookMessageLoader(BaseLoader):
    """
    Loads Outlook Message files using extract_msg.

    https://github.com/TeamMsgExtractor/msg-extractor
    """

    def __init__(self, file_path: str):
        """Initialize with a file path.

        Args:
            file_path: The path to the Outlook Message file.
        """
        self.file_path = file_path

        if not os.path.isfile(self.file_path):
            raise ValueError("File path %s is not a valid file" % self.file_path)

        try:
            import extract_msg  # noqa:F401
        except ImportError:
            raise ImportError(
                "extract_msg is not installed. Please install it with "
                "`pip install extract_msg`"
            )

    def load(self) -> List[Document]:
        """Load data into document objects."""
        import extract_msg

        msg = extract_msg.Message(self.file_path)
        return [
            Document(
                page_content=msg.body,
                metadata={
                    "subject": msg.subject,
                    "sender": msg.sender,
                    "date": msg.date,
                },
            )
        ]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/email.html
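A usage sketch for OutlookMessageLoader, the second loader above; the .msg path is a placeholder, and `extract_msg` must be installed:

from langchain.document_loaders import OutlookMessageLoader

loader = OutlookMessageLoader("example_data/sample.msg")
docs = loader.load()  # metadata carries subject, sender, and date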
Source code for langchain.document_loaders.roam

"""Loads Roam directory dump."""
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class RoamLoader(BaseLoader):
    """Loads Roam files from disk."""

    def __init__(self, path: str):
        """Initialize with a path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        ps = list(Path(self.file_path).glob("**/*.md"))
        docs = []
        for p in ps:
            with open(p) as f:
                text = f.read()
            metadata = {"source": str(p)}
            docs.append(Document(page_content=text, metadata=metadata))
        return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/roam.html
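A usage sketch; "Roam_DB" stands in for a directory containing an exported Roam markdown dump:

from langchain.document_loaders import RoamLoader

loader = RoamLoader("Roam_DB")
docs = loader.load()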
Source code for langchain.document_loaders.async_html

import asyncio
import logging
import warnings
from typing import Any, Dict, Iterator, List, Optional, Union

import aiohttp
import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)

default_header_template = {
    "User-Agent": "",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
    ";q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Referer": "https://www.google.com/",
    "DNT": "1",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
}


class AsyncHtmlLoader(BaseLoader):
    """Loads HTML asynchronously."""

    def __init__(
        self,
        web_path: Union[str, List[str]],
        header_template: Optional[dict] = None,
        verify_ssl: Optional[bool] = True,
        proxies: Optional[dict] = None,
        requests_per_second: int = 2,
        requests_kwargs: Dict[str, Any] = {},
        raise_for_status: bool = False,
    ):
        """Initialize with webpage path."""

        # TODO: Deprecate web_path in favor of web_paths, and remove this
        # left like this because there are a number of loaders that expect single
        # urls
        if isinstance(web_path, str):
            self.web_paths = [web_path]
        elif isinstance(web_path, List):
            self.web_paths = web_path

        headers = header_template or default_header_template
        if not headers.get("User-Agent"):
            try:
                from fake_useragent import UserAgent

                headers["User-Agent"] = UserAgent().random
            except ImportError:
                logger.info(
                    "fake_useragent not found, using default user agent. "
                    "To get a realistic header for requests, "
                    "`pip install fake_useragent`."
                )

        self.session = requests.Session()
        self.session.headers = dict(headers)
        self.session.verify = verify_ssl

        if proxies:
            self.session.proxies.update(proxies)

        self.requests_per_second = requests_per_second
        self.requests_kwargs = requests_kwargs
        self.raise_for_status = raise_for_status

    async def _fetch(
        self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
    ) -> str:
        async with aiohttp.ClientSession() as session:
            for i in range(retries):
                try:
                    async with session.get(
                        url,
                        headers=self.session.headers,
                        ssl=None if self.session.verify else False,
                    ) as response:
                        try:
                            text = await response.text()
                        except UnicodeDecodeError:
                            logger.error(f"Failed to decode content from {url}")
                            text = ""
                        return text
                except aiohttp.ClientConnectionError as e:
                    if i == retries - 1:
                        raise
                    else:
                        logger.warning(
                            f"Error fetching {url} with attempt "
                            f"{i + 1}/{retries}: {e}. Retrying..."
                        )
                        await asyncio.sleep(cooldown * backoff**i)
        raise ValueError("retry count exceeded")

    async def _fetch_with_rate_limit(
        self, url: str, semaphore: asyncio.Semaphore
    ) -> str:
        async with semaphore:
            return await self._fetch(url)

    async def fetch_all(self, urls: List[str]) -> Any:
        """Fetch all urls concurrently with rate limiting."""
        semaphore = asyncio.Semaphore(self.requests_per_second)
        tasks = []
        for url in urls:
            task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
            tasks.append(task)
        try:
            from tqdm.asyncio import tqdm_asyncio

            return await tqdm_asyncio.gather(
                *tasks, desc="Fetching pages", ascii=True, mininterval=1
            )
        except ImportError:
            warnings.warn("For better logging of progress, `pip install tqdm`")
            return await asyncio.gather(*tasks)

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load text from the url(s) in web_path."""
        for doc in self.load():
            yield doc

    def load(self) -> List[Document]:
        """Load text from the url(s) in web_path."""
        results = asyncio.run(self.fetch_all(self.web_paths))
        docs = []
        for i, text in enumerate(results):
            metadata = {"source": self.web_paths[i]}
            docs.append(Document(page_content=text, metadata=metadata))
        return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/async_html.html
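A usage sketch; the URLs are placeholders, and `fake_useragent`/`tqdm` are optional per the fallbacks above. Note that `load()` calls `asyncio.run`, so it should not be invoked from within an already-running event loop:

from langchain.document_loaders import AsyncHtmlLoader

urls = ["https://www.example.com", "https://www.example.org"]
loader = AsyncHtmlLoader(urls)
docs = loader.load()  # raw HTML; pair with an HTML-to-text transformer downstream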
Source code for langchain.document_loaders.fauna

from typing import Iterator, List, Optional, Sequence

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class FaunaLoader(BaseLoader):
    """FaunaDB Loader.

    Attributes:
        query (str): The FQL query string to execute.
        page_content_field (str): The field that contains the content of each page.
        secret (str): The secret key for authenticating to FaunaDB.
        metadata_fields (Optional[Sequence[str]]): Optional list of field names
            to include in metadata.
    """

    def __init__(
        self,
        query: str,
        page_content_field: str,
        secret: str,
        metadata_fields: Optional[Sequence[str]] = None,
    ):
        self.query = query
        self.page_content_field = page_content_field
        self.secret = secret
        self.metadata_fields = metadata_fields

    def load(self) -> List[Document]:
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        try:
            from fauna import Page, fql
            from fauna.client import Client
            from fauna.encoding import QuerySuccess
        except ImportError:
            raise ImportError(
                "Could not import fauna python package. "
                "Please install it with `pip install fauna`."
            )
        # Create Fauna Client
        client = Client(secret=self.secret)
        # Run FQL Query
        response: QuerySuccess = client.query(fql(self.query))
        page: Page = response.data
        for result in page:
            if result is not None:
                document_dict = dict(result.items())
                page_content = ""
                for key, value in document_dict.items():
                    if key == self.page_content_field:
                        page_content = value
                document: Document = Document(
                    page_content=page_content,
                    metadata={"id": result.id, "ts": result.ts},
                )
                yield document
        if page.after is not None:
            yield Document(
                page_content="Next Page Exists",
                metadata={"after": page.after},
            )
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/fauna.html
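A usage sketch; the FQL query, field name, and secret are placeholders (note the "Next Page Exists" sentinel Document yielded when more pages remain):

from langchain.document_loaders.fauna import FaunaLoader

loader = FaunaLoader(
    query="Item.all()",  # an FQL query string; collection name is illustrative
    page_content_field="text",
    secret="<fauna-secret-key>",
)
docs = loader.load()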
Source code for langchain.document_loaders.mediawikidump """Load Data from a MediaWiki dump xml.""" import logging from pathlib import Path from typing import List, Optional, Sequence, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class MWDumpLoader(BaseLoader): """ Load MediaWiki dump from XML file Example: .. code-block:: python from langchain.document_loaders import MWDumpLoader loader = MWDumpLoader( file_path="myWiki.xml", encoding="utf8" ) docs = loader.load() from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter( chunk_size=1000, chunk_overlap=0 ) texts = text_splitter.split_documents(docs) :param file_path: XML local file path :type file_path: str :param encoding: Charset encoding, defaults to "utf8" :type encoding: str, optional :param namespaces: The namespace of pages you want to parse. See https://www.mediawiki.org/wiki/Help:Namespaces#Localisation for a list of all common namespaces :type namespaces: List[int],optional :param skip_redirects: TR=rue to skip pages that redirect to other pages, False to keep them. False by default :type skip_redirects: bool, optional :param stop_on_error: False to skip over pages that cause parsing errors, True to stop. True by default :type stop_on_error: bool, optional """ [docs] def __init__( self,
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mediawikidump.html
f101e8c8735b-1
""" [docs] def __init__( self, file_path: Union[str, Path], encoding: Optional[str] = "utf8", namespaces: Optional[Sequence[int]] = None, skip_redirects: Optional[bool] = False, stop_on_error: Optional[bool] = True, ): self.file_path = file_path if isinstance(file_path, str) else str(file_path) self.encoding = encoding # Namespaces range from -2 to 15, inclusive. self.namespaces = namespaces or list(range(-2, 16)) self.skip_redirects = skip_redirects self.stop_on_error = stop_on_error [docs] def load(self) -> List[Document]: """Load from a file path.""" try: import mwparserfromhell import mwxml except ImportError as e: raise ImportError( "Unable to import 'mwparserfromhell' or 'mwxml'. Please install with" " `pip install mwparserfromhell mwxml`." ) from e dump = mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding)) docs = [] for page in dump.pages: if self.skip_redirects and page.redirect: continue if page.namespace not in self.namespaces: continue try: for revision in page: code = mwparserfromhell.parse(revision.text) text = code.strip_code( normalize=True, collapse=True, keep_template_params=False ) metadata = {"source": page.title} docs.append(Document(page_content=text, metadata=metadata)) except Exception as e: logger.error("Parsing error: {}".format(e))
if self.stop_on_error:
raise e
else:
continue
return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mediawikidump.html
Source code for langchain.document_loaders.notiondb """Notion DB loader for langchain""" from typing import Any, Dict, List, Optional import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader NOTION_BASE_URL = "https://api.notion.com/v1" DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query" PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}" BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children" [docs]class NotionDBLoader(BaseLoader): """Notion DB Loader. Reads content from pages within a Notion Database. Args: integration_token (str): Notion integration token. database_id (str): Notion database id. request_timeout_sec (int): Timeout for Notion requests in seconds. Defaults to 10. """ [docs] def __init__( self, integration_token: str, database_id: str, request_timeout_sec: Optional[int] = 10, ) -> None: """Initialize with parameters.""" if not integration_token: raise ValueError("integration_token must be provided") if not database_id: raise ValueError("database_id must be provided") self.token = integration_token self.database_id = database_id self.headers = { "Authorization": "Bearer " + self.token, "Content-Type": "application/json", "Notion-Version": "2022-06-28", } self.request_timeout_sec = request_timeout_sec [docs] def load(self) -> List[Document]: """Load documents from the Notion database. Returns:
List[Document]: List of documents.
"""
page_summaries = self._retrieve_page_summaries()
return list(self.load_page(page_summary) for page_summary in page_summaries)
def _retrieve_page_summaries(
self, query_dict: Dict[str, Any] = {"page_size": 100}
) -> List[Dict[str, Any]]:
"""Get all the pages from a Notion database."""
pages: List[Dict[str, Any]] = []
while True:
data = self._request(
DATABASE_URL.format(database_id=self.database_id),
method="POST",
query_dict=query_dict,
)
pages.extend(data.get("results"))
if not data.get("has_more"):
break
query_dict["start_cursor"] = data.get("next_cursor")
return pages
[docs] def load_page(self, page_summary: Dict[str, Any]) -> Document:
"""Read a page.
Args:
page_summary: Page summary from Notion API.
"""
page_id = page_summary["id"]
# load properties as metadata
metadata: Dict[str, Any] = {}
for prop_name, prop_data in page_summary["properties"].items():
prop_type = prop_data["type"]
if prop_type == "rich_text":
value = (
prop_data["rich_text"][0]["plain_text"]
if prop_data["rich_text"]
else None
)
elif prop_type == "title":
value = (
prop_data["title"][0]["plain_text"] if prop_data["title"] else None
)
elif prop_type == "multi_select":
value = (
[item["name"] for item in prop_data["multi_select"]]
if prop_data["multi_select"]
else []
)
elif prop_type == "url":
value = prop_data["url"]
elif prop_type == "unique_id":
value = (
f'{prop_data["unique_id"]["prefix"]}-{prop_data["unique_id"]["number"]}'
if prop_data["unique_id"]
else None
)
elif prop_type == "status":
value = prop_data["status"]["name"] if prop_data["status"] else None
elif prop_type == "people":
value = (
[item["name"] for item in prop_data["people"]]
if prop_data["people"]
else []
)
else:
value = None
metadata[prop_name.lower()] = value
metadata["id"] = page_id
return Document(page_content=self._load_blocks(page_id), metadata=metadata)
def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block and its children."""
result_lines_arr: List[str] = []
cur_block_id: str = block_id
while cur_block_id:
data = self._request(BLOCK_URL.format(block_id=cur_block_id))
for result in data["results"]:
result_obj = result[result["type"]]
if "rich_text" not in result_obj:
continue
cur_result_text_arr: List[str] = []
for rich_text in result_obj["rich_text"]:
if "text" in rich_text:
cur_result_text_arr.append(
"\t" * num_tabs + rich_text["text"]["content"]
)
if result["has_children"]:
children_text = self._load_blocks(
result["id"], num_tabs=num_tabs + 1
)
cur_result_text_arr.append(children_text)
result_lines_arr.append("\n".join(cur_result_text_arr))
cur_block_id = data.get("next_cursor")
return "\n".join(result_lines_arr)
def _request(
self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
) -> Any:
res = requests.request(
method,
url,
headers=self.headers,
json=query_dict,
timeout=self.request_timeout_sec,
)
res.raise_for_status()
return res.json()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/notiondb.html
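A minimal usage sketch, with the token and database id as placeholders; the loader turns each database page into one Document, lower-casing the property names into metadata as shown in load_page above.

from langchain.document_loaders.notiondb import NotionDBLoader

# Placeholder credentials; create an integration token in Notion's settings.
loader = NotionDBLoader(
    integration_token="<NOTION_INTEGRATION_TOKEN>",
    database_id="<DATABASE_ID>",
    request_timeout_sec=30,  # optional; defaults to 10
)
docs = loader.load()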
Source code for langchain.document_loaders.max_compute from __future__ import annotations from typing import Any, Iterator, List, Optional, Sequence from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utilities.max_compute import MaxComputeAPIWrapper [docs]class MaxComputeLoader(BaseLoader): """Loads a query result from Alibaba Cloud MaxCompute table into documents.""" [docs] def __init__( self, query: str, api_wrapper: MaxComputeAPIWrapper, *, page_content_columns: Optional[Sequence[str]] = None, metadata_columns: Optional[Sequence[str]] = None, ): """Initialize Alibaba Cloud MaxCompute document loader. Args: query: SQL query to execute. api_wrapper: MaxCompute API wrapper. page_content_columns: The columns to write into the `page_content` of the Document. If unspecified, all columns will be written to `page_content`. metadata_columns: The columns to write into the `metadata` of the Document. If unspecified, all columns not added to `page_content` will be written. """ self.query = query self.api_wrapper = api_wrapper self.page_content_columns = page_content_columns self.metadata_columns = metadata_columns [docs] @classmethod def from_params( cls, query: str, endpoint: str, project: str, *, access_id: Optional[str] = None, secret_access_key: Optional[str] = None, **kwargs: Any, ) -> MaxComputeLoader: """Convenience constructor that builds the MaxCompute API wrapper from given parameters. Args: query: SQL query to execute.
endpoint: MaxCompute endpoint.
project: A project is a basic organizational unit of MaxCompute, which is
similar to a database.
access_id: MaxCompute access ID. Should be passed in directly or set as the
environment variable `MAX_COMPUTE_ACCESS_ID`.
secret_access_key: MaxCompute secret access key. Should be passed in
directly or set as the environment variable
`MAX_COMPUTE_SECRET_ACCESS_KEY`.
"""
api_wrapper = MaxComputeAPIWrapper.from_params(
endpoint, project, access_id=access_id, secret_access_key=secret_access_key
)
return cls(query, api_wrapper, **kwargs)
[docs] def lazy_load(self) -> Iterator[Document]:
for row in self.api_wrapper.query(self.query):
if self.page_content_columns:
page_content_data = {
k: v for k, v in row.items() if k in self.page_content_columns
}
else:
page_content_data = row
page_content = "\n".join(f"{k}: {v}" for k, v in page_content_data.items())
if self.metadata_columns:
metadata = {k: v for k, v in row.items() if k in self.metadata_columns}
else:
metadata = {k: v for k, v in row.items() if k not in page_content_data}
yield Document(page_content=page_content, metadata=metadata)
[docs] def load(self) -> List[Document]:
return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/max_compute.html
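A usage sketch of the from_params convenience constructor; the query, endpoint, project, and column names are placeholders, and credentials are read from the MAX_COMPUTE_ACCESS_ID / MAX_COMPUTE_SECRET_ACCESS_KEY environment variables when not passed in.

from langchain.document_loaders.max_compute import MaxComputeLoader

# Hypothetical table and columns; adjust to your MaxCompute project.
loader = MaxComputeLoader.from_params(
    "SELECT id, content, meta FROM my_table",
    "<ENDPOINT>",
    "<PROJECT>",
    page_content_columns=["content"],  # only this column goes into page_content
    metadata_columns=["id", "meta"],
)
docs = loader.load()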
Source code for langchain.document_loaders.datadog_logs """Load Datadog logs.""" from datetime import datetime, timedelta from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class DatadogLogsLoader(BaseLoader): """Loads a query result from Datadog into a list of documents. Logs are written into the `page_content` and into the `metadata`. """ [docs] def __init__( self, query: str, api_key: str, app_key: str, from_time: Optional[int] = None, to_time: Optional[int] = None, limit: int = 100, ) -> None: """Initialize Datadog document loader. Requirements: - Must have datadog_api_client installed. Install with `pip install datadog_api_client`. Args: query: The query to run in Datadog. api_key: The Datadog API key. app_key: The Datadog APP key. from_time: Optional. The start of the time range to query. Supports date math and regular timestamps (milliseconds) like '1688732708951' Defaults to 20 minutes ago. to_time: Optional. The end of the time range to query. Supports date math and regular timestamps (milliseconds) like '1688732708951' Defaults to now. limit: The maximum number of logs to return. Defaults to 100. """ # noqa: E501 try: from datadog_api_client import Configuration except ImportError as ex: raise ImportError(
"Could not import datadog_api_client python package. "
"Please install it with `pip install datadog_api_client`."
) from ex
self.query = query
configuration = Configuration()
configuration.api_key["apiKeyAuth"] = api_key
configuration.api_key["appKeyAuth"] = app_key
self.configuration = configuration
self.from_time = from_time
self.to_time = to_time
self.limit = limit
[docs] def parse_log(self, log: dict) -> Document:
"""
Create Document objects from Datadog log items.
"""
attributes = log.get("attributes", {})
metadata = {
"id": log.get("id", ""),
"status": attributes.get("status"),
"service": attributes.get("service", ""),
"tags": attributes.get("tags", []),
"timestamp": attributes.get("timestamp", ""),
}
message = attributes.get("message", "")
inside_attributes = attributes.get("attributes", {})
content_dict = {**inside_attributes, "message": message}
content = ", ".join(f"{k}: {v}" for k, v in content_dict.items())
return Document(page_content=content, metadata=metadata)
[docs] def load(self) -> List[Document]:
"""
Get logs from Datadog.
Returns:
A list of Document objects.
- page_content
- metadata
- id
- service
- status
- tags
- timestamp
"""
try:
from datadog_api_client import ApiClient
from datadog_api_client.v2.api.logs_api import LogsApi
from datadog_api_client.v2.model.logs_list_request import LogsListRequest
from datadog_api_client.v2.model.logs_list_request_page import (
LogsListRequestPage,
)
from datadog_api_client.v2.model.logs_query_filter import LogsQueryFilter
from datadog_api_client.v2.model.logs_sort import LogsSort
except ImportError as ex:
raise ImportError(
"Could not import datadog_api_client python package. "
"Please install it with `pip install datadog_api_client`."
) from ex
now = datetime.now()
twenty_minutes_before = now - timedelta(minutes=20)
now_timestamp = int(now.timestamp() * 1000)
twenty_minutes_before_timestamp = int(twenty_minutes_before.timestamp() * 1000)
_from = (
self.from_time
if self.from_time is not None
else twenty_minutes_before_timestamp
)
body = LogsListRequest(
filter=LogsQueryFilter(
query=self.query,
_from=_from,
to=f"{self.to_time if self.to_time is not None else now_timestamp}",
),
sort=LogsSort.TIMESTAMP_ASCENDING,
page=LogsListRequestPage(
limit=self.limit,
),
)
with ApiClient(configuration=self.configuration) as api_client:
api_instance = LogsApi(api_client)
response = api_instance.list_logs(body=body).to_dict()
docs: List[Document] = []
for row in response["data"]:
docs.append(self.parse_log(row))
return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/datadog_logs.html
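A usage sketch; the query and both keys are placeholders, and the time range defaults to the last 20 minutes as implemented above.

from langchain.document_loaders.datadog_logs import DatadogLogsLoader

# Placeholder keys; a Datadog API key and application key are both required.
loader = DatadogLogsLoader(
    query="service:agent status:error",  # hypothetical log query
    api_key="<DD_API_KEY>",
    app_key="<DD_APP_KEY>",
    limit=50,
)
docs = loader.load()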
Source code for langchain.document_loaders.sitemap """Loader that fetches a sitemap and loads those URLs.""" import itertools import re from typing import Any, Callable, Generator, Iterable, List, Optional from langchain.document_loaders.web_base import WebBaseLoader from langchain.schema import Document def _default_parsing_function(content: Any) -> str: return str(content.get_text()) def _default_meta_function(meta: dict, _content: Any) -> dict: return {"source": meta["loc"], **meta} def _batch_block(iterable: Iterable, size: int) -> Generator[List[dict], None, None]: it = iter(iterable) while item := list(itertools.islice(it, size)): yield item [docs]class SitemapLoader(WebBaseLoader): """Loader that fetches a sitemap and loads those URLs.""" [docs] def __init__( self, web_path: str, filter_urls: Optional[List[str]] = None, parsing_function: Optional[Callable] = None, blocksize: Optional[int] = None, blocknum: int = 0, meta_function: Optional[Callable] = None, is_local: bool = False, continue_on_failure: bool = False, ): """Initialize with webpage path and optional filter URLs. Args: web_path: url of the sitemap. can also be a local path filter_urls: list of strings or regexes that will be applied to filter the urls that are parsed and loaded parsing_function: Function to parse bs4.Soup output blocksize: number of sitemap locations per block blocknum: the number of the block that should be loaded - zero indexed.
Default: 0
meta_function: Function to parse bs4.Soup output for metadata;
remember, when setting this function, to also copy metadata["loc"]
to metadata["source"] if you are using this field
is_local: whether the sitemap is a local file. Default: False
continue_on_failure: whether to continue loading the sitemap if an error
occurs loading a url, emitting a warning instead of raising an
exception. Setting this to True makes the loader more robust, but also
may result in missing data. Default: False
"""
if blocksize is not None and blocksize < 1:
raise ValueError("Sitemap blocksize should be at least 1")
if blocknum < 0:
raise ValueError("Sitemap blocknum cannot be lower than 0")
try:
import lxml # noqa:F401
except ImportError:
raise ImportError(
"lxml package not found, please install it with " "`pip install lxml`"
)
super().__init__(web_path)
self.filter_urls = filter_urls
self.parsing_function = parsing_function or _default_parsing_function
self.meta_function = meta_function or _default_meta_function
self.blocksize = blocksize
self.blocknum = blocknum
self.is_local = is_local
self.continue_on_failure = continue_on_failure
[docs] def parse_sitemap(self, soup: Any) -> List[dict]:
"""Parse sitemap xml and load into a list of dicts.
Args:
soup: BeautifulSoup object.
Returns:
List of dicts.
"""
els = []
for url in soup.find_all("url"):
loc = url.find("loc")
if not loc:
continue
# Strip leading and trailing whitespace and newlines
loc_text = loc.text.strip()
if self.filter_urls and not any(
re.match(r, loc_text) for r in self.filter_urls
):
continue
els.append(
{
tag: prop.text
for tag in ["loc", "lastmod", "changefreq", "priority"]
if (prop := url.find(tag))
}
)
for sitemap in soup.find_all("sitemap"):
loc = sitemap.find("loc")
if not loc:
continue
soup_child = self.scrape_all([loc.text], "xml")[0]
els.extend(self.parse_sitemap(soup_child))
return els
[docs] def load(self) -> List[Document]:
"""Load sitemap."""
if self.is_local:
try:
import bs4
except ImportError:
raise ImportError(
"beautifulsoup4 package not found, please install it"
" with `pip install beautifulsoup4`"
)
fp = open(self.web_path)
soup = bs4.BeautifulSoup(fp, "xml")
else:
soup = self.scrape("xml")
els = self.parse_sitemap(soup)
if self.blocksize is not None:
elblocks = list(_batch_block(els, self.blocksize))
blockcount = len(elblocks)
if blockcount - 1 < self.blocknum:
raise ValueError(
"Selected sitemap does not contain enough blocks for given blocknum"
)
else:
els = elblocks[self.blocknum]
results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el])
return [
Document(
page_content=self.parsing_function(results[i]),
metadata=self.meta_function(els[i], results[i]),
)
for i in range(len(results))
]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/sitemap.html
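A short usage sketch; the sitemap URL and filter pattern are placeholders. Note that filter_urls entries are regexes matched with re.match, as in parse_sitemap above.

from langchain.document_loaders.sitemap import SitemapLoader

# Only load pages whose URLs match the given pattern.
loader = SitemapLoader(
    "https://example.com/sitemap.xml",           # placeholder sitemap URL
    filter_urls=["https://example.com/blog/.*"], # regex, matched with re.match
)
docs = loader.load()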
Source code for langchain.document_loaders.airtable from typing import Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class AirtableLoader(BaseLoader): """Loader for Airtable tables.""" [docs] def __init__(self, api_token: str, table_id: str, base_id: str): """Initialize with API token and the IDs for table and base""" self.api_token = api_token """Airtable API token.""" self.table_id = table_id """Airtable table ID.""" self.base_id = base_id """Airtable base ID.""" [docs] def lazy_load(self) -> Iterator[Document]: """Lazy load Documents from table.""" from pyairtable import Table table = Table(self.api_token, self.base_id, self.table_id) records = table.all() for record in records: # Need to convert record from dict to str yield Document( page_content=str(record), metadata={ "source": self.base_id + "_" + self.table_id, "base_id": self.base_id, "table_id": self.table_id, }, ) [docs] def load(self) -> List[Document]: """Load Documents from table.""" return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/airtable.html
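A usage sketch with placeholder IDs; as the code above shows, each Airtable record is serialized with str() into page_content, with the base and table IDs kept in metadata.

from langchain.document_loaders.airtable import AirtableLoader

# Placeholder token and IDs; requires `pip install pyairtable`.
loader = AirtableLoader(
    api_token="<AIRTABLE_API_TOKEN>",
    table_id="<TABLE_ID>",
    base_id="<BASE_ID>",
)
docs = loader.load()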
Source code for langchain.document_loaders.org_mode """Loads Org-Mode files.""" from typing import Any, List from langchain.document_loaders.unstructured import ( UnstructuredFileLoader, validate_unstructured_version, ) [docs]class UnstructuredOrgModeLoader(UnstructuredFileLoader): """Loader that uses unstructured to load Org-Mode files. You can run the loader in one of two modes: "single" and "elements". If you use "single" mode, the document will be returned as a single langchain Document object. If you use "elements" mode, the unstructured library will split the document into elements such as Title and NarrativeText. You can pass in additional unstructured kwargs after mode to apply different unstructured settings. Examples -------- from langchain.document_loaders import UnstructuredOrgModeLoader loader = UnstructuredOrgModeLoader( "example.org", mode="elements", strategy="fast", ) docs = loader.load() References ---------- https://unstructured-io.github.io/unstructured/bricks.html#partition-org """ [docs] def __init__( self, file_path: str, mode: str = "single", **unstructured_kwargs: Any ): """ Args: file_path: The path to the file to load. mode: The mode to load the file from. Default is "single". **unstructured_kwargs: Any additional keyword arguments to pass to the unstructured. """ validate_unstructured_version(min_unstructured_version="0.7.9") super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs) def _get_elements(self) -> List:
from unstructured.partition.org import partition_org
return partition_org(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/org_mode.html
Source code for langchain.document_loaders.tencent_cos_file """Loading logic for loading documents from Tencent Cloud COS file.""" import os import tempfile from typing import Any, Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class TencentCOSFileLoader(BaseLoader): """Loader for Tencent Cloud COS file.""" [docs] def __init__(self, conf: Any, bucket: str, key: str): """Initialize with COS config, bucket and key name. :param conf(CosConfig): COS config. :param bucket(str): COS bucket. :param key(str): COS file key. """ self.conf = conf self.bucket = bucket self.key = key [docs] def load(self) -> List[Document]: return list(self.lazy_load()) [docs] def lazy_load(self) -> Iterator[Document]: """Load documents.""" try: from qcloud_cos import CosS3Client except ImportError: raise ImportError( "Could not import cos-python-sdk-v5 python package. " "Please install it with `pip install cos-python-sdk-v5`." ) # Initialise a client client = CosS3Client(self.conf) with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.bucket}/{self.key}" os.makedirs(os.path.dirname(file_path), exist_ok=True) # Download the file to a destination client.download_file( Bucket=self.bucket, Key=self.key, DestFilePath=file_path ) loader = UnstructuredFileLoader(file_path)
# UnstructuredFileLoader does not implement lazy_load yet
return iter(loader.load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tencent_cos_file.html
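A usage sketch; the region, credentials, bucket, and key below are placeholders for your own Tencent Cloud COS settings, passed in via the SDK's CosConfig object.

from qcloud_cos import CosConfig
from langchain.document_loaders.tencent_cos_file import TencentCOSFileLoader

# Placeholder region and credentials.
conf = CosConfig(
    Region="ap-guangzhou",
    SecretId="<SECRET_ID>",
    SecretKey="<SECRET_KEY>",
)
loader = TencentCOSFileLoader(conf=conf, bucket="my-bucket-1250000000", key="docs/report.pdf")
docs = loader.load()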
Source code for langchain.document_loaders.unstructured
"""Loader that uses unstructured to load files."""
import collections
from abc import ABC, abstractmethod
from typing import IO, Any, Callable, Dict, List, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]def satisfies_min_unstructured_version(min_version: str) -> bool:
"""Checks to see if the installed unstructured version meets or exceeds the minimum
version for the feature in question."""
from unstructured.__version__ import __version__ as __unstructured_version__
min_version_tuple = tuple([int(x) for x in min_version.split(".")])
# NOTE(MthwRobinson) - enables the loader to work when you're using pre-release
# versions of unstructured like 0.4.17-dev1
_unstructured_version = __unstructured_version__.split("-")[0]
unstructured_version_tuple = tuple(
[int(x) for x in _unstructured_version.split(".")]
)
return unstructured_version_tuple >= min_version_tuple
[docs]def validate_unstructured_version(min_unstructured_version: str) -> None:
"""Raises an error if the installed unstructured version does not meet the
specified minimum."""
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
f"unstructured>={min_unstructured_version} is required in this loader."
)
[docs]class UnstructuredBaseLoader(BaseLoader, ABC):
"""Loader that uses Unstructured to load files."""
[docs] def __init__(
self,
mode: str = "single",
post_processors: List[Callable] = [],
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
try:
import unstructured # noqa:F401
except ImportError:
raise ValueError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
_valid_modes = {"single", "elements", "paged"}
if mode not in _valid_modes:
raise ValueError(
f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
)
self.mode = mode
if not satisfies_min_unstructured_version("0.5.4"):
if "strategy" in unstructured_kwargs:
unstructured_kwargs.pop("strategy")
self.unstructured_kwargs = unstructured_kwargs
self.post_processors = post_processors
@abstractmethod
def _get_elements(self) -> List:
"""Get elements."""
@abstractmethod
def _get_metadata(self) -> dict:
"""Get metadata."""
def _post_process_elements(self, elements: list) -> list:
"""Applies post processing functions to extracted unstructured elements.
Post processing functions are Element -> Element callables that are passed
in using the post_processors kwarg when the loader is instantiated."""
for element in elements:
for post_processor in self.post_processors:
element.apply(post_processor)
return elements
[docs] def load(self) -> List[Document]:
"""Load file."""
elements = self._get_elements()
if self.mode == "elements":
docs: List[Document] = list()
for element in elements:
metadata = self._get_metadata()
# NOTE(MthwRobinson) - the attribute check is for backward compatibility
# with unstructured<0.4.9. The metadata attribute was added in 0.4.9.
if hasattr(element, "metadata"):
metadata.update(element.metadata.to_dict())
if hasattr(element, "category"):
metadata["category"] = element.category
docs.append(Document(page_content=str(element), metadata=metadata))
elif self.mode == "paged":
text_dict: Dict[int, str] = {}
meta_dict: Dict[int, Dict] = {}
for idx, element in enumerate(elements):
metadata = self._get_metadata()
if hasattr(element, "metadata"):
metadata.update(element.metadata.to_dict())
page_number = metadata.get("page_number", 1)
# Check if this page_number already exists in text_dict
if page_number not in text_dict:
# If not, create new entry with initial text and metadata
text_dict[page_number] = str(element) + "\n\n"
meta_dict[page_number] = metadata
else:
# If exists, append to text and update the metadata
text_dict[page_number] += str(element) + "\n\n"
meta_dict[page_number].update(metadata)
# Convert the dict to a list of Document objects
docs = [
Document(page_content=text_dict[key], metadata=meta_dict[key])
for key in text_dict.keys()
]
elif self.mode == "single":
metadata = self._get_metadata()
text = "\n\n".join([str(el) for el in elements])
docs = [Document(page_content=text, metadata=metadata)]
else:
raise ValueError(f"mode of {self.mode} not supported.")
return docs
[docs]class UnstructuredFileLoader(UnstructuredBaseLoader):
"""Loader that uses Unstructured to load files. The file loader uses the
unstructured partition function and will automatically detect the file
type. You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredFileLoader
loader = UnstructuredFileLoader(
"example.pdf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
"""
[docs] def __init__(
self,
file_path: Union[str, List[str]],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
self.file_path = file_path
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.auto import partition
return partition(filename=self.file_path, **self.unstructured_kwargs)
def _get_metadata(self) -> dict:
return {"source": self.file_path}
[docs]def get_elements_from_api(
file_path: Union[str, List[str], None] = None,
file: Union[IO, Sequence[IO], None] = None,
api_url: str = "https://api.unstructured.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
) -> List:
"""Retrieves a list of elements from the Unstructured API."""
if isinstance(file, collections.abc.Sequence) or isinstance(file_path, list):
from unstructured.partition.api import partition_multiple_via_api
_doc_elements = partition_multiple_via_api(
filenames=file_path,
files=file,
api_key=api_key,
api_url=api_url,
**unstructured_kwargs,
)
elements = []
for _elements in _doc_elements:
elements.extend(_elements)
return elements
else:
from unstructured.partition.api import partition_via_api
return partition_via_api(
filename=file_path,
file=file,
api_key=api_key,
api_url=api_url,
**unstructured_kwargs,
)
[docs]class UnstructuredAPIFileLoader(UnstructuredFileLoader):
"""Loader that uses the Unstructured API to load files.
By default, the loader makes a call to the hosted Unstructured API.
If you are running the unstructured API locally, you can change the
API URL by passing in the url parameter when you initialize the loader.
The hosted Unstructured API requires an API key.
See https://www.unstructured.io/api-key/ if you need to generate a key.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredAPIFileLoader
loader = UnstructuredAPIFileLoader(
"example.pdf", mode="elements", strategy="fast", api_key="MY_API_KEY",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
https://www.unstructured.io/api-key/
https://github.com/Unstructured-IO/unstructured-api
"""
[docs] def __init__(
self,
file_path: Union[str, List[str]] = "",
mode: str = "single",
url: str = "https://api.unstructured.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
if isinstance(file_path, str):
validate_unstructured_version(min_unstructured_version="0.6.2")
else:
validate_unstructured_version(min_unstructured_version="0.6.3")
self.url = url
self.api_key = api_key
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_metadata(self) -> dict:
return {"source": self.file_path}
def _get_elements(self) -> List:
return get_elements_from_api(
file_path=self.file_path,
api_key=self.api_key,
api_url=self.url,
**self.unstructured_kwargs,
)
[docs]class UnstructuredFileIOLoader(UnstructuredBaseLoader):
"""Loader that uses Unstructured to load files. The file loader
uses the unstructured partition function and will automatically detect the file
type. You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredFileIOLoader
with open("example.pdf", "rb") as f:
loader = UnstructuredFileIOLoader(
f, mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
"""
[docs] def __init__(
self,
file: Union[IO, Sequence[IO]],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
self.file = file
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.auto import partition
return partition(file=self.file, **self.unstructured_kwargs)
def _get_metadata(self) -> dict:
return {}
[docs]class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader):
"""Loader that uses the Unstructured API to load files.
By default, the loader makes a call to the hosted Unstructured API.
If you are running the unstructured API locally, you can change the
API URL by passing in the url parameter when you initialize the loader.
The hosted Unstructured API requires an API key.
See https://www.unstructured.io/api-key/ if you need to generate a key.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredAPIFileIOLoader
with open("example.pdf", "rb") as f:
loader = UnstructuredAPIFileIOLoader(
f, mode="elements", strategy="fast", api_key="MY_API_KEY",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition
https://www.unstructured.io/api-key/
https://github.com/Unstructured-IO/unstructured-api
"""
[docs] def __init__(
self,
file: Union[IO, Sequence[IO]],
mode: str = "single",
url: str = "https://api.unstructured.io/general/v0/general",
api_key: str = "",
**unstructured_kwargs: Any,
):
"""Initialize with file path."""
if isinstance(file, collections.abc.Sequence):
validate_unstructured_version(min_unstructured_version="0.6.3")
if file:
validate_unstructured_version(min_unstructured_version="0.6.2")
self.url = url
self.api_key = api_key
super().__init__(file=file, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
return get_elements_from_api(
file=self.file,
api_key=self.api_key,
api_url=self.url,
**self.unstructured_kwargs,
)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/unstructured.html
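The "paged" branch of UnstructuredBaseLoader.load above is the least obvious of the three modes, so a short sketch may help; the file name is a placeholder. In this mode, elements are grouped by their page_number metadata, yielding one Document per page rather than one per element or one per file.

from langchain.document_loaders import UnstructuredFileLoader

# Placeholder path; "paged" mode groups extracted elements by page.
loader = UnstructuredFileLoader("example.pdf", mode="paged")
docs = loader.load()
print(docs[0].metadata.get("page_number"))  # may be None for formats without pages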
Source code for langchain.document_loaders.trello
"""Loads cards from Trello"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env
if TYPE_CHECKING:
from trello import Board, Card, TrelloClient
[docs]class TrelloLoader(BaseLoader):
"""Trello loader. Reads all cards from a Trello board."""
[docs] def __init__(
self,
client: TrelloClient,
board_name: str,
*,
include_card_name: bool = True,
include_comments: bool = True,
include_checklist: bool = True,
card_filter: Literal["closed", "open", "all"] = "all",
extra_metadata: Tuple[str, ...] = ("due_date", "labels", "list", "closed"),
):
"""Initialize Trello loader.
Args:
client: Trello API client.
board_name: The name of the Trello board.
include_card_name: Whether to include the name of the card in the document.
include_comments: Whether to include the comments on the card in the
document.
include_checklist: Whether to include the checklist on the card in the
document.
card_filter: Filter on card status. Valid values are "closed", "open",
"all".
extra_metadata: List of additional metadata fields to include as document
metadata. Valid values are "due_date", "labels", "list", "closed".
"""
self.client = client
self.board_name = board_name
self.include_card_name = include_card_name
self.include_comments = include_comments
self.include_checklist = include_checklist
self.extra_metadata = extra_metadata
self.card_filter = card_filter
[docs] @classmethod
def from_credentials(
cls,
board_name: str,
*,
api_key: Optional[str] = None,
token: Optional[str] = None,
**kwargs: Any,
) -> TrelloLoader:
"""Convenience constructor that builds TrelloClient init param for you.
Args:
board_name: The name of the Trello board.
api_key: Trello API key. Can also be specified as environment variable
TRELLO_API_KEY.
token: Trello token. Can also be specified as environment variable
TRELLO_TOKEN.
include_card_name: Whether to include the name of the card in the document.
include_comments: Whether to include the comments on the card in the
document.
include_checklist: Whether to include the checklist on the card in the
document.
card_filter: Filter on card status. Valid values are "closed", "open",
"all".
extra_metadata: List of additional metadata fields to include as document
metadata. Valid values are "due_date", "labels", "list", "closed".
"""
try:
from trello import TrelloClient # type: ignore
except ImportError as ex:
raise ImportError(
"Could not import trello python package. "
"Please install it with `pip install py-trello`."
) from ex
api_key = api_key or get_from_env("api_key", "TRELLO_API_KEY")
token = token or get_from_env("token", "TRELLO_TOKEN") client = TrelloClient(api_key=api_key, token=token) return cls(client, board_name, **kwargs) [docs] def load(self) -> List[Document]: """Loads all cards from the specified Trello board. You can filter the cards, metadata and text included by using the optional parameters. Returns: A list of documents, one for each card in the board. """ try: from bs4 import BeautifulSoup # noqa: F401 except ImportError as ex: raise ImportError( "`beautifulsoup4` package not found, please run" " `pip install beautifulsoup4`" ) from ex board = self._get_board() # Create a dictionary with the list IDs as keys and the list names as values list_dict = {list_item.id: list_item.name for list_item in board.list_lists()} # Get Cards on the board cards = board.get_cards(card_filter=self.card_filter) return [self._card_to_doc(card, list_dict) for card in cards] def _get_board(self) -> Board: # Find the first board with a matching name board = next( (b for b in self.client.list_boards() if b.name == self.board_name), None ) if not board: raise ValueError(f"Board `{self.board_name}` not found.") return board def _card_to_doc(self, card: Card, list_dict: dict) -> Document: from bs4 import BeautifulSoup # type: ignore text_content = "" if self.include_card_name: text_content = card.name + "\n"
if card.description.strip():
text_content += BeautifulSoup(card.description, "lxml").get_text()
if self.include_checklist:
# Get all the checklist items on the card
for checklist in card.checklists:
if checklist.items:
items = [
f"{item['name']}:{item['state']}" for item in checklist.items
]
text_content += f"\n{checklist.name}\n" + "\n".join(items)
if self.include_comments:
# Get all the comments on the card
comments = [
BeautifulSoup(comment["data"]["text"], "lxml").get_text()
for comment in card.comments
]
text_content += "Comments:" + "\n".join(comments)
# Default metadata fields
metadata = {
"title": card.name,
"id": card.id,
"url": card.url,
}
# Extra metadata fields. Card object is not subscriptable.
if "labels" in self.extra_metadata:
metadata["labels"] = [label.name for label in card.labels]
if "list" in self.extra_metadata:
if card.list_id in list_dict:
metadata["list"] = list_dict[card.list_id]
if "closed" in self.extra_metadata:
metadata["closed"] = card.closed
if "due_date" in self.extra_metadata:
metadata["due_date"] = card.due_date
return Document(page_content=text_content, metadata=metadata)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/trello.html
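A usage sketch of the from_credentials constructor; the board name is a placeholder, and credentials fall back to the TRELLO_API_KEY / TRELLO_TOKEN environment variables as documented above.

from langchain.document_loaders.trello import TrelloLoader

# Placeholder board name; requires `pip install py-trello beautifulsoup4`.
loader = TrelloLoader.from_credentials(
    "My Board",
    card_filter="open",
    extra_metadata=("list", "labels"),
)
docs = loader.load()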
Source code for langchain.document_loaders.weather
"""Simple reader that reads weather data from OpenWeatherMap API"""
from __future__ import annotations
from datetime import datetime
from typing import Iterator, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
[docs]class WeatherDataLoader(BaseLoader):
"""Weather Reader.
Reads the forecast & current weather of any location using OpenWeatherMap's free
API. Check out 'https://openweathermap.org/appid' for more on how to generate a
free OpenWeatherMap API key.
"""
[docs] def __init__(
self,
client: OpenWeatherMapAPIWrapper,
places: Sequence[str],
) -> None:
"""Initialize with parameters."""
super().__init__()
self.client = client
self.places = places
[docs] @classmethod
def from_params(
cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
) -> WeatherDataLoader:
client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key)
return cls(client, places)
[docs] def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load weather data for the given locations."""
for place in self.places:
metadata = {"queried_at": datetime.now()}
content = self.client.run(place)
yield Document(page_content=content, metadata=metadata)
[docs] def load(
self,
) -> List[Document]:
"""Load weather data for the given locations."""
return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/weather.html
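A usage sketch of from_params; the place names and key are placeholders, and the underlying wrapper can also pick up the key from the OPENWEATHERMAP_API_KEY environment variable (the pyowm package must be installed).

from langchain.document_loaders.weather import WeatherDataLoader

# Placeholder places and key.
loader = WeatherDataLoader.from_params(
    places=["Pune", "London"],
    openweathermap_api_key="<OWM_API_KEY>",
)
docs = loader.load()  # one Document per place, with a "queried_at" timestamp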
Source code for langchain.document_loaders.excel
"""Loads Microsoft Excel files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
[docs]class UnstructuredExcelLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load Excel files. Like other
Unstructured loaders, UnstructuredExcelLoader can be used in both
"single" and "elements" mode. If you use the loader in "elements"
mode, each sheet in the Excel file will be an Unstructured Table
element. If you use the loader in "elements" mode, an
HTML representation of the table will be available in the
"text_as_html" key in the document metadata.
Examples
--------
from langchain.document_loaders.excel import UnstructuredExcelLoader
loader = UnstructuredExcelLoader("stanley-cups.xlsx", mode="elements")
docs = loader.load()
"""
[docs] def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Args:
file_path: The path to the Microsoft Excel file.
mode: The mode to use when partitioning the file. See unstructured docs
for more info. Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.7")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.xlsx import partition_xlsx
return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/excel.html
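A sketch of retrieving the HTML rendering mentioned in the docstring; the file name is a placeholder.

from langchain.document_loaders.excel import UnstructuredExcelLoader

loader = UnstructuredExcelLoader("stanley-cups.xlsx", mode="elements")
docs = loader.load()
# In "elements" mode each sheet is a Table element; its HTML rendering,
# when unstructured provides one, sits under the "text_as_html" metadata key.
html = docs[0].metadata.get("text_as_html")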
Source code for langchain.document_loaders.embaas import base64 import warnings from typing import Any, Dict, Iterator, List, Optional import requests from pydantic import BaseModel, root_validator, validator from typing_extensions import NotRequired, TypedDict from langchain.docstore.document import Document from langchain.document_loaders.base import BaseBlobParser, BaseLoader from langchain.document_loaders.blob_loaders import Blob from langchain.text_splitter import TextSplitter from langchain.utils import get_from_dict_or_env EMBAAS_DOC_API_URL = "https://api.embaas.io/v1/document/extract-text/bytes/" [docs]class EmbaasDocumentExtractionParameters(TypedDict): """Parameters for the embaas document extraction API.""" mime_type: NotRequired[str] """The mime type of the document.""" file_extension: NotRequired[str] """The file extension of the document.""" file_name: NotRequired[str] """The file name of the document.""" should_chunk: NotRequired[bool] """Whether to chunk the document into pages.""" chunk_size: NotRequired[int] """The maximum size of the text chunks.""" chunk_overlap: NotRequired[int] """The maximum overlap allowed between chunks.""" chunk_splitter: NotRequired[str] """The text splitter class name for creating chunks.""" separators: NotRequired[List[str]] """The separators for chunks.""" should_embed: NotRequired[bool] """Whether to create embeddings for the document in the response.""" model: NotRequired[str] """The model to pass to the Embaas document extraction API.""" instruction: NotRequired[str] """The instruction to pass to the Embaas document extraction API."""
[docs]class EmbaasDocumentExtractionPayload(EmbaasDocumentExtractionParameters):
"""Payload for the Embaas document extraction API."""
bytes: str
"""The base64 encoded bytes of the document to extract text from."""
[docs]class BaseEmbaasLoader(BaseModel):
"""Base class for embedding a model into an Embaas document extraction API."""
embaas_api_key: Optional[str] = None
"""The API key for the embaas document extraction API."""
api_url: str = EMBAAS_DOC_API_URL
"""The URL of the embaas document extraction API."""
params: EmbaasDocumentExtractionParameters = EmbaasDocumentExtractionParameters()
"""Additional parameters to pass to the embaas document extraction API."""
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
embaas_api_key = get_from_dict_or_env(
values, "embaas_api_key", "EMBAAS_API_KEY"
)
values["embaas_api_key"] = embaas_api_key
return values
[docs]class EmbaasBlobLoader(BaseEmbaasLoader, BaseBlobParser):
"""Embaas's document byte loader.
To use, you should have the
environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
# Default parsing
from langchain.document_loaders.embaas import EmbaasBlobLoader
loader = EmbaasBlobLoader()
blob = Blob.from_path(path="example.mp3")
documents = loader.parse(blob=blob)
# Custom api parameters (create embeddings automatically)
from langchain.document_loaders.embaas import EmbaasBlobLoader
loader = EmbaasBlobLoader(
params={
"should_embed": True,
"model": "e5-large-v2",
"chunk_size": 256,
"chunk_splitter": "CharacterTextSplitter"
}
)
blob = Blob.from_path(path="example.pdf")
documents = loader.parse(blob=blob)
"""
[docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Parses the blob lazily.
Args:
blob: The blob to parse.
"""
yield from self._get_documents(blob=blob)
@staticmethod
def _api_response_to_documents(chunks: List[Dict[str, Any]]) -> List[Document]:
"""Convert the API response to a list of documents."""
docs = []
for chunk in chunks:
metadata = chunk["metadata"]
if chunk.get("embedding", None) is not None:
metadata["embedding"] = chunk["embedding"]
doc = Document(page_content=chunk["text"], metadata=metadata)
docs.append(doc)
return docs
def _generate_payload(self, blob: Blob) -> EmbaasDocumentExtractionPayload:
"""Generates payload for the API request."""
base64_byte_str = base64.b64encode(blob.as_bytes()).decode()
payload: EmbaasDocumentExtractionPayload = EmbaasDocumentExtractionPayload(
bytes=base64_byte_str,
# Workaround for mypy issue: https://github.com/python/mypy/issues/9408
# type: ignore
**self.params,
)
if blob.mimetype is not None and payload.get("mime_type", None) is None:
payload["mime_type"] = blob.mimetype
return payload
def _handle_request(
self, payload: EmbaasDocumentExtractionPayload
) -> List[Document]:
"""Sends a request to the embaas API and handles the response."""
headers = {
"Authorization": f"Bearer {self.embaas_api_key}",
"Content-Type": "application/json",
}
response = requests.post(self.api_url, headers=headers, json=payload)
response.raise_for_status()
parsed_response = response.json()
return EmbaasBlobLoader._api_response_to_documents(
chunks=parsed_response["data"]["chunks"]
)
def _get_documents(self, blob: Blob) -> Iterator[Document]:
"""Get the documents from the blob."""
payload = self._generate_payload(blob=blob)
try:
documents = self._handle_request(payload=payload)
except requests.exceptions.RequestException as e:
if e.response is None or not e.response.text:
raise ValueError(
f"Error raised by embaas document text extraction API: {e}"
)
parsed_response = e.response.json()
if "message" in parsed_response:
raise ValueError(
f"Validation Error raised by embaas document text extraction API:"
f" {parsed_response['message']}"
)
raise
yield from documents
[docs]class EmbaasLoader(BaseEmbaasLoader, BaseLoader):
"""Embaas's document loader.
To use, you should have the
environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
# Default parsing
from langchain.document_loaders.embaas import EmbaasLoader
loader = EmbaasLoader(file_path="example.mp3")
documents = loader.load()
# Custom api parameters (create embeddings automatically)
from langchain.document_loaders.embaas import EmbaasLoader
loader = EmbaasLoader(
file_path="example.pdf",
params={
"should_embed": True,
"model": "e5-large-v2",
"chunk_size": 256,
"chunk_splitter": "CharacterTextSplitter"
}
)
documents = loader.load()
"""
file_path: str
"""The path to the file to load."""
blob_loader: Optional[EmbaasBlobLoader]
"""The blob loader to use. If not provided, a default one will be created."""
@validator("blob_loader", always=True)
def validate_blob_loader(
cls, v: EmbaasBlobLoader, values: Dict
) -> EmbaasBlobLoader:
return v or EmbaasBlobLoader(
embaas_api_key=values["embaas_api_key"],
api_url=values["api_url"],
params=values["params"],
)
[docs] def lazy_load(self) -> Iterator[Document]:
"""Load the documents from the file path lazily."""
blob = Blob.from_path(path=self.file_path)
assert self.blob_loader is not None
# Should never be None, but mypy doesn't know that.
yield from self.blob_loader.lazy_parse(blob=blob)
[docs] def load(self) -> List[Document]:
return list(self.lazy_load())
[docs] def load_and_split(
self, text_splitter: Optional[TextSplitter] = None
) -> List[Document]:
if self.params.get("should_embed", False):
warnings.warn(
"Embeddings are not supported with load_and_split."
" Use the API splitter to properly generate embeddings."
" For more information see embaas.io docs."
)
return super().load_and_split(text_splitter=text_splitter)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/embaas.html
Source code for langchain.document_loaders.tencent_cos_directory """Loading logic for loading documents from Tencent Cloud COS directory.""" from typing import Any, Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.tencent_cos_file import TencentCOSFileLoader [docs]class TencentCOSDirectoryLoader(BaseLoader): """Loader for Tencent Cloud COS directory.""" [docs] def __init__(self, conf: Any, bucket: str, prefix: str = ""): """Initialize with COS config, bucket and prefix. :param conf(CosConfig): COS config. :param bucket(str): COS bucket. :param prefix(str): prefix. """ self.conf = conf self.bucket = bucket self.prefix = prefix [docs] def load(self) -> List[Document]: return list(self.lazy_load()) [docs] def lazy_load(self) -> Iterator[Document]: """Load documents.""" try: from qcloud_cos import CosS3Client except ImportError: raise ImportError( "Could not import cos-python-sdk-v5 python package. " "Please install it with `pip install cos-python-sdk-v5`." ) client = CosS3Client(self.conf) contents = [] marker = "" while True: response = client.list_objects( Bucket=self.bucket, Prefix=self.prefix, Marker=marker, MaxKeys=1000 ) if "Contents" in response: contents.extend(response["Contents"]) if response["IsTruncated"] == "false": break marker = response["NextMarker"] for content in contents: if content["Key"].endswith("/"):
continue
loader = TencentCOSFileLoader(self.conf, self.bucket, content["Key"])
yield loader.load()[0]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tencent_cos_directory.html
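The directory loader takes the same CosConfig as the single-file loader above; a sketch with placeholder values, where prefix narrows the object listing:

from qcloud_cos import CosConfig
from langchain.document_loaders.tencent_cos_directory import TencentCOSDirectoryLoader

# Placeholder region, credentials, bucket, and prefix.
conf = CosConfig(Region="ap-guangzhou", SecretId="<SECRET_ID>", SecretKey="<SECRET_KEY>")
loader = TencentCOSDirectoryLoader(conf=conf, bucket="my-bucket-1250000000", prefix="docs/")
docs = loader.load()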
Source code for langchain.document_loaders.iugu """Loader that fetches data from IUGU""" import json import urllib.request from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_dict IUGU_ENDPOINTS = { "invoices": "https://api.iugu.com/v1/invoices", "customers": "https://api.iugu.com/v1/customers", "charges": "https://api.iugu.com/v1/charges", "subscriptions": "https://api.iugu.com/v1/subscriptions", "plans": "https://api.iugu.com/v1/plans", } [docs]class IuguLoader(BaseLoader): """Loader that fetches data from IUGU.""" [docs] def __init__(self, resource: str, api_token: Optional[str] = None) -> None: """Initialize the IUGU resource. Args: resource: The name of the resource to fetch. api_token: The IUGU API token to use. """ self.resource = resource api_token = api_token or get_from_env("api_token", "IUGU_API_TOKEN") self.headers = {"Authorization": f"Bearer {api_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)] def _get_resource(self) -> List[Document]:
endpoint = IUGU_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
[docs] def load(self) -> List[Document]:
return self._get_resource()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/iugu.html
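A usage sketch; the resource must be one of the IUGU_ENDPOINTS keys above, and the token (placeholder here) may instead come from the IUGU_API_TOKEN environment variable.

from langchain.document_loaders.iugu import IuguLoader

loader = IuguLoader("invoices", api_token="<IUGU_API_TOKEN>")
docs = loader.load()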
Source code for langchain.document_loaders.gutenberg """Loads .txt web files.""" from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class GutenbergLoader(BaseLoader): """Loader that uses urllib to load .txt web files.""" [docs] def __init__(self, file_path: str): """Initialize with a file path.""" if not file_path.startswith("https://www.gutenberg.org"): raise ValueError("file path must start with 'https://www.gutenberg.org'") if not file_path.endswith(".txt"): raise ValueError("file path must end with '.txt'") self.file_path = file_path [docs] def load(self) -> List[Document]: """Load file.""" from urllib.request import urlopen elements = urlopen(self.file_path) text = "\n\n".join([str(el.decode("utf-8-sig")) for el in elements]) metadata = {"source": self.file_path} return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gutenberg.html
68026ccf838e-0
Source code for langchain.document_loaders.gcs_file

"""Load documents from a GCS file."""
import os
import tempfile
from typing import Callable, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


[docs]class GCSFileLoader(BaseLoader):
    """Load Documents from a GCS file."""

[docs]    def __init__(
        self,
        project_name: str,
        bucket: str,
        blob: str,
        loader_func: Optional[Callable[[str], BaseLoader]] = None,
    ):
        """Initialize with project name, bucket and blob name.

        Args:
            project_name: The name of the project to load.
            bucket: The name of the GCS bucket.
            blob: The name of the GCS blob to load.
            loader_func: A loader function that instantiates a loader based on a
                file_path argument. If nothing is provided, the
                UnstructuredFileLoader is used.

        Examples:
            To use an alternative PDF loader:
            >> from langchain.document_loaders import PyPDFLoader
            >> loader = GCSFileLoader(..., loader_func=PyPDFLoader)

            To use UnstructuredFileLoader with additional arguments:
            >> loader = GCSFileLoader(...,
            >>     loader_func=lambda x: UnstructuredFileLoader(x, mode="elements"))
        """
        self.bucket = bucket
        self.blob = blob
        self.project_name = project_name

        def default_loader_func(file_path: str) -> BaseLoader:
            return UnstructuredFileLoader(file_path)

        self._loader_func = loader_func if loader_func else default_loader_func

[docs]    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from google.cloud import storage
        except ImportError:
            raise ImportError(
                "Could not import google-cloud-storage python package. "
                "Please install it with `pip install google-cloud-storage`."
            )

        # Initialise a client
        storage_client = storage.Client(self.project_name)
        # Create a bucket object for our bucket
        bucket = storage_client.get_bucket(self.bucket)
        # Create a blob object from the filepath
        blob = bucket.blob(self.blob)
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.blob}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            # Download the file to a destination
            blob.download_to_filename(file_path)
            loader = self._loader_func(file_path)
            docs = loader.load()
            for doc in docs:
                if "source" in doc.metadata:
                    doc.metadata["source"] = f"gs://{self.bucket}/{self.blob}"
            return docs
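A minimal usage sketch (not part of the library source); it assumes google-cloud-storage is installed and the caller has credentials for the placeholder project and bucket below:

    from langchain.document_loaders.gcs_file import GCSFileLoader

    loader = GCSFileLoader(
        project_name="my-gcp-project",   # placeholder project
        bucket="my-bucket",              # placeholder bucket
        blob="reports/summary.pdf",      # placeholder object path
    )
    docs = loader.load()  # metadata["source"] is rewritten to the gs:// URI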
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gcs_file.html
80472d4de2f3-0
Source code for langchain.document_loaders.brave_search

from typing import Iterator, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.brave_search import BraveSearchWrapper


[docs]class BraveSearchLoader(BaseLoader):
    """Loads a query result from the Brave Search engine into a list of Documents."""

[docs]    def __init__(self, query: str, api_key: str, search_kwargs: Optional[dict] = None):
        """Initializes the BraveSearchLoader.

        Args:
            query: The query to search for.
            api_key: The API key to use.
            search_kwargs: The search kwargs to use.
        """
        self.query = query
        self.api_key = api_key
        self.search_kwargs = search_kwargs or {}

[docs]    def load(self) -> List[Document]:
        brave_client = BraveSearchWrapper(
            api_key=self.api_key,
            search_kwargs=self.search_kwargs,
        )
        return brave_client.download_documents(self.query)

[docs]    def lazy_load(self) -> Iterator[Document]:
        for doc in self.load():
            yield doc
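A minimal usage sketch (not part of the library source); the API key is a placeholder, and "count" is assumed here to be a valid Brave Search API parameter passed through via search_kwargs:

    from langchain.document_loaders.brave_search import BraveSearchLoader

    loader = BraveSearchLoader(
        query="langchain document loaders",
        api_key="YOUR_BRAVE_API_KEY",  # placeholder credential
        search_kwargs={"count": 3},    # assumed Brave Search parameter
    )
    docs = loader.load()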
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/brave_search.html
3c065acc05c3-0
Source code for langchain.document_loaders.mhtml

"""Load MHTML files, enriching metadata with page title."""
import email
import logging
from typing import Dict, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


[docs]class MHTMLLoader(BaseLoader):
    """Loader that uses BeautifulSoup to parse MHTML files."""

[docs]    def __init__(
        self,
        file_path: str,
        open_encoding: Union[str, None] = None,
        bs_kwargs: Union[dict, None] = None,
        get_text_separator: str = "",
    ) -> None:
        """Initialise with a file path and, optionally, the file encoding
        and any kwargs to pass to the BeautifulSoup object.

        Args:
            file_path: Path to the file to load.
            open_encoding: The encoding to use when opening the file.
            bs_kwargs: Any kwargs to pass to the BeautifulSoup object.
            get_text_separator: The separator to use when getting the text
                from the soup.
        """
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ImportError(
                "beautifulsoup4 package not found, please install it with "
                "`pip install beautifulsoup4`"
            )

        self.file_path = file_path
        self.open_encoding = open_encoding
        if bs_kwargs is None:
            bs_kwargs = {"features": "lxml"}
        self.bs_kwargs = bs_kwargs
        self.get_text_separator = get_text_separator

[docs]    def load(self) -> List[Document]:
        """Load MHTML document into document objects."""
        from bs4 import BeautifulSoup

        with open(self.file_path, "r", encoding=self.open_encoding) as f:
            message = email.message_from_string(f.read())
            parts = message.get_payload()

            if not isinstance(parts, list):
                parts = [message]

            # Only the first text/html part is parsed; other parts are skipped.
            for part in parts:
                if part.get_content_type() == "text/html":
                    html = part.get_payload(decode=True).decode()

                    soup = BeautifulSoup(html, **self.bs_kwargs)
                    text = soup.get_text(self.get_text_separator)

                    if soup.title:
                        title = str(soup.title.string)
                    else:
                        title = ""

                    metadata: Dict[str, Union[str, None]] = {
                        "source": self.file_path,
                        "title": title,
                    }
                    return [Document(page_content=text, metadata=metadata)]
        return []
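A minimal usage sketch (not part of the library source); the file path is a placeholder for a page saved in MHTML format:

    from langchain.document_loaders.mhtml import MHTMLLoader

    loader = MHTMLLoader("saved_page.mhtml")  # placeholder path
    docs = loader.load()
    if docs:
        print(docs[0].metadata["title"])  # title taken from the embedded HTML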
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mhtml.html
3c23554713b7-0
Source code for langchain.document_loaders.srt

"""Loader for .srt (subtitle) files."""
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


[docs]class SRTLoader(BaseLoader):
    """Loader for .srt (subtitle) files."""

[docs]    def __init__(self, file_path: str):
        """Initialize with a file path."""
        try:
            import pysrt  # noqa:F401
        except ImportError:
            raise ImportError(
                "package `pysrt` not found, please install it with `pip install pysrt`"
            )
        self.file_path = file_path

[docs]    def load(self) -> List[Document]:
        """Load subtitles using pysrt."""
        import pysrt

        parsed_info = pysrt.open(self.file_path)
        text = " ".join([t.text for t in parsed_info])
        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
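A minimal usage sketch (not part of the library source); requires `pip install pysrt`, and the file path is a placeholder:

    from langchain.document_loaders.srt import SRTLoader

    loader = SRTLoader("movie_subtitles.srt")  # placeholder path
    docs = loader.load()  # one Document with all subtitle text joined by spaces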
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/srt.html