        data = res.json()

        text = "\n".join(
            [
                data[key]
                for key in ["title", "description", "contents_raw"]
                if key in data
            ]
        ).strip()

        metadata = {"source": self.web_path, "title": data["title"]}
        documents.append(Document(page_content=text, metadata=metadata))

        if include_guides:
            """Load and return documents for each guide linked to from the device"""
            guide_urls = [guide["url"] for guide in data["guides"]]
            for guide_url in guide_urls:
                documents.append(IFixitLoader(guide_url).load()[0])

        return documents

    def load_guide(self, url_override: Optional[str] = None) -> List[Document]:
        if url_override is None:
            url = IFIXIT_BASE_URL + "/guides/" + self.id
        else:
            url = url_override

        res = requests.get(url)

        if res.status_code != 200:
            raise ValueError(
                "Could not load guide: " + self.web_path + "\n" + res.json()
            )

        data = res.json()

        doc_parts = ["# " + data["title"], data["introduction_raw"]]

        doc_parts.append("\n\n###Tools Required:")
        if len(data["tools"]) == 0:
            doc_parts.append("\n - None")
        else:
            for tool in data["tools"]:
                doc_parts.append("\n - " + tool["text"])

        doc_parts.append("\n\n###Parts Required:")
        if len(data["parts"]) == 0:
            doc_parts.append("\n - None")
        else:
            for part in data["parts"]:
                doc_parts.append("\n - " + part["text"])

        for row in data["steps"]:
            doc_parts.append(
                "\n\n## "
                + (
                    row["title"]
                    if row["title"] != ""
                    else "Step {}".format(row["orderby"])
                )
            )
            for line in row["lines"]:
                doc_parts.append(line["text_raw"])

        doc_parts.append(data["conclusion_raw"])

        text = "\n".join(doc_parts)

        metadata = {"source": self.web_path, "title": data["title"]}

        return [Document(page_content=text, metadata=metadata)]

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/ifixit.html
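A minimal usage sketch for the loader above; the guide URL is a placeholder, not taken from the source:

from langchain.document_loaders import IFixitLoader

# Hypothetical iFixit guide URL; device and answers URLs are also accepted by the loader
loader = IFixitLoader("https://www.ifixit.com/Guide/Example+Repair/12345")
docs = loader.load()  # Documents carry "source" and "title" metadata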
Source code for langchain.document_loaders.url

"""Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class UnstructuredURLLoader(BaseLoader):
    """Loader that uses unstructured to load HTML files."""

    def __init__(
        self,
        urls: List[str],
        continue_on_failure: bool = True,
        mode: str = "single",
        show_progress_bar: bool = False,
        **unstructured_kwargs: Any,
    ):
        """Initialize with file path."""
        try:
            import unstructured  # noqa:F401
            from unstructured.__version__ import __version__ as __unstructured_version__

            self.__version = __unstructured_version__
        except ImportError:
            raise ValueError(
                "unstructured package not found, please install it with "
                "`pip install unstructured`"
            )
        self._validate_mode(mode)
        self.mode = mode

        headers = unstructured_kwargs.pop("headers", {})
        if len(headers.keys()) != 0:
            warn_about_headers = False
            if self.__is_non_html_available():
                warn_about_headers = not self.__is_headers_available_for_non_html()
            else:
                warn_about_headers = not self.__is_headers_available_for_html()

            if warn_about_headers:
                logger.warning(
                    "You are using an old version of unstructured. "
                    "The headers parameter is ignored"
                )

        self.urls = urls
        self.continue_on_failure = continue_on_failure
        self.headers = headers
        self.unstructured_kwargs = unstructured_kwargs
        self.show_progress_bar = show_progress_bar

    def _validate_mode(self, mode: str) -> None:
        _valid_modes = {"single", "elements"}
        if mode not in _valid_modes:
            raise ValueError(
                f"Got {mode} for `mode`, but should be one of `{_valid_modes}`"
            )

    def __is_headers_available_for_html(self) -> bool:
        _unstructured_version = self.__version.split("-")[0]
        unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
        return unstructured_version >= (0, 5, 7)

    def __is_headers_available_for_non_html(self) -> bool:
        _unstructured_version = self.__version.split("-")[0]
        unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
        return unstructured_version >= (0, 5, 13)

    def __is_non_html_available(self) -> bool:
        _unstructured_version = self.__version.split("-")[0]
        unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")])
        return unstructured_version >= (0, 5, 12)

    def load(self) -> List[Document]:
        """Load file."""
        from unstructured.partition.auto import partition
        from unstructured.partition.html import partition_html

        docs: List[Document] = list()
        if self.show_progress_bar:
            try:
                from tqdm import tqdm
            except ImportError as e:
                raise ImportError(
                    "Package tqdm must be installed if show_progress_bar=True. "
                    "Please install with 'pip install tqdm' or set "
                    "show_progress_bar=False."
                ) from e

            urls = tqdm(self.urls)
        else:
            urls = self.urls

        for url in urls:
            try:
                if self.__is_non_html_available():
                    if self.__is_headers_available_for_non_html():
                        elements = partition(
                            url=url, headers=self.headers, **self.unstructured_kwargs
                        )
                    else:
                        elements = partition(url=url, **self.unstructured_kwargs)
                else:
                    if self.__is_headers_available_for_html():
                        elements = partition_html(
                            url=url, headers=self.headers, **self.unstructured_kwargs
                        )
                    else:
                        elements = partition_html(url=url, **self.unstructured_kwargs)
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                    continue
                else:
                    raise e

            if self.mode == "single":
                text = "\n\n".join([str(el) for el in elements])
                metadata = {"source": url}
                docs.append(Document(page_content=text, metadata=metadata))
            elif self.mode == "elements":
                for element in elements:
                    metadata = element.metadata.to_dict()
                    metadata["category"] = element.category
                    docs.append(Document(page_content=str(element), metadata=metadata))

        return docs

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url.html
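A minimal usage sketch based on the constructor shown above; the URLs are placeholders:

from langchain.document_loaders import UnstructuredURLLoader

# mode="elements" keeps one Document per unstructured element instead of one per page
loader = UnstructuredURLLoader(
    urls=["https://example.com/a.html", "https://example.com/b.html"],
    continue_on_failure=True,
    mode="elements",
)
docs = loader.load()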
Source code for langchain.document_loaders.docugami

"""Loader that loads processed documents from Docugami."""
import io
import logging
import os
import re
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union

import requests
from pydantic import BaseModel, root_validator

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

TD_NAME = "{http://www.w3.org/1999/xhtml}td"
TABLE_NAME = "{http://www.w3.org/1999/xhtml}table"

XPATH_KEY = "xpath"
DOCUMENT_ID_KEY = "id"
DOCUMENT_NAME_KEY = "name"
STRUCTURE_KEY = "structure"
TAG_KEY = "tag"
PROJECTS_KEY = "projects"

DEFAULT_API_ENDPOINT = "https://api.docugami.com/v1preview1"

logger = logging.getLogger(__name__)


class DocugamiLoader(BaseLoader, BaseModel):
    """Loader that loads processed docs from Docugami.

    To use, you should have the ``lxml`` python package installed.
    """

    api: str = DEFAULT_API_ENDPOINT
    access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY")
    docset_id: Optional[str]
    document_ids: Optional[Sequence[str]]
    file_paths: Optional[Sequence[Union[Path, str]]]
    min_chunk_size: int = 32  # appended to the next chunk to avoid over-chunking

    @root_validator
    def validate_local_or_remote(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Validate that either local file paths are given, or remote API docset ID."""
        if values.get("file_paths") and values.get("docset_id"):
            raise ValueError("Cannot specify both file_paths and remote API docset_id")

        if not values.get("file_paths") and not values.get("docset_id"):
            raise ValueError("Must specify either file_paths or remote API docset_id")

        if values.get("docset_id") and not values.get("access_token"):
            raise ValueError("Must specify access token if using remote API docset_id")

        return values

    def _parse_dgml(
        self, document: Mapping, content: bytes, doc_metadata: Optional[Mapping] = None
    ) -> List[Document]:
        """Parse a single DGML document into a list of Documents."""
        try:
            from lxml import etree
        except ImportError:
            raise ImportError(
                "Could not import lxml python package. "
                "Please install it with `pip install lxml`."
            )

        # helpers
        def _xpath_qname_for_chunk(chunk: Any) -> str:
            """Get the xpath qname for a chunk."""
            qname = f"{chunk.prefix}:{chunk.tag.split('}')[-1]}"

            parent = chunk.getparent()
            if parent is not None:
                doppelgangers = [x for x in parent if x.tag == chunk.tag]
                if len(doppelgangers) > 1:
                    idx_of_self = doppelgangers.index(chunk)
                    qname = f"{qname}[{idx_of_self + 1}]"

            return qname

        def _xpath_for_chunk(chunk: Any) -> str:
            """Get the xpath for a chunk."""
            ancestor_chain = chunk.xpath("ancestor-or-self::*")
            return "/" + "/".join(_xpath_qname_for_chunk(x) for x in ancestor_chain)

        def _structure_value(node: Any) -> str:
            """Get the structure value for a node."""
            structure = (
                "table"
                if node.tag == TABLE_NAME
                else node.attrib["structure"]
                if "structure" in node.attrib
                else None
            )
            return structure

        def _is_structural(node: Any) -> bool:
            """Check if a node is structural."""
            return _structure_value(node) is not None

        def _is_heading(node: Any) -> bool:
            """Check if a node is a heading."""
            structure = _structure_value(node)
            return structure is not None and structure.lower().startswith("h")

        def _get_text(node: Any) -> str:
            """Get the text of a node."""
            return " ".join(node.itertext()).strip()

        def _has_structural_descendant(node: Any) -> bool:
            """Check if a node has a structural descendant."""
            for child in node:
                if _is_structural(child) or _has_structural_descendant(child):
                    return True
            return False

        def _leaf_structural_nodes(node: Any) -> List:
            """Get the leaf structural nodes of a node."""
            if _is_structural(node) and not _has_structural_descendant(node):
                return [node]
            else:
                leaf_nodes = []
                for child in node:
                    leaf_nodes.extend(_leaf_structural_nodes(child))
                return leaf_nodes

        def _create_doc(node: Any, text: str) -> Document:
            """Create a Document from a node and text."""
            metadata = {
                XPATH_KEY: _xpath_for_chunk(node),
                DOCUMENT_ID_KEY: document["id"],
                DOCUMENT_NAME_KEY: document["name"],
                STRUCTURE_KEY: node.attrib.get("structure", ""),
                TAG_KEY: re.sub(r"\{.*\}", "", node.tag),
            }

            if doc_metadata:
                metadata.update(doc_metadata)

            return Document(
                page_content=text,
                metadata=metadata,
            )

        # parse the tree and return chunks
        tree = etree.parse(io.BytesIO(content))
        root = tree.getroot()

        chunks: List[Document] = []
        prev_small_chunk_text = None
        for node in _leaf_structural_nodes(root):
            text = _get_text(node)
            if prev_small_chunk_text:
                text = prev_small_chunk_text + " " + text
                prev_small_chunk_text = None

            if _is_heading(node) or len(text) < self.min_chunk_size:
                # Save headings or other small chunks to be appended to the next chunk
                prev_small_chunk_text = text
            else:
                chunks.append(_create_doc(node, text))

        if prev_small_chunk_text and len(chunks) > 0:
            # small chunk at the end left over, just append to last chunk
            chunks[-1].page_content += " " + prev_small_chunk_text

        return chunks

    def _document_details_for_docset_id(self, docset_id: str) -> List[Dict]:
        """Gets all document details for the given docset ID"""
        url = f"{self.api}/docsets/{docset_id}/documents"
        all_documents = []

        while url:
            response = requests.get(
                url,
                headers={"Authorization": f"Bearer {self.access_token}"},
            )
            if response.ok:
                data = response.json()
                all_documents.extend(data["documents"])
                url = data.get("next", None)
            else:
                raise Exception(
                    f"Failed to download {url} (status: {response.status_code})"
                )

        return all_documents

    def _project_details_for_docset_id(self, docset_id: str) -> List[Dict]:
        """Gets all project details for the given docset ID"""
        url = f"{self.api}/projects?docset.id={docset_id}"
        all_projects = []

        while url:
            response = requests.request(
                "GET",
                url,
                headers={"Authorization": f"Bearer {self.access_token}"},
                data={},
            )
            if response.ok:
                data = response.json()
                all_projects.extend(data["projects"])
                url = data.get("next", None)
            else:
                raise Exception(
                    f"Failed to download {url} (status: {response.status_code})"
                )

        return all_projects

    def _metadata_for_project(self, project: Dict) -> Dict:
        """Gets project metadata for all files"""
        project_id = project.get("id")

        url = f"{self.api}/projects/{project_id}/artifacts/latest"
        all_artifacts = []

        while url:
            response = requests.request(
                "GET",
                url,
                headers={"Authorization": f"Bearer {self.access_token}"},
                data={},
            )
            if response.ok:
                data = response.json()
                all_artifacts.extend(data["artifacts"])
                url = data.get("next", None)
            else:
                raise Exception(
                    f"Failed to download {url} (status: {response.status_code})"
                )

        per_file_metadata = {}
        for artifact in all_artifacts:
            artifact_name = artifact.get("name")
            artifact_url = artifact.get("url")
            artifact_doc = artifact.get("document")

            if artifact_name == "report-values.xml" and artifact_url and artifact_doc:
                doc_id = artifact_doc["id"]
                metadata: Dict = {}

                # the evaluated XML for each document is named after the project
                response = requests.request(
                    "GET",
                    f"{artifact_url}/content",
                    headers={"Authorization": f"Bearer {self.access_token}"},
                    data={},
                )

                if response.ok:
                    try:
                        from lxml import etree
                    except ImportError:
                        raise ImportError(
                            "Could not import lxml python package. "
                            "Please install it with `pip install lxml`."
                        )
                    artifact_tree = etree.parse(io.BytesIO(response.content))
                    artifact_root = artifact_tree.getroot()
                    ns = artifact_root.nsmap
                    entries = artifact_root.xpath("//pr:Entry", namespaces=ns)
                    for entry in entries:
                        heading = entry.xpath("./pr:Heading", namespaces=ns)[0].text
                        value = " ".join(
                            entry.xpath("./pr:Value", namespaces=ns)[0].itertext()
                        ).strip()
                        metadata[heading] = value
                    per_file_metadata[doc_id] = metadata
                else:
                    raise Exception(
                        f"Failed to download {artifact_url}/content "
                        f"(status: {response.status_code})"
                    )

        return per_file_metadata

    def _load_chunks_for_document(
        self, docset_id: str, document: Dict, doc_metadata: Optional[Dict] = None
    ) -> List[Document]:
        """Load chunks for a document."""
        document_id = document["id"]
        url = f"{self.api}/docsets/{docset_id}/documents/{document_id}/dgml"

        response = requests.request(
            "GET",
            url,
            headers={"Authorization": f"Bearer {self.access_token}"},
            data={},
        )

        if response.ok:
            return self._parse_dgml(document, response.content, doc_metadata)
        else:
            raise Exception(
                f"Failed to download {url} (status: {response.status_code})"
            )

    def load(self) -> List[Document]:
        """Load documents."""
        chunks: List[Document] = []

        if self.access_token and self.docset_id:
            # remote mode
            _document_details = self._document_details_for_docset_id(self.docset_id)
            if self.document_ids:
                _document_details = [
                    d for d in _document_details if d["id"] in self.document_ids
                ]

            _project_details = self._project_details_for_docset_id(self.docset_id)
            combined_project_metadata = {}
            if _project_details:
                # if there are any projects for this docset, load project metadata
                for project in _project_details:
                    metadata = self._metadata_for_project(project)
                    combined_project_metadata.update(metadata)

            for doc in _document_details:
                doc_metadata = combined_project_metadata.get(doc["id"])
                chunks += self._load_chunks_for_document(
                    self.docset_id, doc, doc_metadata
                )
        elif self.file_paths:
            # local mode (for integration testing, or pre-downloaded XML)
            for path in self.file_paths:
                path = Path(path)
                with open(path, "rb") as file:
                    chunks += self._parse_dgml(
                        {
                            DOCUMENT_ID_KEY: path.name,
                            DOCUMENT_NAME_KEY: path.name,
                        },
                        file.read(),
                    )

        return chunks

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/docugami.html
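A minimal remote-mode usage sketch; the docset ID and token are placeholders, and the token can also come from the DOCUGAMI_API_KEY environment variable as shown above:

from langchain.document_loaders import DocugamiLoader

loader = DocugamiLoader(docset_id="your-docset-id", access_token="your-api-key")
chunks = loader.load()  # each chunk carries xpath, id, name, structure and tag metadata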
Source code for langchain.document_loaders.gitbook

"""Loader that loads GitBook."""
from typing import Any, List, Optional
from urllib.parse import urljoin, urlparse

from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader


class GitbookLoader(WebBaseLoader):
    """Load GitBook data.

    1. load from either a single page, or
    2. load all (relative) paths in the navbar.
    """

    def __init__(
        self,
        web_page: str,
        load_all_paths: bool = False,
        base_url: Optional[str] = None,
        content_selector: str = "main",
    ):
        """Initialize with web page and whether to load all paths.

        Args:
            web_page: The web page to load or the starting point from where
                relative paths are discovered.
            load_all_paths: If set to True, all relative paths in the navbar
                are loaded instead of only `web_page`.
            base_url: If `load_all_paths` is True, the relative paths are
                appended to this base url. Defaults to `web_page` if not set.
        """
        self.base_url = base_url or web_page
        if self.base_url.endswith("/"):
            self.base_url = self.base_url[:-1]
        if load_all_paths:
            # set web_path to the sitemap if we want to crawl all paths
            web_paths = f"{self.base_url}/sitemap.xml"
        else:
            web_paths = web_page
        super().__init__(web_paths)
        self.load_all_paths = load_all_paths
        self.content_selector = content_selector

    def load(self) -> List[Document]:
        """Fetch text from one single GitBook page."""
        if self.load_all_paths:
            soup_info = self.scrape()
            relative_paths = self._get_paths(soup_info)
            documents = []
            for path in relative_paths:
                url = urljoin(self.base_url, path)
                print(f"Fetching text from {url}")
                soup_info = self._scrape(url)
                documents.append(self._get_document(soup_info, url))
            return [d for d in documents if d]
        else:
            soup_info = self.scrape()
            documents = [self._get_document(soup_info, self.web_path)]
            return [d for d in documents if d]

    def _get_document(
        self, soup: Any, custom_url: Optional[str] = None
    ) -> Optional[Document]:
        """Fetch content from page and return Document."""
        page_content_raw = soup.find(self.content_selector)
        if not page_content_raw:
            return None
        content = page_content_raw.get_text(separator="\n").strip()
        title_if_exists = page_content_raw.find("h1")
        title = title_if_exists.text if title_if_exists else ""
        metadata = {"source": custom_url or self.web_path, "title": title}
        return Document(page_content=content, metadata=metadata)

    def _get_paths(self, soup: Any) -> List[str]:
        """Fetch all relative paths in the navbar."""
        return [urlparse(loc.text).path for loc in soup.find_all("loc")]

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gitbook.html
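A minimal usage sketch; the site URL is a placeholder:

from langchain.document_loaders import GitbookLoader

# load_all_paths=True crawls every path listed in the site's sitemap.xml
loader = GitbookLoader("https://docs.example.com", load_all_paths=True)
docs = loader.load()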
Source code for langchain.document_loaders.psychic

"""Loader that loads documents from Psychic.dev."""
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class PsychicLoader(BaseLoader):
    """Loader that loads documents from Psychic.dev."""

    def __init__(
        self, api_key: str, account_id: str, connector_id: Optional[str] = None
    ):
        """Initialize with API key, connector id, and account id."""
        try:
            from psychicapi import ConnectorId, Psychic  # noqa: F401
        except ImportError:
            raise ImportError(
                "`psychicapi` package not found, please run `pip install psychicapi`"
            )
        self.psychic = Psychic(secret_key=api_key)
        self.connector_id = ConnectorId(connector_id)
        self.account_id = account_id

    def load(self) -> List[Document]:
        """Load documents."""
        psychic_docs = self.psychic.get_documents(
            connector_id=self.connector_id, account_id=self.account_id
        )
        return [
            Document(
                page_content=doc["content"],
                metadata={"title": doc["title"], "source": doc["uri"]},
            )
            for doc in psychic_docs.documents
        ]

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/psychic.html
Source code for langchain.document_loaders.web_base

"""Web base loader class."""
import asyncio
import logging
import warnings
from typing import Any, Dict, Iterator, List, Optional, Union

import aiohttp
import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)

default_header_template = {
    "User-Agent": "",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
    ";q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Referer": "https://www.google.com/",
    "DNT": "1",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
}


def _build_metadata(soup: Any, url: str) -> dict:
    """Build metadata from BeautifulSoup output."""
    metadata = {"source": url}
    if title := soup.find("title"):
        metadata["title"] = title.get_text()
    if description := soup.find("meta", attrs={"name": "description"}):
        metadata["description"] = description.get("content", None)
    if html := soup.find("html"):
        metadata["language"] = html.get("lang", None)
    return metadata


class WebBaseLoader(BaseLoader):
    """Loader that uses urllib and beautiful soup to load webpages."""

    web_paths: List[str]

    requests_per_second: int = 2
    """Max number of concurrent requests to make."""

    default_parser: str = "html.parser"
    """Default parser to use for BeautifulSoup."""

    requests_kwargs: Dict[str, Any] = {}
    """kwargs for requests"""

    raise_for_status: bool = False
    """Raise an exception if http status code denotes an error."""

    bs_get_text_kwargs: Dict[str, Any] = {}
    """kwargs for beautifulsoup4 get_text"""

    def __init__(
        self,
        web_path: Union[str, List[str]],
        header_template: Optional[dict] = None,
        verify: Optional[bool] = True,
        proxies: Optional[dict] = None,
    ):
        """Initialize with webpage path."""

        # TODO: Deprecate web_path in favor of web_paths, and remove this
        # left like this because there are a number of loaders that expect single
        # urls
        if isinstance(web_path, str):
            self.web_paths = [web_path]
        elif isinstance(web_path, List):
            self.web_paths = web_path

        self.session = requests.Session()
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ValueError(
                "bs4 package not found, please install it with " "`pip install bs4`"
            )

        # Choose to verify
        self.verify = verify

        headers = header_template or default_header_template
        if not headers.get("User-Agent"):
            try:
                from fake_useragent import UserAgent

                headers["User-Agent"] = UserAgent().random
            except ImportError:
                logger.info(
                    "fake_useragent not found, using default user agent."
                    "To get a realistic header for requests, "
                    "`pip install fake_useragent`."
                )

        self.session.headers = dict(headers)

        if proxies:
            self.session.proxies.update(proxies)

    @property
    def web_path(self) -> str:
        if len(self.web_paths) > 1:
            raise ValueError("Multiple webpaths found.")
        return self.web_paths[0]

    async def _fetch(
        self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
    ) -> str:
        # For SiteMap SSL verification
        if not self.requests_kwargs.get("verify", True):
            connector = aiohttp.TCPConnector(ssl=False)
        else:
            connector = None

        async with aiohttp.ClientSession(connector=connector) as session:
            for i in range(retries):
                try:
                    async with session.get(
                        url, headers=self.session.headers, verify=self.verify
                    ) as response:
                        return await response.text()
                except aiohttp.ClientConnectionError as e:
                    if i == retries - 1:
                        raise
                    else:
                        logger.warning(
                            f"Error fetching {url} with attempt "
                            f"{i + 1}/{retries}: {e}. Retrying..."
                        )
                        await asyncio.sleep(cooldown * backoff**i)
        raise ValueError("retry count exceeded")

    async def _fetch_with_rate_limit(
        self, url: str, semaphore: asyncio.Semaphore
    ) -> str:
        async with semaphore:
            return await self._fetch(url)

    async def fetch_all(self, urls: List[str]) -> Any:
        """Fetch all urls concurrently with rate limiting."""
        semaphore = asyncio.Semaphore(self.requests_per_second)
        tasks = []
        for url in urls:
            task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
            tasks.append(task)
        try:
            from tqdm.asyncio import tqdm_asyncio

            return await tqdm_asyncio.gather(
                *tasks, desc="Fetching pages", ascii=True, mininterval=1
            )
        except ImportError:
            warnings.warn("For better logging of progress, `pip install tqdm`")
            return await asyncio.gather(*tasks)

    @staticmethod
    def _check_parser(parser: str) -> None:
        """Check that parser is valid for bs4."""
        valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"]
        if parser not in valid_parsers:
            raise ValueError(
                "`parser` must be one of " + ", ".join(valid_parsers) + "."
            )

    def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]:
        """Fetch all urls, then return soups for all results."""
        from bs4 import BeautifulSoup

        results = asyncio.run(self.fetch_all(urls))
        final_results = []
        for i, result in enumerate(results):
            url = urls[i]
            if parser is None:
                if url.endswith(".xml"):
                    parser = "xml"
                else:
                    parser = self.default_parser
                self._check_parser(parser)
            final_results.append(BeautifulSoup(result, parser))

        return final_results

    def _scrape(self, url: str, parser: Union[str, None] = None) -> Any:
        from bs4 import BeautifulSoup

        if parser is None:
            if url.endswith(".xml"):
                parser = "xml"
            else:
                parser = self.default_parser

        self._check_parser(parser)

        html_doc = self.session.get(url, verify=self.verify, **self.requests_kwargs)
        if self.raise_for_status:
            html_doc.raise_for_status()
        html_doc.encoding = html_doc.apparent_encoding
        return BeautifulSoup(html_doc.text, parser)

    def scrape(self, parser: Union[str, None] = None) -> Any:
        """Scrape data from webpage and return it in BeautifulSoup format."""
        if parser is None:
            parser = self.default_parser

        return self._scrape(self.web_path, parser)

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load text from the url(s) in web_path."""
        for path in self.web_paths:
            soup = self._scrape(path)
            text = soup.get_text(**self.bs_get_text_kwargs)
            metadata = _build_metadata(soup, path)
            yield Document(page_content=text, metadata=metadata)

    def load(self) -> List[Document]:
        """Load text from the url(s) in web_path."""
        return list(self.lazy_load())

    def aload(self) -> List[Document]:
        """Load text from the urls in web_path async into Documents."""
        results = self.scrape_all(self.web_paths)
        docs = []
        for i in range(len(results)):
            soup = results[i]
            text = soup.get_text(**self.bs_get_text_kwargs)
            metadata = _build_metadata(soup, self.web_paths[i])
            docs.append(Document(page_content=text, metadata=metadata))

        return docs

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/web_base.html
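A minimal usage sketch; the URLs are placeholders:

from langchain.document_loaders import WebBaseLoader

loader = WebBaseLoader(["https://example.com", "https://example.org"])
docs = loader.load()     # sequential fetching via requests
# docs = loader.aload()  # concurrent fetching via aiohttp with rate limiting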
Source code for langchain.document_loaders.conllu

"""Load CoNLL-U files."""
import csv
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class CoNLLULoader(BaseLoader):
    """Load CoNLL-U files."""

    def __init__(self, file_path: str):
        """Initialize with file path."""
        self.file_path = file_path

    def load(self) -> List[Document]:
        """Load from file path."""
        with open(self.file_path, encoding="utf8") as f:
            tsv = list(csv.reader(f, delimiter="\t"))

            # If len(line) > 1, the line is not a comment
            lines = [line for line in tsv if len(line) > 1]

        text = ""
        for i, line in enumerate(lines):
            # Do not add a space after a punctuation mark or at the end of the sentence
            if line[9] == "SpaceAfter=No" or i == len(lines) - 1:
                text += line[1]
            else:
                text += line[1] + " "

        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/conllu.html
Source code for langchain.document_loaders.slack_directory

"""Loader for documents from a Slack export."""
import json
import zipfile
from pathlib import Path
from typing import Dict, List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class SlackDirectoryLoader(BaseLoader):
    """Loader for loading documents from a Slack directory dump."""

    def __init__(self, zip_path: str, workspace_url: Optional[str] = None):
        """Initialize the SlackDirectoryLoader.

        Args:
            zip_path (str): The path to the Slack directory dump zip file.
            workspace_url (Optional[str]): The Slack workspace URL.
                Including the URL will turn sources into links. Defaults to None.
        """
        self.zip_path = Path(zip_path)
        self.workspace_url = workspace_url
        self.channel_id_map = self._get_channel_id_map(self.zip_path)

    @staticmethod
    def _get_channel_id_map(zip_path: Path) -> Dict[str, str]:
        """Get a dictionary mapping channel names to their respective IDs."""
        with zipfile.ZipFile(zip_path, "r") as zip_file:
            try:
                with zip_file.open("channels.json", "r") as f:
                    channels = json.load(f)
                return {channel["name"]: channel["id"] for channel in channels}
            except KeyError:
                return {}

    def load(self) -> List[Document]:
        """Load and return documents from the Slack directory dump."""
        docs = []
        with zipfile.ZipFile(self.zip_path, "r") as zip_file:
            for channel_path in zip_file.namelist():
                channel_name = Path(channel_path).parent.name
                if not channel_name:
                    continue
                if channel_path.endswith(".json"):
                    messages = self._read_json(zip_file, channel_path)
                    for message in messages:
                        document = self._convert_message_to_document(
                            message, channel_name
                        )
                        docs.append(document)
        return docs

    def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
        """Read JSON data from a zip subfile."""
        with zip_file.open(file_path, "r") as f:
            data = json.load(f)
        return data

    def _convert_message_to_document(
        self, message: dict, channel_name: str
    ) -> Document:
        """
        Convert a message to a Document object.

        Args:
            message (dict): A message in the form of a dictionary.
            channel_name (str): The name of the channel the message belongs to.

        Returns:
            Document: A Document object representing the message.
        """
        text = message.get("text", "")
        metadata = self._get_message_metadata(message, channel_name)
        return Document(
            page_content=text,
            metadata=metadata,
        )

    def _get_message_metadata(self, message: dict, channel_name: str) -> dict:
        """Create and return metadata for a given message and channel."""
        timestamp = message.get("ts", "")
        user = message.get("user", "")
        source = self._get_message_source(channel_name, user, timestamp)
        return {
            "source": source,
            "channel": channel_name,
            "timestamp": timestamp,
            "user": user,
        }

    def _get_message_source(self, channel_name: str, user: str, timestamp: str) -> str:
        """
        Get the message source as a string.

        Args:
            channel_name (str): The name of the channel the message belongs to.
            user (str): The user ID who sent the message.
            timestamp (str): The timestamp of the message.

        Returns:
            str: The message source.
        """
        if self.workspace_url:
            channel_id = self.channel_id_map.get(channel_name, "")
            return (
                f"{self.workspace_url}/archives/{channel_id}"
                + f"/p{timestamp.replace('.', '')}"
            )
        else:
            return f"{channel_name} - {user} - {timestamp}"

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/slack_directory.html
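A minimal usage sketch; the zip path and workspace URL are placeholders:

from langchain.document_loaders import SlackDirectoryLoader

loader = SlackDirectoryLoader("slack_export.zip", workspace_url="https://my-team.slack.com")
docs = loader.load()  # one Document per message, with channel/timestamp/user metadata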
Source code for langchain.document_loaders.base

"""Abstract interface for document loader implementations."""
from abc import ABC, abstractmethod
from typing import Iterator, List, Optional

from langchain.document_loaders.blob_loaders import Blob
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter


class BaseLoader(ABC):
    """Interface for loading documents.

    Implementations should implement the lazy-loading method using generators
    to avoid loading all documents into memory at once.

    The `load` method will remain as is for backwards compatibility, but its
    implementation should be just `list(self.lazy_load())`.
    """

    # Sub-classes should implement this method
    # as return list(self.lazy_load()).
    # This method returns a List which is materialized in memory.
    @abstractmethod
    def load(self) -> List[Document]:
        """Load data into document objects."""

    def load_and_split(
        self, text_splitter: Optional[TextSplitter] = None
    ) -> List[Document]:
        """Load documents and split into chunks."""
        if text_splitter is None:
            _text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
        else:
            _text_splitter = text_splitter
        docs = self.load()
        return _text_splitter.split_documents(docs)

    # Attention: This method will be upgraded into an abstractmethod once it's
    # implemented in all the existing subclasses.
    def lazy_load(
        self,
    ) -> Iterator[Document]:
        """A lazy loader for document content."""
        raise NotImplementedError(
            f"{self.__class__.__name__} does not implement lazy_load()"
        )


class BaseBlobParser(ABC):
    """Abstract interface for blob parsers.

    A blob parser provides a way to parse raw data stored in a blob into one
    or more documents.

    The parser can be composed with blob loaders, making it easy to re-use
    a parser independent of how the blob was originally loaded.
    """

    @abstractmethod
    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        """Lazy parsing interface.

        Subclasses are required to implement this method.

        Args:
            blob: Blob instance

        Returns:
            Generator of documents
        """

    def parse(self, blob: Blob) -> List[Document]:
        """Eagerly parse the blob into a document or documents.

        This is a convenience method for interactive development environment.

        Production applications should favor the lazy_parse method instead.

        Subclasses should generally not over-ride this parse method.

        Args:
            blob: Blob instance

        Returns:
            List of documents
        """
        return list(self.lazy_parse(blob))

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/base.html
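A toy sketch of the BaseLoader contract described above: a hypothetical loader over in-memory strings that implements lazy_load and defines load as list(self.lazy_load()):

from typing import Iterator, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class InMemoryLoader(BaseLoader):
    """Hypothetical loader over a list of strings, for illustration only."""

    def __init__(self, texts: List[str]):
        self.texts = texts

    def lazy_load(self) -> Iterator[Document]:
        # Yield one Document at a time instead of materializing everything
        for i, text in enumerate(self.texts):
            yield Document(page_content=text, metadata={"source": f"memory-{i}"})

    def load(self) -> List[Document]:
        return list(self.lazy_load())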
Source code for langchain.document_loaders.whatsapp_chat

import re
from pathlib import Path
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(date: str, sender: str, text: str) -> str:
    """Combine message information in a readable format ready to be used."""
    return f"{sender} on {date}: {text}\n\n"


class WhatsAppChatLoader(BaseLoader):
    """Loader that loads WhatsApp messages text file."""

    def __init__(self, path: str):
        """Initialize with path."""
        self.file_path = path

    def load(self) -> List[Document]:
        """Load documents."""
        p = Path(self.file_path)
        text_content = ""

        with open(p, encoding="utf8") as f:
            lines = f.readlines()

        message_line_regex = r"""
            \[?
            (
                \d{1,4}
                [\/.]
                \d{1,2}
                [\/.]
                \d{1,4}
                ,\s
                \d{1,2}
                :\d{2}
                (?:
                    :\d{2}
                )?
                (?:[\s_](?:AM|PM))?
            )
            \]?
            [\s-]*
            ([~\w\s]+)
            [:]+
            \s
            (.+)
        """
        ignore_lines = ["This message was deleted", "<Media omitted>"]
        for line in lines:
            result = re.match(
                message_line_regex, line.strip(), flags=re.VERBOSE | re.IGNORECASE
            )
            if result:
                date, sender, text = result.groups()
                if text not in ignore_lines:
                    text_content += concatenate_rows(date, sender, text)

        metadata = {"source": str(p)}

        return [Document(page_content=text_content, metadata=metadata)]

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/whatsapp_chat.html
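A minimal usage sketch; the export file name is a placeholder:

from langchain.document_loaders import WhatsAppChatLoader

docs = WhatsAppChatLoader("whatsapp_chat.txt").load()  # single Document with all matched messages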
Source code for langchain.document_loaders.html_bs

"""Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Dict, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class BSHTMLLoader(BaseLoader):
    """Loader that uses beautiful soup to parse HTML files."""

    def __init__(
        self,
        file_path: str,
        open_encoding: Union[str, None] = None,
        bs_kwargs: Union[dict, None] = None,
        get_text_separator: str = "",
    ) -> None:
        """Initialise with path, and optionally, file encoding to use, and any kwargs
        to pass to the BeautifulSoup object."""
        try:
            import bs4  # noqa:F401
        except ImportError:
            raise ValueError(
                "beautifulsoup4 package not found, please install it with "
                "`pip install beautifulsoup4`"
            )

        self.file_path = file_path
        self.open_encoding = open_encoding
        if bs_kwargs is None:
            bs_kwargs = {"features": "lxml"}
        self.bs_kwargs = bs_kwargs
        self.get_text_separator = get_text_separator

    def load(self) -> List[Document]:
        from bs4 import BeautifulSoup

        """Load HTML document into document objects."""
        with open(self.file_path, "r", encoding=self.open_encoding) as f:
            soup = BeautifulSoup(f, **self.bs_kwargs)

        text = soup.get_text(self.get_text_separator)

        if soup.title:
            title = str(soup.title.string)
        else:
            title = ""

        metadata: Dict[str, Union[str, None]] = {
            "source": self.file_path,
            "title": title,
        }
        return [Document(page_content=text, metadata=metadata)]

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/html_bs.html
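A minimal usage sketch; the file name is a placeholder:

from langchain.document_loaders import BSHTMLLoader

loader = BSHTMLLoader("page.html", get_text_separator="\n")
docs = loader.load()  # single Document; metadata includes the page title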
Source code for langchain.document_loaders.word_document

"""Loader that loads word documents."""
import os
import tempfile
from abc import ABC
from typing import List
from urllib.parse import urlparse

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


class Docx2txtLoader(BaseLoader, ABC):
    """Loads a DOCX with docx2txt and chunks at character level.

    Defaults to check for local file, but if the file is a web path, it will download
    it to a temporary file, and use that, then clean up the temporary file after
    completion
    """

    def __init__(self, file_path: str):
        """Initialize with file path."""
        self.file_path = file_path
        if "~" in self.file_path:
            self.file_path = os.path.expanduser(self.file_path)

        # If the file is a web path, download it to a temporary file, and use that
        if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
            r = requests.get(self.file_path)

            if r.status_code != 200:
                raise ValueError(
                    "Check the url of your file; returned status code %s"
                    % r.status_code
                )

            self.web_path = self.file_path
            self.temp_file = tempfile.NamedTemporaryFile()
            self.temp_file.write(r.content)
            self.file_path = self.temp_file.name
        elif not os.path.isfile(self.file_path):
            raise ValueError("File path %s is not a valid file or url" % self.file_path)

    def __del__(self) -> None:
        if hasattr(self, "temp_file"):
            self.temp_file.close()

    def load(self) -> List[Document]:
        """Load given path as single page."""
        import docx2txt

        return [
            Document(
                page_content=docx2txt.process(self.file_path),
                metadata={"source": self.file_path},
            )
        ]

    @staticmethod
    def _is_valid_url(url: str) -> bool:
        """Check if the url is valid."""
        parsed = urlparse(url)
        return bool(parsed.netloc) and bool(parsed.scheme)


class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load word documents."""

    def _get_elements(self) -> List:
        from unstructured.__version__ import __version__ as __unstructured_version__
        from unstructured.file_utils.filetype import FileType, detect_filetype

        unstructured_version = tuple(
            [int(x) for x in __unstructured_version__.split(".")]
        )

        # NOTE(MthwRobinson) - magic will raise an import error if the libmagic
        # system dependency isn't installed. If it's not installed, we'll just
        # check the file extension
        try:
            import magic  # noqa: F401

            is_doc = detect_filetype(self.file_path) == FileType.DOC
        except ImportError:
            _, extension = os.path.splitext(str(self.file_path))
            is_doc = extension == ".doc"

        if is_doc and unstructured_version < (0, 4, 11):
            raise ValueError(
                f"You are on unstructured version {__unstructured_version__}. "
                "Partitioning .doc files is only supported in unstructured>=0.4.11. "
                "Please upgrade the unstructured package and try again."
            )

        if is_doc:
            from unstructured.partition.doc import partition_doc

            return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
        else:
            from unstructured.partition.docx import partition_docx

            return partition_docx(filename=self.file_path, **self.unstructured_kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/word_document.html
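A minimal usage sketch for the two Word loaders above; the file name is a placeholder:

from langchain.document_loaders import Docx2txtLoader, UnstructuredWordDocumentLoader

docs = Docx2txtLoader("report.docx").load()  # plain-text extraction, one Document
elements = UnstructuredWordDocumentLoader("report.docx", mode="elements").load()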
Source code for langchain.document_loaders.recursive_url_loader

from typing import Iterator, List, Optional, Set
from urllib.parse import urlparse

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class RecursiveUrlLoader(BaseLoader):
    """Loader that loads all child links from a given url."""

    def __init__(self, url: str, exclude_dirs: Optional[str] = None) -> None:
        """Initialize with URL to crawl and any sub-directories to exclude."""
        self.url = url
        self.exclude_dirs = exclude_dirs

    def get_child_links_recursive(
        self, url: str, visited: Optional[Set[str]] = None
    ) -> Set[str]:
        """Recursively get all child links starting with the path of the input URL."""
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            raise ImportError(
                "The BeautifulSoup package is required for the RecursiveUrlLoader."
            )

        # Construct the base and parent URLs
        parsed_url = urlparse(url)
        base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
        parent_url = "/".join(parsed_url.path.split("/")[:-1])
        current_path = parsed_url.path

        # Add a trailing slash if not present
        if not base_url.endswith("/"):
            base_url += "/"
        if not parent_url.endswith("/"):
            parent_url += "/"

        # Exclude the root and parent from list
        visited = set() if visited is None else visited

        # Exclude the links that start with any of the excluded directories
        if self.exclude_dirs and any(
            url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs
        ):
            return visited

        # Get all links that are relative to the root of the website
        response = requests.get(url)
        soup = BeautifulSoup(response.text, "html.parser")
        all_links = [link.get("href") for link in soup.find_all("a")]

        # Extract only the links that are children of the current URL
        child_links = list(
            {
                link
                for link in all_links
                if link and link.startswith(current_path) and link != current_path
            }
        )

        # Get absolute path for all root relative links listed
        absolute_paths = [
            f"{urlparse(base_url).scheme}://{urlparse(base_url).netloc}{link}"
            for link in child_links
        ]

        # Store the visited links and recursively visit the children
        for link in absolute_paths:
            # Check all unvisited links
            if link not in visited:
                visited.add(link)

                # If the link is a directory (w/ children) then visit it
                if link.endswith("/"):
                    visited.update(self.get_child_links_recursive(link, visited))

        return visited

    def lazy_load(self) -> Iterator[Document]:
        from langchain.document_loaders import WebBaseLoader

        """Lazy load web pages."""
        child_links = self.get_child_links_recursive(self.url)
        loader = WebBaseLoader(list(child_links))
        return loader.lazy_load()

    def load(self) -> List[Document]:
        """Load web pages."""
        return list(self.lazy_load())

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
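A minimal usage sketch; the docs site URL is a placeholder, and only links under the starting path are followed:

from langchain.document_loaders import RecursiveUrlLoader

loader = RecursiveUrlLoader(url="https://docs.example.com/en/latest/")
docs = loader.load()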
Source code for langchain.document_loaders.onedrive_file

from __future__ import annotations

import tempfile
from typing import TYPE_CHECKING, List

from pydantic import BaseModel, Field

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader

if TYPE_CHECKING:
    from O365.drive import File

CHUNK_SIZE = 1024 * 1024 * 5


class OneDriveFileLoader(BaseLoader, BaseModel):
    file: File = Field(...)

    class Config:
        arbitrary_types_allowed = True

    def load(self) -> List[Document]:
        """Load Documents"""
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.file.name}"
            self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/onedrive_file.html
Source code for langchain.document_loaders.mediawikidump

"""Load Data from a MediaWiki dump xml."""
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class MWDumpLoader(BaseLoader):
    """
    Load MediaWiki dump from XML file

    Example:
        .. code-block:: python

            from langchain.document_loaders import MWDumpLoader

            loader = MWDumpLoader(
                file_path="myWiki.xml",
                encoding="utf8"
            )
            docs = loader.load()
            from langchain.text_splitter import RecursiveCharacterTextSplitter
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000, chunk_overlap=0
            )
            texts = text_splitter.split_documents(docs)

    :param file_path: XML local file path
    :type file_path: str
    :param encoding: Charset encoding, defaults to "utf8"
    :type encoding: str, optional
    """

    def __init__(self, file_path: str, encoding: Optional[str] = "utf8"):
        """Initialize with file path."""
        self.file_path = file_path
        self.encoding = encoding

    def load(self) -> List[Document]:
        """Load from file path."""
        import mwparserfromhell
        import mwxml

        dump = mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))

        docs = []

        for page in dump.pages:
            for revision in page:
                code = mwparserfromhell.parse(revision.text)
                text = code.strip_code(
                    normalize=True, collapse=True, keep_template_params=False
                )
                metadata = {"source": page.title}
                docs.append(Document(page_content=text, metadata=metadata))

        return docs

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/mediawikidump.html
Source code for langchain.document_loaders.helpers

"""Document loader helpers."""
import concurrent.futures
from typing import List, NamedTuple, Optional, cast


class FileEncoding(NamedTuple):
    encoding: Optional[str]
    confidence: float
    language: Optional[str]


def detect_file_encodings(file_path: str, timeout: int = 5) -> List[FileEncoding]:
    """Try to detect the file encoding.

    Returns a list of `FileEncoding` tuples with the detected encodings ordered
    by confidence.
    """
    import chardet

    def read_and_detect(file_path: str) -> List[dict]:
        with open(file_path, "rb") as f:
            rawdata = f.read()
        return cast(List[dict], chardet.detect_all(rawdata))

    with concurrent.futures.ThreadPoolExecutor() as executor:
        future = executor.submit(read_and_detect, file_path)
        try:
            encodings = future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            raise TimeoutError(
                f"Timeout reached while detecting encoding for {file_path}"
            )

    if all(encoding["encoding"] is None for encoding in encodings):
        raise RuntimeError(f"Could not detect encoding for {file_path}")
    return [FileEncoding(**enc) for enc in encodings if enc["encoding"] is not None]

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/helpers.html
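A minimal usage sketch for the helper above; the file path is a placeholder:

from langchain.document_loaders.helpers import detect_file_encodings

encodings = detect_file_encodings("legacy_notes.txt")
best_guess = encodings[0].encoding  # candidates are ordered by confidence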
Source code for langchain.document_loaders.bilibili

import json
import re
import warnings
from typing import List, Tuple

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class BiliBiliLoader(BaseLoader):
    """Loader that loads bilibili transcripts."""

    def __init__(self, video_urls: List[str]):
        """Initialize with bilibili url."""
        self.video_urls = video_urls

    def load(self) -> List[Document]:
        """Load from bilibili url."""
        results = []
        for url in self.video_urls:
            transcript, video_info = self._get_bilibili_subs_and_info(url)
            doc = Document(page_content=transcript, metadata=video_info)
            results.append(doc)

        return results

    def _get_bilibili_subs_and_info(self, url: str) -> Tuple[str, dict]:
        try:
            from bilibili_api import sync, video
        except ImportError:
            raise ValueError(
                "bilibili_api package not found, please install it with "
                "`pip install bilibili-api-python`"
            )

        bvid = re.search(r"BV\w+", url)
        if bvid is not None:
            v = video.Video(bvid=bvid.group())
        else:
            aid = re.search(r"av[0-9]+", url)
            if aid is not None:
                try:
                    v = video.Video(aid=int(aid.group()[2:]))
                except AttributeError:
                    raise ValueError(f"{url} is not bilibili url.")
            else:
                raise ValueError(f"{url} is not bilibili url.")

        video_info = sync(v.get_info())
        video_info.update({"url": url})

        # Get subtitle url
        subtitle = video_info.pop("subtitle")
        sub_list = subtitle["list"]
        if sub_list:
            sub_url = sub_list[0]["subtitle_url"]
            result = requests.get(sub_url)
            raw_sub_titles = json.loads(result.content)["body"]
            raw_transcript = " ".join([c["content"] for c in raw_sub_titles])

            raw_transcript_with_meta_info = (
                f"Video Title: {video_info['title']},"
                f"description: {video_info['desc']}\n\n"
                f"Transcript: {raw_transcript}"
            )
            return raw_transcript_with_meta_info, video_info
        else:
            raw_transcript = ""
            warnings.warn(
                f"""
                No subtitles found for video: {url}.
                Return Empty transcript.
                """
            )
            return raw_transcript, video_info

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/bilibili.html
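A minimal usage sketch; the video URL is a placeholder BV id, not taken from the source:

from langchain.document_loaders import BiliBiliLoader

loader = BiliBiliLoader(["https://www.bilibili.com/video/BVxxxxxxxxxx"])
docs = loader.load()  # one Document per video: transcript content plus video info metadata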
Source code for langchain.document_loaders.xml

"""Loader that loads XML files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


class UnstructuredXMLLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load XML files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.7")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.xml import partition_xml

        return partition_xml(filename=self.file_path, **self.unstructured_kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/xml.html
Source code for langchain.document_loaders.powerpoint

"""Loader that loads powerpoint files."""
import os
from typing import List

from langchain.document_loaders.unstructured import UnstructuredFileLoader


class UnstructuredPowerPointLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load powerpoint files."""

    def _get_elements(self) -> List:
        from unstructured.__version__ import __version__ as __unstructured_version__
        from unstructured.file_utils.filetype import FileType, detect_filetype

        unstructured_version = tuple(
            [int(x) for x in __unstructured_version__.split(".")]
        )

        # NOTE(MthwRobinson) - magic will raise an import error if the libmagic
        # system dependency isn't installed. If it's not installed, we'll just
        # check the file extension
        try:
            import magic  # noqa: F401

            is_ppt = detect_filetype(self.file_path) == FileType.PPT
        except ImportError:
            _, extension = os.path.splitext(str(self.file_path))
            is_ppt = extension == ".ppt"

        if is_ppt and unstructured_version < (0, 4, 11):
            raise ValueError(
                f"You are on unstructured version {__unstructured_version__}. "
                "Partitioning .ppt files is only supported in unstructured>=0.4.11. "
                "Please upgrade the unstructured package and try again."
            )

        if is_ppt:
            from unstructured.partition.ppt import partition_ppt

            return partition_ppt(filename=self.file_path, **self.unstructured_kwargs)
        else:
            from unstructured.partition.pptx import partition_pptx

            return partition_pptx(filename=self.file_path, **self.unstructured_kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/powerpoint.html
Source code for langchain.document_loaders.chatgpt

"""Load conversations from ChatGPT data export"""
import datetime
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(message: dict, title: str) -> str:
    """
    Combine message information in a readable format ready to be used.

    Args:
        message: Message to be concatenated
        title: Title of the conversation

    Returns:
        Concatenated message
    """
    if not message:
        return ""

    sender = message["author"]["role"] if message["author"] else "unknown"
    text = message["content"]["parts"][0]
    date = datetime.datetime.fromtimestamp(message["create_time"]).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    return f"{title} - {sender} on {date}: {text}\n\n"


class ChatGPTLoader(BaseLoader):
    """Loader that loads conversations from exported ChatGPT data."""

    def __init__(self, log_file: str, num_logs: int = -1):
        self.log_file = log_file
        self.num_logs = num_logs

    def load(self) -> List[Document]:
        with open(self.log_file, encoding="utf8") as f:
            data = json.load(f)[: self.num_logs] if self.num_logs else json.load(f)

        documents = []
        for d in data:
            title = d["title"]
            messages = d["mapping"]
            text = "".join(
                [
                    concatenate_rows(messages[key]["message"], title)
                    for idx, key in enumerate(messages)
                    if not (
                        idx == 0
                        and messages[key]["message"]["author"]["role"] == "system"
                    )
                ]
            )
            metadata = {"source": str(self.log_file)}
            documents.append(Document(page_content=text, metadata=metadata))

        return documents

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/chatgpt.html
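A minimal usage sketch; the export path is a placeholder:

from langchain.document_loaders import ChatGPTLoader

loader = ChatGPTLoader(log_file="conversations.json", num_logs=10)
docs = loader.load()  # one Document per exported conversation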
Source code for langchain.document_loaders.odt

"""Loader that loads Open Office ODT files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    validate_unstructured_version,
)


class UnstructuredODTLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load open office ODT files."""

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        validate_unstructured_version(min_unstructured_version="0.6.3")
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.odt import partition_odt

        return partition_odt(filename=self.file_path, **self.unstructured_kwargs)

https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/odt.html
9bc93c2fa5f7-0
Source code for langchain.document_loaders.python import tokenize from langchain.document_loaders.text import TextLoader [docs]class PythonLoader(TextLoader): """ Load Python files, respecting any non-default encoding if specified. """ def __init__(self, file_path: str): with open(file_path, "rb") as f: encoding, _ = tokenize.detect_encoding(f.readline) super().__init__(file_path=file_path, encoding=encoding)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/python.html
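A usage sketch for PythonLoader; the path is a placeholder, and the detected source-file encoding is forwarded to the underlying TextLoader.

from langchain.document_loaders.python import PythonLoader

loader = PythonLoader("scripts/example.py")  # placeholder path
docs = loader.load()
print(docs[0].metadata["source"])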
6acf9601d050-0
Source code for langchain.document_loaders.telegram """Loader that loads Telegram chat json dump.""" from __future__ import annotations import asyncio import json from pathlib import Path from typing import TYPE_CHECKING, Dict, List, Optional, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.text_splitter import RecursiveCharacterTextSplitter if TYPE_CHECKING: import pandas as pd from telethon.hints import EntityLike [docs]def concatenate_rows(row: dict) -> str: """Combine message information in a readable format ready to be used.""" date = row["date"] sender = row["from"] text = row["text"] return f"{sender} on {date}: {text}\n\n" [docs]class TelegramChatFileLoader(BaseLoader): """Loader that loads Telegram chat json directory dump.""" def __init__(self, path: str): """Initialize with path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) text = "".join( concatenate_rows(message) for message in d["messages"] if message["type"] == "message" and isinstance(message["text"], str) ) metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)] [docs]def text_to_docs(text: Union[str, List[str]]) -> List[Document]: """Converts a string or list of strings to a list of Documents with metadata.""" if isinstance(text, str):
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
6acf9601d050-1
if isinstance(text, str): # Take a single string as one page text = [text] page_docs = [Document(page_content=page) for page in text] # Add page numbers as metadata for i, doc in enumerate(page_docs): doc.metadata["page"] = i + 1 # Split pages into chunks doc_chunks = [] for doc in page_docs: text_splitter = RecursiveCharacterTextSplitter( chunk_size=800, separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""], chunk_overlap=20, ) chunks = text_splitter.split_text(doc.page_content) for i, chunk in enumerate(chunks): doc = Document( page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i} ) # Add sources as metadata doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}" doc_chunks.append(doc) return doc_chunks [docs]class TelegramChatApiLoader(BaseLoader): """Loader that loads Telegram chat json directory dump.""" def __init__( self, chat_entity: Optional[EntityLike] = None, api_id: Optional[int] = None, api_hash: Optional[str] = None, username: Optional[str] = None, file_path: str = "telegram_data.json", ): """Initialize with API parameters.""" self.chat_entity = chat_entity self.api_id = api_id self.api_hash = api_hash self.username = username self.file_path = file_path [docs] async def fetch_data_from_telegram(self) -> None:
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
6acf9601d050-2
[docs] async def fetch_data_from_telegram(self) -> None: """Fetch data from Telegram API and save it as a JSON file.""" from telethon.sync import TelegramClient data = [] async with TelegramClient(self.username, self.api_id, self.api_hash) as client: async for message in client.iter_messages(self.chat_entity): is_reply = message.reply_to is not None reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None data.append( { "sender_id": message.sender_id, "text": message.text, "date": message.date.isoformat(), "message.id": message.id, "is_reply": is_reply, "reply_to_id": reply_to_id, } ) with open(self.file_path, "w", encoding="utf-8") as f: json.dump(data, f, ensure_ascii=False, indent=4) def _get_message_threads(self, data: pd.DataFrame) -> dict: """Create a dictionary of message threads from the given data. Args: data (pd.DataFrame): A DataFrame containing the conversation \ data with columns: - message.sender_id - text - date - message.id - is_reply - reply_to_id Returns: dict: A dictionary where the key is the parent message ID and \ the value is a list of message IDs in ascending order. """ def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]: """ Recursively find all replies to a given parent message ID. Args: parent_id (int): The parent message ID.
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
6acf9601d050-3
Args: parent_id (int): The parent message ID. reply_data (pd.DataFrame): A DataFrame containing reply messages. Returns: list: A list of message IDs that are replies to the parent message ID. """ # Find direct replies to the parent message ID direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][ "message.id" ].tolist() # Recursively find replies to the direct replies all_replies = [] for reply_id in direct_replies: all_replies += [reply_id] + find_replies(reply_id, reply_data) return all_replies # Filter out parent messages parent_messages = data[~data["is_reply"]] # Filter out reply messages and drop rows with NaN in 'reply_to_id' reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"]) # Convert 'reply_to_id' to integer reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int) # Create a dictionary of message threads with parent message IDs as keys and \ # lists of reply message IDs as values message_threads = { parent_id: [parent_id] + find_replies(parent_id, reply_messages) for parent_id in parent_messages["message.id"] } return message_threads def _combine_message_texts( self, message_threads: Dict[int, List[int]], data: pd.DataFrame ) -> str: """ Combine the message texts for each parent message ID based \ on the list of message threads. Args: message_threads (dict): A dictionary where the key is the parent message \
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
6acf9601d050-4
message_threads (dict): A dictionary where the key is the parent message \ ID and the value is a list of message IDs in ascending order. data (pd.DataFrame): A DataFrame containing the conversation data: - message.sender_id - text - date - message.id - is_reply - reply_to_id Returns: str: A combined string of message texts sorted by date. """ combined_text = "" # Iterate through sorted parent message IDs for parent_id, message_ids in message_threads.items(): # Get the message texts for the message IDs and sort them by date message_texts = ( data[data["message.id"].isin(message_ids)] .sort_values(by="date")["text"] .tolist() ) message_texts = [str(elem) for elem in message_texts] # Combine the message texts combined_text += " ".join(message_texts) + ".\n" return combined_text.strip() [docs] def load(self) -> List[Document]: """Load documents.""" if self.chat_entity is not None: try: import nest_asyncio nest_asyncio.apply() asyncio.run(self.fetch_data_from_telegram()) except ImportError: raise ImportError( """`nest_asyncio` package not found. please install with `pip install nest_asyncio` """ ) p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) try: import pandas as pd except ImportError: raise ImportError( """`pandas` package not found. please install with `pip install pandas` """
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
6acf9601d050-5
please install with `pip install pandas` """ ) normalized_messages = pd.json_normalize(d) df = pd.DataFrame(normalized_messages) message_threads = self._get_message_threads(df) combined_texts = self._combine_message_texts(message_threads, df) return text_to_docs(combined_texts)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
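A usage sketch for the file-based loader above, assuming a Telegram Desktop JSON export; the file name is a placeholder. The API-based loader additionally needs Telegram API credentials plus the telethon, pandas and nest_asyncio packages.

from langchain.document_loaders.telegram import TelegramChatFileLoader

loader = TelegramChatFileLoader("telegram_export.json")  # placeholder path to the exported JSON
docs = loader.load()  # single Document concatenating all text messages
print(docs[0].metadata)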
53e27423a128-0
Source code for langchain.document_loaders.url_selenium """Loader that uses Selenium to load a page, then uses unstructured to load the html. """ import logging from typing import TYPE_CHECKING, List, Literal, Optional, Union if TYPE_CHECKING: from selenium.webdriver import Chrome, Firefox from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class SeleniumURLLoader(BaseLoader): """Loader that uses Selenium to load a page, then unstructured to load the html. This is useful for loading pages that require javascript to render. Attributes: urls (List[str]): List of URLs to load. continue_on_failure (bool): If True, continue loading other URLs on failure. browser (str): The browser to use, either 'chrome' or 'firefox'. binary_location (Optional[str]): The location of the browser binary. executable_path (Optional[str]): The path to the browser executable. headless (bool): If True, the browser will run in headless mode. arguments (List[str]): List of arguments to pass to the browser. """ def __init__( self, urls: List[str], continue_on_failure: bool = True, browser: Literal["chrome", "firefox"] = "chrome", binary_location: Optional[str] = None, executable_path: Optional[str] = None, headless: bool = True, arguments: List[str] = [], ): """Load a list of URLs using Selenium and unstructured.""" try: import selenium # noqa:F401 except ImportError: raise ImportError( "selenium package not found, please install it with "
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url_selenium.html
53e27423a128-1
raise ImportError( "selenium package not found, please install it with " "`pip install selenium`" ) try: import unstructured # noqa:F401 except ImportError: raise ImportError( "unstructured package not found, please install it with " "`pip install unstructured`" ) self.urls = urls self.continue_on_failure = continue_on_failure self.browser = browser self.binary_location = binary_location self.executable_path = executable_path self.headless = headless self.arguments = arguments def _get_driver(self) -> Union["Chrome", "Firefox"]: """Create and return a WebDriver instance based on the specified browser. Raises: ValueError: If an invalid browser is specified. Returns: Union[Chrome, Firefox]: A WebDriver instance for the specified browser. """ if self.browser.lower() == "chrome": from selenium.webdriver import Chrome from selenium.webdriver.chrome.options import Options as ChromeOptions chrome_options = ChromeOptions() for arg in self.arguments: chrome_options.add_argument(arg) if self.headless: chrome_options.add_argument("--headless") chrome_options.add_argument("--no-sandbox") if self.binary_location is not None: chrome_options.binary_location = self.binary_location if self.executable_path is None: return Chrome(options=chrome_options) return Chrome(executable_path=self.executable_path, options=chrome_options) elif self.browser.lower() == "firefox": from selenium.webdriver import Firefox from selenium.webdriver.firefox.options import Options as FirefoxOptions firefox_options = FirefoxOptions() for arg in self.arguments: firefox_options.add_argument(arg)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url_selenium.html
53e27423a128-2
for arg in self.arguments: firefox_options.add_argument(arg) if self.headless: firefox_options.add_argument("--headless") if self.binary_location is not None: firefox_options.binary_location = self.binary_location if self.executable_path is None: return Firefox(options=firefox_options) return Firefox( executable_path=self.executable_path, options=firefox_options ) else: raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.") [docs] def load(self) -> List[Document]: """Load the specified URLs using Selenium and create Document instances. Returns: List[Document]: A list of Document instances with loaded content. """ from unstructured.partition.html import partition_html docs: List[Document] = list() driver = self._get_driver() for url in self.urls: try: driver.get(url) page_content = driver.page_source elements = partition_html(text=page_content) text = "\n\n".join([str(el) for el in elements]) metadata = {"source": url} docs.append(Document(page_content=text, metadata=metadata)) except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching or processing {url}, exception: {e}") else: raise e driver.quit() return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url_selenium.html
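A usage sketch, assuming selenium, unstructured and a local Chrome installation are available; the URL is only an example.

from langchain.document_loaders.url_selenium import SeleniumURLLoader

loader = SeleniumURLLoader(
    urls=["https://python.langchain.com"],  # example URL
    browser="chrome",
    headless=True,
)
docs = loader.load()
print(docs[0].metadata["source"])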
297e03b416f4-0
Source code for langchain.document_loaders.s3_file """Loading logic for loading documents from an s3 file.""" import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class S3FileLoader(BaseLoader): """Loading logic for loading documents from s3.""" def __init__(self, bucket: str, key: str): """Initialize with bucket and key name.""" self.bucket = bucket self.key = key [docs] def load(self) -> List[Document]: """Load documents.""" try: import boto3 except ImportError: raise ImportError( "Could not import `boto3` python package. " "Please install it with `pip install boto3`." ) s3 = boto3.client("s3") with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.key}" os.makedirs(os.path.dirname(file_path), exist_ok=True) s3.download_file(self.bucket, self.key, file_path) loader = UnstructuredFileLoader(file_path) return loader.load()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/s3_file.html
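A usage sketch, assuming boto3 and unstructured are installed and AWS credentials are configured in the environment; bucket and key names are placeholders.

from langchain.document_loaders.s3_file import S3FileLoader

loader = S3FileLoader(bucket="my-bucket", key="reports/summary.pdf")  # placeholder names
docs = loader.load()  # downloads the object to a temp dir, then parses it with unstructured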
c9fbfedaeec2-0
Source code for langchain.document_loaders.gcs_file """Loading logic for loading documents from a GCS file.""" import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class GCSFileLoader(BaseLoader): """Loading logic for loading documents from GCS.""" def __init__(self, project_name: str, bucket: str, blob: str): """Initialize with bucket and key name.""" self.bucket = bucket self.blob = blob self.project_name = project_name [docs] def load(self) -> List[Document]: """Load documents.""" try: from google.cloud import storage except ImportError: raise ValueError( "Could not import google-cloud-storage python package. " "Please install it with `pip install google-cloud-storage`." ) # Initialise a client storage_client = storage.Client(self.project_name) # Create a bucket object for our bucket bucket = storage_client.get_bucket(self.bucket) # Create a blob object from the filepath blob = bucket.blob(self.blob) with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.blob}" os.makedirs(os.path.dirname(file_path), exist_ok=True) # Download the file to a destination blob.download_to_filename(file_path) loader = UnstructuredFileLoader(file_path) return loader.load()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/gcs_file.html
177b7fc24be5-0
Source code for langchain.document_loaders.tomarkdown """Loader that loads HTML to markdown using 2markdown.""" from __future__ import annotations from typing import Iterator, List import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class ToMarkdownLoader(BaseLoader): """Loader that loads HTML to markdown using 2markdown.""" def __init__(self, url: str, api_key: str): """Initialize with url and api key.""" self.url = url self.api_key = api_key [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazily load the file.""" response = requests.post( "https://2markdown.com/api/2md", headers={"X-Api-Key": self.api_key}, json={"url": self.url}, ) text = response.json()["article"] metadata = {"source": self.url} yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: """Load file.""" return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tomarkdown.html
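A usage sketch; the API key is a placeholder for a real 2markdown key and the URL is only an example.

from langchain.document_loaders.tomarkdown import ToMarkdownLoader

loader = ToMarkdownLoader(url="https://python.langchain.com", api_key="<2markdown-api-key>")
for doc in loader.lazy_load():
    print(doc.metadata["source"], len(doc.page_content))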
2f16b59bc02c-0
Source code for langchain.document_loaders.pyspark_dataframe """Load from a Spark Dataframe object""" import itertools import logging import sys from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Tuple from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__file__) if TYPE_CHECKING: from pyspark.sql import SparkSession [docs]class PySparkDataFrameLoader(BaseLoader): """Load PySpark DataFrames""" def __init__( self, spark_session: Optional["SparkSession"] = None, df: Optional[Any] = None, page_content_column: str = "text", fraction_of_memory: float = 0.1, ): """Initialize with a Spark DataFrame object.""" try: from pyspark.sql import DataFrame, SparkSession except ImportError: raise ImportError( "pyspark is not installed. " "Please install it with `pip install pyspark`" ) self.spark = ( spark_session if spark_session else SparkSession.builder.getOrCreate() ) if not isinstance(df, DataFrame): raise ValueError( f"Expected data_frame to be a PySpark DataFrame, got {type(df)}" ) self.df = df self.page_content_column = page_content_column self.fraction_of_memory = fraction_of_memory self.num_rows, self.max_num_rows = self.get_num_rows() self.rdd_df = self.df.rdd.map(list) self.column_names = self.df.columns [docs] def get_num_rows(self) -> Tuple[int, int]: """Gets the amount of "feasible" rows for the DataFrame""" try:
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/pyspark_dataframe.html
2f16b59bc02c-1
"""Gets the amount of "feasible" rows for the DataFrame""" try: import psutil except ImportError as e: raise ImportError( "psutil not installed. Please install it with `pip install psutil`." ) from e row = self.df.limit(1).collect()[0] estimated_row_size = sys.getsizeof(row) mem_info = psutil.virtual_memory() available_memory = mem_info.available max_num_rows = int( (available_memory / estimated_row_size) * self.fraction_of_memory ) return min(max_num_rows, self.df.count()), max_num_rows [docs] def lazy_load(self) -> Iterator[Document]: """A lazy loader for document content.""" for row in self.rdd_df.toLocalIterator(): metadata = {self.column_names[i]: row[i] for i in range(len(row))} text = metadata[self.page_content_column] metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: """Load from the dataframe.""" if self.df.count() > self.max_num_rows: logger.warning( f"The number of DataFrame rows is {self.df.count()}, " f"but we will only include the amount " f"of rows that can reasonably fit in memory: {self.num_rows}." ) lazy_load_iterator = self.lazy_load() return list(itertools.islice(lazy_load_iterator, self.num_rows))
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/pyspark_dataframe.html
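A usage sketch, assuming pyspark and psutil are installed; the tiny in-memory DataFrame is only illustrative.

from pyspark.sql import SparkSession
from langchain.document_loaders.pyspark_dataframe import PySparkDataFrameLoader

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("some page text", "a.txt"), ("more page text", "b.txt")],
    ["text", "source"],
)
loader = PySparkDataFrameLoader(spark_session=spark, df=df, page_content_column="text")
docs = loader.load()  # remaining columns become Document metadata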
8c428a873dcd-0
Source code for langchain.document_loaders.evernote """Load documents from Evernote. https://gist.github.com/foxmask/7b29c43a161e001ff04afdb2f181e31c """ import hashlib import logging from base64 import b64decode from time import strptime from typing import Any, Dict, Iterator, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class EverNoteLoader(BaseLoader): """EverNote Loader. Loads an EverNote notebook export file e.g. my_notebook.enex into Documents. Instructions on producing this file can be found at https://help.evernote.com/hc/en-us/articles/209005557-Export-notes-and-notebooks-as-ENEX-or-HTML Currently only the plain text in the note is extracted and stored as the contents of the Document, any non content metadata (e.g. 'author', 'created', 'updated' etc. but not 'content-raw' or 'resource') tags on the note will be extracted and stored as metadata on the Document. Args: file_path (str): The path to the notebook export with a .enex extension load_single_document (bool): Whether or not to concatenate the content of all notes into a single long Document. If this is set to True (default) then the only metadata on the document will be the 'source' which contains the file name of the export. """ # noqa: E501 def __init__(self, file_path: str, load_single_document: bool = True): """Initialize with file path.""" self.file_path = file_path self.load_single_document = load_single_document
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html
8c428a873dcd-1
self.file_path = file_path self.load_single_document = load_single_document [docs] def load(self) -> List[Document]: """Load documents from EverNote export file.""" documents = [ Document( page_content=note["content"], metadata={ **{ key: value for key, value in note.items() if key not in ["content", "content-raw", "resource"] }, **{"source": self.file_path}, }, ) for note in self._parse_note_xml(self.file_path) if note.get("content") is not None ] if not self.load_single_document: return documents return [ Document( page_content="".join([document.page_content for document in documents]), metadata={"source": self.file_path}, ) ] @staticmethod def _parse_content(content: str) -> str: try: import html2text return html2text.html2text(content).strip() except ImportError as e: logging.error( "Could not import `html2text`. Although it is not a required package " "to use Langchain, using the EverNote loader requires `html2text`. " "Please install `html2text` via `pip install html2text` and try again." ) raise e @staticmethod def _parse_resource(resource: list) -> dict: rsc_dict: Dict[str, Any] = {} for elem in resource: if elem.tag == "data": # Sometimes elem.text is None rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b""
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html
8c428a873dcd-2
rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest() else: rsc_dict[elem.tag] = elem.text return rsc_dict @staticmethod def _parse_note(note: List, prefix: Optional[str] = None) -> dict: note_dict: Dict[str, Any] = {} resources = [] def add_prefix(element_tag: str) -> str: if prefix is None: return element_tag return f"{prefix}.{element_tag}" for elem in note: if elem.tag == "content": note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text) # A copy of original content note_dict["content-raw"] = elem.text elif elem.tag == "resource": resources.append(EverNoteLoader._parse_resource(elem)) elif elem.tag == "created" or elem.tag == "updated": note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ") elif elem.tag == "note-attributes": additional_attributes = EverNoteLoader._parse_note( elem, elem.tag ) # Recursively enter the note-attributes tag note_dict.update(additional_attributes) else: note_dict[elem.tag] = elem.text if len(resources) > 0: note_dict["resource"] = resources return {add_prefix(key): value for key, value in note_dict.items()} @staticmethod def _parse_note_xml(xml_file: str) -> Iterator[Dict[str, Any]]: """Parse Evernote xml.""" # Without huge_tree set to True, parser may complain about huge text node
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html
8c428a873dcd-3
# Without huge_tree set to True, parser may complain about huge text node # Try to recover, because there may be "&nbsp;", which will cause # "XMLSyntaxError: Entity 'nbsp' not defined" try: from lxml import etree except ImportError as e: logging.error( "Could not import `lxml`. Although it is not a required package to use " "Langchain, using the EverNote loader requires `lxml`. Please install " "`lxml` via `pip install lxml` and try again." ) raise e context = etree.iterparse( xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True ) for action, elem in context: if elem.tag == "note": yield EverNoteLoader._parse_note(elem)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html
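A usage sketch, assuming lxml and html2text are installed; the .enex path is a placeholder.

from langchain.document_loaders.evernote import EverNoteLoader

loader = EverNoteLoader("my_notebook.enex", load_single_document=False)
docs = loader.load()  # one Document per note, with note attributes as metadata
print(docs[0].metadata)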
1fe2193066c3-0
Source code for langchain.document_loaders.tencent_cos_file """Loading logic for loading documents from Tencent Cloud COS file.""" import os import tempfile from typing import Any, Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class TencentCOSFileLoader(BaseLoader): """Loading logic for loading documents from Tencent Cloud COS.""" def __init__(self, conf: Any, bucket: str, key: str): """Initialize with COS config, bucket and key name. :param conf(CosConfig): COS config. :param bucket(str): COS bucket. :param key(str): COS file key. """ self.conf = conf self.bucket = bucket self.key = key [docs] def load(self) -> List[Document]: return list(self.lazy_load()) [docs] def lazy_load(self) -> Iterator[Document]: """Load documents.""" try: from qcloud_cos import CosS3Client except ImportError: raise ValueError( "Could not import cos-python-sdk-v5 python package. " "Please install it with `pip install cos-python-sdk-v5`." ) # Initialise a client client = CosS3Client(self.conf) with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.bucket}/{self.key}" os.makedirs(os.path.dirname(file_path), exist_ok=True) # Download the file to a destination client.download_file( Bucket=self.bucket, Key=self.key, DestFilePath=file_path ) loader = UnstructuredFileLoader(file_path)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tencent_cos_file.html
1fe2193066c3-1
) loader = UnstructuredFileLoader(file_path) # UnstructuredFileLoader not implement lazy_load yet return iter(loader.load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/tencent_cos_file.html
3bbcc0a92cff-0
Source code for langchain.document_loaders.parsers.grobid from typing import Dict, Iterator, List, Union import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob [docs]class ServerUnavailableException(Exception): pass [docs]class GrobidParser(BaseBlobParser): """Loader that uses Grobid to load article PDF files.""" def __init__( self, segment_sentences: bool, grobid_server: str = "http://localhost:8070/api/processFulltextDocument", ) -> None: self.segment_sentences = segment_sentences self.grobid_server = grobid_server try: requests.get(grobid_server) except requests.exceptions.RequestException: print( "GROBID server does not appear up and running, \ please ensure Grobid is installed and the server is running" ) raise ServerUnavailableException [docs] def process_xml( self, file_path: str, xml_data: str, segment_sentences: bool ) -> Iterator[Document]: """Process the XML file from Grobid.""" try: from bs4 import BeautifulSoup except ImportError: raise ImportError( "`bs4` package not found, please install it with " "`pip install bs4`" ) soup = BeautifulSoup(xml_data, "xml") sections = soup.find_all("div") title = soup.find_all("title")[0].text chunks = [] for section in sections: sect = section.find("head") if sect is not None: for i, paragraph in enumerate(section.find_all("p")):
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/grobid.html
3bbcc0a92cff-1
for i, paragraph in enumerate(section.find_all("p")): chunk_bboxes = [] paragraph_text = [] for i, sentence in enumerate(paragraph.find_all("s")): paragraph_text.append(sentence.text) sbboxes = [] for bbox in sentence.get("coords").split(";"): box = bbox.split(",") sbboxes.append( { "page": box[0], "x": box[1], "y": box[2], "h": box[3], "w": box[4], } ) chunk_bboxes.append(sbboxes) if segment_sentences is True: fpage, lpage = sbboxes[0]["page"], sbboxes[-1]["page"] sentence_dict = { "text": sentence.text, "para": str(i), "bboxes": [sbboxes], "section_title": sect.text, "section_number": sect.get("n"), "pages": (fpage, lpage), } chunks.append(sentence_dict) if segment_sentences is not True: fpage, lpage = ( chunk_bboxes[0][0]["page"], chunk_bboxes[-1][-1]["page"], ) paragraph_dict = { "text": "".join(paragraph_text), "para": str(i), "bboxes": chunk_bboxes, "section_title": sect.text, "section_number": sect.get("n"), "pages": (fpage, lpage), } chunks.append(paragraph_dict) yield from [ Document( page_content=chunk["text"], metadata=dict(
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/grobid.html
3bbcc0a92cff-2
Document( page_content=chunk["text"], metadata=dict( { "text": str(chunk["text"]), "para": str(chunk["para"]), "bboxes": str(chunk["bboxes"]), "pages": str(chunk["pages"]), "section_title": str(chunk["section_title"]), "section_number": str(chunk["section_number"]), "paper_title": str(title), "file_path": str(file_path), } ), ) for chunk in chunks ] [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: file_path = blob.source if file_path is None: raise ValueError("blob.source cannot be None.") pdf = open(file_path, "rb") files = {"input": (file_path, pdf, "application/pdf", {"Expires": "0"})} try: data: Dict[str, Union[str, List[str]]] = {} for param in ["generateIDs", "consolidateHeader", "segmentSentences"]: data[param] = "1" data["teiCoordinates"] = ["head", "s"] files = files or {} r = requests.request( "POST", self.grobid_server, headers=None, params=None, files=files, data=data, timeout=60, ) xml_data = r.text except requests.exceptions.ReadTimeout: xml_data = None if xml_data is None: return iter([]) else: return self.process_xml(file_path, xml_data, self.segment_sentences)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/grobid.html
71f60526a274-0
Source code for langchain.document_loaders.parsers.audio from typing import Iterator from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob from langchain.schema import Document [docs]class OpenAIWhisperParser(BaseBlobParser): """Transcribe and parse audio files. Audio transcription is with OpenAI Whisper model.""" [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import io try: import openai except ImportError: raise ValueError( "openai package not found, please install it with " "`pip install openai`" ) try: from pydub import AudioSegment except ImportError: raise ValueError( "pydub package not found, please install it with " "`pip install pydub`" ) # Audio file from disk audio = AudioSegment.from_file(blob.path) # Define the duration of each chunk in minutes # Need to meet 25MB size limit for Whisper API chunk_duration = 20 chunk_duration_ms = chunk_duration * 60 * 1000 # Split the audio into chunk_duration_ms chunks for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)): # Audio chunk chunk = audio[i : i + chunk_duration_ms] file_obj = io.BytesIO(chunk.export(format="mp3").read()) if blob.source is not None: file_obj.name = blob.source + f"_part_{split_number}.mp3" else: file_obj.name = f"part_{split_number}.mp3" # Transcribe
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/audio.html
71f60526a274-1
# Transcribe print(f"Transcribing part {split_number+1}!") transcript = openai.Audio.transcribe("whisper-1", file_obj) yield Document( page_content=transcript.text, metadata={"source": blob.source, "chunk": split_number}, )
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/audio.html
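A usage sketch for the parser above, assuming openai and pydub (plus ffmpeg) are installed and OPENAI_API_KEY is set in the environment; the audio path is a placeholder.

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.audio import OpenAIWhisperParser

parser = OpenAIWhisperParser()
docs = list(parser.lazy_parse(Blob.from_path("meeting.m4a")))  # placeholder path
print(docs[0].metadata["chunk"], docs[0].page_content[:100])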
b155063f625d-0
Source code for langchain.document_loaders.parsers.pdf """Module contains common parsers for PDFs.""" from typing import Any, Iterator, Mapping, Optional, Union from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob from langchain.schema import Document [docs]class PyPDFParser(BaseBlobParser): """Loads a PDF with pypdf and chunks at character level.""" def __init__(self, password: Optional[Union[str, bytes]] = None): self.password = password [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import pypdf with blob.as_bytes_io() as pdf_file_obj: pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password) yield from [ Document( page_content=page.extract_text(), metadata={"source": blob.source, "page": page_number}, ) for page_number, page in enumerate(pdf_reader.pages) ] [docs]class PDFMinerParser(BaseBlobParser): """Parse PDFs with PDFMiner.""" [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" from pdfminer.high_level import extract_text with blob.as_bytes_io() as pdf_file_obj: text = extract_text(pdf_file_obj) metadata = {"source": blob.source} yield Document(page_content=text, metadata=metadata) [docs]class PyMuPDFParser(BaseBlobParser): """Parse PDFs with PyMuPDF.""" def __init__(self, text_kwargs: Optional[Mapping[str, Any]] = None) -> None: """Initialize the parser.
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/pdf.html
b155063f625d-1
"""Initialize the parser. Args: text_kwargs: Keyword arguments to pass to ``fitz.Page.get_text()``. """ self.text_kwargs = text_kwargs or {} [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import fitz with blob.as_bytes_io() as file_path: doc = fitz.open(file_path) # open document yield from [ Document( page_content=page.get_text(**self.text_kwargs), metadata=dict( { "source": blob.source, "file_path": blob.source, "page": page.number, "total_pages": len(doc), }, **{ k: doc.metadata[k] for k in doc.metadata if type(doc.metadata[k]) in [str, int] }, ), ) for page in doc ] [docs]class PyPDFium2Parser(BaseBlobParser): """Parse PDFs with PyPDFium2.""" def __init__(self) -> None: """Initialize the parser.""" try: import pypdfium2 # noqa:F401 except ImportError: raise ValueError( "pypdfium2 package not found, please install it with" " `pip install pypdfium2`" ) [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import pypdfium2 # pypdfium2 is really finicky with respect to closing things, # if done incorrectly creates seg faults.
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/pdf.html
b155063f625d-2
# if done incorrectly creates seg faults. with blob.as_bytes_io() as file_path: pdf_reader = pypdfium2.PdfDocument(file_path, autoclose=True) try: for page_number, page in enumerate(pdf_reader): text_page = page.get_textpage() content = text_page.get_text_range() text_page.close() page.close() metadata = {"source": blob.source, "page": page_number} yield Document(page_content=content, metadata=metadata) finally: pdf_reader.close() [docs]class PDFPlumberParser(BaseBlobParser): """Parse PDFs with PDFPlumber.""" def __init__(self, text_kwargs: Optional[Mapping[str, Any]] = None) -> None: """Initialize the parser. Args: text_kwargs: Keyword arguments to pass to ``pdfplumber.Page.extract_text()`` """ self.text_kwargs = text_kwargs or {} [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" import pdfplumber with blob.as_bytes_io() as file_path: doc = pdfplumber.open(file_path) # open document yield from [ Document( page_content=page.extract_text(**self.text_kwargs), metadata=dict( { "source": blob.source, "file_path": blob.source, "page": page.page_number, "total_pages": len(doc.pages), }, **{ k: doc.metadata[k] for k in doc.metadata if type(doc.metadata[k]) in [str, int] }, ), ) for page in doc.pages
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/pdf.html
b155063f625d-3
}, ), ) for page in doc.pages ]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/pdf.html
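A usage sketch for one of the parsers above (PyPDFParser), assuming pypdf is installed; the path is a placeholder. The other parser classes are used the same way once their respective backends are installed.

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.pdf import PyPDFParser

parser = PyPDFParser()
for doc in parser.lazy_parse(Blob.from_path("paper.pdf")):  # placeholder path
    print(doc.metadata["page"], len(doc.page_content))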
4cd92ac7dba1-0
Source code for langchain.document_loaders.parsers.generic """Code for generic / auxiliary parsers. This module contains some logic to help assemble more sophisticated parsers. """ from typing import Iterator, Mapping, Optional from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders.schema import Blob from langchain.schema import Document [docs]class MimeTypeBasedParser(BaseBlobParser): """A parser that uses mime-types to determine how to parse a blob. This parser is useful for simple pipelines where the mime-type is sufficient to determine how to parse a blob. To use, configure handlers based on mime-types and pass them to the initializer. Example: .. code-block:: python from langchain.document_loaders.parsers.generic import MimeTypeBasedParser parser = MimeTypeBasedParser( handlers={ "application/pdf": ..., }, fallback_parser=..., ) """ def __init__( self, handlers: Mapping[str, BaseBlobParser], *, fallback_parser: Optional[BaseBlobParser] = None, ) -> None: """Define a parser that uses mime-types to determine how to parse a blob. Args: handlers: A mapping from mime-types to functions that take a blob, parse it and return a document. fallback_parser: A fallback_parser parser to use if the mime-type is not found in the handlers. If provided, this parser will be used to parse blobs with all mime-types not found in the handlers. If not provided, a ValueError will be raised if the mime-type is not found in the handlers. """ self.handlers = handlers self.fallback_parser = fallback_parser
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/generic.html
4cd92ac7dba1-1
""" self.handlers = handlers self.fallback_parser = fallback_parser [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Load documents from a blob.""" mimetype = blob.mimetype if mimetype is None: raise ValueError(f"{blob} does not have a mimetype.") if mimetype in self.handlers: handler = self.handlers[mimetype] yield from handler.lazy_parse(blob) else: if self.fallback_parser is not None: yield from self.fallback_parser.lazy_parse(blob) else: raise ValueError(f"Unsupported mime type: {mimetype}")
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/generic.html
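A usage sketch wiring concrete parsers into the mime-type router; the file path is a placeholder and the mimetype is guessed from its extension by Blob.from_path.

from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.generic import MimeTypeBasedParser
from langchain.document_loaders.parsers.pdf import PyPDFParser
from langchain.document_loaders.parsers.txt import TextParser

parser = MimeTypeBasedParser(
    handlers={"application/pdf": PyPDFParser(), "text/plain": TextParser()},
    fallback_parser=TextParser(),  # used for any other mime-type
)
docs = list(parser.lazy_parse(Blob.from_path("notes.txt")))  # placeholder path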
7e7265154bc9-0
Source code for langchain.document_loaders.parsers.txt """Module for parsing text files.""" from typing import Iterator from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob from langchain.schema import Document [docs]class TextParser(BaseBlobParser): """Parser for text blobs.""" [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Lazily parse the blob.""" yield Document(page_content=blob.as_string(), metadata={"source": blob.source})
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/txt.html
a5d0a177b784-0
Source code for langchain.document_loaders.parsers.registry """Module includes a registry of default parser configurations.""" from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.parsers.generic import MimeTypeBasedParser from langchain.document_loaders.parsers.pdf import PyMuPDFParser from langchain.document_loaders.parsers.txt import TextParser def _get_default_parser() -> BaseBlobParser: """Get default mime-type based parser.""" return MimeTypeBasedParser( handlers={ "application/pdf": PyMuPDFParser(), "text/plain": TextParser(), }, fallback_parser=None, ) _REGISTRY = { "default": _get_default_parser, } # PUBLIC API [docs]def get_parser(parser_name: str) -> BaseBlobParser: """Get a parser by parser name.""" if parser_name not in _REGISTRY: raise ValueError(f"Unknown parser combination: {parser_name}") return _REGISTRY[parser_name]()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/registry.html
c6126d0c8db0-0
Source code for langchain.document_loaders.parsers.html.bs4 """Loader that uses bs4 to load HTML files, enriching metadata with page title.""" import logging from typing import Any, Dict, Iterator, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob logger = logging.getLogger(__name__) [docs]class BS4HTMLParser(BaseBlobParser): """Parser that uses beautiful soup to parse HTML files.""" def __init__( self, *, features: str = "lxml", get_text_separator: str = "", **kwargs: Any, ) -> None: """Initialize a bs4 based HTML parser.""" try: import bs4 # noqa:F401 except ImportError: raise ValueError( "beautifulsoup4 package not found, please install it with " "`pip install beautifulsoup4`" ) self.bs_kwargs = {"features": features, **kwargs} self.get_text_separator = get_text_separator [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Load HTML document into document objects.""" from bs4 import BeautifulSoup with blob.as_bytes_io() as f: soup = BeautifulSoup(f, **self.bs_kwargs) text = soup.get_text(self.get_text_separator) if soup.title: title = str(soup.title.string) else: title = "" metadata: Dict[str, Union[str, None]] = { "source": blob.source, "title": title, } yield Document(page_content=text, metadata=metadata)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/html/bs4.html
62ed8298bced-0
Source code for langchain.document_loaders.parsers.language.code_segmenter from abc import ABC, abstractmethod from typing import List [docs]class CodeSegmenter(ABC): def __init__(self, code: str): self.code = code [docs] def is_valid(self) -> bool: return True [docs] @abstractmethod def simplify_code(self) -> str: raise NotImplementedError # pragma: no cover [docs] @abstractmethod def extract_functions_classes(self) -> List[str]: raise NotImplementedError # pragma: no cover
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/code_segmenter.html
bec8ced10a2f-0
Source code for langchain.document_loaders.parsers.language.javascript from typing import Any, List from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter [docs]class JavaScriptSegmenter(CodeSegmenter): def __init__(self, code: str): super().__init__(code) self.source_lines = self.code.splitlines() try: import esprima # noqa: F401 except ImportError: raise ImportError( "Could not import esprima Python package. " "Please install it with `pip install esprima`." ) [docs] def is_valid(self) -> bool: import esprima try: esprima.parseScript(self.code) return True except esprima.Error: return False def _extract_code(self, node: Any) -> str: start = node.loc.start.line - 1 end = node.loc.end.line return "\n".join(self.source_lines[start:end]) [docs] def extract_functions_classes(self) -> List[str]: import esprima tree = esprima.parseScript(self.code, loc=True) functions_classes = [] for node in tree.body: if isinstance( node, (esprima.nodes.FunctionDeclaration, esprima.nodes.ClassDeclaration), ): functions_classes.append(self._extract_code(node)) return functions_classes [docs] def simplify_code(self) -> str: import esprima tree = esprima.parseScript(self.code, loc=True) simplified_lines = self.source_lines[:] for node in tree.body: if isinstance( node,
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/javascript.html
bec8ced10a2f-1
for node in tree.body: if isinstance( node, (esprima.nodes.FunctionDeclaration, esprima.nodes.ClassDeclaration), ): start = node.loc.start.line - 1 simplified_lines[start] = f"// Code for: {simplified_lines[start]}" for line_num in range(start + 1, node.loc.end.line): simplified_lines[line_num] = None # type: ignore return "\n".join(line for line in simplified_lines if line is not None)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/javascript.html
1a219eef5d7d-0
Source code for langchain.document_loaders.parsers.language.language_parser from typing import Any, Dict, Iterator, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseBlobParser from langchain.document_loaders.blob_loaders import Blob from langchain.document_loaders.parsers.language.javascript import JavaScriptSegmenter from langchain.document_loaders.parsers.language.python import PythonSegmenter from langchain.text_splitter import Language LANGUAGE_EXTENSIONS: Dict[str, str] = { "py": Language.PYTHON, "js": Language.JS, } LANGUAGE_SEGMENTERS: Dict[str, Any] = { Language.PYTHON: PythonSegmenter, Language.JS: JavaScriptSegmenter, } [docs]class LanguageParser(BaseBlobParser): """ Language parser that split code using the respective language syntax. Each top-level function and class in the code is loaded into separate documents. Furthermore, an extra document is generated, containing the remaining top-level code that excludes the already segmented functions and classes. This approach can potentially improve the accuracy of QA models over source code. Currently, the supported languages for code parsing are Python and JavaScript. The language used for parsing can be configured, along with the minimum number of lines required to activate the splitting based on syntax. Examples: .. code-block:: python from langchain.text_splitter.Language from langchain.document_loaders.generic import GenericLoader from langchain.document_loaders.parsers import LanguageParser loader = GenericLoader.from_filesystem( "./code", glob="**/*", suffixes=[".py", ".js"], parser=LanguageParser() ) docs = loader.load() Example instantiations to manually select the language:
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/language_parser.html
1a219eef5d7d-1
docs = loader.load() Example instantiations to manually select the language: ... code-block:: python from langchain.text_splitter import Language loader = GenericLoader.from_filesystem( "./code", glob="**/*", suffixes=[".py"], parser=LanguageParser(language=Language.PYTHON) ) Example instantiations to set number of lines threshold: ... code-block:: python loader = GenericLoader.from_filesystem( "./code", glob="**/*", suffixes=[".py"], parser=LanguageParser(parser_threshold=200) ) """ def __init__(self, language: Optional[Language] = None, parser_threshold: int = 0): """ Language parser that split code using the respective language syntax. Args: language: If None (default), it will try to infer language from source. parser_threshold: Minimum lines needed to activate parsing (0 by default). """ self.language = language self.parser_threshold = parser_threshold [docs] def lazy_parse(self, blob: Blob) -> Iterator[Document]: code = blob.as_string() language = self.language or ( LANGUAGE_EXTENSIONS.get(blob.source.rsplit(".", 1)[-1]) if isinstance(blob.source, str) else None ) if language is None: yield Document( page_content=code, metadata={ "source": blob.source, }, ) return if self.parser_threshold >= len(code.splitlines()): yield Document( page_content=code, metadata={ "source": blob.source, "language": language, }, )
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/language_parser.html
1a219eef5d7d-2
"language": language, }, ) return self.Segmenter = LANGUAGE_SEGMENTERS[language] segmenter = self.Segmenter(blob.as_string()) if not segmenter.is_valid(): yield Document( page_content=code, metadata={ "source": blob.source, }, ) return for functions_classes in segmenter.extract_functions_classes(): yield Document( page_content=functions_classes, metadata={ "source": blob.source, "content_type": "functions_classes", "language": language, }, ) yield Document( page_content=segmenter.simplify_code(), metadata={ "source": blob.source, "content_type": "simplified_code", "language": language, }, )
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/language_parser.html
fc6956191354-0
Source code for langchain.document_loaders.parsers.language.python import ast from typing import Any, List from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter [docs]class PythonSegmenter(CodeSegmenter): def __init__(self, code: str): super().__init__(code) self.source_lines = self.code.splitlines() [docs] def is_valid(self) -> bool: try: ast.parse(self.code) return True except SyntaxError: return False def _extract_code(self, node: Any) -> str: start = node.lineno - 1 end = node.end_lineno return "\n".join(self.source_lines[start:end]) [docs] def extract_functions_classes(self) -> List[str]: tree = ast.parse(self.code) functions_classes = [] for node in ast.iter_child_nodes(tree): if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): functions_classes.append(self._extract_code(node)) return functions_classes [docs] def simplify_code(self) -> str: tree = ast.parse(self.code) simplified_lines = self.source_lines[:] for node in ast.iter_child_nodes(tree): if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): start = node.lineno - 1 simplified_lines[start] = f"# Code for: {simplified_lines[start]}" assert isinstance(node.end_lineno, int) for line_num in range(start + 1, node.end_lineno): simplified_lines[line_num] = None # type: ignore return "\n".join(line for line in simplified_lines if line is not None)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/parsers/language/python.html
d02f9d1cad25-0
Source code for langchain.document_loaders.blob_loaders.file_system """Use to load blobs from the local file system.""" from pathlib import Path from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader T = TypeVar("T") def _make_iterator( length_func: Callable[[], int], show_progress: bool = False ) -> Callable[[Iterable[T]], Iterator[T]]: """Create a function that optionally wraps an iterable in tqdm.""" if show_progress: try: from tqdm.auto import tqdm except ImportError: raise ImportError( "You must install tqdm to use show_progress=True." "You can install tqdm with `pip install tqdm`." ) # Make sure to provide `total` here so that tqdm can show # a progress bar that takes into account the total number of files. def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]: """Wrap an iterable in a tqdm progress bar.""" return tqdm(iterable, total=length_func()) iterator = _with_tqdm else: iterator = iter # type: ignore return iterator # PUBLIC API [docs]class FileSystemBlobLoader(BlobLoader): """Blob loader for the local file system. Example: .. code-block:: python from langchain.document_loaders.blob_loaders import FileSystemBlobLoader loader = FileSystemBlobLoader("/path/to/directory") for blob in loader.yield_blobs(): print(blob) """ def __init__( self, path: Union[str, Path], *, glob: str = "**/[!.]*",
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/file_system.html
d02f9d1cad25-1
*, glob: str = "**/[!.]*", suffixes: Optional[Sequence[str]] = None, show_progress: bool = False, ) -> None: """Initialize with path to directory and how to glob over it. Args: path: Path to directory to load from glob: Glob pattern relative to the specified path by default set to pick up all non-hidden files suffixes: Provide to keep only files with these suffixes Useful when wanting to keep files with different suffixes Suffixes must include the dot, e.g. ".txt" show_progress: If true, will show a progress bar as the files are loaded. This forces an iteration through all matching files to count them prior to loading them. Examples: ... code-block:: python # Recursively load all text files in a directory. loader = FileSystemBlobLoader("/path/to/directory", glob="**/*.txt") # Recursively load all non-hidden files in a directory. loader = FileSystemBlobLoader("/path/to/directory", glob="**/[!.]*") # Load all files in a directory without recursion. loader = FileSystemBlobLoader("/path/to/directory", glob="*") """ if isinstance(path, Path): _path = path elif isinstance(path, str): _path = Path(path) else: raise TypeError(f"Expected str or Path, got {type(path)}") self.path = _path self.glob = glob self.suffixes = set(suffixes or []) self.show_progress = show_progress [docs] def yield_blobs( self, ) -> Iterable[Blob]:
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/file_system.html
d02f9d1cad25-2
self, ) -> Iterable[Blob]: """Yield blobs that match the requested pattern.""" iterator = _make_iterator( length_func=self.count_matching_files, show_progress=self.show_progress ) for path in iterator(self._yield_paths()): yield Blob.from_path(path) def _yield_paths(self) -> Iterable[Path]: """Yield paths that match the requested pattern.""" paths = self.path.glob(self.glob) for path in paths: if path.is_file(): if self.suffixes and path.suffix not in self.suffixes: continue yield path [docs] def count_matching_files(self) -> int: """Count files that match the pattern without loading them.""" # Carry out a full iteration to count the files without # materializing anything expensive in memory. num = 0 for _ in self._yield_paths(): num += 1 return num
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/file_system.html
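A usage sketch; the directory path is a placeholder, and show_progress=True would additionally require tqdm.

from langchain.document_loaders.blob_loaders import FileSystemBlobLoader

loader = FileSystemBlobLoader("/path/to/docs", glob="**/*.txt")  # placeholder directory
print(loader.count_matching_files())
for blob in loader.yield_blobs():
    print(blob.source)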
d14be4bb58da-0
Source code for langchain.document_loaders.blob_loaders.youtube_audio from typing import Iterable, List from langchain.document_loaders.blob_loaders import FileSystemBlobLoader from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader [docs]class YoutubeAudioLoader(BlobLoader): """Load YouTube urls as audio file(s).""" def __init__(self, urls: List[str], save_dir: str): if not isinstance(urls, list): raise TypeError("urls must be a list") self.urls = urls self.save_dir = save_dir [docs] def yield_blobs(self) -> Iterable[Blob]: """Yield audio blobs for each url.""" try: import yt_dlp except ImportError: raise ValueError( "yt_dlp package not found, please install it with " "`pip install yt_dlp`" ) # Use yt_dlp to download audio given a YouTube url ydl_opts = { "format": "m4a/bestaudio/best", "noplaylist": True, "outtmpl": self.save_dir + "/%(title)s.%(ext)s", "postprocessors": [ { "key": "FFmpegExtractAudio", "preferredcodec": "m4a", } ], } for url in self.urls: # Download file with yt_dlp.YoutubeDL(ydl_opts) as ydl: ydl.download(url) # Yield the written blobs loader = FileSystemBlobLoader(self.save_dir, glob="*.m4a") for blob in loader.yield_blobs(): yield blob
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/youtube_audio.html
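A sketch of combining this blob loader with the Whisper parser via GenericLoader, whose constructor takes a blob loader and a blob parser; the URL and save directory are placeholders, and yt_dlp, openai and pydub are assumed to be installed.

from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.parsers.audio import OpenAIWhisperParser

urls = ["<youtube-video-url>"]  # placeholder
loader = GenericLoader(YoutubeAudioLoader(urls, save_dir="./audio"), OpenAIWhisperParser())
docs = loader.load()  # downloads the audio, then transcribes each 20-minute chunk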
52bea49e6b46-0
Source code for langchain.document_loaders.blob_loaders.schema """Schema for Blobs and Blob Loaders. The goal is to facilitate decoupling of content loading from content parsing code. In addition, content loading code should provide a lazy loading interface by default. """ from __future__ import annotations import contextlib import mimetypes from abc import ABC, abstractmethod from io import BufferedReader, BytesIO from pathlib import PurePath from typing import Any, Generator, Iterable, Mapping, Optional, Union from pydantic import BaseModel, root_validator PathLike = Union[str, PurePath] [docs]class Blob(BaseModel): """A blob is used to represent raw data by either reference or value. Provides an interface to materialize the blob in different representations, and help to decouple the development of data loaders from the downstream parsing of the raw data. Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob """ data: Union[bytes, str, None] # Raw data mimetype: Optional[str] = None # Not to be confused with a file extension encoding: str = "utf-8" # Use utf-8 as default encoding, if decoding to string # Location where the original content was found # Represent location on the local file system # Useful for situations where downstream code assumes it must work with file paths # rather than in-memory content. path: Optional[PathLike] = None [docs] class Config: arbitrary_types_allowed = True frozen = True @property def source(self) -> Optional[str]: """The source location of the blob as string if known otherwise none.""" return str(self.path) if self.path else None
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/schema.html
52bea49e6b46-1
return str(self.path) if self.path else None [docs] @root_validator(pre=True) def check_blob_is_valid(cls, values: Mapping[str, Any]) -> Mapping[str, Any]: """Verify that either data or path is provided.""" if "data" not in values and "path" not in values: raise ValueError("Either data or path must be provided") return values [docs] def as_string(self) -> str: """Read data as a string.""" if self.data is None and self.path: with open(str(self.path), "r", encoding=self.encoding) as f: return f.read() elif isinstance(self.data, bytes): return self.data.decode(self.encoding) elif isinstance(self.data, str): return self.data else: raise ValueError(f"Unable to get string for blob {self}") [docs] def as_bytes(self) -> bytes: """Read data as bytes.""" if isinstance(self.data, bytes): return self.data elif isinstance(self.data, str): return self.data.encode(self.encoding) elif self.data is None and self.path: with open(str(self.path), "rb") as f: return f.read() else: raise ValueError(f"Unable to get bytes for blob {self}") [docs] @contextlib.contextmanager def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]: """Read data as a byte stream.""" if isinstance(self.data, bytes): yield BytesIO(self.data) elif self.data is None and self.path: with open(str(self.path), "rb") as f: yield f else:
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/schema.html
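A sketch exercising the three accessors above on an in-memory blob; the byte string is illustrative. Note that as_bytes_io only handles bytes data or a path, so the example stores bytes.

from langchain.document_loaders.blob_loaders.schema import Blob

blob = Blob(data=b"caf\xc3\xa9", mimetype="text/plain")  # illustrative UTF-8 bytes

assert blob.as_string() == "café"        # decoded with blob.encoding (utf-8 by default)
assert blob.as_bytes() == b"caf\xc3\xa9"
with blob.as_bytes_io() as stream:       # a BytesIO wrapper around the raw bytes
    assert stream.read() == blob.as_bytes()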
52bea49e6b46-2
            raise NotImplementedError(f"Unable to convert blob {self}")

    @classmethod
    def from_path(
        cls,
        path: PathLike,
        *,
        encoding: str = "utf-8",
        mime_type: Optional[str] = None,
        guess_type: bool = True,
    ) -> Blob:
        """Load the blob from a path like object.

        Args:
            path: path like object to file to be read
            encoding: Encoding to use if decoding the bytes into a string
            mime_type: if provided, will be set as the mime-type of the data
            guess_type: If True, the mimetype will be guessed from the file extension,
                        if a mime-type was not provided

        Returns:
            Blob instance
        """
        if mime_type is None and guess_type:
            _mimetype = mimetypes.guess_type(path)[0] if guess_type else None
        else:
            _mimetype = mime_type
        # We do not load the data immediately, instead we treat the blob as a
        # reference to the underlying data.
        return cls(data=None, mimetype=_mimetype, encoding=encoding, path=path)

    @classmethod
    def from_data(
        cls,
        data: Union[str, bytes],
        *,
        encoding: str = "utf-8",
        mime_type: Optional[str] = None,
        path: Optional[str] = None,
    ) -> Blob:
        """Initialize the blob from in-memory data.

        Args:
            data: the in-memory data associated with the blob
            encoding: Encoding to use if decoding the bytes into a string
            mime_type: if provided, will be set as the mime-type of the data
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/schema.html
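A sketch of the load-by-reference behaviour that from_path implements; the file name is hypothetical and is created only so the snippet runs end to end.

from pathlib import Path

from langchain.document_loaders.blob_loaders.schema import Blob

path = Path("notes.txt")                 # hypothetical file
path.write_text("hello", encoding="utf-8")

blob = Blob.from_path(path)              # no bytes are read yet; data stays None
print(blob.mimetype)                     # "text/plain", guessed from the extension
print(blob.as_string())                  # the file is only opened here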
52bea49e6b46-3
            path: if provided, will be set as the source from which the data came

        Returns:
            Blob instance
        """
        return cls(data=data, mimetype=mime_type, encoding=encoding, path=path)

    def __repr__(self) -> str:
        """Define the blob representation."""
        str_repr = f"Blob {id(self)}"
        if self.source:
            str_repr += f" {self.source}"
        return str_repr

class BlobLoader(ABC):
    """Abstract interface for blob loaders implementation.

    Implementer should be able to load raw content from a storage system according
    to some criteria and return the raw content lazily as a stream of blobs.
    """

    @abstractmethod
    def yield_blobs(
        self,
    ) -> Iterable[Blob]:
        """A lazy loader for raw data represented by LangChain's Blob object.

        Returns:
            A generator over blobs
        """
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/blob_loaders/schema.html
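A minimal sketch of implementing the abstract interface above; the in-memory loader and its sample strings are invented for illustration and are not part of LangChain.

from typing import Iterable, List

from langchain.document_loaders.blob_loaders.schema import Blob, BlobLoader

class InMemoryBlobLoader(BlobLoader):
    """Hypothetical loader that wraps strings already held in memory."""

    def __init__(self, texts: List[str]):
        self.texts = texts

    def yield_blobs(self) -> Iterable[Blob]:
        # Blobs are produced lazily, one per input string.
        for text in self.texts:
            yield Blob.from_data(text, mime_type="text/plain")

loader = InMemoryBlobLoader(["first document", "second document"])
for blob in loader.yield_blobs():
    print(blob.as_string())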
c83fb8943a93-0
Source code for langchain.prompts.few_shot_with_templates
"""Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional

from pydantic import Extra, root_validator

from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate

class FewShotPromptWithTemplates(StringPromptTemplate):
    """Prompt template that contains few shot examples."""

    examples: Optional[List[dict]] = None
    """Examples to format into the prompt.
    Either this or example_selector should be provided."""

    example_selector: Optional[BaseExampleSelector] = None
    """ExampleSelector to choose the examples to format into the prompt.
    Either this or examples should be provided."""

    example_prompt: PromptTemplate
    """PromptTemplate used to format an individual example."""

    suffix: StringPromptTemplate
    """A PromptTemplate to put after the examples."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""

    example_separator: str = "\n\n"
    """String separator used to join the prefix, the examples, and suffix."""

    prefix: Optional[StringPromptTemplate] = None
    """A PromptTemplate to put before the examples."""

    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""

    validate_template: bool = True
    """Whether or not to try validating the template."""

    @root_validator(pre=True)
    def check_examples_and_selector(cls, values: Dict) -> Dict:
        """Check that one and only one of examples/example_selector are provided."""
        examples = values.get("examples", None)
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
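A sketch of wiring the fields above together; the antonym examples and template strings are invented for illustration.

from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["word", "antonym"],
    template="Word: {word}\nAntonym: {antonym}",
)

prompt = FewShotPromptWithTemplates(
    examples=[
        {"word": "happy", "antonym": "sad"},
        {"word": "tall", "antonym": "short"},
    ],
    example_prompt=example_prompt,
    prefix=PromptTemplate(
        input_variables=[], template="Give the antonym of every input."
    ),
    suffix=PromptTemplate(
        input_variables=["input"], template="Word: {input}\nAntonym:"
    ),
    input_variables=["input"],
)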
c83fb8943a93-1
        example_selector = values.get("example_selector", None)
        if examples and example_selector:
            raise ValueError(
                "Only one of 'examples' and 'example_selector' should be provided"
            )
        if examples is None and example_selector is None:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )
        return values

    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that prefix, suffix and input variables are consistent."""
        if values["validate_template"]:
            input_variables = values["input_variables"]
            expected_input_variables = set(values["suffix"].input_variables)
            expected_input_variables |= set(values["partial_variables"])
            if values["prefix"] is not None:
                expected_input_variables |= set(values["prefix"].input_variables)
            missing_vars = expected_input_variables.difference(input_variables)
            if missing_vars:
                raise ValueError(
                    f"Got input_variables={input_variables}, but based on "
                    f"prefix/suffix expected {expected_input_variables}"
                )
        return values

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def _get_examples(self, **kwargs: Any) -> List[dict]:
        if self.examples is not None:
            return self.examples
        elif self.example_selector is not None:
            return self.example_selector.select_examples(kwargs)
        else:
            raise ValueError

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
c83fb8943a93-2
"""Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs) # Get the examples to use. examples = self._get_examples(**kwargs) # Format the examples. example_strings = [ self.example_prompt.format(**example) for example in examples ] # Create the overall prefix. if self.prefix is None: prefix = "" else: prefix_kwargs = { k: v for k, v in kwargs.items() if k in self.prefix.input_variables } for k in prefix_kwargs.keys(): kwargs.pop(k) prefix = self.prefix.format(**prefix_kwargs) # Create the overall suffix suffix_kwargs = { k: v for k, v in kwargs.items() if k in self.suffix.input_variables } for k in suffix_kwargs.keys(): kwargs.pop(k) suffix = self.suffix.format( **suffix_kwargs, ) pieces = [prefix, *example_strings, suffix] template = self.example_separator.join([piece for piece in pieces if piece]) # Format the template with the input variables. return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs) @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "few_shot_with_templates" [docs] def dict(self, **kwargs: Any) -> Dict: """Return a dictionary of the prompt.""" if self.example_selector:
https://api.python.langchain.com/en/latest/_modules/langchain/prompts/few_shot_with_templates.html
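Continuing the hypothetical prompt built in the earlier sketch, format() renders the prefix, each formatted example, and the suffix, joined by example_separator:

text = prompt.format(input="hot")
print(text)
# Give the antonym of every input.
#
# Word: happy
# Antonym: sad
#
# Word: tall
# Antonym: short
#
# Word: hot
# Antonym: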