Dataset columns: id (string, 14–15 chars) · text (string, 49–2.47k chars) · source (string, 61–166 chars)
8329be7fba50-0
Source code for langchain.memory.chat_message_histories.file

import json
import logging
from pathlib import Path
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, messages_from_dict, messages_to_dict

logger = logging.getLogger(__name__)


class FileChatMessageHistory(BaseChatMessageHistory):
    """
    Chat message history that stores history in a local file.

    Args:
        file_path: path of the local file to store the messages.
    """

    def __init__(self, file_path: str):
        self.file_path = Path(file_path)
        if not self.file_path.exists():
            self.file_path.touch()
            self.file_path.write_text(json.dumps([]))

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from the local file"""
        items = json.loads(self.file_path.read_text())
        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in the local file"""
        messages = messages_to_dict(self.messages)
        messages.append(messages_to_dict([message])[0])
        self.file_path.write_text(json.dumps(messages))

    def clear(self) -> None:
        """Clear session memory from the local file"""
        self.file_path.write_text(json.dumps([]))
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/file.html
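A minimal usage sketch of the class above; the file name is an illustrative placeholder, and add_user_message/add_ai_message come from the BaseChatMessageHistory base class:

from langchain.memory.chat_message_histories.file import FileChatMessageHistory

# "chat_history.json" is an example path; the file is created and seeded with "[]"
# on first use, and every message is round-tripped through JSON.
history = FileChatMessageHistory("chat_history.json")
history.add_user_message("hi!")
history.add_ai_message("hello, how can I help?")
print(history.messages)  # list of BaseMessage objects reconstructed from the file
history.clear()          # resets the file to an empty JSON list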
0049f2258d9d-0
Source code for langchain.memory.chat_message_histories.postgres

import json
import logging
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict

logger = logging.getLogger(__name__)

DEFAULT_CONNECTION_STRING = "postgresql://postgres:mypassword@localhost/chat_history"


class PostgresChatMessageHistory(BaseChatMessageHistory):
    """Chat message history stored in a Postgres database."""

    def __init__(
        self,
        session_id: str,
        connection_string: str = DEFAULT_CONNECTION_STRING,
        table_name: str = "message_store",
    ):
        import psycopg
        from psycopg.rows import dict_row

        try:
            self.connection = psycopg.connect(connection_string)
            self.cursor = self.connection.cursor(row_factory=dict_row)
        except psycopg.OperationalError as error:
            logger.error(error)

        self.session_id = session_id
        self.table_name = table_name

        self._create_table_if_not_exists()

    def _create_table_if_not_exists(self) -> None:
        create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
            id SERIAL PRIMARY KEY,
            session_id TEXT NOT NULL,
            message JSONB NOT NULL
        );"""
        self.cursor.execute(create_table_query)
        self.connection.commit()

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from PostgreSQL"""
        query = (
            f"SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;"
        )
        self.cursor.execute(query, (self.session_id,))
        items = [record["message"] for record in self.cursor.fetchall()]
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/postgres.html
0049f2258d9d-1
        items = [record["message"] for record in self.cursor.fetchall()]
        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in PostgreSQL"""
        from psycopg import sql

        query = sql.SQL("INSERT INTO {} (session_id, message) VALUES (%s, %s);").format(
            sql.Identifier(self.table_name)
        )
        self.cursor.execute(
            query, (self.session_id, json.dumps(_message_to_dict(message)))
        )
        self.connection.commit()

    def clear(self) -> None:
        """Clear session memory from PostgreSQL"""
        query = f"DELETE FROM {self.table_name} WHERE session_id = %s;"
        self.cursor.execute(query, (self.session_id,))
        self.connection.commit()

    def __del__(self) -> None:
        if self.cursor:
            self.cursor.close()
        if self.connection:
            self.connection.close()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/postgres.html
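A minimal usage sketch for the class above, assuming a reachable Postgres instance; the connection string and session_id are placeholders:

from langchain.memory.chat_message_histories.postgres import PostgresChatMessageHistory

# Placeholder credentials; the default table "message_store" is created on first use.
history = PostgresChatMessageHistory(
    session_id="example-session",
    connection_string="postgresql://postgres:mypassword@localhost/chat_history",
)
history.add_user_message("hi!")
history.add_ai_message("whats up?")
print(history.messages)  # rows for "example-session", ordered by insertion id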
dde66c42395f-0
Source code for langchain.memory.chat_message_histories.mongodb

import json
import logging
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict

logger = logging.getLogger(__name__)

DEFAULT_DBNAME = "chat_history"
DEFAULT_COLLECTION_NAME = "message_store"


class MongoDBChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in MongoDB.

    Args:
        connection_string: connection string to connect to MongoDB
        session_id: arbitrary key that is used to store the messages
            of a single chat session.
        database_name: name of the database to use
        collection_name: name of the collection to use
    """

    def __init__(
        self,
        connection_string: str,
        session_id: str,
        database_name: str = DEFAULT_DBNAME,
        collection_name: str = DEFAULT_COLLECTION_NAME,
    ):
        from pymongo import MongoClient, errors

        self.connection_string = connection_string
        self.session_id = session_id
        self.database_name = database_name
        self.collection_name = collection_name

        try:
            self.client: MongoClient = MongoClient(connection_string)
        except errors.ConnectionFailure as error:
            logger.error(error)

        self.db = self.client[database_name]
        self.collection = self.db[collection_name]
        self.collection.create_index("SessionId")

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the messages from MongoDB"""
        from pymongo import errors

        try:
            cursor = self.collection.find({"SessionId": self.session_id})
        except errors.OperationFailure as error:
            logger.error(error)

        if cursor:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/mongodb.html
dde66c42395f-1
        except errors.OperationFailure as error:
            logger.error(error)

        if cursor:
            items = [json.loads(document["History"]) for document in cursor]
        else:
            items = []

        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
        """Append the message to the record in MongoDB"""
        from pymongo import errors

        try:
            self.collection.insert_one(
                {
                    "SessionId": self.session_id,
                    "History": json.dumps(_message_to_dict(message)),
                }
            )
        except errors.WriteError as err:
            logger.error(err)

    def clear(self) -> None:
        """Clear session memory from MongoDB"""
        from pymongo import errors

        try:
            self.collection.delete_many({"SessionId": self.session_id})
        except errors.WriteError as err:
            logger.error(err)
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/mongodb.html
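A minimal usage sketch for the class above, assuming a MongoDB server at the example URI; the URI and session_id are placeholders:

from langchain.memory.chat_message_histories.mongodb import MongoDBChatMessageHistory

# Placeholder URI; messages land in the default database "chat_history",
# collection "message_store", keyed by SessionId.
history = MongoDBChatMessageHistory(
    connection_string="mongodb://localhost:27017/",
    session_id="example-session",
)
history.add_user_message("hi!")
history.add_ai_message("whats up?")
print(history.messages)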
076a75875f80-0
Source code for langchain.memory.chat_message_histories.rocksetdb

from datetime import datetime
from time import sleep
from typing import Any, Callable, List, Union
from uuid import uuid4

from langchain.schema import BaseChatMessageHistory
from langchain.schema.messages import BaseMessage, _message_to_dict, messages_from_dict


class RocksetChatMessageHistory(BaseChatMessageHistory):
    """Uses Rockset to store chat messages.

    To use, ensure that the `rockset` python package is installed.

    Example:
        .. code-block:: python

            from langchain.memory.chat_message_histories import (
                RocksetChatMessageHistory
            )
            from rockset import RocksetClient

            history = RocksetChatMessageHistory(
                session_id="MySession",
                client=RocksetClient(),
                collection="langchain_demo",
                sync=True
            )

            history.add_user_message("hi!")
            history.add_ai_message("whats up?")

            print(history.messages)
    """

    # You should set these values based on your VI.
    # These values are configured for the typical
    # free VI. Read more about VIs here:
    # https://rockset.com/docs/instances
    SLEEP_INTERVAL_MS = 5
    ADD_TIMEOUT_MS = 5000
    CREATE_TIMEOUT_MS = 20000

    def _wait_until(self, method: Callable, timeout: int, **method_params: Any) -> None:
        """Sleeps until method() evaluates to true. Passes kwargs into method."""
        start = datetime.now()
        while not method(**method_params):
            curr = datetime.now()
            if (curr - start).total_seconds() * 1000 > timeout:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/rocksetdb.html
076a75875f80-1
            if (curr - start).total_seconds() * 1000 > timeout:
                raise TimeoutError(f"{method} timed out at {timeout} ms")
            sleep(RocksetChatMessageHistory.SLEEP_INTERVAL_MS / 1000)

    def _query(self, query: str, **query_params: Any) -> List[Any]:
        """Executes an SQL statement and returns the result.

        Args:
            - query: The SQL string
            - **query_params: Parameters to pass into the query
        """
        return self.client.sql(query, params=query_params).results

    def _create_collection(self) -> None:
        """Creates a collection for this message history"""
        self.client.Collections.create_s3_collection(
            name=self.collection, workspace=self.workspace
        )

    def _collection_exists(self) -> bool:
        """Checks whether a collection exists for this message history"""
        try:
            self.client.Collections.get(collection=self.collection)
        except self.rockset.exceptions.NotFoundException:
            return False
        return True

    def _collection_is_ready(self) -> bool:
        """Checks whether the collection for this message history is ready
        to be queried
        """
        return (
            self.client.Collections.get(collection=self.collection).data.status
            == "READY"
        )

    def _document_exists(self) -> bool:
        return (
            len(
                self._query(
                    f"""
                        SELECT 1
                        FROM {self.location}
                        WHERE _id=:session_id
                        LIMIT 1
                    """,
                    session_id=self.session_id,
                )
            )
            != 0
        )

    def _wait_until_collection_created(self) -> None:
        """Sleeps until the collection for this message history is ready
        to be queried
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/rocksetdb.html
076a75875f80-2
"""Sleeps until the collection for this message history is ready to be queried """ self._wait_until( lambda: self._collection_is_ready(), RocksetChatMessageHistory.CREATE_TIMEOUT_MS, ) def _wait_until_message_added(self, message_id: str) -> None: """Sleeps until a message is added to the messages list""" self._wait_until( lambda message_id: len( self._query( f""" SELECT * FROM UNNEST(( SELECT {self.messages_key} FROM {self.location} WHERE _id = :session_id )) AS message WHERE message.data.additional_kwargs.id = :message_id LIMIT 1 """, session_id=self.session_id, message_id=message_id, ), ) != 0, RocksetChatMessageHistory.ADD_TIMEOUT_MS, message_id=message_id, ) def _create_empty_doc(self) -> None: """Creates or replaces a document for this message history with no messages""" self.client.Documents.add_documents( collection=self.collection, workspace=self.workspace, data=[{"_id": self.session_id, self.messages_key: []}], ) [docs] def __init__( self, session_id: str, client: Any, collection: str, workspace: str = "commons", messages_key: str = "messages", sync: bool = False, message_uuid_method: Callable[[], Union[str, int]] = lambda: str(uuid4()), ) -> None: """Constructs a new RocksetChatMessageHistory. Args:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/rocksetdb.html
076a75875f80-3
"""Constructs a new RocksetChatMessageHistory. Args: - session_id: The ID of the chat session - client: The RocksetClient object to use to query - collection: The name of the collection to use to store chat messages. If a collection with the given name does not exist in the workspace, it is created. - workspace: The workspace containing `collection`. Defaults to `"commons"` - messages_key: The DB column containing message history. Defaults to `"messages"` - sync: Whether to wait for messages to be added. Defaults to `False`. NOTE: setting this to `True` will slow down performance. - message_uuid_method: The method that generates message IDs. If set, all messages will have an `id` field within the `additional_kwargs` property. If this param is not set and `sync` is `False`, message IDs will not be created. If this param is not set and `sync` is `True`, the `uuid.uuid4` method will be used to create message IDs. """ try: import rockset except ImportError: raise ImportError( "Could not import rockset client python package. " "Please install it with `pip install rockset`." ) if not isinstance(client, rockset.RocksetClient): raise ValueError( f"client should be an instance of rockset.RocksetClient, " f"got {type(client)}" ) self.session_id = session_id self.client = client self.collection = collection self.workspace = workspace self.location = f'"{self.workspace}"."{self.collection}"'
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/rocksetdb.html
076a75875f80-4
        self.location = f'"{self.workspace}"."{self.collection}"'
        self.rockset = rockset
        self.messages_key = messages_key
        self.message_uuid_method = message_uuid_method
        self.sync = sync

        if not self._collection_exists():
            self._create_collection()
            self._wait_until_collection_created()
            self._create_empty_doc()
        elif not self._document_exists():
            self._create_empty_doc()

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Messages in this chat history."""
        return messages_from_dict(
            self._query(
                f"""
                    SELECT *
                    FROM UNNEST ((
                        SELECT "{self.messages_key}"
                        FROM {self.location}
                        WHERE _id = :session_id
                    ))
                """,
                session_id=self.session_id,
            )
        )

    def add_message(self, message: BaseMessage) -> None:
        """Add a Message object to the history.

        Args:
            message: A BaseMessage object to store.
        """
        if self.sync and "id" not in message.additional_kwargs:
            message.additional_kwargs["id"] = self.message_uuid_method()
        self.client.Documents.patch_documents(
            collection=self.collection,
            workspace=self.workspace,
            data=[
                self.rockset.model.patch_document.PatchDocument(
                    id=self.session_id,
                    patch=[
                        self.rockset.model.patch_operation.PatchOperation(
                            op="ADD",
                            path=f"/{self.messages_key}/-",
                            value=_message_to_dict(message),
                        )
                    ],
                )
            ],
        )
        if self.sync:
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/rocksetdb.html
076a75875f80-5
                    ],
                )
            ],
        )
        if self.sync:
            self._wait_until_message_added(message.additional_kwargs["id"])

    def clear(self) -> None:
        """Removes all messages from the chat history"""
        self._create_empty_doc()
        if self.sync:
            self._wait_until(
                lambda: not self.messages,
                RocksetChatMessageHistory.ADD_TIMEOUT_MS,
            )
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/rocksetdb.html
8b7dce9024a9-0
Source code for langchain.memory.chat_message_histories.streamlit

from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
)
from langchain.schema.messages import BaseMessage


class StreamlitChatMessageHistory(BaseChatMessageHistory):
    """
    Chat message history that stores messages in Streamlit session state.

    Args:
        key: The key to use in Streamlit session state for storing messages.
    """

    def __init__(self, key: str = "langchain_messages"):
        try:
            import streamlit as st
        except ImportError as e:
            raise ImportError(
                "Unable to import streamlit, please run `pip install streamlit`."
            ) from e

        if key not in st.session_state:
            st.session_state[key] = []
        self._messages = st.session_state[key]

    @property
    def messages(self) -> List[BaseMessage]:  # type: ignore
        """Retrieve the current list of messages"""
        return self._messages

    def add_message(self, message: BaseMessage) -> None:
        """Add a message to the session memory"""
        self._messages.append(message)

    def clear(self) -> None:
        """Clear session memory"""
        self._messages.clear()
https://api.python.langchain.com/en/latest/_modules/langchain/memory/chat_message_histories/streamlit.html
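A minimal usage sketch for the class above, meant to run inside a Streamlit script (the key shown is the default):

import streamlit as st

from langchain.memory.chat_message_histories.streamlit import StreamlitChatMessageHistory

# Messages are kept in st.session_state under the given key, so they persist
# across reruns of the script within the same browser session.
history = StreamlitChatMessageHistory(key="langchain_messages")
if not history.messages:
    history.add_ai_message("How can I help you?")

for msg in history.messages:
    st.write(f"{msg.type}: {msg.content}")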
eac0fc9e46f9-0
Source code for langchain.document_loaders.text

import logging
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.helpers import detect_file_encodings

logger = logging.getLogger(__name__)


class TextLoader(BaseLoader):
    """Load text files.

    Args:
        file_path: Path to the file to load.

        encoding: File encoding to use. If `None`, the file will be loaded
            with the default system encoding.

        autodetect_encoding: Whether to try to autodetect the file encoding
            if the specified encoding fails.
    """

    def __init__(
        self,
        file_path: str,
        encoding: Optional[str] = None,
        autodetect_encoding: bool = False,
    ):
        """Initialize with file path."""
        self.file_path = file_path
        self.encoding = encoding
        self.autodetect_encoding = autodetect_encoding

    def load(self) -> List[Document]:
        """Load from file path."""
        text = ""
        try:
            with open(self.file_path, encoding=self.encoding) as f:
                text = f.read()
        except UnicodeDecodeError as e:
            if self.autodetect_encoding:
                detected_encodings = detect_file_encodings(self.file_path)
                for encoding in detected_encodings:
                    logger.debug(f"Trying encoding: {encoding.encoding}")
                    try:
                        with open(self.file_path, encoding=encoding.encoding) as f:
                            text = f.read()
                        break
                    except UnicodeDecodeError:
                        continue
            else:
                raise RuntimeError(f"Error loading {self.file_path}") from e
        except Exception as e:
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/text.html
eac0fc9e46f9-1
        except Exception as e:
            raise RuntimeError(f"Error loading {self.file_path}") from e

        metadata = {"source": self.file_path}
        return [Document(page_content=text, metadata=metadata)]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/text.html
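A minimal usage sketch for the class above; the path is a placeholder:

from langchain.document_loaders.text import TextLoader

# "notes.txt" is illustrative; autodetect_encoding only kicks in if decoding
# with the given (or default) encoding raises UnicodeDecodeError.
loader = TextLoader("notes.txt", encoding="utf-8", autodetect_encoding=True)
docs = loader.load()
print(docs[0].metadata)          # {'source': 'notes.txt'}
print(docs[0].page_content[:80])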
a48a0e042aec-0
Source code for langchain.document_loaders.rtf

"""Loads rich text files."""
from typing import Any, List

from langchain.document_loaders.unstructured import (
    UnstructuredFileLoader,
    satisfies_min_unstructured_version,
)


class UnstructuredRTFLoader(UnstructuredFileLoader):
    """Loader that uses unstructured to load RTF files.

    You can run the loader in one of two modes: "single" and "elements".
    If you use "single" mode, the document will be returned as a single
    langchain Document object. If you use "elements" mode, the unstructured
    library will split the document into elements such as Title and
    NarrativeText. You can pass in additional unstructured kwargs after mode
    to apply different unstructured settings.

    Examples
    --------
    from langchain.document_loaders import UnstructuredRTFLoader

    loader = UnstructuredRTFLoader(
        "example.rtf", mode="elements", strategy="fast",
    )
    docs = loader.load()

    References
    ----------
    https://unstructured-io.github.io/unstructured/bricks.html#partition-rtf
    """

    def __init__(
        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
    ):
        """
        Initialize with a file path.

        Args:
            file_path: The path to the file to load.
            mode: The mode to use for partitioning. See unstructured for details.
                Defaults to "single".
            **unstructured_kwargs: Additional keyword arguments to pass
                to unstructured.
        """
        min_unstructured_version = "0.5.12"
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/rtf.html
a48a0e042aec-1
        if not satisfies_min_unstructured_version(min_unstructured_version):
            raise ValueError(
                "Partitioning rtf files is only supported in "
                f"unstructured>={min_unstructured_version}."
            )
        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)

    def _get_elements(self) -> List:
        from unstructured.partition.rtf import partition_rtf

        return partition_rtf(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/rtf.html
c89c9655e346-0
Source code for langchain.document_loaders.youtube

"""Loads YouTube transcript."""
from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Union
from urllib.parse import parse_qs, urlparse

from pydantic import root_validator
from pydantic.dataclasses import dataclass

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)

SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]


@dataclass
class GoogleApiClient:
    """A Generic Google Api Client.

    To use, you should have the ``google_auth_oauthlib,youtube_transcript_api,google``
    python package installed.
    As the google api expects credentials you need to set up a google account and
    register your Service. "https://developers.google.com/docs/api/quickstart/python"

    Example:
        .. code-block:: python

            from langchain.document_loaders import GoogleApiClient
            google_api_client = GoogleApiClient(
                service_account_path=Path("path_to_your_sec_file.json")
            )
    """

    credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
    service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
    token_path: Path = Path.home() / ".credentials" / "token.json"

    def __post_init__(self) -> None:
        self.creds = self._load_credentials()

    @root_validator
    def validate_channel_or_videoIds_is_set(
        cls, values: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Validate that either folder_id or document_ids is set, but not both."""
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
c89c9655e346-1
"""Validate that either folder_id or document_ids is set, but not both.""" if not values.get("credentials_path") and not values.get( "service_account_path" ): raise ValueError("Must specify either channel_name or video_ids") return values def _load_credentials(self) -> Any: """Load credentials.""" # Adapted from https://developers.google.com/drive/api/v3/quickstart/python try: from google.auth.transport.requests import Request from google.oauth2 import service_account from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401 except ImportError: raise ImportError( "You must run" "`pip install --upgrade " "google-api-python-client google-auth-httplib2 " "google-auth-oauthlib " "youtube-transcript-api` " "to use the Google Drive loader" ) creds = None if self.service_account_path.exists(): return service_account.Credentials.from_service_account_file( str(self.service_account_path) ) if self.token_path.exists(): creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES) if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( str(self.credentials_path), SCOPES ) creds = flow.run_local_server(port=0) with open(self.token_path, "w") as token: token.write(creds.to_json()) return creds
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
c89c9655e346-2
                token.write(creds.to_json())

        return creds


ALLOWED_SCHEMAS = {"http", "https"}
ALLOWED_NETLOCK = {
    "youtu.be",
    "m.youtube.com",
    "youtube.com",
    "www.youtube.com",
    "www.youtube-nocookie.com",
    "vid.plus",
}


def _parse_video_id(url: str) -> Optional[str]:
    """Parse a youtube url and return the video id if valid, otherwise None."""
    parsed_url = urlparse(url)

    if parsed_url.scheme not in ALLOWED_SCHEMAS:
        return None

    if parsed_url.netloc not in ALLOWED_NETLOCK:
        return None

    path = parsed_url.path

    if path.endswith("/watch"):
        query = parsed_url.query
        parsed_query = parse_qs(query)
        if "v" in parsed_query:
            ids = parsed_query["v"]
            video_id = ids if isinstance(ids, str) else ids[0]
        else:
            return None
    else:
        path = parsed_url.path.lstrip("/")
        video_id = path.split("/")[-1]

    if len(video_id) != 11:  # Video IDs are 11 characters long
        return None

    return video_id


class YoutubeLoader(BaseLoader):
    """Loads Youtube transcripts."""

    def __init__(
        self,
        video_id: str,
        add_video_info: bool = False,
        language: Union[str, Sequence[str]] = "en",
        translation: str = "en",
        continue_on_failure: bool = False,
    ):
        """Initialize with YouTube video ID."""
        self.video_id = video_id
        self.add_video_info = add_video_info
        self.language = language
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
c89c9655e346-3
        self.add_video_info = add_video_info
        self.language = language
        if isinstance(language, str):
            self.language = [language]
        else:
            self.language = language
        self.translation = translation
        self.continue_on_failure = continue_on_failure

    @staticmethod
    def extract_video_id(youtube_url: str) -> str:
        """Extract video id from common YT urls."""
        video_id = _parse_video_id(youtube_url)
        if not video_id:
            raise ValueError(
                f"Could not determine the video ID for the URL {youtube_url}"
            )
        return video_id

    @classmethod
    def from_youtube_url(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
        """Given youtube URL, load video."""
        video_id = cls.extract_video_id(youtube_url)
        return cls(video_id, **kwargs)

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            from youtube_transcript_api import (
                NoTranscriptFound,
                TranscriptsDisabled,
                YouTubeTranscriptApi,
            )
        except ImportError:
            raise ImportError(
                "Could not import youtube_transcript_api python package. "
                "Please install it with `pip install youtube-transcript-api`."
            )

        metadata = {"source": self.video_id}

        if self.add_video_info:
            # Get more video meta info
            # Such as title, description, thumbnail url, publish_date
            video_info = self._get_video_info()
            metadata.update(video_info)

        try:
            transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
        except TranscriptsDisabled:
            return []

        try:
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
c89c9655e346-4
        except TranscriptsDisabled:
            return []

        try:
            transcript = transcript_list.find_transcript(self.language)
        except NoTranscriptFound:
            en_transcript = transcript_list.find_transcript(["en"])
            transcript = en_transcript.translate(self.translation)

        transcript_pieces = transcript.fetch()

        transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces])

        return [Document(page_content=transcript, metadata=metadata)]

    def _get_video_info(self) -> dict:
        """Get important video information.

        Components are:
            - title
            - description
            - thumbnail url
            - publish_date
            - channel_author
            - and more.
        """
        try:
            from pytube import YouTube
        except ImportError:
            raise ImportError(
                "Could not import pytube python package. "
                "Please install it with `pip install pytube`."
            )
        yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
        video_info = {
            "title": yt.title or "Unknown",
            "description": yt.description or "Unknown",
            "view_count": yt.views or 0,
            "thumbnail_url": yt.thumbnail_url or "Unknown",
            "publish_date": yt.publish_date.strftime("%Y-%m-%d %H:%M:%S")
            if yt.publish_date
            else "Unknown",
            "length": yt.length or 0,
            "author": yt.author or "Unknown",
        }
        return video_info


@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
    """Loads all Videos from a Channel

    To use, you should have the ``googleapiclient,youtube_transcript_api``
    python package installed.
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
c89c9655e346-5
    python package installed.
    As the service needs a google_api_client, you first have to initialize
    the GoogleApiClient.

    Additionally you have to either provide a channel name or a list of videoids
    "https://developers.google.com/docs/api/quickstart/python"

    Example:
        .. code-block:: python

            from langchain.document_loaders import GoogleApiClient
            from langchain.document_loaders import GoogleApiYoutubeLoader
            google_api_client = GoogleApiClient(
                service_account_path=Path("path_to_your_sec_file.json")
            )
            loader = GoogleApiYoutubeLoader(
                google_api_client=google_api_client,
                channel_name = "CodeAesthetic"
            )
            loader.load()
    """

    google_api_client: GoogleApiClient
    channel_name: Optional[str] = None
    video_ids: Optional[List[str]] = None
    add_video_info: bool = True
    captions_language: str = "en"
    continue_on_failure: bool = False

    def __post_init__(self) -> None:
        self.youtube_client = self._build_youtube_client(self.google_api_client.creds)

    def _build_youtube_client(self, creds: Any) -> Any:
        try:
            from googleapiclient.discovery import build
            from youtube_transcript_api import YouTubeTranscriptApi  # noqa: F401
        except ImportError:
            raise ImportError(
                "You must run"
                "`pip install --upgrade "
                "google-api-python-client google-auth-httplib2 "
                "google-auth-oauthlib "
                "youtube-transcript-api` "
                "to use the Google Drive loader"
            )

        return build("youtube", "v3", credentials=creds)

    @root_validator
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
c89c9655e346-6
    @root_validator
    def validate_channel_or_videoIds_is_set(
        cls, values: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Validate that either folder_id or document_ids is set, but not both."""
        if not values.get("channel_name") and not values.get("video_ids"):
            raise ValueError("Must specify either channel_name or video_ids")
        return values

    def _get_transcripe_for_video_id(self, video_id: str) -> str:
        from youtube_transcript_api import NoTranscriptFound, YouTubeTranscriptApi

        transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
        try:
            transcript = transcript_list.find_transcript([self.captions_language])
        except NoTranscriptFound:
            for available_transcript in transcript_list:
                transcript = available_transcript.translate(self.captions_language)
                continue

        transcript_pieces = transcript.fetch()
        return " ".join([t["text"].strip(" ") for t in transcript_pieces])

    def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
        captions = self._get_transcripe_for_video_id(video_id)
        video_response = (
            self.youtube_client.videos()
            .list(
                part="id,snippet",
                id=video_id,
            )
            .execute()
        )
        return Document(
            page_content=captions,
            metadata=video_response.get("items")[0],
        )

    def _get_channel_id(self, channel_name: str) -> str:
        request = self.youtube_client.search().list(
            part="id",
            q=channel_name,
            type="channel",
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
c89c9655e346-7
part="id", q=channel_name, type="channel", maxResults=1, # we only need one result since channel names are unique ) response = request.execute() channel_id = response["items"][0]["id"]["channelId"] return channel_id def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]: try: from youtube_transcript_api import ( NoTranscriptFound, TranscriptsDisabled, ) except ImportError: raise ImportError( "You must run" "`pip install --upgrade " "youtube-transcript-api` " "to use the youtube loader" ) channel_id = self._get_channel_id(channel) request = self.youtube_client.search().list( part="id,snippet", channelId=channel_id, maxResults=50, # adjust this value to retrieve more or fewer videos ) video_ids = [] while request is not None: response = request.execute() # Add each video ID to the list for item in response["items"]: if not item["id"].get("videoId"): continue meta_data = {"videoId": item["id"]["videoId"]} if self.add_video_info: item["snippet"].pop("thumbnails") meta_data.update(item["snippet"]) try: page_content = self._get_transcripe_for_video_id( item["id"]["videoId"] ) video_ids.append( Document( page_content=page_content, metadata=meta_data, ) )
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
c89c9655e346-8
                            metadata=meta_data,
                        )
                    )
                except (TranscriptsDisabled, NoTranscriptFound) as e:
                    if self.continue_on_failure:
                        logger.error(
                            "Error fetching transcript "
                            + f" {item['id']['videoId']}, exception: {e}"
                        )
                    else:
                        raise e
                    pass
            request = self.youtube_client.search().list_next(request, response)

        return video_ids

    def load(self) -> List[Document]:
        """Load documents."""
        document_list = []
        if self.channel_name:
            document_list.extend(self._get_document_for_channel(self.channel_name))
        elif self.video_ids:
            document_list.extend(
                [
                    self._get_document_for_video_id(video_id)
                    for video_id in self.video_ids
                ]
            )
        else:
            raise ValueError("Must specify either channel_name or video_ids")
        return document_list
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/youtube.html
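A minimal usage sketch for YoutubeLoader from the module above; the URL is illustrative, and add_video_info=True would additionally require the pytube package:

from langchain.document_loaders.youtube import YoutubeLoader

# from_youtube_url extracts the 11-character video ID from common YouTube URL shapes.
loader = YoutubeLoader.from_youtube_url(
    "https://www.youtube.com/watch?v=dQw4w9WgXcQ",  # example URL
    add_video_info=False,
    language="en",
)
docs = loader.load()  # one Document whose page_content is the joined transcript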
12567918a8ae-0
Source code for langchain.document_loaders.psychic

"""Loads documents from Psychic.dev."""
from typing import List, Optional

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class PsychicLoader(BaseLoader):
    """Loads documents from Psychic.dev."""

    def __init__(
        self, api_key: str, account_id: str, connector_id: Optional[str] = None
    ):
        """Initialize with API key, connector id, and account id.

        Args:
            api_key: The Psychic API key.
            account_id: The Psychic account id.
            connector_id: The Psychic connector id.
        """
        try:
            from psychicapi import ConnectorId, Psychic  # noqa: F401
        except ImportError:
            raise ImportError(
                "`psychicapi` package not found, please run `pip install psychicapi`"
            )
        self.psychic = Psychic(secret_key=api_key)
        self.connector_id = ConnectorId(connector_id)
        self.account_id = account_id

    def load(self) -> List[Document]:
        """Load documents."""
        psychic_docs = self.psychic.get_documents(
            connector_id=self.connector_id, account_id=self.account_id
        )
        return [
            Document(
                page_content=doc["content"],
                metadata={"title": doc["title"], "source": doc["uri"]},
            )
            for doc in psychic_docs.documents
        ]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/psychic.html
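A minimal usage sketch for the class above; all three identifiers are placeholders, and connector_id must be a value accepted by psychicapi's ConnectorId enum ("notion" is shown only as an assumed example):

from langchain.document_loaders.psychic import PsychicLoader

# Placeholder credentials from a hypothetical Psychic.dev account.
loader = PsychicLoader(
    api_key="PSYCHIC_SECRET_KEY",
    account_id="my-account-id",
    connector_id="notion",
)
docs = loader.load()  # one Document per synced page, with title and uri in metadata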
2c8989a57408-0
Source code for langchain.document_loaders.recursive_url_loader

import asyncio
import re
from typing import Callable, Iterator, List, Optional, Set, Union
from urllib.parse import urljoin, urlparse

import requests

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class RecursiveUrlLoader(BaseLoader):
    """Loads all child links from a given url."""

    def __init__(
        self,
        url: str,
        max_depth: Optional[int] = None,
        use_async: Optional[bool] = None,
        extractor: Optional[Callable[[str], str]] = None,
        exclude_dirs: Optional[str] = None,
        timeout: Optional[int] = None,
        prevent_outside: Optional[bool] = None,
    ) -> None:
        """Initialize with URL to crawl and any subdirectories to exclude.

        Args:
            url: The URL to crawl.
            exclude_dirs: A list of subdirectories to exclude.
            use_async: Whether to use asynchronous loading; if use_async is true,
                this function will not be lazy, but it will still work in the
                expected way, just not lazy.
            extractor: A function to extract the text from the html; when the
                extractor returns an empty string, the document will be ignored.
            max_depth: The max depth of the recursive loading.
            timeout: The timeout for the requests, in the unit of seconds.
        """
        self.url = url
        self.exclude_dirs = exclude_dirs
        self.use_async = use_async if use_async is not None else False
        self.extractor = extractor if extractor is not None else lambda x: x
        self.max_depth = max_depth if max_depth is not None else 2
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
2c8989a57408-1
        self.max_depth = max_depth if max_depth is not None else 2
        self.timeout = timeout if timeout is not None else 10
        self.prevent_outside = prevent_outside if prevent_outside is not None else True

    def _get_sub_links(self, raw_html: str, base_url: str) -> List[str]:
        """Extract all links from the raw html and convert them into absolute paths.

        Args:
            raw_html (str): original html
            base_url (str): the base url of the html

        Returns:
            List[str]: sub links
        """
        # Get all links that are relative to the root of the website
        all_links = re.findall(r"href=[\"\'](.*?)[\"\']", raw_html)
        absolute_paths = []
        invalid_prefixes = ("javascript:", "mailto:", "#")
        invalid_suffixes = (
            ".css",
            ".js",
            ".ico",
            ".png",
            ".jpg",
            ".jpeg",
            ".gif",
            ".svg",
        )
        # Process the links
        for link in all_links:
            # Ignore blacklisted patterns
            # like javascript: or mailto:, files of svg, ico, css, js
            if link.startswith(invalid_prefixes) or link.endswith(invalid_suffixes):
                continue
            # Some may be absolute links like https://to/path
            if link.startswith("http"):
                if (not self.prevent_outside) or (
                    self.prevent_outside and link.startswith(base_url)
                ):
                    absolute_paths.append(link)
                else:
                    absolute_paths.append(urljoin(base_url, link))
            # Some may be relative links like /to/path
            if link.startswith("/") and not link.startswith("//"):
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
2c8989a57408-2
if link.startswith("/") and not link.startswith("//"): absolute_paths.append(urljoin(base_url, link)) continue # Some may have omitted the protocol like //to/path if link.startswith("//"): absolute_paths.append(f"{urlparse(base_url).scheme}:{link}") continue # Remove duplicates # also do another filter to prevent outside links absolute_paths = list( set( [ path for path in absolute_paths if not self.prevent_outside or path.startswith(base_url) and path != base_url ] ) ) return absolute_paths def _gen_metadata(self, raw_html: str, url: str) -> dict: """Build metadata from BeautifulSoup output.""" try: from bs4 import BeautifulSoup except ImportError: print("The bs4 package is required for the RecursiveUrlLoader.") print("Please install it with `pip install bs4`.") metadata = {"source": url} soup = BeautifulSoup(raw_html, "html.parser") if title := soup.find("title"): metadata["title"] = title.get_text() if description := soup.find("meta", attrs={"name": "description"}): metadata["description"] = description.get("content", None) if html := soup.find("html"): metadata["language"] = html.get("lang", None) return metadata def _get_child_links_recursive( self, url: str, visited: Optional[Set[str]] = None, depth: int = 0 ) -> Iterator[Document]: """Recursively get all child links starting with the path of the input URL. Args: url: The URL to crawl.
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
2c8989a57408-3
        Args:
            url: The URL to crawl.
            visited: A set of visited URLs.
        """
        if depth > self.max_depth:
            return []

        # Add a trailing slash if not present
        if not url.endswith("/"):
            url += "/"

        # Exclude the root and parent from a list
        visited = set() if visited is None else visited

        # Exclude the links that start with any of the excluded directories
        if self.exclude_dirs and any(
            url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs
        ):
            return []

        # Get all links that can be accessed from the current URL
        try:
            response = requests.get(url, timeout=self.timeout)
        except Exception:
            return []

        absolute_paths = self._get_sub_links(response.text, url)

        # Store the visited links and recursively visit the children
        for link in absolute_paths:
            # Check all unvisited links
            if link not in visited:
                visited.add(link)
                try:
                    response = requests.get(link)
                    text = response.text
                except Exception:
                    # unreachable link, so just ignore it
                    continue
                loaded_link = Document(
                    page_content=self.extractor(text),
                    metadata=self._gen_metadata(text, link),
                )
                yield loaded_link
                # If the link is a directory (w/ children) then visit it
                if link.endswith("/"):
                    yield from self._get_child_links_recursive(link, visited, depth + 1)

        return []

    async def _async_get_child_links_recursive(
        self, url: str, visited: Optional[Set[str]] = None, depth: int = 0
    ) -> List[Document]:
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
2c8989a57408-4
    ) -> List[Document]:
        """Recursively get all child links starting with the path of the input URL.

        Args:
            url: The URL to crawl.
            visited: A set of visited URLs.
            depth: To reach the current url, how many pages have been visited.
        """
        try:
            import aiohttp
        except ImportError:
            print("The aiohttp package is required for the RecursiveUrlLoader.")
            print("Please install it with `pip install aiohttp`.")
        if depth > self.max_depth:
            return []

        # Add a trailing slash if not present
        if not url.endswith("/"):
            url += "/"

        # Exclude the root and parent from a list
        visited = set() if visited is None else visited

        # Exclude the links that start with any of the excluded directories
        if self.exclude_dirs and any(
            url.startswith(exclude_dir) for exclude_dir in self.exclude_dirs
        ):
            return []

        # Disable SSL verification because websites may have invalid SSL certificates,
        # but won't cause any security issues for us.
        async with aiohttp.ClientSession(
            connector=aiohttp.TCPConnector(ssl=False),
            timeout=aiohttp.ClientTimeout(self.timeout),
        ) as session:
            # Some url may be invalid, so catch the exception
            response: aiohttp.ClientResponse
            try:
                response = await session.get(url)
                text = await response.text()
            except aiohttp.client_exceptions.InvalidURL:
                return []
            # There may be some other exceptions, so catch them,
            # we don't want to stop the whole process
            except Exception:
                return []

        absolute_paths = self._get_sub_links(text, url)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
2c8989a57408-5
                return []

        absolute_paths = self._get_sub_links(text, url)

        # Worker will be only called within the current function
        # Worker function will process the link
        # then recursively call get_child_links_recursive to process the children
        async def worker(link: str) -> Union[Document, None]:
            try:
                async with aiohttp.ClientSession(
                    connector=aiohttp.TCPConnector(ssl=False),
                    timeout=aiohttp.ClientTimeout(self.timeout),
                ) as session:
                    response = await session.get(link)
                    text = await response.text()
                    extracted = self.extractor(text)
                    if len(extracted) > 0:
                        return Document(
                            page_content=extracted,
                            metadata=self._gen_metadata(text, link),
                        )
                    else:
                        return None
            # Despite the fact that we have filtered some links,
            # there may still be some invalid links, so catch the exception
            except aiohttp.client_exceptions.InvalidURL:
                return None
            # There may be some other exceptions, so catch them,
            # we don't want to stop the whole process
            except Exception:
                # print(e)
                return None

        # The coroutines that will be executed
        tasks = []
        # Generate the tasks
        for link in absolute_paths:
            # Check all unvisited links
            if link not in visited:
                visited.add(link)
                tasks.append(worker(link))
        # Get the not None results
        results = list(
            filter(lambda x: x is not None, await asyncio.gather(*tasks))
        )
        # Recursively call the function to get the children of the children
        sub_tasks = []
        for link in absolute_paths:
            sub_tasks.append(
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
2c8989a57408-6
        sub_tasks = []
        for link in absolute_paths:
            sub_tasks.append(
                self._async_get_child_links_recursive(link, visited, depth + 1)
            )
        # sub_tasks returns coroutines of list,
        # so we need to flatten the list
        flattened = []
        next_results = await asyncio.gather(*sub_tasks)
        for sub_result in next_results:
            if isinstance(sub_result, Exception):
                # We don't want to stop the whole process, so just ignore it
                # Not standard html format or invalid url or 404 may cause this
                # But we can't do anything about it.
                continue
            if sub_result is not None:
                flattened += sub_result
        results += flattened
        return list(filter(lambda x: x is not None, results))

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load web pages.
        When use_async is True, this function will not be lazy,
        but it will still work in the expected way, just not lazy."""
        if self.use_async:
            results = asyncio.run(self._async_get_child_links_recursive(self.url))
            if results is None:
                return iter([])
            else:
                return iter(results)
        else:
            return self._get_child_links_recursive(self.url)

    def load(self) -> List[Document]:
        """Load web pages."""
        return list(self.lazy_load())
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/recursive_url_loader.html
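A minimal usage sketch for the class above; the URL is illustrative, and BeautifulSoup is assumed as the text extractor:

from bs4 import BeautifulSoup

from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader

# Crawl two levels of an example docs site and strip the HTML down to text.
loader = RecursiveUrlLoader(
    url="https://docs.python.org/3.9/",
    max_depth=2,
    extractor=lambda html: BeautifulSoup(html, "html.parser").text,
)
docs = loader.load()
print(len(docs), docs[0].metadata)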
1394ec5ba77a-0
Source code for langchain.document_loaders.s3_file

"""Loading logic for loading documents from an AWS S3 file."""
import os
import tempfile
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader


class S3FileLoader(BaseLoader):
    """Loading logic for loading documents from an AWS S3 file."""

    def __init__(self, bucket: str, key: str):
        """Initialize with bucket and key name.

        Args:
            bucket: The name of the S3 bucket.
            key: The key of the S3 object.
        """
        self.bucket = bucket
        self.key = key

    def load(self) -> List[Document]:
        """Load documents."""
        try:
            import boto3
        except ImportError:
            raise ImportError(
                "Could not import `boto3` python package. "
                "Please install it with `pip install boto3`."
            )
        s3 = boto3.client("s3")
        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = f"{temp_dir}/{self.key}"
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            s3.download_file(self.bucket, self.key, file_path)
            loader = UnstructuredFileLoader(file_path)
            return loader.load()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/s3_file.html
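A minimal usage sketch for the class above; bucket and key are placeholders, and boto3 resolves AWS credentials through its usual mechanisms (environment variables, shared config, or an instance role):

from langchain.document_loaders.s3_file import S3FileLoader

# Placeholder bucket/key; the object is downloaded to a temporary directory
# and parsed with UnstructuredFileLoader.
loader = S3FileLoader(bucket="my-example-bucket", key="reports/summary.pdf")
docs = loader.load()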
e66f101762a1-0
Source code for langchain.document_loaders.toml

import json
from pathlib import Path
from typing import Iterator, List, Union

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


class TomlLoader(BaseLoader):
    """
    A TOML document loader that inherits from the BaseLoader class.

    This class can be initialized with either a single source file or a source
    directory containing TOML files.
    """

    def __init__(self, source: Union[str, Path]):
        """Initialize the TomlLoader with a source file or directory."""
        self.source = Path(source)

    def load(self) -> List[Document]:
        """Load and return all documents."""
        return list(self.lazy_load())

    def lazy_load(self) -> Iterator[Document]:
        """Lazily load the TOML documents from the source file or directory."""
        import tomli

        if self.source.is_file() and self.source.suffix == ".toml":
            files = [self.source]
        elif self.source.is_dir():
            files = list(self.source.glob("**/*.toml"))
        else:
            raise ValueError("Invalid source path or file type")

        for file_path in files:
            with file_path.open("r", encoding="utf-8") as file:
                content = file.read()
                try:
                    data = tomli.loads(content)
                    doc = Document(
                        page_content=json.dumps(data),
                        metadata={"source": str(file_path)},
                    )
                    yield doc
                except tomli.TOMLDecodeError as e:
                    print(f"Error parsing TOML file {file_path}: {e}")
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/toml.html
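A minimal usage sketch for the class above; the path is illustrative, and a directory can be passed instead (it is searched recursively for *.toml files):

from langchain.document_loaders.toml import TomlLoader

loader = TomlLoader("pyproject.toml")  # example file
docs = loader.load()
print(docs[0].page_content)  # the parsed TOML re-serialized as a JSON string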
3ce4dfcece68-0
Source code for langchain.document_loaders.chatgpt

"""Load conversations from ChatGPT data export"""
import datetime
import json
from typing import List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader


def concatenate_rows(message: dict, title: str) -> str:
    """
    Combine message information in a readable format ready to be used.

    Args:
        message: Message to be concatenated
        title: Title of the conversation

    Returns:
        Concatenated message
    """
    if not message:
        return ""

    sender = message["author"]["role"] if message["author"] else "unknown"
    text = message["content"]["parts"][0]
    date = datetime.datetime.fromtimestamp(message["create_time"]).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    return f"{title} - {sender} on {date}: {text}\n\n"


class ChatGPTLoader(BaseLoader):
    """Load conversations from exported ChatGPT data."""

    def __init__(self, log_file: str, num_logs: int = -1):
        """Initialize a class object.

        Args:
            log_file: Path to the log file
            num_logs: Number of logs to load. If 0, load all logs.
        """
        self.log_file = log_file
        self.num_logs = num_logs

    def load(self) -> List[Document]:
        with open(self.log_file, encoding="utf8") as f:
            data = json.load(f)[: self.num_logs] if self.num_logs else json.load(f)

        documents = []
        for d in data:
            title = d["title"]
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/chatgpt.html
3ce4dfcece68-1
        documents = []
        for d in data:
            title = d["title"]
            messages = d["mapping"]
            text = "".join(
                [
                    concatenate_rows(messages[key]["message"], title)
                    for idx, key in enumerate(messages)
                    if not (
                        idx == 0
                        and messages[key]["message"]["author"]["role"] == "system"
                    )
                ]
            )
            metadata = {"source": str(self.log_file)}
            documents.append(Document(page_content=text, metadata=metadata))

        return documents
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/chatgpt.html
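A minimal usage sketch for the class above; "conversations.json" is the file name used in ChatGPT data exports, but the path here is a placeholder:

from langchain.document_loaders.chatgpt import ChatGPTLoader

# Load the first five conversations from an exported archive (placeholder path).
loader = ChatGPTLoader(log_file="conversations.json", num_logs=5)
docs = loader.load()  # one Document per conversation, messages concatenated as text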
e3aa77c39e4d-0
Source code for langchain.document_loaders.confluence

"""Load Data from a Confluence Space"""
import logging
from enum import Enum
from io import BytesIO
from typing import Any, Callable, Dict, List, Optional, Union

from tenacity import (
    before_sleep_log,
    retry,
    stop_after_attempt,
    wait_exponential,
)

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)


class ContentFormat(str, Enum):
    """Enumerator of the content formats of Confluence page."""

    STORAGE = "body.storage"
    VIEW = "body.view"

    def get_content(self, page: dict) -> str:
        if self == ContentFormat.STORAGE:
            return page["body"]["storage"]["value"]
        elif self == ContentFormat.VIEW:
            return page["body"]["view"]["value"]

        raise ValueError("unknown content format")


class ConfluenceLoader(BaseLoader):
    """Load Confluence pages.

    Port of https://llamahub.ai/l/confluence

    This currently supports username/api_key, Oauth2 login or personal access token
    authentication.

    Specify a list page_ids and/or space_key to load in the corresponding pages into
    Document objects; if both are specified the union of both sets will be returned.

    You can also specify a boolean `include_attachments` to include attachments. This
    is set to False by default; if set to True all attachments will be downloaded and
    ConfluenceReader will extract the text from the attachments and add it to the
    Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG,
    SVG, Word and Excel.

    The Confluence API supports different formats of page content. The storage format is the
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html
e3aa77c39e4d-1
    The Confluence API supports different formats of page content. The storage format
    is the raw XML representation for storage. The view format is the HTML
    representation for viewing, with macros rendered as though viewed by users. You
    can pass an enum `content_format` argument to `load()` to specify the content
    format; this is set to `ContentFormat.STORAGE` by default.

    Hint: space_key and page_id can both be found in the URL of a page in Confluence
    - https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>

    Example:
        .. code-block:: python

            from langchain.document_loaders import ConfluenceLoader

            loader = ConfluenceLoader(
                url="https://yoursite.atlassian.com/wiki",
                username="me",
                api_key="12345"
            )
            documents = loader.load(space_key="SPACE", limit=50)

    :param url: _description_
    :type url: str
    :param api_key: _description_, defaults to None
    :type api_key: str, optional
    :param username: _description_, defaults to None
    :type username: str, optional
    :param oauth2: _description_, defaults to {}
    :type oauth2: dict, optional
    :param token: _description_, defaults to None
    :type token: str, optional
    :param cloud: _description_, defaults to True
    :type cloud: bool, optional
    :param number_of_retries: How many times to retry, defaults to 3
    :type number_of_retries: Optional[int], optional
    :param min_retry_seconds: defaults to 2
    :type min_retry_seconds: Optional[int], optional
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html
e3aa77c39e4d-2
    :type min_retry_seconds: Optional[int], optional
    :param max_retry_seconds: defaults to 10
    :type max_retry_seconds: Optional[int], optional
    :param confluence_kwargs: additional kwargs to initialize confluence with
    :type confluence_kwargs: dict, optional
    :raises ValueError: Errors while validating input
    :raises ImportError: Required dependencies not installed.
    """

    def __init__(
        self,
        url: str,
        api_key: Optional[str] = None,
        username: Optional[str] = None,
        oauth2: Optional[dict] = None,
        token: Optional[str] = None,
        cloud: Optional[bool] = True,
        number_of_retries: Optional[int] = 3,
        min_retry_seconds: Optional[int] = 2,
        max_retry_seconds: Optional[int] = 10,
        confluence_kwargs: Optional[dict] = None,
    ):
        confluence_kwargs = confluence_kwargs or {}
        errors = ConfluenceLoader.validate_init_args(
            url, api_key, username, oauth2, token
        )
        if errors:
            raise ValueError(f"Error(s) while validating input: {errors}")

        self.base_url = url
        self.number_of_retries = number_of_retries
        self.min_retry_seconds = min_retry_seconds
        self.max_retry_seconds = max_retry_seconds

        try:
            from atlassian import Confluence  # noqa: F401
        except ImportError:
            raise ImportError(
                "`atlassian` package not found, please run "
                "`pip install atlassian-python-api`"
            )

        if oauth2:
            self.confluence = Confluence(
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html
e3aa77c39e4d-3
        if oauth2:
            self.confluence = Confluence(
                url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
            )
        elif token:
            self.confluence = Confluence(
                url=url, token=token, cloud=cloud, **confluence_kwargs
            )
        else:
            self.confluence = Confluence(
                url=url,
                username=username,
                password=api_key,
                cloud=cloud,
                **confluence_kwargs,
            )

    @staticmethod
    def validate_init_args(
        url: Optional[str] = None,
        api_key: Optional[str] = None,
        username: Optional[str] = None,
        oauth2: Optional[dict] = None,
        token: Optional[str] = None,
    ) -> Union[List, None]:
        """Validates proper combinations of init arguments"""

        errors = []
        if url is None:
            errors.append("Must provide `base_url`")

        if (api_key and not username) or (username and not api_key):
            errors.append(
                "If one of `api_key` or `username` is provided, "
                "the other must be as well."
            )

        if (api_key or username) and oauth2:
            errors.append(
                "Cannot provide a value for `api_key` and/or "
                "`username` and provide a value for `oauth2`"
            )

        if oauth2 and oauth2.keys() != [
            "access_token",
            "access_token_secret",
            "consumer_key",
            "key_cert",
        ]:
            errors.append(
                "You have either omitted required keys or added extra "
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html
e3aa77c39e4d-4
            errors.append(
                "You have either omitted required keys or added extra "
                "keys to the oauth2 dictionary. key values should be "
                "`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`"
            )

        if token and (api_key or username or oauth2):
            errors.append(
                "Cannot provide a value for `token` and a value for `api_key`, "
                "`username` or `oauth2`"
            )

        if errors:
            return errors
        return None

    def load(
        self,
        space_key: Optional[str] = None,
        page_ids: Optional[List[str]] = None,
        label: Optional[str] = None,
        cql: Optional[str] = None,
        include_restricted_content: bool = False,
        include_archived_content: bool = False,
        include_attachments: bool = False,
        include_comments: bool = False,
        content_format: ContentFormat = ContentFormat.STORAGE,
        limit: Optional[int] = 50,
        max_pages: Optional[int] = 1000,
        ocr_languages: Optional[str] = None,
        keep_markdown_format: bool = False,
    ) -> List[Document]:
        """
        :param space_key: Space key retrieved from a confluence URL, defaults to None
        :type space_key: Optional[str], optional
        :param page_ids: List of specific page IDs to load, defaults to None
        :type page_ids: Optional[List[str]], optional
        :param label: Get all pages with this label, defaults to None
        :type label: Optional[str], optional
        :param cql: CQL Expression, defaults to None
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html
e3aa77c39e4d-5
        :param cql: CQL Expression, defaults to None
        :type cql: Optional[str], optional
        :param include_restricted_content: defaults to False
        :type include_restricted_content: bool, optional
        :param include_archived_content: Whether to include archived content,
            defaults to False
        :type include_archived_content: bool, optional
        :param include_attachments: defaults to False
        :type include_attachments: bool, optional
        :param include_comments: defaults to False
        :type include_comments: bool, optional
        :param content_format: Specify content format, defaults to
            ContentFormat.STORAGE
        :type content_format: ContentFormat
        :param limit: Maximum number of pages to retrieve per request, defaults to 50
        :type limit: int, optional
        :param max_pages: Maximum number of pages to retrieve in total, defaults 1000
        :type max_pages: int, optional
        :param ocr_languages: The languages to use for the Tesseract agent. To use a
            language, you'll first need to install the appropriate
            Tesseract language pack.
        :type ocr_languages: str, optional
        :param keep_markdown_format: Whether to keep the markdown format, defaults to
            False
        :type keep_markdown_format: bool
        :raises ValueError: _description_
        :raises ImportError: _description_
        :return: _description_
        :rtype: List[Document]
        """
        if not space_key and not page_ids and not label and not cql:
            raise ValueError(
                "Must specify at least one among `space_key`, `page_ids`, "
                "`label`, `cql` parameters."
            )

        docs = []

        if space_key:
            pages = self.paginate_request(
docs = [] if space_key: pages = self.paginate_request( self.confluence.get_all_pages_from_space, space=space_key, limit=limit, max_pages=max_pages, status="any" if include_archived_content else "current", expand=content_format.value, ) docs += self.process_pages( pages, include_restricted_content, include_attachments, include_comments, content_format, ocr_languages, keep_markdown_format, ) if label: pages = self.paginate_request( self.confluence.get_all_pages_by_label, label=label, limit=limit, max_pages=max_pages, ) ids_by_label = [page["id"] for page in pages] if page_ids: page_ids = list(set(page_ids + ids_by_label)) else: page_ids = list(set(ids_by_label)) if cql: pages = self.paginate_request( self._search_content_by_cql, cql=cql, limit=limit, max_pages=max_pages, include_archived_spaces=include_archived_content, expand=content_format.value, ) docs += self.process_pages( pages, include_restricted_content, include_attachments, include_comments, content_format, ocr_languages, keep_markdown_format, ) if page_ids: for page_id in page_ids: get_page = retry( reraise=True, stop=stop_after_attempt( self.number_of_retries # type: ignore[arg-type] ),
self.number_of_retries # type: ignore[arg-type] ), wait=wait_exponential( multiplier=1, # type: ignore[arg-type] min=self.min_retry_seconds, # type: ignore[arg-type] max=self.max_retry_seconds, # type: ignore[arg-type] ), before_sleep=before_sleep_log(logger, logging.WARNING), )(self.confluence.get_page_by_id) page = get_page(page_id=page_id, expand=content_format.value) if not include_restricted_content and not self.is_public_page(page): continue doc = self.process_page( page, include_attachments, include_comments, content_format, ocr_languages, keep_markdown_format, ) docs.append(doc) return docs def _search_content_by_cql( self, cql: str, include_archived_spaces: Optional[bool] = None, **kwargs: Any ) -> List[dict]: url = "rest/api/content/search" params: Dict[str, Any] = {"cql": cql} params.update(kwargs) if include_archived_spaces is not None: params["includeArchivedSpaces"] = include_archived_spaces response = self.confluence.get(url, params=params) return response.get("results", []) [docs] def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List: """Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to 100. Also, due to the Atlassian Python
seems to cap the response to 100. Also, due to the Atlassian Python package, we don't get the "next" values from the "_links" key because they only return the value from the result key. So here, the pagination starts from 0 and goes until the max_pages, getting the `limit` number of pages with each request. We have to manually check if there are more docs based on the length of the returned list of pages, rather than just checking for the presence of a `next` key in the response like this page would have you do: https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/ :param retrieval_method: Function used to retrieve docs :type retrieval_method: callable :return: List of documents :rtype: List """ max_pages = kwargs.pop("max_pages") docs: List[dict] = [] while len(docs) < max_pages: get_pages = retry( reraise=True, stop=stop_after_attempt( self.number_of_retries # type: ignore[arg-type] ), wait=wait_exponential( multiplier=1, min=self.min_retry_seconds, # type: ignore[arg-type] max=self.max_retry_seconds, # type: ignore[arg-type] ), before_sleep=before_sleep_log(logger, logging.WARNING), )(retrieval_method) batch = get_pages(**kwargs, start=len(docs)) if not batch: break docs.extend(batch) return docs[:max_pages] [docs] def is_public_page(self, page: dict) -> bool: """Check if a page is publicly accessible."""
"""Check if a page is publicly accessible.""" restrictions = self.confluence.get_all_restrictions_for_content(page["id"]) return ( page["status"] == "current" and not restrictions["read"]["restrictions"]["user"]["results"] and not restrictions["read"]["restrictions"]["group"]["results"] ) [docs] def process_pages( self, pages: List[dict], include_restricted_content: bool, include_attachments: bool, include_comments: bool, content_format: ContentFormat, ocr_languages: Optional[str] = None, keep_markdown_format: Optional[bool] = False, ) -> List[Document]: """Process a list of pages into a list of documents.""" docs = [] for page in pages: if not include_restricted_content and not self.is_public_page(page): continue doc = self.process_page( page, include_attachments, include_comments, content_format, ocr_languages, keep_markdown_format, ) docs.append(doc) return docs [docs] def process_page( self, page: dict, include_attachments: bool, include_comments: bool, content_format: ContentFormat, ocr_languages: Optional[str] = None, keep_markdown_format: Optional[bool] = False, ) -> Document: if keep_markdown_format: try: from markdownify import markdownify except ImportError: raise ImportError( "`markdownify` package not found, please run " "`pip install markdownify`" ) else: try:
"`pip install markdownify`" ) else: try: from bs4 import BeautifulSoup # type: ignore except ImportError: raise ImportError( "`beautifulsoup4` package not found, please run " "`pip install beautifulsoup4`" ) if include_attachments: attachment_texts = self.process_attachment(page["id"], ocr_languages) else: attachment_texts = [] if keep_markdown_format: # Use markdownify to keep the page Markdown style text = markdownify( page["body"]["storage"]["value"], heading_style="ATX" ) + "".join(attachment_texts) else: content = content_format.get_content(page) text = BeautifulSoup(content, "lxml").get_text(" ", strip=True) + "".join( attachment_texts ) if include_comments: comments = self.confluence.get_page_comments( page["id"], expand="body.view.value", depth="all" )["results"] comment_texts = [ BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text( " ", strip=True ) for comment in comments ] text = text + "".join(comment_texts) return Document( page_content=text, metadata={ "title": page["title"], "id": page["id"], "source": self.base_url.strip("/") + page["_links"]["webui"], }, ) [docs] def process_attachment( self, page_id: str, ocr_languages: Optional[str] = None, ) -> List[str]: try:
) -> List[str]: try: from PIL import Image # noqa: F401 except ImportError: raise ImportError( "`Pillow` package not found, " "please run `pip install Pillow`" ) # depending on setup you may also need to set the correct path for # poppler and tesseract attachments = self.confluence.get_attachments_from_content(page_id)["results"] texts = [] for attachment in attachments: media_type = attachment["metadata"]["mediaType"] absolute_url = self.base_url + attachment["_links"]["download"] title = attachment["title"] if media_type == "application/pdf": text = title + self.process_pdf(absolute_url, ocr_languages) elif ( media_type == "image/png" or media_type == "image/jpg" or media_type == "image/jpeg" ): text = title + self.process_image(absolute_url, ocr_languages) elif ( media_type == "application/vnd.openxmlformats-officedocument" ".wordprocessingml.document" ): text = title + self.process_doc(absolute_url) elif media_type == "application/vnd.ms-excel": text = title + self.process_xls(absolute_url) elif media_type == "image/svg+xml": text = title + self.process_svg(absolute_url, ocr_languages) else: continue texts.append(text) return texts [docs] def process_pdf( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401
try: import pytesseract # noqa: F401 from pdf2image import convert_from_bytes # noqa: F401 except ImportError: raise ImportError( "`pytesseract` or `pdf2image` package not found, " "please run `pip install pytesseract pdf2image`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text try: images = convert_from_bytes(response.content) except ValueError: return text for i, image in enumerate(images): image_text = pytesseract.image_to_string(image, lang=ocr_languages) text += f"Page {i + 1}:\n{image_text}\n\n" return text [docs] def process_image( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401 from PIL import Image # noqa: F401 except ImportError: raise ImportError( "`pytesseract` or `Pillow` package not found, " "please run `pip install pytesseract Pillow`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text try: image = Image.open(BytesIO(response.content)) except OSError: return text
image = Image.open(BytesIO(response.content)) except OSError: return text return pytesseract.image_to_string(image, lang=ocr_languages) [docs] def process_doc(self, link: str) -> str: try: import docx2txt # noqa: F401 except ImportError: raise ImportError( "`docx2txt` package not found, please run `pip install docx2txt`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text file_data = BytesIO(response.content) return docx2txt.process(file_data) [docs] def process_xls(self, link: str) -> str: import io import os try: import xlrd # noqa: F401 except ImportError: raise ImportError("`xlrd` package not found, please run `pip install xlrd`") try: import pandas as pd except ImportError: raise ImportError( "`pandas` package not found, please run `pip install pandas`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text filename = os.path.basename(link) # Getting the whole content of the url after filename, # Example: ".csv?version=2&modificationDate=1631800010678&cacheVersion=1&api=v2"
file_extension = os.path.splitext(filename)[1] if file_extension.startswith( ".csv" ): # if the extension found in the url is ".csv" content_string = response.content.decode("utf-8") df = pd.read_csv(io.StringIO(content_string)) text += df.to_string(index=False, header=False) + "\n\n" else: workbook = xlrd.open_workbook(file_contents=response.content) for sheet in workbook.sheets(): text += f"{sheet.name}:\n" for row in range(sheet.nrows): for col in range(sheet.ncols): text += f"{sheet.cell_value(row, col)}\t" text += "\n" text += "\n" return text [docs] def process_svg( self, link: str, ocr_languages: Optional[str] = None, ) -> str: try: import pytesseract # noqa: F401 from PIL import Image # noqa: F401 from reportlab.graphics import renderPM # noqa: F401 from svglib.svglib import svg2rlg # noqa: F401 except ImportError: raise ImportError( "`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, " "please run `pip install pytesseract Pillow reportlab svglib`" ) response = self.confluence.request(path=link, absolute=True) text = "" if ( response.status_code != 200 or response.content == b"" or response.content is None ): return text drawing = svg2rlg(BytesIO(response.content))
): return text drawing = svg2rlg(BytesIO(response.content)) img_data = BytesIO() renderPM.drawToFile(drawing, img_data, fmt="PNG") img_data.seek(0) image = Image.open(img_data) return pytesseract.image_to_string(image, lang=ocr_languages)
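For orientation, here is a minimal usage sketch of the loader whose methods are listed above. It assumes the class is the ConfluenceLoader exported from langchain.document_loaders; the URL, credentials, and space key are placeholders for your own values, and `keep_markdown_format=True` additionally requires the `markdownify` package, as `process_page` above shows.

from langchain.document_loaders import ConfluenceLoader

loader = ConfluenceLoader(
    url="https://yoursite.atlassian.net/wiki",  # placeholder base URL
    username="me@example.com",                  # username and api_key go together
    api_key="<api-token>",                      # placeholder API token
)

# Load up to 50 pages from one space, leaving attachments and comments out
docs = loader.load(
    space_key="SPACE",          # placeholder space key
    limit=50,                   # pages fetched per request
    max_pages=50,               # total pages fetched
    keep_markdown_format=True,  # keep page bodies as Markdown via markdownify
)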
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html
f173fbeb6f6b-0
Source code for langchain.document_loaders.powerpoint """Loads PowerPoint files.""" import os from typing import List from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class UnstructuredPowerPointLoader(UnstructuredFileLoader): """Loader that uses unstructured to load PowerPoint files. Works with both .ppt and .pptx files. You can run the loader in one of two modes: "single" and "elements". If you use "single" mode, the document will be returned as a single langchain Document object. If you use "elements" mode, the unstructured library will split the document into elements such as Title and NarrativeText. You can pass in additional unstructured kwargs after mode to apply different unstructured settings. Examples -------- from langchain.document_loaders import UnstructuredPowerPointLoader loader = UnstructuredPowerPointLoader( "example.pptx", mode="elements", strategy="fast", ) docs = loader.load() References ---------- https://unstructured-io.github.io/unstructured/bricks.html#partition-pptx """ def _get_elements(self) -> List: from unstructured.__version__ import __version__ as __unstructured_version__ from unstructured.file_utils.filetype import FileType, detect_filetype unstructured_version = tuple( [int(x) for x in __unstructured_version__.split(".")] ) # NOTE(MthwRobinson) - magic will raise an import error if the libmagic # system dependency isn't installed. If it's not installed, we'll just # check the file extension try: import magic # noqa: F401
try: import magic # noqa: F401 is_ppt = detect_filetype(self.file_path) == FileType.PPT except ImportError: _, extension = os.path.splitext(str(self.file_path)) is_ppt = extension == ".ppt" if is_ppt and unstructured_version < (0, 4, 11): raise ValueError( f"You are on unstructured version {__unstructured_version__}. " "Partitioning .ppt files is only supported in unstructured>=0.4.11. " "Please upgrade the unstructured package and try again." ) if is_ppt: from unstructured.partition.ppt import partition_ppt return partition_ppt(filename=self.file_path, **self.unstructured_kwargs) else: from unstructured.partition.pptx import partition_pptx return partition_pptx(filename=self.file_path, **self.unstructured_kwargs)
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/powerpoint.html
b0de701ffa6a-0
Source code for langchain.document_loaders.evernote """Load documents from Evernote. https://gist.github.com/foxmask/7b29c43a161e001ff04afdb2f181e31c """ import hashlib import logging from base64 import b64decode from time import strptime from typing import Any, Dict, Iterator, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader logger = logging.getLogger(__name__) [docs]class EverNoteLoader(BaseLoader): """EverNote Loader. Loads an EverNote notebook export file e.g. my_notebook.enex into Documents. Instructions on producing this file can be found at https://help.evernote.com/hc/en-us/articles/209005557-Export-notes-and-notebooks-as-ENEX-or-HTML Currently only the plain text in the note is extracted and stored as the contents of the Document, any non content metadata (e.g. 'author', 'created', 'updated' etc. but not 'content-raw' or 'resource') tags on the note will be extracted and stored as metadata on the Document. Args: file_path (str): The path to the notebook export with a .enex extension load_single_document (bool): Whether or not to concatenate the content of all notes into a single long Document. If this is set to True (default) then the only metadata on the document will be the 'source' which contains the file name of the export. """ # noqa: E501 [docs] def __init__(self, file_path: str, load_single_document: bool = True): """Initialize with file path.""" self.file_path = file_path
"""Initialize with file path.""" self.file_path = file_path self.load_single_document = load_single_document [docs] def load(self) -> List[Document]: """Load documents from EverNote export file.""" documents = [ Document( page_content=note["content"], metadata={ **{ key: value for key, value in note.items() if key not in ["content", "content-raw", "resource"] }, **{"source": self.file_path}, }, ) for note in self._parse_note_xml(self.file_path) if note.get("content") is not None ] if not self.load_single_document: return documents return [ Document( page_content="".join([document.page_content for document in documents]), metadata={"source": self.file_path}, ) ] @staticmethod def _parse_content(content: str) -> str: try: import html2text return html2text.html2text(content).strip() except ImportError as e: raise ImportError( "Could not import `html2text`. Although it is not a required package " "to use Langchain, using the EverNote loader requires `html2text`. " "Please install `html2text` via `pip install html2text` and try again." ) from e @staticmethod def _parse_resource(resource: list) -> dict: rsc_dict: Dict[str, Any] = {} for elem in resource: if elem.tag == "data": # Sometimes elem.text is None
if elem.tag == "data": # Sometimes elem.text is None rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b"" rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest() else: rsc_dict[elem.tag] = elem.text return rsc_dict @staticmethod def _parse_note(note: List, prefix: Optional[str] = None) -> dict: note_dict: Dict[str, Any] = {} resources = [] def add_prefix(element_tag: str) -> str: if prefix is None: return element_tag return f"{prefix}.{element_tag}" for elem in note: if elem.tag == "content": note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text) # A copy of original content note_dict["content-raw"] = elem.text elif elem.tag == "resource": resources.append(EverNoteLoader._parse_resource(elem)) elif elem.tag == "created" or elem.tag == "updated": note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ") elif elem.tag == "note-attributes": additional_attributes = EverNoteLoader._parse_note( elem, elem.tag ) # Recursively enter the note-attributes tag note_dict.update(additional_attributes) else: note_dict[elem.tag] = elem.text if len(resources) > 0: note_dict["resource"] = resources return {add_prefix(key): value for key, value in note_dict.items()} @staticmethod
@staticmethod def _parse_note_xml(xml_file: str) -> Iterator[Dict[str, Any]]: """Parse Evernote xml.""" # Without huge_tree set to True, parser may complain about huge text node # Try to recover, because there may be "&nbsp;", which will cause # "XMLSyntaxError: Entity 'nbsp' not defined" try: from lxml import etree except ImportError as e: logger.error( "Could not import `lxml`. Although it is not a required package to use " "Langchain, using the EverNote loader requires `lxml`. Please install " "`lxml` via `pip install lxml` and try again." ) raise e context = etree.iterparse( xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True ) for action, elem in context: if elem.tag == "note": yield EverNoteLoader._parse_note(elem)
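As a usage sketch (the `.enex` path below is a placeholder for your own Evernote export), the `load_single_document` flag switches between one concatenated Document and one Document per note:

from langchain.document_loaders import EverNoteLoader

# Default: one long Document whose only metadata is the export file's path
docs = EverNoteLoader("my_notebook.enex").load()

# One Document per note, keeping note-level metadata such as 'created'/'updated'
docs = EverNoteLoader("my_notebook.enex", load_single_document=False).load()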
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html
ccc0283c2e38-0
Source code for langchain.document_loaders.azure_blob_storage_file import os import tempfile from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class AzureBlobStorageFileLoader(BaseLoader): """Loading Documents from Azure Blob Storage.""" [docs] def __init__(self, conn_str: str, container: str, blob_name: str): """Initialize with connection string, container and blob name.""" self.conn_str = conn_str """Connection string for Azure Blob Storage.""" self.container = container """Container name.""" self.blob = blob_name """Blob name.""" [docs] def load(self) -> List[Document]: """Load documents.""" try: from azure.storage.blob import BlobClient except ImportError as exc: raise ImportError( "Could not import azure storage blob python package. " "Please install it with `pip install azure-storage-blob`." ) from exc client = BlobClient.from_connection_string( conn_str=self.conn_str, container_name=self.container, blob_name=self.blob ) with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.container}/{self.blob}" os.makedirs(os.path.dirname(file_path), exist_ok=True) with open(f"{file_path}", "wb") as file: blob_data = client.download_blob() blob_data.readinto(file) loader = UnstructuredFileLoader(file_path) return loader.load()
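A minimal usage sketch; the connection string, container, and blob names below are placeholders for your own Azure resources, and `azure-storage-blob` must be installed, as the import guard above indicates.

from langchain.document_loaders import AzureBlobStorageFileLoader

loader = AzureBlobStorageFileLoader(
    conn_str="<your-connection-string>",  # placeholder
    container="my-container",             # placeholder
    blob_name="report.pdf",               # placeholder
)
# Downloads the blob to a temporary directory, then parses it with unstructured
docs = loader.load()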
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/azure_blob_storage_file.html
ce2c0ad5ea75-0
Source code for langchain.document_loaders.xorbits from typing import Any, Iterator, List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class XorbitsLoader(BaseLoader): """Load Xorbits DataFrame.""" [docs] def __init__(self, data_frame: Any, page_content_column: str = "text"): """Initialize with dataframe object. Requirements: Must have xorbits installed. You can install with `pip install xorbits`. Args: data_frame: Xorbits DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "text". """ try: import xorbits.pandas as pd except ImportError as e: raise ImportError( "Cannot import xorbits, please install with 'pip install xorbits'." ) from e if not isinstance(data_frame, pd.DataFrame): raise ValueError( f"Expected data_frame to be a xorbits.pandas.DataFrame, \ got {type(data_frame)}" ) self.data_frame = data_frame self.page_content_column = page_content_column [docs] def lazy_load(self) -> Iterator[Document]: """Lazy load records from dataframe.""" for _, row in self.data_frame.iterrows(): text = row[self.page_content_column] metadata = row.to_dict() metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata) [docs] def load(self) -> List[Document]: """Load full dataframe.""" return list(self.lazy_load())
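An illustrative sketch of how the loader above is typically driven (the column names are made up for the example); every column other than `page_content_column` ends up in the Document metadata:

import xorbits.pandas as pd

from langchain.document_loaders.xorbits import XorbitsLoader

df = pd.DataFrame({"text": ["first row", "second row"], "author": ["a", "b"]})
loader = XorbitsLoader(df, page_content_column="text")

for doc in loader.lazy_load():
    # doc.page_content comes from the "text" column; "author" lands in metadata
    print(doc.page_content, doc.metadata)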
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/xorbits.html
502e08f63d6f-0
Source code for langchain.document_loaders.obsidian """Loads Obsidian directory dump.""" import re from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]class ObsidianLoader(BaseLoader): """Loads Obsidian files from disk.""" FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL) [docs] def __init__( self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True ): """Initialize with a path. Args: path: Path to the directory containing the Obsidian files. encoding: Charset encoding, defaults to "UTF-8" collect_metadata: Whether to collect metadata from the front matter. Defaults to True. """ self.file_path = path self.encoding = encoding self.collect_metadata = collect_metadata def _parse_front_matter(self, content: str) -> dict: """Parse front matter metadata from the content and return it as a dict.""" if not self.collect_metadata: return {} match = self.FRONT_MATTER_REGEX.search(content) front_matter = {} if match: lines = match.group(1).split("\n") for line in lines: if ":" in line: key, value = line.split(":", 1) front_matter[key.strip()] = value.strip() else: # Skip lines without a colon continue return front_matter def _remove_front_matter(self, content: str) -> str: """Remove front matter metadata from the given content."""
"""Remove front matter metadata from the given content.""" if not self.collect_metadata: return content return self.FRONT_MATTER_REGEX.sub("", content) [docs] def load(self) -> List[Document]: """Load documents.""" ps = list(Path(self.file_path).glob("**/*.md")) docs = [] for p in ps: with open(p, encoding=self.encoding) as f: text = f.read() front_matter = self._parse_front_matter(text) text = self._remove_front_matter(text) metadata = { "source": str(p.name), "path": str(p), "created": p.stat().st_ctime, "last_modified": p.stat().st_mtime, "last_accessed": p.stat().st_atime, **front_matter, } docs.append(Document(page_content=text, metadata=metadata)) return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/obsidian.html
8d5df2fd4ac1-0
Source code for langchain.document_loaders.rss
"""Loads news articles from RSS feeds."""
import logging
from typing import Any, Iterator, List, Optional, Sequence
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.news import NewsURLLoader
logger = logging.getLogger(__name__)
[docs]class RSSFeedLoader(BaseLoader):
    """Loader that uses newspaper to load news articles from RSS feeds.
    Args:
        urls: URLs for RSS feeds to load. Each article in the feed is loaded
            into its own document.
        opml: OPML file to load feed urls from. Only one of urls or opml should be
            provided. The value can be a URL string, or OPML markup contents as
            bytes or a string.
        continue_on_failure: If True, continue loading documents even if
            loading fails for a particular URL.
        show_progress_bar: If True, use tqdm to show a loading progress bar. Requires
            tqdm to be installed, ``pip install tqdm``.
        **newsloader_kwargs: Any additional named arguments to pass to
            NewsURLLoader.
    Example:
        .. code-block:: python
            from langchain.document_loaders import RSSFeedLoader
            loader = RSSFeedLoader(
                urls=["<url-1>", "<url-2>"],
            )
            docs = loader.load()
    The loader uses feedparser to parse RSS feeds. The feedparser library is not
    installed by default so you should install it if using this loader:
    https://pythonhosted.org/feedparser/
    If you use OPML, you should also install listparser:
    https://pythonhosted.org/listparser/
Finally, newspaper is used to process each article: https://newspaper.readthedocs.io/en/latest/ """ # noqa: E501 [docs] def __init__( self, urls: Optional[Sequence[str]] = None, opml: Optional[str] = None, continue_on_failure: bool = True, show_progress_bar: bool = False, **newsloader_kwargs: Any, ) -> None: """Initialize with urls or OPML.""" if (urls is None) == ( opml is None ): # This is True if both are None or neither is None raise ValueError( "Provide either the urls or the opml argument, but not both." ) self.urls = urls self.opml = opml self.continue_on_failure = continue_on_failure self.show_progress_bar = show_progress_bar self.newsloader_kwargs = newsloader_kwargs [docs] def load(self) -> List[Document]: iter = self.lazy_load() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. " "Please install with 'pip install tqdm' or set " "show_progress_bar=False." ) from e iter = tqdm(iter) return list(iter) @property def _get_urls(self) -> Sequence[str]: if self.urls: return self.urls try: import listparser except ImportError as e: raise ImportError( "Package listparser must be installed if the opml arg is used. "
"Package listparser must be installed if the opml arg is used. " "Please install with 'pip install listparser' or use the " "urls arg instead." ) from e rss = listparser.parse(self.opml) return [feed.url for feed in rss.feeds] [docs] def lazy_load(self) -> Iterator[Document]: try: import feedparser # noqa:F401 except ImportError: raise ImportError( "feedparser package not found, please install it with " "`pip install feedparser`" ) for url in self._get_urls: try: feed = feedparser.parse(url) if getattr(feed, "bozo", False): raise ValueError( f"Error fetching {url}, exception: {feed.bozo_exception}" ) except Exception as e: if self.continue_on_failure: logger.error(f"Error fetching {url}, exception: {e}") continue else: raise e try: for entry in feed.entries: loader = NewsURLLoader( urls=[entry.link], **self.newsloader_kwargs, ) article = loader.load()[0] article.metadata["feed"] = url yield article except Exception as e: if self.continue_on_failure: logger.error(f"Error processing entry {entry.link}, exception: {e}") continue else: raise e
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/rss.html
a50dbbea5e10-0
Source code for langchain.document_loaders.hn """Loads HN.""" from typing import Any, List from langchain.docstore.document import Document from langchain.document_loaders.web_base import WebBaseLoader [docs]class HNLoader(WebBaseLoader): """Load Hacker News data from either main page results or the comments page.""" [docs] def load(self) -> List[Document]: """Get important HN webpage information. HN webpage components are: - title - content - source url, - time of post - author of the post - number of comments - rank of the post """ soup_info = self.scrape() if "item" in self.web_path: return self.load_comments(soup_info) else: return self.load_results(soup_info) [docs] def load_comments(self, soup_info: Any) -> List[Document]: """Load comments from a HN post.""" comments = soup_info.select("tr[class='athing comtr']") title = soup_info.select_one("tr[id='pagespace']").get("title") return [ Document( page_content=comment.text.strip(), metadata={"source": self.web_path, "title": title}, ) for comment in comments ] [docs] def load_results(self, soup: Any) -> List[Document]: """Load items from an HN page.""" items = soup.select("tr[class='athing']") documents = [] for lineItem in items: ranking = lineItem.select_one("span[class='rank']").text link = lineItem.find("span", {"class": "titleline"}).find("a").get("href")
title = lineItem.find("span", {"class": "titleline"}).text.strip() metadata = { "source": self.web_path, "title": title, "link": link, "ranking": ranking, } documents.append( Document( page_content=title, link=link, ranking=ranking, metadata=metadata ) ) return documents
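Since `HNLoader` inherits its constructor from `WebBaseLoader`, it is given the page URL directly. A sketch with placeholder URLs; an "item" URL goes through `load_comments`, anything else through `load_results`:

from langchain.document_loaders import HNLoader

# Comments page: one Document per comment, with the post title in metadata
comments = HNLoader("https://news.ycombinator.com/item?id=<post-id>").load()

# Front page: one Document per story, with link and ranking in metadata
stories = HNLoader("https://news.ycombinator.com/").load()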
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/hn.html
4f4047bbfecd-0
Source code for langchain.document_loaders.telegram """Loads Telegram chat json dump.""" from __future__ import annotations import asyncio import json from pathlib import Path from typing import TYPE_CHECKING, Dict, List, Optional, Union from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.text_splitter import RecursiveCharacterTextSplitter if TYPE_CHECKING: import pandas as pd from telethon.hints import EntityLike [docs]def concatenate_rows(row: dict) -> str: """Combine message information in a readable format ready to be used.""" date = row["date"] sender = row["from"] text = row["text"] return f"{sender} on {date}: {text}\n\n" [docs]class TelegramChatFileLoader(BaseLoader): """Loads Telegram chat json directory dump.""" [docs] def __init__(self, path: str): """Initialize with a path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) text = "".join( concatenate_rows(message) for message in d["messages"] if message["type"] == "message" and isinstance(message["text"], str) ) metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)] [docs]def text_to_docs(text: Union[str, List[str]]) -> List[Document]: """Converts a string or list of strings to a list of Documents with metadata.""" if isinstance(text, str):
        # Take a single string as one page
        text = [text]
    page_docs = [Document(page_content=page) for page in text]
    # Add page numbers as metadata
    for i, doc in enumerate(page_docs):
        doc.metadata["page"] = i + 1
    # Split pages into chunks
    doc_chunks = []
    for doc in page_docs:
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=800,
            separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
            chunk_overlap=20,
        )
        chunks = text_splitter.split_text(doc.page_content)
        for i, chunk in enumerate(chunks):
            doc = Document(
                page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
            )
            # Add the source as metadata
            doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
            doc_chunks.append(doc)
    return doc_chunks
[docs]class TelegramChatApiLoader(BaseLoader):
    """Loads Telegram chat history fetched via the Telegram API."""
    [docs] def __init__(
        self,
        chat_entity: Optional[EntityLike] = None,
        api_id: Optional[int] = None,
        api_hash: Optional[str] = None,
        username: Optional[str] = None,
        file_path: str = "telegram_data.json",
    ):
        """Initialize with API parameters.
        Args:
            chat_entity: The chat entity to fetch data from.
            api_id: The API ID.
            api_hash: The API hash.
            username: The username.
file_path: The file path to save the data to. Defaults to "telegram_data.json". """ self.chat_entity = chat_entity self.api_id = api_id self.api_hash = api_hash self.username = username self.file_path = file_path [docs] async def fetch_data_from_telegram(self) -> None: """Fetch data from Telegram API and save it as a JSON file.""" from telethon.sync import TelegramClient data = [] async with TelegramClient(self.username, self.api_id, self.api_hash) as client: async for message in client.iter_messages(self.chat_entity): is_reply = message.reply_to is not None reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None data.append( { "sender_id": message.sender_id, "text": message.text, "date": message.date.isoformat(), "message.id": message.id, "is_reply": is_reply, "reply_to_id": reply_to_id, } ) with open(self.file_path, "w", encoding="utf-8") as f: json.dump(data, f, ensure_ascii=False, indent=4) def _get_message_threads(self, data: pd.DataFrame) -> dict: """Create a dictionary of message threads from the given data. Args: data (pd.DataFrame): A DataFrame containing the conversation \ data with columns: - message.sender_id - text - date - message.id - is_reply - reply_to_id Returns: dict: A dictionary where the key is the parent message ID and \ the value is a list of message IDs in ascending order.
the value is a list of message IDs in ascending order. """ def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]: """ Recursively find all replies to a given parent message ID. Args: parent_id (int): The parent message ID. reply_data (pd.DataFrame): A DataFrame containing reply messages. Returns: list: A list of message IDs that are replies to the parent message ID. """ # Find direct replies to the parent message ID direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][ "message.id" ].tolist() # Recursively find replies to the direct replies all_replies = [] for reply_id in direct_replies: all_replies += [reply_id] + find_replies(reply_id, reply_data) return all_replies # Filter out parent messages parent_messages = data[~data["is_reply"]] # Filter out reply messages and drop rows with NaN in 'reply_to_id' reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"]) # Convert 'reply_to_id' to integer reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int) # Create a dictionary of message threads with parent message IDs as keys and \ # lists of reply message IDs as values message_threads = { parent_id: [parent_id] + find_replies(parent_id, reply_messages) for parent_id in parent_messages["message.id"] } return message_threads def _combine_message_texts( self, message_threads: Dict[int, List[int]], data: pd.DataFrame ) -> str:
) -> str: """ Combine the message texts for each parent message ID based \ on the list of message threads. Args: message_threads (dict): A dictionary where the key is the parent message \ ID and the value is a list of message IDs in ascending order. data (pd.DataFrame): A DataFrame containing the conversation data: - message.sender_id - text - date - message.id - is_reply - reply_to_id Returns: str: A combined string of message texts sorted by date. """ combined_text = "" # Iterate through sorted parent message IDs for parent_id, message_ids in message_threads.items(): # Get the message texts for the message IDs and sort them by date message_texts = ( data[data["message.id"].isin(message_ids)] .sort_values(by="date")["text"] .tolist() ) message_texts = [str(elem) for elem in message_texts] # Combine the message texts combined_text += " ".join(message_texts) + ".\n" return combined_text.strip() [docs] def load(self) -> List[Document]: """Load documents.""" if self.chat_entity is not None: try: import nest_asyncio nest_asyncio.apply() asyncio.run(self.fetch_data_from_telegram()) except ImportError: raise ImportError( """`nest_asyncio` package not found. please install with `pip install nest_asyncio` """ ) p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) try: import pandas as pd
d = json.load(f) try: import pandas as pd except ImportError: raise ImportError( """`pandas` package not found. please install with `pip install pandas` """ ) normalized_messages = pd.json_normalize(d) df = pd.DataFrame(normalized_messages) message_threads = self._get_message_threads(df) combined_texts = self._combine_message_texts(message_threads, df) return text_to_docs(combined_texts)
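Two usage sketches for the loaders above; the export path, chat entity, and API credentials are placeholders (the API loader additionally needs `telethon`, `nest_asyncio`, and `pandas`, per the imports above):

from langchain.document_loaders import TelegramChatApiLoader, TelegramChatFileLoader

# From a JSON export produced by the Telegram desktop client
docs = TelegramChatFileLoader("path/to/result.json").load()

# Or fetched live through the Telegram API, then threaded into Documents
loader = TelegramChatApiLoader(
    chat_entity="<chat-link-or-username>",  # placeholder
    api_id=12345,                           # placeholder, from my.telegram.org
    api_hash="<api-hash>",                  # placeholder
    username="<session-name>",              # placeholder
)
docs = loader.load()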
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/telegram.html
c0735c81ab72-0
Source code for langchain.document_loaders.arxiv from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utilities.arxiv import ArxivAPIWrapper [docs]class ArxivLoader(BaseLoader): """Loads a query result from arxiv.org into a list of Documents. The loader converts the original PDF format into the text. """ [docs] def __init__( self, query: str, load_max_docs: Optional[int] = 100, load_all_available_meta: Optional[bool] = False, ): self.query = query """The query to be passed to the arxiv.org API.""" self.load_max_docs = load_max_docs """The maximum number of documents to load.""" self.load_all_available_meta = load_all_available_meta """Whether to load all available metadata.""" [docs] def load(self) -> List[Document]: arxiv_client = ArxivAPIWrapper( load_max_docs=self.load_max_docs, load_all_available_meta=self.load_all_available_meta, ) docs = arxiv_client.load(self.query) return docs
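A brief usage sketch (the query string is only an example); the heavy lifting happens in `ArxivAPIWrapper`, which downloads the matching PDFs and converts them to text:

from langchain.document_loaders import ArxivLoader

docs = ArxivLoader(query="large language models", load_max_docs=2).load()
print(docs[0].metadata)  # arXiv metadata such as title, authors, and summary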
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/arxiv.html
7757c6036a66-0
Source code for langchain.document_loaders.obs_file # coding:utf-8 import os import tempfile from typing import Any, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.unstructured import UnstructuredFileLoader [docs]class OBSFileLoader(BaseLoader): """Loader for Huawei OBS file.""" [docs] def __init__( self, bucket: str, key: str, client: Any = None, endpoint: str = "", config: Optional[dict] = None, ) -> None: """Initialize the OBSFileLoader with the specified settings. Args: bucket (str): The name of the OBS bucket to be used. key (str): The name of the object in the OBS bucket. client (ObsClient, optional): An instance of the ObsClient to connect to OBS. endpoint (str, optional): The endpoint URL of your OBS bucket. This parameter is mandatory if `client` is not provided. config (dict, optional): The parameters for connecting to OBS, provided as a dictionary. This parameter is ignored if `client` is provided. The dictionary could have the following keys: - "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read). - "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read). - "token" (str, optional): Your security token (required if using temporary credentials).
- "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored. Raises: ValueError: If the `esdk-obs-python` package is not installed. TypeError: If the provided `client` is not an instance of ObsClient. ValueError: If `client` is not provided, but `endpoint` is missing. Note: Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials. Example: To create a new OBSFileLoader with a new client: ``` config = { "ak": "your-access-key", "sk": "your-secret-key" } obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", config=config) ``` To create a new OBSFileLoader with an existing client: ``` from obs import ObsClient # Assuming you have an existing ObsClient object 'obs_client' obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", client=obs_client) ``` To create a new OBSFileLoader without an existing client: ``` obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", endpoint="your-endpoint-url") ``` """ # noqa: E501 try: from obs import ObsClient except ImportError: raise ImportError(
from obs import ObsClient except ImportError: raise ImportError( "Could not import esdk-obs-python python package. " "Please install it with `pip install esdk-obs-python`." ) if not client: if not endpoint: raise ValueError("Either OBSClient or endpoint must be provided.") if not config: config = dict() if config.get("get_token_from_ecs"): client = ObsClient(server=endpoint, security_provider_policy="ECS") else: client = ObsClient( access_key_id=config.get("ak"), secret_access_key=config.get("sk"), security_token=config.get("token"), server=endpoint, ) if not isinstance(client, ObsClient): raise TypeError("Client must be ObsClient type") self.client = client self.bucket = bucket self.key = key [docs] def load(self) -> List[Document]: """Load documents.""" with tempfile.TemporaryDirectory() as temp_dir: file_path = f"{temp_dir}/{self.bucket}/{self.key}" os.makedirs(os.path.dirname(file_path), exist_ok=True) # Download the file to a destination self.client.downloadFile( bucketName=self.bucket, objectKey=self.key, downloadFile=file_path ) loader = UnstructuredFileLoader(file_path) return loader.load()
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/obs_file.html
70e3630851c4-0
Source code for langchain.document_loaders.pdf """Loads PDF files.""" import json import logging import os import tempfile import time from abc import ABC from io import StringIO from pathlib import Path from typing import Any, Iterator, List, Mapping, Optional, Sequence, Union from urllib.parse import urlparse import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.blob_loaders import Blob from langchain.document_loaders.parsers.pdf import ( AmazonTextractPDFParser, PDFMinerParser, PDFPlumberParser, PyMuPDFParser, PyPDFium2Parser, PyPDFParser, ) from langchain.document_loaders.unstructured import UnstructuredFileLoader from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__file__) [docs]class UnstructuredPDFLoader(UnstructuredFileLoader): """Loader that uses unstructured to load PDF files. You can run the loader in one of two modes: "single" and "elements". If you use "single" mode, the document will be returned as a single langchain Document object. If you use "elements" mode, the unstructured library will split the document into elements such as Title and NarrativeText. You can pass in additional unstructured kwargs after mode to apply different unstructured settings. Examples -------- from langchain.document_loaders import UnstructuredPDFLoader loader = UnstructuredPDFLoader( "example.pdf", mode="elements", strategy="fast", ) docs = loader.load() References ---------- https://unstructured-io.github.io/unstructured/bricks.html#partition-pdf """
""" def _get_elements(self) -> List: from unstructured.partition.pdf import partition_pdf return partition_pdf(filename=self.file_path, **self.unstructured_kwargs) [docs]class BasePDFLoader(BaseLoader, ABC): """Base loader class for PDF files. Defaults to check for local file, but if the file is a web path, it will download it to a temporary file, use it, then clean up the temporary file after completion """ [docs] def __init__(self, file_path: str): """Initialize with a file path.""" self.file_path = file_path self.web_path = None if "~" in self.file_path: self.file_path = os.path.expanduser(self.file_path) # If the file is a web path or S3, download it to a temporary file, and use that if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path): self.temp_dir = tempfile.TemporaryDirectory() _, suffix = os.path.splitext(self.file_path) temp_pdf = os.path.join(self.temp_dir.name, f"tmp{suffix}") if self._is_s3_url(self.file_path): self.web_path = self.file_path else: r = requests.get(self.file_path) if r.status_code != 200: raise ValueError( "Check the url of your file; returned status code %s" % r.status_code ) self.web_path = self.file_path with open(temp_pdf, mode="wb") as f: f.write(r.content) self.file_path = str(temp_pdf) elif not os.path.isfile(self.file_path):
elif not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file or url" % self.file_path) def __del__(self) -> None: if hasattr(self, "temp_dir"): self.temp_dir.cleanup() @staticmethod def _is_valid_url(url: str) -> bool: """Check if the url is valid.""" parsed = urlparse(url) return bool(parsed.netloc) and bool(parsed.scheme) @staticmethod def _is_s3_url(url: str) -> bool: """check if the url is S3""" try: result = urlparse(url) if result.scheme == "s3" and result.netloc: return True return False except ValueError: return False @property def source(self) -> str: return self.web_path if self.web_path is not None else self.file_path [docs]class OnlinePDFLoader(BasePDFLoader): """Loads online PDFs.""" [docs] def load(self) -> List[Document]: """Load documents.""" loader = UnstructuredPDFLoader(str(self.file_path)) return loader.load() [docs]class PyPDFLoader(BasePDFLoader): """Loads a PDF with pypdf and chunks at character level. Loader also stores page numbers in metadata. """ [docs] def __init__( self, file_path: str, password: Optional[Union[str, bytes]] = None ) -> None: """Initialize with a file path.""" try: import pypdf # noqa:F401 except ImportError: raise ImportError(
except ImportError: raise ImportError( "pypdf package not found, please install it with " "`pip install pypdf`" ) self.parser = PyPDFParser(password=password) super().__init__(file_path) [docs] def load(self) -> List[Document]: """Load given path as pages.""" return list(self.lazy_load()) [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazy load given path as pages.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) [docs]class PyPDFium2Loader(BasePDFLoader): """Loads a PDF with pypdfium2 and chunks at character level.""" [docs] def __init__(self, file_path: str): """Initialize with a file path.""" super().__init__(file_path) self.parser = PyPDFium2Parser() [docs] def load(self) -> List[Document]: """Load given path as pages.""" return list(self.lazy_load()) [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazy load given path as pages.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) [docs]class PyPDFDirectoryLoader(BaseLoader): """Loads a directory with PDF files with pypdf and chunks at character level. Loader also stores page numbers in metadata. """ [docs] def __init__( self, path: str, glob: str = "**/[!.]*.pdf", silent_errors: bool = False, load_hidden: bool = False, recursive: bool = False,
load_hidden: bool = False, recursive: bool = False, ): self.path = path self.glob = glob self.load_hidden = load_hidden self.recursive = recursive self.silent_errors = silent_errors @staticmethod def _is_visible(path: Path) -> bool: return not any(part.startswith(".") for part in path.parts) [docs] def load(self) -> List[Document]: p = Path(self.path) docs = [] items = p.rglob(self.glob) if self.recursive else p.glob(self.glob) for i in items: if i.is_file(): if self._is_visible(i.relative_to(p)) or self.load_hidden: try: loader = PyPDFLoader(str(i)) sub_docs = loader.load() for doc in sub_docs: doc.metadata["source"] = str(i) docs.extend(sub_docs) except Exception as e: if self.silent_errors: logger.warning(e) else: raise e return docs [docs]class PDFMinerLoader(BasePDFLoader): """Loader that uses PDFMiner to load PDF files.""" [docs] def __init__(self, file_path: str) -> None: """Initialize with file path.""" try: from pdfminer.high_level import extract_text # noqa:F401 except ImportError: raise ImportError( "`pdfminer` package not found, please install it with " "`pip install pdfminer.six`" ) super().__init__(file_path) self.parser = PDFMinerParser() [docs] def load(self) -> List[Document]:
[docs] def load(self) -> List[Document]: """Eagerly load the content.""" return list(self.lazy_load()) [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazily load documents.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) [docs]class PDFMinerPDFasHTMLLoader(BasePDFLoader): """Loader that uses PDFMiner to load PDF files as HTML content.""" [docs] def __init__(self, file_path: str): """Initialize with a file path.""" try: from pdfminer.high_level import extract_text_to_fp # noqa:F401 except ImportError: raise ImportError( "`pdfminer` package not found, please install it with " "`pip install pdfminer.six`" ) super().__init__(file_path) [docs] def load(self) -> List[Document]: """Load file.""" from pdfminer.high_level import extract_text_to_fp from pdfminer.layout import LAParams from pdfminer.utils import open_filename output_string = StringIO() with open_filename(self.file_path, "rb") as fp: extract_text_to_fp( fp, # type: ignore[arg-type] output_string, codec="", laparams=LAParams(), output_type="html", ) metadata = {"source": self.file_path} return [Document(page_content=output_string.getvalue(), metadata=metadata)] [docs]class PyMuPDFLoader(BasePDFLoader): """Loader that uses PyMuPDF to load PDF files.""" [docs] def __init__(self, file_path: str) -> None:
[docs] def __init__(self, file_path: str) -> None: """Initialize with a file path.""" try: import fitz # noqa:F401 except ImportError: raise ImportError( "`PyMuPDF` package not found, please install it with " "`pip install pymupdf`" ) super().__init__(file_path) [docs] def load(self, **kwargs: Optional[Any]) -> List[Document]: """Load file.""" parser = PyMuPDFParser(text_kwargs=kwargs) blob = Blob.from_path(self.file_path) return parser.parse(blob) # MathpixPDFLoader implementation taken largely from Daniel Gross's: # https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21 [docs]class MathpixPDFLoader(BasePDFLoader): """This class uses Mathpix service to load PDF files.""" [docs] def __init__( self, file_path: str, processed_file_format: str = "mmd", max_wait_time_seconds: int = 500, should_clean_pdf: bool = False, **kwargs: Any, ) -> None: """Initialize with a file path. Args: file_path: a file for loading. processed_file_format: a format of the processed file. Default is "mmd". max_wait_time_seconds: a maximum time to wait for the response from the server. Default is 500. should_clean_pdf: a flag to clean the PDF file. Default is False. **kwargs: additional keyword arguments. """ super().__init__(file_path)
""" super().__init__(file_path) self.mathpix_api_key = get_from_dict_or_env( kwargs, "mathpix_api_key", "MATHPIX_API_KEY" ) self.mathpix_api_id = get_from_dict_or_env( kwargs, "mathpix_api_id", "MATHPIX_API_ID" ) self.processed_file_format = processed_file_format self.max_wait_time_seconds = max_wait_time_seconds self.should_clean_pdf = should_clean_pdf @property def headers(self) -> dict: return {"app_id": self.mathpix_api_id, "app_key": self.mathpix_api_key} @property def url(self) -> str: return "https://api.mathpix.com/v3/pdf" @property def data(self) -> dict: options = {"conversion_formats": {self.processed_file_format: True}} return {"options_json": json.dumps(options)} [docs] def send_pdf(self) -> str: with open(self.file_path, "rb") as f: files = {"file": f} response = requests.post( self.url, headers=self.headers, files=files, data=self.data ) response_data = response.json() if "pdf_id" in response_data: pdf_id = response_data["pdf_id"] return pdf_id else: raise ValueError("Unable to send PDF to Mathpix.") [docs] def wait_for_processing(self, pdf_id: str) -> None: """Wait for processing to complete. Args: pdf_id: a PDF id. Returns: None """ url = self.url + "/" + pdf_id
Returns: None """ url = self.url + "/" + pdf_id for _ in range(0, self.max_wait_time_seconds, 5): response = requests.get(url, headers=self.headers) response_data = response.json() status = response_data.get("status", None) if status == "completed": return elif status == "error": raise ValueError("Unable to retrieve PDF from Mathpix") else: print(f"Status: {status}, waiting for processing to complete") time.sleep(5) raise TimeoutError [docs] def get_processed_pdf(self, pdf_id: str) -> str: self.wait_for_processing(pdf_id) url = f"{self.url}/{pdf_id}.{self.processed_file_format}" response = requests.get(url, headers=self.headers) return response.content.decode("utf-8") [docs] def clean_pdf(self, contents: str) -> str: """Clean the PDF file. Args: contents: a PDF file contents. Returns: """ contents = "\n".join( [line for line in contents.split("\n") if not line.startswith("![]")] ) # replace \section{Title} with # Title contents = contents.replace("\\section{", "# ").replace("}", "") # replace the "\" slash that Mathpix adds to escape $, %, (, etc. contents = ( contents.replace(r"\$", "$") .replace(r"\%", "%") .replace(r"\(", "(") .replace(r"\)", ")") ) return contents [docs] def load(self) -> List[Document]: pdf_id = self.send_pdf()
pdf_id = self.send_pdf() contents = self.get_processed_pdf(pdf_id) if self.should_clean_pdf: contents = self.clean_pdf(contents) metadata = {"source": self.source, "file_path": self.source} return [Document(page_content=contents, metadata=metadata)] [docs]class PDFPlumberLoader(BasePDFLoader): """Loader that uses pdfplumber to load PDF files.""" [docs] def __init__( self, file_path: str, text_kwargs: Optional[Mapping[str, Any]] = None ) -> None: """Initialize with a file path.""" try: import pdfplumber # noqa:F401 except ImportError: raise ImportError( "pdfplumber package not found, please install it with " "`pip install pdfplumber`" ) super().__init__(file_path) self.text_kwargs = text_kwargs or {} [docs] def load(self) -> List[Document]: """Load file.""" parser = PDFPlumberParser(text_kwargs=self.text_kwargs) blob = Blob.from_path(self.file_path) return parser.parse(blob) [docs]class AmazonTextractPDFLoader(BasePDFLoader): """Loads a PDF document from local file system, HTTP or S3. To authenticate, the AWS client uses the following methods to automatically load credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used. Make sure the credentials / roles used have the required policies to access the Amazon Textract service. Example:
access the Amazon Textract service. Example: .. code-block:: python from langchain.document_loaders import AmazonTextractPDFLoader loader = AmazonTextractPDFLoader( file_path="s3://pdfs/myfile.pdf" ) document = loader.load() """ [docs] def __init__( self, file_path: str, textract_features: Optional[Sequence[str]] = None, client: Optional[Any] = None, credentials_profile_name: Optional[str] = None, region_name: Optional[str] = None, endpoint_url: Optional[str] = None, ) -> None: """Initialize the loader. Args: file_path: A file, url or s3 path for input file textract_features: Features to be used for extraction, each feature should be passed as a str that conforms to the enum `Textract_Features`, see `amazon-textract-caller` pkg client: boto3 textract client (Optional) credentials_profile_name: AWS profile name, if not default (Optional) region_name: AWS region, eg us-east-1 (Optional) endpoint_url: endpoint url for the textract service (Optional) """ super().__init__(file_path) try: import textractcaller as tc # noqa: F401 except ImportError: raise ModuleNotFoundError( "Could not import amazon-textract-caller python package. " "Please install it with `pip install amazon-textract-caller`." ) if textract_features: features = [tc.Textract_Features[x] for x in textract_features] else: features = []
else: features = [] if credentials_profile_name or region_name or endpoint_url: try: import boto3 if credentials_profile_name is not None: session = boto3.Session(profile_name=credentials_profile_name) else: # use default credentials session = boto3.Session() client_params = {} if region_name: client_params["region_name"] = region_name if endpoint_url: client_params["endpoint_url"] = endpoint_url client = session.client("textract", **client_params) except ImportError: raise ModuleNotFoundError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e self.parser = AmazonTextractPDFParser(textract_features=features, client=client) [docs] def load(self) -> List[Document]: """Load given path as pages.""" return list(self.lazy_load()) [docs] def lazy_load( self, ) -> Iterator[Document]: """Lazy load documents""" # the self.file_path is local, but the blob has to include # the S3 location if the file originated from S3 for multi-page documents # raises ValueError when multi-page and not on S3""" if self.web_path and self._is_s3_url(self.web_path): blob = Blob(path=self.web_path) else: blob = Blob.from_path(self.file_path)
        if AmazonTextractPDFLoader._get_number_of_pages(blob) > 1:
            raise ValueError(
                f"the file {blob.path} is a multi-page document, "
                "but not stored on S3. "
                "Textract requires multi-page documents to be on S3."
            )

        yield from self.parser.parse(blob)

    @staticmethod
    def _get_number_of_pages(blob: Blob) -> int:
        try:
            import pypdf
            from PIL import Image, ImageSequence

        except ImportError:
            raise ModuleNotFoundError(
                "Could not import pypdf or Pillow python packages. "
                "Please install them with `pip install pypdf Pillow`."
            )
        if blob.mimetype == "application/pdf":
            with blob.as_bytes_io() as input_pdf_file:
                pdf_reader = pypdf.PdfReader(input_pdf_file)
                return len(pdf_reader.pages)
        elif blob.mimetype == "image/tiff":
            num_pages = 0
            # PIL's Image.open expects a path or a file object, so wrap the
            # blob contents in a file-like object rather than passing raw bytes.
            with blob.as_bytes_io() as image_file:
                img = Image.open(image_file)
                for _, _ in enumerate(ImageSequence.Iterator(img)):
                    num_pages += 1
            return num_pages
        elif blob.mimetype in ["image/png", "image/jpeg"]:
            return 1
        else:
            raise ValueError(f"unsupported mime type: {blob.mimetype}")
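A minimal usage sketch for the PDF loaders defined above. The file names, the S3 URI, the pdfplumber text_kwargs, and the AWS region are placeholder assumptions; MathpixPDFLoader additionally expects MATHPIX_API_ID / MATHPIX_API_KEY, and AmazonTextractPDFLoader expects configured AWS credentials, neither of which the sketch sets up.

from langchain.document_loaders import (
    AmazonTextractPDFLoader,
    MathpixPDFLoader,
    PDFPlumberLoader,
    PyMuPDFLoader,
)

# Parse a local PDF with PyMuPDF; load() kwargs are handed to PyMuPDFParser
# as text_kwargs.
docs = PyMuPDFLoader("example.pdf").load()

# pdfplumber-based loading; text_kwargs are forwarded to pdfplumber
# (the specific option below is a placeholder).
docs = PDFPlumberLoader("example.pdf", text_kwargs={"x_tolerance": 1}).load()

# Mathpix OCR service; credentials are read from kwargs or the environment.
docs = MathpixPDFLoader("example.pdf", should_clean_pdf=True).load()

# Amazon Textract; multi-page documents must be stored on S3.
docs = AmazonTextractPDFLoader(
    "s3://my-bucket/example.pdf", region_name="us-east-1"
).load()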
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/pdf.html
9eb948990088-0
Source code for langchain.document_loaders.stripe """Loader that fetches data from Stripe""" import json import urllib.request from typing import List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.utils import get_from_env, stringify_dict STRIPE_ENDPOINTS = { "balance_transactions": "https://api.stripe.com/v1/balance_transactions", "charges": "https://api.stripe.com/v1/charges", "customers": "https://api.stripe.com/v1/customers", "events": "https://api.stripe.com/v1/events", "refunds": "https://api.stripe.com/v1/refunds", "disputes": "https://api.stripe.com/v1/disputes", } [docs]class StripeLoader(BaseLoader): """Loader that fetches data from Stripe.""" [docs] def __init__(self, resource: str, access_token: Optional[str] = None) -> None: """Initialize with a resource and an access token. Args: resource: The resource. access_token: The access token. """ self.resource = resource access_token = access_token or get_from_env( "access_token", "STRIPE_ACCESS_TOKEN" ) self.headers = {"Authorization": f"Bearer {access_token}"} def _make_request(self, url: str) -> List[Document]: request = urllib.request.Request(url, headers=self.headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) text = stringify_dict(json_data) metadata = {"source": url} return [Document(page_content=text, metadata=metadata)]
    def _get_resource(self) -> List[Document]:
        endpoint = STRIPE_ENDPOINTS.get(self.resource)
        if endpoint is None:
            return []
        return self._make_request(endpoint)

[docs]    def load(self) -> List[Document]:
        return self._get_resource()
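A short usage sketch for StripeLoader; the secret key below is a placeholder, and the resource name must be one of the STRIPE_ENDPOINTS keys above.

import os

from langchain.document_loaders import StripeLoader

os.environ["STRIPE_ACCESS_TOKEN"] = "sk_test_..."  # placeholder secret key

# Resource is one of: balance_transactions, charges, customers, events,
# refunds, disputes.
loader = StripeLoader("charges")
docs = loader.load()

# The token can also be passed explicitly instead of via the environment:
loader = StripeLoader("customers", access_token="sk_test_...")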
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/stripe.html
8eca11891056-0
Source code for langchain.document_loaders.url
"""Loader that uses unstructured to load HTML files."""
import logging
from typing import Any, List

from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader

logger = logging.getLogger(__name__)

[docs]class UnstructuredURLLoader(BaseLoader):
    """Loader that uses Unstructured to load files from remote URLs.

    Use the unstructured partition function to detect the MIME type
    and route the file to the appropriate partitioner.

    You can run the loader in one of two modes: "single" and "elements".
    If you use "single" mode, the document will be returned as a single
    langchain Document object. If you use "elements" mode, the unstructured
    library will split the document into elements such as Title and
    NarrativeText. You can pass in additional unstructured kwargs after
    mode to apply different unstructured settings.

    Examples
    --------
    from langchain.document_loaders import UnstructuredURLLoader

    loader = UnstructuredURLLoader(
        urls=["<url-1>", "<url-2>"], mode="elements", strategy="fast",
    )
    docs = loader.load()

    References
    ----------
    https://unstructured-io.github.io/unstructured/bricks.html#partition
    """

[docs]    def __init__(
        self,
        urls: List[str],
        continue_on_failure: bool = True,
        mode: str = "single",
        show_progress_bar: bool = False,
        **unstructured_kwargs: Any,
    ):
        """Initialize with a list of URLs."""
        try:
            import unstructured  # noqa:F401
from unstructured.__version__ import __version__ as __unstructured_version__ self.__version = __unstructured_version__ except ImportError: raise ImportError( "unstructured package not found, please install it with " "`pip install unstructured`" ) self._validate_mode(mode) self.mode = mode headers = unstructured_kwargs.pop("headers", {}) if len(headers.keys()) != 0: warn_about_headers = False if self.__is_non_html_available(): warn_about_headers = not self.__is_headers_available_for_non_html() else: warn_about_headers = not self.__is_headers_available_for_html() if warn_about_headers: logger.warning( "You are using an old version of unstructured. " "The headers parameter is ignored" ) self.urls = urls self.continue_on_failure = continue_on_failure self.headers = headers self.unstructured_kwargs = unstructured_kwargs self.show_progress_bar = show_progress_bar def _validate_mode(self, mode: str) -> None: _valid_modes = {"single", "elements"} if mode not in _valid_modes: raise ValueError( f"Got {mode} for `mode`, but should be one of `{_valid_modes}`" ) def __is_headers_available_for_html(self) -> bool: _unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 7) def __is_headers_available_for_non_html(self) -> bool: _unstructured_version = self.__version.split("-")[0]
_unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 13) def __is_non_html_available(self) -> bool: _unstructured_version = self.__version.split("-")[0] unstructured_version = tuple([int(x) for x in _unstructured_version.split(".")]) return unstructured_version >= (0, 5, 12) [docs] def load(self) -> List[Document]: """Load file.""" from unstructured.partition.auto import partition from unstructured.partition.html import partition_html docs: List[Document] = list() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. " "Please install with 'pip install tqdm' or set " "show_progress_bar=False." ) from e urls = tqdm(self.urls) else: urls = self.urls for url in urls: try: if self.__is_non_html_available(): if self.__is_headers_available_for_non_html(): elements = partition( url=url, headers=self.headers, **self.unstructured_kwargs ) else: elements = partition(url=url, **self.unstructured_kwargs) else: if self.__is_headers_available_for_html(): elements = partition_html( url=url, headers=self.headers, **self.unstructured_kwargs ) else: elements = partition_html(url=url, **self.unstructured_kwargs) except Exception as e:
                if self.continue_on_failure:
                    logger.error(f"Error fetching or processing {url}, exception: {e}")
                    continue
                else:
                    raise e
            if self.mode == "single":
                text = "\n\n".join([str(el) for el in elements])
                metadata = {"source": url}
                docs.append(Document(page_content=text, metadata=metadata))
            elif self.mode == "elements":
                for element in elements:
                    metadata = element.metadata.to_dict()
                    metadata["category"] = element.category
                    docs.append(Document(page_content=str(element), metadata=metadata))
        return docs
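A brief usage sketch for UnstructuredURLLoader; the URLs are placeholders, and show_progress_bar=True assumes tqdm is installed.

from langchain.document_loaders import UnstructuredURLLoader

urls = [
    "https://www.example.com/page-1",
    "https://www.example.com/page-2",
]

# "single" mode returns one Document per URL; "elements" mode returns one
# Document per unstructured element (Title, NarrativeText, ...).
loader = UnstructuredURLLoader(
    urls=urls,
    mode="elements",
    continue_on_failure=True,
    show_progress_bar=True,
)
docs = loader.load()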
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/url.html
afdabd1a374a-0
Source code for langchain.document_loaders.facebook_chat """Loads Facebook chat json dump.""" import datetime import json from pathlib import Path from typing import List from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader [docs]def concatenate_rows(row: dict) -> str: """Combine message information in a readable format ready to be used. Args: row: dictionary containing message information. """ sender = row["sender_name"] text = row["content"] date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime( "%Y-%m-%d %H:%M:%S" ) return f"{sender} on {date}: {text}\n\n" [docs]class FacebookChatLoader(BaseLoader): """Loads Facebook messages json directory dump.""" [docs] def __init__(self, path: str): """Initialize with a path.""" self.file_path = path [docs] def load(self) -> List[Document]: """Load documents.""" p = Path(self.file_path) with open(p, encoding="utf8") as f: d = json.load(f) text = "".join( concatenate_rows(message) for message in d["messages"] if message.get("content") and isinstance(message["content"], str) ) metadata = {"source": str(p)} return [Document(page_content=text, metadata=metadata)]
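A small usage sketch for FacebookChatLoader; the path is a placeholder pointing at a message_1.json file from a Facebook data export.

from langchain.document_loaders import FacebookChatLoader

loader = FacebookChatLoader("messages/inbox/some_chat/message_1.json")
docs = loader.load()
# docs[0].page_content contains lines like "Alice on 2023-01-01 12:00:00: hello"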
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/facebook_chat.html
f6a2f9298684-0
Source code for langchain.document_loaders.bigquery from __future__ import annotations from typing import TYPE_CHECKING, List, Optional from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader if TYPE_CHECKING: from google.auth.credentials import Credentials [docs]class BigQueryLoader(BaseLoader): """Loads a query result from BigQuery into a list of documents. Each document represents one row of the result. The `page_content_columns` are written into the `page_content` of the document. The `metadata_columns` are written into the `metadata` of the document. By default, all columns are written into the `page_content` and none into the `metadata`. """ [docs] def __init__( self, query: str, project: Optional[str] = None, page_content_columns: Optional[List[str]] = None, metadata_columns: Optional[List[str]] = None, credentials: Optional[Credentials] = None, ): """Initialize BigQuery document loader. Args: query: The query to run in BigQuery. project: Optional. The project to run the query in. page_content_columns: Optional. The columns to write into the `page_content` of the document. metadata_columns: Optional. The columns to write into the `metadata` of the document. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine (`google.auth.compute_engine.Credentials`) or Service Account (`google.oauth2.service_account.Credentials`) credentials directly. """ self.query = query self.project = project
""" self.query = query self.project = project self.page_content_columns = page_content_columns self.metadata_columns = metadata_columns self.credentials = credentials [docs] def load(self) -> List[Document]: try: from google.cloud import bigquery except ImportError as ex: raise ImportError( "Could not import google-cloud-bigquery python package. " "Please install it with `pip install google-cloud-bigquery`." ) from ex bq_client = bigquery.Client(credentials=self.credentials, project=self.project) query_result = bq_client.query(self.query).result() docs: List[Document] = [] page_content_columns = self.page_content_columns metadata_columns = self.metadata_columns if page_content_columns is None: page_content_columns = [column.name for column in query_result.schema] if metadata_columns is None: metadata_columns = [] for row in query_result: page_content = "\n".join( f"{k}: {v}" for k, v in row.items() if k in page_content_columns ) metadata = {k: v for k, v in row.items() if k in metadata_columns} doc = Document(page_content=page_content, metadata=metadata) docs.append(doc) return docs
https://api.python.langchain.com/en/latest/_modules/langchain/document_loaders/bigquery.html