index
int64
0
0
repo_id
stringclasses
596 values
file_path
stringlengths
31
168
content
stringlengths
1
6.2M
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_mongodb.py
from typing import Dict, List from unittest.mock import AsyncMock, MagicMock, patch import pytest from langchain_core.documents import Document from langchain_community.document_loaders.mongodb import MongodbLoader @pytest.fixture def raw_docs() -> List[Dict]: return [ {"_id": "1", "address": {"building": "1", "room": "1"}}, {"_id": "2", "address": {"building": "2", "room": "2"}}, {"_id": "3", "address": {"building": "3", "room": "2"}}, ] @pytest.fixture def expected_documents() -> List[Document]: return [ Document( page_content="{'_id': '2', 'address': {'building': '2', 'room': '2'}}", metadata={"database": "sample_restaurants", "collection": "restaurants"}, ), Document( page_content="{'_id': '3', 'address': {'building': '3', 'room': '2'}}", metadata={"database": "sample_restaurants", "collection": "restaurants"}, ), ] @pytest.mark.requires("motor") async def test_load_mocked_with_filters(expected_documents: List[Document]) -> None: filter_criteria = {"address.room": {"$eq": "2"}} field_names = ["address.building", "address.room"] metadata_names = ["_id"] include_db_collection_in_metadata = True mock_async_load = AsyncMock() mock_async_load.return_value = expected_documents mock_find = AsyncMock() mock_find.return_value = iter(expected_documents) mock_count_documents = MagicMock() mock_count_documents.return_value = len(expected_documents) mock_collection = MagicMock() mock_collection.find = mock_find mock_collection.count_documents = mock_count_documents with patch( "motor.motor_asyncio.AsyncIOMotorClient", return_value=MagicMock() ), patch( "langchain_community.document_loaders.mongodb.MongodbLoader.aload", new=mock_async_load, ): loader = MongodbLoader( "mongodb://localhost:27017", "test_db", "test_collection", filter_criteria=filter_criteria, field_names=field_names, metadata_names=metadata_names, include_db_collection_in_metadata=include_db_collection_in_metadata, ) loader.collection = mock_collection documents = await loader.aload() assert documents == 
expected_documents
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_generic_loader.py
"""Test generic loader.""" import os import tempfile from pathlib import Path from typing import Any, Generator, Iterator import pytest from langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob, FileSystemBlobLoader from langchain_community.document_loaders.generic import GenericLoader from langchain_community.document_loaders.parsers.txt import TextParser @pytest.fixture def toy_dir() -> Generator[Path, None, None]: """Yield a pre-populated directory to test the blob loader.""" with tempfile.TemporaryDirectory() as temp_dir: # Create test.txt with open(os.path.join(temp_dir, "test.txt"), "w") as test_txt: test_txt.write("This is a test.txt file.") # Create test.html with open(os.path.join(temp_dir, "test.html"), "w") as test_html: test_html.write( "<html><body><h1>This is a test.html file.</h1></body></html>" ) # Create .hidden_file with open(os.path.join(temp_dir, ".hidden_file"), "w") as hidden_file: hidden_file.write("This is a hidden file.") # Create some_dir/nested_file.txt some_dir = os.path.join(temp_dir, "some_dir") os.makedirs(some_dir) with open(os.path.join(some_dir, "nested_file.txt"), "w") as nested_file: nested_file.write("This is a nested_file.txt file.") # Create some_dir/other_dir/more_nested.txt other_dir = os.path.join(some_dir, "other_dir") os.makedirs(other_dir) with open(os.path.join(other_dir, "more_nested.txt"), "w") as nested_file: nested_file.write("This is a more_nested.txt file.") yield Path(temp_dir) class AsIsParser(BaseBlobParser): """Parser created for testing purposes.""" def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Extract the first character of a blob.""" yield Document(page_content=blob.as_string()) def test__init__(toy_dir: str) -> None: """Test initialization from init.""" loader = GenericLoader( FileSystemBlobLoader(toy_dir, suffixes=[".txt"]), AsIsParser(), ) docs = loader.load() assert 
len(docs) == 3 # Glob order seems to be deterministic with recursion. If this test becomes flaky, # we can sort the docs by page content. assert docs[0].page_content == "This is a test.txt file." def test_from_filesystem_classmethod(toy_dir: str) -> None: """Test generic loader.""" loader = GenericLoader.from_filesystem( toy_dir, suffixes=[".txt"], parser=AsIsParser() ) docs = loader.load() assert len(docs) == 3 # Glob order seems to be deterministic with recursion. If this test becomes flaky, # we can sort the docs by page content. assert docs[0].page_content == "This is a test.txt file." def test_from_filesystem_classmethod_with_path(toy_dir: str) -> None: loader = GenericLoader.from_filesystem(os.path.join(toy_dir, "test.txt")) docs = loader.load() assert len(docs) == 1 assert docs[0].page_content == "This is a test.txt file." def test_from_filesystem_classmethod_with_glob(toy_dir: str) -> None: """Test that glob parameter is taken into account.""" loader = GenericLoader.from_filesystem(toy_dir, glob="*.txt", parser=AsIsParser()) docs = loader.load() assert len(docs) == 1 # Glob order seems to be deterministic with recursion. If this test becomes flaky, # we can sort the docs by page content. assert docs[0].page_content == "This is a test.txt file." @pytest.mark.requires("tqdm") def test_from_filesystem_classmethod_show_progress(toy_dir: str) -> None: """Test that glob parameter is taken into account.""" loader = GenericLoader.from_filesystem( toy_dir, glob="*.txt", parser=AsIsParser(), show_progress=True ) docs = loader.load() assert len(docs) == 1 # Glob order seems to be deterministic with recursion. If this test becomes flaky, # we can sort the docs by page content. assert docs[0].page_content == "This is a test.txt file." 
def test_from_filesystem_using_default_parser(toy_dir: str) -> None: """Use the default generic parser.""" loader = GenericLoader.from_filesystem( toy_dir, suffixes=[".txt"], ) docs = loader.load() assert len(docs) == 3 # Glob order seems to be deterministic with recursion. If this test becomes flaky, # we can sort the docs by page content. assert docs[0].page_content == "This is a test.txt file." def test_specifying_parser_via_class_attribute(toy_dir: str) -> None: class TextLoader(GenericLoader): """Parser created for testing purposes.""" @staticmethod def get_parser(**kwargs: Any) -> BaseBlobParser: return TextParser() loader = TextLoader.from_filesystem(toy_dir, suffixes=[".txt"]) docs = loader.load() assert len(docs) == 3 # Glob order seems to be deterministic with recursion. If this test becomes flaky, # we can sort the docs by page content. assert docs[0].page_content == "This is a test.txt file."
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_confluence.py
import unittest from typing import Any, Dict from unittest.mock import MagicMock, patch import pytest import requests from langchain_core.documents import Document from langchain_community.document_loaders.confluence import ( ConfluenceLoader, ContentFormat, ) @pytest.fixture def mock_confluence(): # type: ignore with patch("atlassian.Confluence") as mock_confluence: yield mock_confluence @pytest.mark.requires("atlassian", "bs4", "lxml") class TestConfluenceLoader: CONFLUENCE_URL: str = "https://example.atlassian.com/wiki" MOCK_USERNAME: str = "user@gmail.com" MOCK_API_TOKEN: str = "api_token" MOCK_SPACE_KEY: str = "spaceId123" def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None: ConfluenceLoader( self.CONFLUENCE_URL, username=self.MOCK_USERNAME, api_key=self.MOCK_API_TOKEN, ) mock_confluence.assert_called_once_with( url=self.CONFLUENCE_URL, username="user@gmail.com", password="api_token", cloud=True, ) def test_confluence_loader_initialization_invalid(self) -> None: with pytest.raises(ValueError): ConfluenceLoader( self.CONFLUENCE_URL, username=self.MOCK_USERNAME, api_key=self.MOCK_API_TOKEN, token="foo", ) with pytest.raises(ValueError): ConfluenceLoader( self.CONFLUENCE_URL, username=self.MOCK_USERNAME, api_key=self.MOCK_API_TOKEN, oauth2={ "access_token": "bar", "access_token_secret": "bar", "consumer_key": "bar", "key_cert": "bar", }, ) with pytest.raises(ValueError): ConfluenceLoader( self.CONFLUENCE_URL, username=self.MOCK_USERNAME, api_key=self.MOCK_API_TOKEN, session=requests.Session(), ) def test_confluence_loader_initialization_from_env( self, mock_confluence: MagicMock ) -> None: with unittest.mock.patch.dict( "os.environ", { "CONFLUENCE_USERNAME": self.MOCK_USERNAME, "CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN, }, ): ConfluenceLoader(url=self.CONFLUENCE_URL) mock_confluence.assert_called_with( url=self.CONFLUENCE_URL, username=None, password=None, cloud=True ) def test_confluence_loader_load_data_invalid_args(self) -> 
None: confluence_loader = ConfluenceLoader( self.CONFLUENCE_URL, username=self.MOCK_USERNAME, api_key=self.MOCK_API_TOKEN, ) with pytest.raises( ValueError, match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501 ): confluence_loader.load() def test_confluence_loader_load_data_by_page_ids( self, mock_confluence: MagicMock ) -> None: mock_confluence.get_page_by_id.side_effect = [ self._get_mock_page("123"), self._get_mock_page("456"), ] mock_confluence.get_all_restrictions_for_content.side_effect = [ self._get_mock_page_restrictions("123"), self._get_mock_page_restrictions("456"), ] mock_page_ids = ["123", "456"] confluence_loader = self._get_mock_confluence_loader( mock_confluence, page_ids=mock_page_ids ) documents = list(confluence_loader.lazy_load()) assert mock_confluence.get_page_by_id.call_count == 2 assert mock_confluence.get_all_restrictions_for_content.call_count == 2 assert len(documents) == 2 assert all(isinstance(doc, Document) for doc in documents) assert documents[0].page_content == "Content 123" assert documents[1].page_content == "Content 456" assert mock_confluence.get_all_pages_from_space.call_count == 0 assert mock_confluence.get_all_pages_by_label.call_count == 0 assert mock_confluence.cql.call_count == 0 assert mock_confluence.get_page_child_by_type.call_count == 0 def test_confluence_loader_load_data_by_space_id( self, mock_confluence: MagicMock ) -> None: # one response with two pages mock_confluence.get_all_pages_from_space.return_value = [ self._get_mock_page("123"), self._get_mock_page("456"), ] mock_confluence.get_all_restrictions_for_content.side_effect = [ self._get_mock_page_restrictions("123"), self._get_mock_page_restrictions("456"), ] confluence_loader = self._get_mock_confluence_loader( mock_confluence, space_key=self.MOCK_SPACE_KEY, max_pages=2 ) documents = confluence_loader.load() assert mock_confluence.get_all_pages_from_space.call_count == 1 assert len(documents) == 2 assert 
all(isinstance(doc, Document) for doc in documents) assert documents[0].page_content == "Content 123" assert documents[1].page_content == "Content 456" assert mock_confluence.get_page_by_id.call_count == 0 assert mock_confluence.get_all_pages_by_label.call_count == 0 assert mock_confluence.cql.call_count == 0 assert mock_confluence.get_page_child_by_type.call_count == 0 @pytest.mark.requires("markdownify") def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled( self, mock_confluence: MagicMock ) -> None: # one response with two pages mock_confluence.get_all_pages_from_space.return_value = [ self._get_mock_page("123", ContentFormat.VIEW), self._get_mock_page("456", ContentFormat.VIEW), ] mock_confluence.get_all_restrictions_for_content.side_effect = [ self._get_mock_page_restrictions("123"), self._get_mock_page_restrictions("456"), ] confluence_loader = self._get_mock_confluence_loader( mock_confluence, space_key=self.MOCK_SPACE_KEY, content_format=ContentFormat.VIEW, keep_markdown_format=True, max_pages=2, ) documents = confluence_loader.load() assert mock_confluence.get_all_pages_from_space.call_count == 1 assert len(documents) == 2 assert all(isinstance(doc, Document) for doc in documents) assert documents[0].page_content == "Content 123\n\n" assert documents[1].page_content == "Content 456\n\n" assert mock_confluence.get_page_by_id.call_count == 0 assert mock_confluence.get_all_pages_by_label.call_count == 0 assert mock_confluence.cql.call_count == 0 assert mock_confluence.get_page_child_by_type.call_count == 0 def _get_mock_confluence_loader( self, mock_confluence: MagicMock, **kwargs: Any ) -> ConfluenceLoader: confluence_loader = ConfluenceLoader( self.CONFLUENCE_URL, username=self.MOCK_USERNAME, api_key=self.MOCK_API_TOKEN, **kwargs, ) confluence_loader.confluence = mock_confluence return confluence_loader def _get_mock_page( self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE ) -> Dict: return { "id": 
f"{page_id}", "title": f"Page {page_id}", "body": { f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"} }, "status": "current", "type": "page", "_links": { "self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}", "tinyui": "/x/tiny_ui_link", "editui": f"/pages/resumedraft.action?draftId={page_id}", "webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview", }, } def _get_mock_page_restrictions(self, page_id: str) -> Dict: return { "read": { "operation": "read", "restrictions": { "user": {"results": [], "start": 0, "limit": 200, "size": 0}, "group": {"results": [], "start": 0, "limit": 200, "size": 0}, }, "_expandable": {"content": f"/rest/api/content/{page_id}"}, "_links": { "self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501 }, }, "update": { "operation": "update", "restrictions": { "user": {"results": [], "start": 0, "limit": 200, "size": 0}, "group": {"results": [], "start": 0, "limit": 200, "size": 0}, }, "_expandable": {"content": f"/rest/api/content/{page_id}"}, "_links": { "self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501 }, }, "_links": { "self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501 "base": self.CONFLUENCE_URL, "context": "/wiki", }, }
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_readthedoc.py
from pathlib import Path import pytest from langchain_community.document_loaders.readthedocs import ReadTheDocsLoader PARENT_DIR = Path(__file__).parent / "test_docs" / "readthedocs" @pytest.mark.requires("bs4") def test_main_id_main_content() -> None: loader = ReadTheDocsLoader(PARENT_DIR / "main_id_main_content") documents = loader.load() assert len(documents[0].page_content) != 0 @pytest.mark.requires("bs4") def test_div_role_main() -> None: loader = ReadTheDocsLoader(PARENT_DIR / "div_role_main") documents = loader.load() assert len(documents[0].page_content) != 0 @pytest.mark.requires("bs4") def test_custom() -> None: loader = ReadTheDocsLoader( PARENT_DIR / "custom", custom_html_tag=("article", {"role": "main"}), ) documents = loader.load() assert len(documents[0].page_content) != 0 @pytest.mark.requires("bs4") def test_nested_html_structure() -> None: loader = ReadTheDocsLoader(PARENT_DIR / "nested_html_structure") documents = loader.load() assert documents[0].page_content == "Hello World!" @pytest.mark.requires("bs4") def test_index_page() -> None: loader = ReadTheDocsLoader(PARENT_DIR / "index_page", exclude_links_ratio=0.5) documents = loader.load() assert len(documents[0].page_content) == 0 @pytest.mark.requires("bs4") def test_empty() -> None: loader = ReadTheDocsLoader( PARENT_DIR / "custom", ) documents = loader.load() assert len(documents[0].page_content) == 0
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_assemblyai.py
import pytest import responses from pytest_mock import MockerFixture from requests import HTTPError from langchain_community.document_loaders import ( AssemblyAIAudioLoaderById, AssemblyAIAudioTranscriptLoader, ) from langchain_community.document_loaders.assemblyai import TranscriptFormat @pytest.mark.requires("assemblyai") def test_initialization() -> None: loader = AssemblyAIAudioTranscriptLoader( file_path="./testfile.mp3", api_key="api_key" ) assert loader.file_path == "./testfile.mp3" assert loader.transcript_format == TranscriptFormat.TEXT @pytest.mark.requires("assemblyai") def test_load(mocker: MockerFixture) -> None: mocker.patch( "assemblyai.Transcriber.transcribe", return_value=mocker.MagicMock( text="Test transcription text", json_response={"id": "1"}, error=None ), ) loader = AssemblyAIAudioTranscriptLoader( file_path="./testfile.mp3", api_key="api_key" ) docs = loader.load() assert len(docs) == 1 assert docs[0].page_content == "Test transcription text" assert docs[0].metadata == {"id": "1"} @pytest.mark.requires("assemblyai") def test_transcription_error(mocker: MockerFixture) -> None: mocker.patch( "assemblyai.Transcriber.transcribe", return_value=mocker.MagicMock(error="Test error"), ) loader = AssemblyAIAudioTranscriptLoader( file_path="./testfile.mp3", api_key="api_key" ) expected_error = "Could not transcribe file: Test error" with pytest.raises(ValueError, match=expected_error): loader.load() @pytest.mark.requires("assemblyai") @responses.activate def test_load_by_id() -> None: responses.add( responses.GET, "https://api.assemblyai.com/v2/transcript/1234", json={"text": "Test transcription text", "id": "1234"}, status=200, ) loader = AssemblyAIAudioLoaderById( transcript_id="1234", api_key="api_key", transcript_format=TranscriptFormat.TEXT ) docs = loader.load() assert len(docs) == 1 assert docs[0].page_content == "Test transcription text" assert docs[0].metadata == {"text": "Test transcription text", "id": "1234"} 
@pytest.mark.requires("assemblyai") @responses.activate def test_transcription_error_by_id() -> None: responses.add( responses.GET, "https://api.assemblyai.com/v2/transcript/1234", status=404, ) loader = AssemblyAIAudioLoaderById( transcript_id="1234", api_key="api_key", transcript_format=TranscriptFormat.TEXT ) with pytest.raises(HTTPError): loader.load()
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_imports.py
from langchain_community.document_loaders import __all__, _module_lookup EXPECTED_ALL = [ "AcreomLoader", "AsyncHtmlLoader", "AsyncChromiumLoader", "AZLyricsLoader", "AcreomLoader", "AirbyteCDKLoader", "AirbyteGongLoader", "AirbyteJSONLoader", "AirbyteHubspotLoader", "AirbyteSalesforceLoader", "AirbyteShopifyLoader", "AirbyteStripeLoader", "AirbyteTypeformLoader", "AirbyteZendeskSupportLoader", "AirtableLoader", "AmazonTextractPDFLoader", "ApifyDatasetLoader", "ArcGISLoader", "ArxivLoader", "AssemblyAIAudioLoaderById", "AssemblyAIAudioTranscriptLoader", "AstraDBLoader", "AsyncHtmlLoader", "AthenaLoader", "AzureAIDataLoader", "AzureAIDocumentIntelligenceLoader", "AzureBlobStorageContainerLoader", "AzureBlobStorageFileLoader", "BSHTMLLoader", "BibtexLoader", "BigQueryLoader", "BiliBiliLoader", "BlackboardLoader", "Blob", "BlobLoader", "BlockchainDocumentLoader", "BraveSearchLoader", "BrowserbaseLoader", "BrowserlessLoader", "CassandraLoader", "CSVLoader", "ChatGPTLoader", "CoNLLULoader", "CollegeConfidentialLoader", "ConcurrentLoader", "ConfluenceLoader", "CouchbaseLoader", "CubeSemanticLoader", "DataFrameLoader", "DatadogLogsLoader", "DedocAPIFileLoader", "DedocFileLoader", "DedocPDFLoader", "PebbloSafeLoader", "PebbloTextLoader", "DiffbotLoader", "DirectoryLoader", "DiscordChatLoader", "DocugamiLoader", "DocusaurusLoader", "Docx2txtLoader", "DropboxLoader", "DuckDBLoader", "EtherscanLoader", "EverNoteLoader", "FacebookChatLoader", "FaunaLoader", "FigmaFileLoader", "FileSystemBlobLoader", "FireCrawlLoader", "GCSDirectoryLoader", "GCSFileLoader", "GeoDataFrameLoader", "GithubFileLoader", "GlueCatalogLoader", "GitHubIssuesLoader", "GitLoader", "GitbookLoader", "GoogleApiClient", "GoogleApiYoutubeLoader", "GoogleSpeechToTextLoader", "GoogleDriveLoader", "GutenbergLoader", "HNLoader", "HuggingFaceDatasetLoader", "HuggingFaceModelLoader", "IFixitLoader", "IMSDbLoader", "ImageCaptionLoader", "IuguLoader", "JSONLoader", "JoplinLoader", "KineticaLoader", 
"LLMSherpaFileLoader", "LarkSuiteDocLoader", "LakeFSLoader", "MHTMLLoader", "MWDumpLoader", "MastodonTootsLoader", "MathpixPDFLoader", "MaxComputeLoader", "MergedDataLoader", "ModernTreasuryLoader", "MongodbLoader", "NeedleLoader", "NewsURLLoader", "NotebookLoader", "NotionDBLoader", "NotionDirectoryLoader", "OBSDirectoryLoader", "OBSFileLoader", "ObsidianLoader", "OneDriveFileLoader", "OneDriveLoader", "OnlinePDFLoader", "OpenCityDataLoader", "OracleAutonomousDatabaseLoader", "OracleDocLoader", "OracleTextSplitter", "OutlookMessageLoader", "PDFMinerLoader", "PDFMinerPDFasHTMLLoader", "PDFPlumberLoader", "PagedPDFSplitter", "PlaywrightURLLoader", "PolarsDataFrameLoader", "PsychicLoader", "PubMedLoader", "PyMuPDFLoader", "PyPDFDirectoryLoader", "PyPDFLoader", "PyPDFium2Loader", "PySparkDataFrameLoader", "PythonLoader", "RSSFeedLoader", "ReadTheDocsLoader", "RecursiveUrlLoader", "RedditPostsLoader", "RoamLoader", "RocksetLoader", "S3DirectoryLoader", "S3FileLoader", "ScrapflyLoader", "ScrapingAntLoader", "SQLDatabaseLoader", "SRTLoader", "SeleniumURLLoader", "SharePointLoader", "SitemapLoader", "SlackDirectoryLoader", "SnowflakeLoader", "SpiderLoader", "SpreedlyLoader", "StripeLoader", "SurrealDBLoader", "TelegramChatApiLoader", "TelegramChatFileLoader", "TelegramChatLoader", "TensorflowDatasetLoader", "TencentCOSDirectoryLoader", "TencentCOSFileLoader", "TextLoader", "TiDBLoader", "ToMarkdownLoader", "TomlLoader", "TrelloLoader", "TwitterTweetLoader", "UnstructuredAPIFileIOLoader", "UnstructuredAPIFileLoader", "UnstructuredCHMLoader", "UnstructuredCSVLoader", "UnstructuredEPubLoader", "UnstructuredEmailLoader", "UnstructuredExcelLoader", "UnstructuredFileIOLoader", "UnstructuredFileLoader", "UnstructuredHTMLLoader", "UnstructuredImageLoader", "UnstructuredMarkdownLoader", "UnstructuredODTLoader", "UnstructuredOrgModeLoader", "UnstructuredPDFLoader", "UnstructuredPowerPointLoader", "UnstructuredRSTLoader", "UnstructuredRTFLoader", "UnstructuredTSVLoader", 
"UnstructuredURLLoader", "UnstructuredWordDocumentLoader", "UnstructuredXMLLoader", "VsdxLoader", "WeatherDataLoader", "WebBaseLoader", "WhatsAppChatLoader", "WikipediaLoader", "XorbitsLoader", "YoutubeAudioLoader", "YoutubeLoader", "YuqueLoader", ] def test_all_imports() -> None: assert set(__all__) == set(EXPECTED_ALL) assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_pebblo.py
import os from pathlib import Path from typing import Dict import pytest from langchain_core.documents import Document from pytest_mock import MockerFixture from langchain_community.document_loaders import CSVLoader, PyPDFLoader EXAMPLE_DOCS_DIRECTORY = str(Path(__file__).parent.parent.parent / "examples/") class MockResponse: def __init__(self, json_data: Dict, status_code: int): self.json_data = json_data self.status_code = status_code def json(self) -> Dict: return self.json_data def test_pebblo_import() -> None: """Test that the Pebblo safe loader can be imported.""" from langchain_community.document_loaders import PebbloSafeLoader # noqa: F401 def test_pebblo_text_loader_import() -> None: """Test that the Pebblo text loader can be imported.""" from langchain_community.document_loaders import PebbloTextLoader # noqa: F401 def test_empty_filebased_loader(mocker: MockerFixture) -> None: """Test basic file based csv loader.""" # Setup from langchain_community.document_loaders import PebbloSafeLoader mocker.patch.multiple( "requests", get=MockResponse(json_data={"data": ""}, status_code=200), post=MockResponse(json_data={"data": ""}, status_code=200), ) file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "test_empty.csv") expected_docs: list = [] # Exercise loader = PebbloSafeLoader( CSVLoader(file_path=file_path), "dummy_app_name", "dummy_owner", "dummy_description", ) result = loader.load() # Assert assert result == expected_docs def test_csv_loader_load_valid_data(mocker: MockerFixture) -> None: # Setup from langchain_community.document_loaders import PebbloSafeLoader mocker.patch.multiple( "requests", get=MockResponse(json_data={"data": ""}, status_code=200), post=MockResponse(json_data={"data": ""}, status_code=200), ) file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "test_nominal.csv") full_file_path = os.path.abspath(file_path) expected_docs = [ Document( metadata={ "source": full_file_path, "row": 0, "full_path": full_file_path, # For UT as here we are not 
calculating checksum "pb_checksum": None, }, page_content="column1: value1\ncolumn2: value2\ncolumn3: value3", ), Document( metadata={ "source": full_file_path, "row": 1, "full_path": full_file_path, # For UT as here we are not calculating checksum "pb_checksum": None, }, page_content="column1: value4\ncolumn2: value5\ncolumn3: value6", ), ] # Exercise loader = PebbloSafeLoader( CSVLoader(file_path=file_path), "dummy_app_name", "dummy_owner", "dummy_description", ) result = loader.load() # Assert assert result == expected_docs @pytest.mark.requires("pypdf") def test_pdf_lazy_load(mocker: MockerFixture) -> None: # Setup from langchain_community.document_loaders import PebbloSafeLoader mocker.patch.multiple( "requests", get=MockResponse(json_data={"data": ""}, status_code=200), post=MockResponse(json_data={"data": ""}, status_code=200), ) file_path = os.path.join( EXAMPLE_DOCS_DIRECTORY, "multi-page-forms-sample-2-page.pdf" ) # Exercise loader = PebbloSafeLoader( PyPDFLoader(file_path=file_path), "dummy_app_name", "dummy_owner", "dummy_description", ) result = list(loader.lazy_load()) # Assert assert len(result) == 2 def test_pebblo_safe_loader_api_key() -> None: # Setup from langchain_community.document_loaders import PebbloSafeLoader file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, "test_empty.csv") api_key = "dummy_api_key" # Exercise loader = PebbloSafeLoader( CSVLoader(file_path=file_path), "dummy_app_name", "dummy_owner", "dummy_description", api_key=api_key, ) # Assert assert loader.pb_client.api_key == api_key assert loader.pb_client.classifier_location == "local" def test_pebblo_text_loader(mocker: MockerFixture) -> None: """ Test loading in-memory text with PebbloTextLoader and PebbloSafeLoader. 
""" # Setup from langchain_community.document_loaders import PebbloSafeLoader, PebbloTextLoader mocker.patch.multiple( "requests", get=MockResponse(json_data={"data": ""}, status_code=200), post=MockResponse(json_data={"data": ""}, status_code=200), ) text = "This is a test text." source = "fake_source" expected_docs = [ Document( metadata={ "full_path": source, "pb_checksum": None, }, page_content=text, ), ] # Exercise texts = [text] loader = PebbloSafeLoader( PebbloTextLoader(texts, source=source), "dummy_app_name", "dummy_owner", "dummy_description", ) result = loader.load() # Assert assert result == expected_docs
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_hugging_face.py
from pathlib import Path import pytest from langchain_community.document_loaders import HuggingFaceDatasetLoader HUGGING_FACE_EXAMPLE_DATASET = str( Path(__file__).parent / "sample_documents" / "sample_hugging_face_dataset.py" ) @pytest.mark.requires("datasets") @pytest.fixture def test_load_string() -> None: """Loads page_content of type string""" page_content_column = "text" name = "v1" loader = HuggingFaceDatasetLoader( HUGGING_FACE_EXAMPLE_DATASET, page_content_column, name ) docs = loader.load() # Length should be number of splits for specified `name` assert len(docs) == 2 doc = docs[0] assert doc.page_content == '"This is text in version 1"' assert doc.metadata.keys() == { "split", "list", "dict", } @pytest.mark.requires("datasets") @pytest.fixture def test_load_list() -> None: """Loads page_content of type List""" page_content_column = "list" name = "v1" loader = HuggingFaceDatasetLoader( HUGGING_FACE_EXAMPLE_DATASET, page_content_column, name ) doc = loader.load()[0] assert doc.page_content == '["List item 1", "List item 2", "List item 3"]' assert doc.metadata.keys() == { "split", "text", "dict", } @pytest.mark.requires("datasets") @pytest.fixture def test_load_object() -> None: """Loads page_content of type Object""" page_content_column = "dict" name = "v2" loader = HuggingFaceDatasetLoader( HUGGING_FACE_EXAMPLE_DATASET, page_content_column, name ) doc = loader.load()[0] assert ( doc.page_content == '{"dict_text": ["Hello world!", "langchain is cool"], "dict_int": [2, 123]}' ) assert doc.metadata.keys() == { "split", "text", "list", } @pytest.mark.requires("datasets") @pytest.fixture def test_load_nonexistent_dataset() -> None: """Tests that ValueError is thrown for nonexistent dataset name""" page_content_column = "text" name = "v3" loader = HuggingFaceDatasetLoader( HUGGING_FACE_EXAMPLE_DATASET, page_content_column, name ) with pytest.raises(ValueError): loader.load() @pytest.mark.requires("datasets") @pytest.fixture def test_load_nonexistent_feature() 
-> None: """Tests that KeyError is thrown for nonexistent feature/key in dataset""" page_content_column = "langchain" name = "v2" loader = HuggingFaceDatasetLoader( HUGGING_FACE_EXAMPLE_DATASET, page_content_column, name ) with pytest.raises(KeyError): loader.load()
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_airbyte.py
"""Test the airbyte document loader. Light test to ensure that the airbyte document loader can be imported. """ def test_airbyte_import() -> None: """Test that the airbyte document loader can be imported.""" from langchain_community.document_loaders import airbyte # noqa
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_obsidian.py
"""Unit tests for ``ObsidianLoader`` metadata and content extraction."""

from pathlib import Path

from langchain_community.document_loaders.obsidian import ObsidianLoader

# Directory of sample Obsidian markdown notes exercised by every test below.
OBSIDIAN_EXAMPLE_PATH = Path(__file__).parent / "sample_documents" / "obsidian"
# Metadata keys the loader always attaches, regardless of note content.
STANDARD_METADATA_FIELDS = {
    "created",
    "path",
    "source",
    "last_accessed",
    "last_modified",
}

# Loaded once at module import time; the tests share these documents read-only.
loader = ObsidianLoader(str(OBSIDIAN_EXAMPLE_PATH))
docs = loader.load()


def test_page_content_loaded() -> None:
    """Verify that all docs have page_content"""
    assert len(docs) == 6
    assert all(doc.page_content for doc in docs)


def test_disable_collect_metadata() -> None:
    """If collect_metadata is False, no additional metadata should be collected."""
    loader_without_metadata = ObsidianLoader(
        str(OBSIDIAN_EXAMPLE_PATH), collect_metadata=False
    )
    docs_wo = loader_without_metadata.load()
    assert len(docs_wo) == 6
    assert all(doc.page_content for doc in docs_wo)
    # Only the standard fields remain; no frontmatter/tag/dataview metadata.
    assert all(set(doc.metadata) == STANDARD_METADATA_FIELDS for doc in docs_wo)


def test_metadata_without_frontmatter() -> None:
    """Verify docs without frontmatter, still have basic metadata."""
    doc = next(doc for doc in docs if doc.metadata["source"] == "no_metadata.md")
    assert set(doc.metadata) == STANDARD_METADATA_FIELDS


def test_metadata_with_frontmatter() -> None:
    """Verify a standard frontmatter field is loaded."""
    doc = next(doc for doc in docs if doc.metadata["source"] == "frontmatter.md")
    assert set(doc.metadata) == STANDARD_METADATA_FIELDS | {"tags"}
    # Tags are stored as a single comma-separated string.
    assert set(doc.metadata["tags"].split(",")) == {"journal/entry", "obsidian"}


def test_metadata_with_template_vars_in_frontmatter() -> None:
    """Verify frontmatter fields with template variables are loaded."""
    doc = next(
        doc for doc in docs if doc.metadata["source"] == "template_var_frontmatter.md"
    )
    FRONTMATTER_FIELDS = {
        "aString",
        "anArray",
        "aDict",
        "tags",
    }
    assert set(doc.metadata) == FRONTMATTER_FIELDS | STANDARD_METADATA_FIELDS
    # Template placeholders ({{...}}) are kept verbatim, not expanded.
    assert doc.metadata["aString"] == "{{var}}"
    assert doc.metadata["anArray"] == "['element', '{{varElement}}']"
    assert doc.metadata["aDict"] == "{'dictId1': 'val', 'dictId2': '{{varVal}}'}"
    assert set(doc.metadata["tags"].split(",")) == {"tag", "{{varTag}}"}


def test_metadata_with_bad_frontmatter() -> None:
    """Verify a doc with non-yaml frontmatter."""
    doc = next(doc for doc in docs if doc.metadata["source"] == "bad_frontmatter.md")
    # Unparseable frontmatter is ignored; only standard fields remain.
    assert set(doc.metadata) == STANDARD_METADATA_FIELDS


def test_metadata_with_tags_and_frontmatter() -> None:
    """Verify a doc with frontmatter and tags/dataview tags are all added to metadata."""
    doc = next(
        doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
    )
    FRONTMATTER_FIELDS = {
        "aBool",
        "aFloat",
        "anInt",
        "anArray",
        "aString",
        "aDict",
        "tags",
    }
    DATAVIEW_FIELDS = {"dataview1", "dataview2", "dataview3"}
    assert (
        set(doc.metadata)
        == STANDARD_METADATA_FIELDS | FRONTMATTER_FIELDS | DATAVIEW_FIELDS
    )


def test_tags_in_page_content() -> None:
    """Verify a doc with tags are included in the metadata"""
    doc = next(doc for doc in docs if doc.metadata["source"] == "no_frontmatter.md")
    assert set(doc.metadata) == STANDARD_METADATA_FIELDS | {"tags"}


def test_boolean_metadata() -> None:
    """Verify boolean metadata is loaded correctly"""
    doc = next(
        doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
    )
    assert doc.metadata["aBool"]


def test_float_metadata() -> None:
    """Verify float metadata is loaded correctly"""
    doc = next(
        doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
    )
    assert doc.metadata["aFloat"] == 13.12345


def test_int_metadata() -> None:
    """Verify int metadata is loaded correctly"""
    doc = next(
        doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
    )
    assert doc.metadata["anInt"] == 15


def test_string_metadata() -> None:
    """Verify string metadata is loaded correctly"""
    doc = next(
        doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
    )
    assert doc.metadata["aString"] == "string value"


def test_array_metadata() -> None:
    """Verify array metadata is loaded as a string"""
    doc = next(
        doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
    )
    # Arrays are not parsed into lists; their repr string is stored.
    assert doc.metadata["anArray"] == "['one', 'two', 'three']"


def test_dict_metadata() -> None:
    """Verify dict metadata is stored as a string"""
    doc = next(
        doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
    )
    assert doc.metadata["aDict"] == "{'dictId1': '58417', 'dictId2': 1500}"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_evernote_loader.py
import os
import pathlib
import time

import pytest

from langchain_community.document_loaders import EverNoteLoader


@pytest.mark.requires("lxml", "html2text")
class TestEverNoteLoader:
    """Unit tests for ``EverNoteLoader`` against sample .enex exports.

    The second positional argument to ``EverNoteLoader`` controls whether all
    notes are combined into a single document (``True``) or each note becomes
    its own document (``False``) — demonstrated by the two
    ``test_returnsingledocument_*`` tests at the bottom.
    """

    @staticmethod
    def example_notebook_path(notebook_name: str) -> str:
        """Return the absolute path of a sample notebook in sample_documents/."""
        current_dir = pathlib.Path(__file__).parent
        return os.path.join(current_dir, "sample_documents", notebook_name)

    def test_loadnotebook_eachnoteisindividualdocument(self) -> None:
        # Two notes in the export -> two documents when not combining.
        loader = EverNoteLoader(
            self.example_notebook_path("sample_notebook.enex"), False
        )
        documents = loader.load()
        assert len(documents) == 2

    def test_loadnotebook_eachnotehasexpectedcontentwithleadingandtrailingremoved(
        self,
    ) -> None:
        documents = EverNoteLoader(
            self.example_notebook_path("sample_notebook.enex"), False
        ).load()

        content_note1 = documents[0].page_content
        assert content_note1 == "abc"

        # HTML formatting is converted to markdown (bold -> **...**).
        content_note2 = documents[1].page_content
        assert content_note2 == "**Jan - March 2022**"

    def test_loademptynotebook_emptylistreturned(self) -> None:
        documents = EverNoteLoader(
            self.example_notebook_path("empty_export.enex"), False
        ).load()
        assert len(documents) == 0

    def test_loadnotewithemptycontent_emptydocumentcontent(self) -> None:
        documents = EverNoteLoader(
            self.example_notebook_path("sample_notebook_emptynote.enex"), False
        ).load()
        note = documents[0]
        assert note.page_content == ""

    def test_loadnotewithmissingcontenttag_emptylistreturned(
        self,
    ) -> None:
        # A note without a <content> tag is dropped entirely.
        documents = EverNoteLoader(
            self.example_notebook_path("sample_notebook_missingcontenttag.enex"), False
        ).load()
        assert len(documents) == 0

    def test_loadnotewithnometadata_documentreturnedwithsourceonly(
        self,
    ) -> None:
        documents = EverNoteLoader(
            self.example_notebook_path("sample_notebook_missingmetadata.enex"), False
        ).load()
        note = documents[0]

        assert note.page_content == "I only have content, no metadata"
        # "source" is synthesized from the file path, so it is always present.
        assert len(note.metadata) == 1
        assert "source" in note.metadata
        assert "sample_notebook_missingmetadata.enex" in note.metadata["source"]

    def test_loadnotebookwithimage_notehasplaintextonlywithresourcesremoved(
        self,
    ) -> None:
        documents = EverNoteLoader(
            self.example_notebook_path("sample_notebook_with_media.enex"), False
        ).load()

        note = documents[0]
        # Embedded media resources are stripped; only the plain text remains.
        assert (
            note.page_content
            == """\
When you pick this mug up with your thumb on top and middle finger through the
loop, your ring finger slides into the mug under the loop where it is too hot
to touch and burns you.

If you try and pick it up with your thumb and index finger you can’t hold the
mug."""
        )

    def test_loadnotebook_eachnotehasexpectedmetadata(self) -> None:
        documents = EverNoteLoader(
            self.example_notebook_path("sample_notebook.enex"), False
        ).load()
        metadata_note1 = documents[0].metadata

        assert "title" in metadata_note1.keys()
        assert "created" in metadata_note1.keys()
        assert "updated" in metadata_note1.keys()
        assert "note-attributes.author" in metadata_note1.keys()
        assert (
            "content" not in metadata_note1.keys()
        )  # This should be in the content of the document instead
        assert (
            "content-raw" not in metadata_note1.keys()
        )  # This is too large to be stored as metadata
        assert (
            "resource" not in metadata_note1.keys()
        )  # This is too large to be stored as metadata

        assert metadata_note1["title"] == "Test"
        assert metadata_note1["note-attributes.author"] == "Michael McGarry"

        # Timestamps are parsed into time.struct_time, not kept as strings.
        assert isinstance(metadata_note1["created"], time.struct_time)
        assert isinstance(metadata_note1["updated"], time.struct_time)

        assert metadata_note1["created"].tm_year == 2023
        assert metadata_note1["created"].tm_mon == 5
        assert metadata_note1["created"].tm_mday == 11

        assert metadata_note1["updated"].tm_year == 2024
        assert metadata_note1["updated"].tm_mon == 7
        assert metadata_note1["updated"].tm_mday == 14

        metadata_note2 = documents[1].metadata

        assert "title" in metadata_note2.keys()
        assert "created" in metadata_note2.keys()
        # Note 2 has no "updated" element in the export.
        assert "updated" not in metadata_note2.keys()
        assert "note-attributes.author" in metadata_note2.keys()
        assert "note-attributes.source" in metadata_note2.keys()
        assert "content" not in metadata_note2.keys()
        assert "content-raw" not in metadata_note2.keys()
        assert (
            "resource" not in metadata_note2.keys()
        )  # This is too large to be stored as metadata

        assert metadata_note2["title"] == "Summer Training Program"
        assert metadata_note2["note-attributes.author"] == "Mike McGarry"
        assert metadata_note2["note-attributes.source"] == "mobile.iphone"

        assert isinstance(metadata_note2["created"], time.struct_time)

        assert metadata_note2["created"].tm_year == 2022
        assert metadata_note2["created"].tm_mon == 12
        assert metadata_note2["created"].tm_mday == 27

    def test_loadnotebookwithconflictingsourcemetadatatag_sourceoffilepreferred(
        self,
    ) -> None:
        # When a note declares its own "source" attribute, the file path wins.
        documents = EverNoteLoader(
            self.example_notebook_path("sample_notebook_2.enex"), False
        ).load()
        assert "sample_notebook_2.enex" in documents[0].metadata["source"]
        assert "mobile.iphone" not in documents[0].metadata["source"]

    def test_returnsingledocument_loadnotebook_eachnoteiscombinedinto1document(
        self,
    ) -> None:
        loader = EverNoteLoader(
            self.example_notebook_path("sample_notebook.enex"), True
        )
        documents = loader.load()
        assert len(documents) == 1

    def test_returnsingledocument_loadnotebook_notecontentiscombinedinto1document(
        self,
    ) -> None:
        loader = EverNoteLoader(
            self.example_notebook_path("sample_notebook.enex"), True
        )
        documents = loader.load()
        note = documents[0]
        assert note.page_content == "abc**Jan - March 2022**"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_hugging_face_model.py
"""Unit tests for ``HuggingFaceModelLoader`` using mocked HTTP responses."""

import json
from typing import Tuple

import responses
from requests import Request

from langchain_community.document_loaders import HuggingFaceModelLoader

# Mocked model data to simulate an API response
MOCKED_MODELS_RESPONSE = [
    {
        "_id": "657a1fff16886e681230c05a",
        "id": "microsoft/phi-2",
        "likes": 2692,
        "private": False,
        "downloads": 546775,
        "tags": [
            "transformers",
            "safetensors",
            "phi",
            "text-generation",
            "nlp",
            "code",
            "custom_code",
            "en",
            "license:mit",
            "autotrain_compatible",
            "endpoints_compatible",
            "has_space",
            "region:us",
        ],
        "pipeline_tag": "text-generation",
        "library_name": "transformers",
        "createdAt": "2023-12-13T21:19:59.000Z",
        "modelId": "microsoft/phi-2",
    },
    # Add additional models as needed
]

# Mocked README content for models
MOCKED_README_CONTENT = {
    "microsoft/phi-2": "README content for microsoft/phi-2",
    "openai/gpt-3": "README content for openai/gpt-3",
}


def response_callback(request: Request) -> Tuple[int, dict, str]:
    """Dispatch a canned (status, headers, body) triple based on the URL."""
    url = request.url
    if "/api/models" in url:
        return 200, {}, json.dumps(MOCKED_MODELS_RESPONSE)
    if "README.md" in url:
        # URL shape: https://huggingface.co/{org}/{model}/raw/main/README.md,
        # so path segments 3 and 4 form the "{org}/{model}" id.
        segments = url.split("/")
        model_id = f"{segments[3]}/{segments[4]}"
        return 200, {}, MOCKED_README_CONTENT.get(model_id, "")
    return 404, {}, "Not Found"


@responses.activate
def test_load_models_with_readme() -> None:
    """Tests loading models along with their README content."""
    # Intercept the model-search endpoint.
    responses.add_callback(
        responses.GET,
        "https://huggingface.co/api/models",
        callback=response_callback,  # type: ignore
        content_type="application/json",
    )
    # Intercept the README fetch for the one mocked model.
    responses.add_callback(
        responses.GET,
        "https://huggingface.co/microsoft/phi-2/raw/main/README.md",
        callback=response_callback,  # type: ignore
        content_type="text/plain",
    )

    docs = HuggingFaceModelLoader(search="phi-2", limit=2).load()

    assert len(docs) == len(MOCKED_MODELS_RESPONSE)
    for doc, model in zip(docs, MOCKED_MODELS_RESPONSE):
        model_id = model["id"]
        assert isinstance(model_id, str)
        assert doc.page_content == MOCKED_README_CONTENT[model_id]
        assert doc.metadata["modelId"] == model["id"]
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_mhtml.py
from pathlib import Path

import pytest

from langchain_community.document_loaders.mhtml import MHTMLLoader

HERE = Path(__file__).parent
EXAMPLES = HERE.parent.parent / "integration_tests" / "examples"


@pytest.mark.requires("bs4", "lxml")
def test_mhtml_loader() -> None:
    """Load a sample .mht file and verify its content and metadata."""
    sample = EXAMPLES / "example.mht"
    documents = MHTMLLoader(str(sample)).load()

    assert len(documents) == 1
    doc = documents[0]

    assert doc.metadata["title"] == "LangChain"
    assert doc.metadata["source"] == str(sample)
    assert "LANG CHAIN 🦜️🔗Official Home Page" in doc.page_content
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_trello.py
"""Unit tests for ``TrelloLoader`` against a mocked Trello client."""

import unittest
from collections import namedtuple
from typing import Any, Optional
from unittest.mock import patch

import pytest

from langchain_community.document_loaders.trello import TrelloLoader


def list_to_objects(dict_list: list) -> list:
    """Helper to convert dict objects."""
    return [
        namedtuple("Object", d.keys())(**d) for d in dict_list if isinstance(d, dict)
    ]


def card_list_to_objects(cards: list) -> list:
    """Helper to convert dict cards into trello weird mix of objects and dictionaries"""
    for card in cards:
        # Default to [] so cards missing these keys don't pass None to
        # list_to_objects (which would raise while iterating).
        card["checklists"] = list_to_objects(card.get("checklists", []))
        card["labels"] = list_to_objects(card.get("labels", []))
    return list_to_objects(cards)


class MockBoard:
    """
    Defining Trello mock board internal object to use in the patched method.
    """

    def __init__(self, id: str, name: str, cards: list, lists: list):
        self.id = id
        self.name = name
        self.cards = cards
        self.lists = lists

    def get_cards(self, card_filter: Optional[str] = "") -> list:
        """We do not need to test the card-filter since is on Trello Client side."""
        return self.cards

    def list_lists(self) -> list:
        return self.lists


# Trello "lists" (the board columns in the UI) shared by both mock boards.
TRELLO_LISTS = [
    {
        "id": "5555cacbc4daa90564b34cf2",
        "name": "Publishing Considerations",
    },
    {
        "id": "5555059b74c03b3a9e362cd0",
        "name": "Backlog",
    },
    {
        "id": "555505a3427fd688c1ca5ebd",
        "name": "Selected for Milestone",
    },
    {
        "id": "555505ba95ff925f9fb1b370",
        "name": "Blocked",
    },
    {
        "id": "555505a695ff925f9fb1b13d",
        "name": "In Progress",
    },
    {
        "id": "555505bdfe380c7edc8ca1a3",
        "name": "Done",
    },
]

# Create a mock list of cards.
TRELLO_CARDS_QA = [
    {
        "id": "12350aca6952888df7975903",
        "name": "Closed Card Title",
        "description": "This is the <em>description</em> of Closed Card.",
        "closed": True,
        "labels": [],
        "due_date": "",
        "url": "https://trello.com/card/12350aca6952888df7975903",
        "list_id": "555505bdfe380c7edc8ca1a3",
        "checklists": [
            {
                "name": "Checklist 1",
                "items": [
                    {
                        "name": "Item 1",
                        "state": "pending",
                    },
                    {
                        "name": "Item 2",
                        "state": "completed",
                    },
                ],
            },
        ],
        "comments": [
            {
                "data": {
                    "text": "This is a comment on a <s>Closed</s> Card.",
                },
            },
        ],
    },
    {
        "id": "45650aca6952888df7975903",
        "name": "Card 2",
        "description": "This is the description of <strong>Card 2</strong>.",
        "closed": False,
        "labels": [{"name": "Medium"}, {"name": "Task"}],
        "due_date": "",
        "url": "https://trello.com/card/45650aca6952888df7975903",
        "list_id": "555505a695ff925f9fb1b13d",
        "checklists": [],
        "comments": [],
    },
    {
        "id": "55550aca6952888df7975903",
        "name": "Camera",
        "description": "<div></div>",
        "closed": False,
        "labels": [{"name": "Task"}],
        "due_date": "",
        "url": "https://trello.com/card/55550aca6952888df7975903",
        "list_id": "555505a3427fd688c1ca5ebd",
        "checklists": [
            {
                "name": "Tasks",
                "items": [
                    {"name": "Zoom", "state": "complete"},
                    {"name": "Follow players", "state": "complete"},
                    {
                        "name": "camera limit to stage size",
                        "state": "complete",
                    },
                    {"name": "Post Processing effects", "state": "complete"},
                    {
                        "name": "Shitch to universal render pipeline",
                        "state": "complete",
                    },
                ],
            },
        ],
        "comments": [
            {
                "data": {
                    "text": (
                        "to follow group of players use Group Camera feature of "
                        "cinemachine."
                    )
                }
            },
            {
                "data": {
                    "text": "Use 'Impulse' <s>Cinemachine</s> feature for camera shake."
                }
            },
            {"data": {"text": "depth of field with custom shader."}},
        ],
    },
]


@pytest.fixture
def mock_trello_client() -> Any:
    """Fixture that creates a mock for trello.TrelloClient."""
    # Create a mock `trello.TrelloClient` object.
    with patch("trello.TrelloClient") as mock_trello_client:
        # Create a mock list of trello list (columns in the UI).
        # The trello client returns a hierarchy mix of objects and dictionaries.
        list_objs = list_to_objects(TRELLO_LISTS)
        cards_qa_objs = card_list_to_objects(TRELLO_CARDS_QA)
        boards = [
            MockBoard("5555eaafea917522902a2a2c", "Research", [], list_objs),
            MockBoard("55559f6002dd973ad8cdbfb7", "QA", cards_qa_objs, list_objs),
        ]

        # Patch `get_boards()` method of the mock `TrelloClient` object to return the
        # mock list of boards.
        mock_trello_client.return_value.list_boards.return_value = boards
        yield mock_trello_client.return_value


@pytest.mark.usefixtures("mock_trello_client")
@pytest.mark.requires("trello", "bs4", "lxml")
class TestTrelloLoader(unittest.TestCase):
    def test_empty_board(self) -> None:
        """
        Test loading a board with no cards.
        """
        trello_loader = TrelloLoader.from_credentials(
            "Research",
            api_key="API_KEY",
            token="API_TOKEN",
        )
        documents = trello_loader.load()
        self.assertEqual(len(documents), 0, "Empty board returns an empty list.")

    def test_complete_text_and_metadata(self) -> None:
        """
        Test loading a board cards with all metadata.
        """
        from bs4 import BeautifulSoup

        trello_loader = TrelloLoader.from_credentials(
            "QA",
            api_key="API_KEY",
            token="API_TOKEN",
        )
        documents = trello_loader.load()
        self.assertEqual(len(documents), len(TRELLO_CARDS_QA), "Card count matches.")

        soup = BeautifulSoup(documents[0].page_content, "html.parser")
        self.assertTrue(
            len(soup.find_all()) == 0,
            "There is not markup in Closed Card document content.",
        )

        # Check samples of every field type is present in page content.
        texts = [
            "Closed Card Title",
            "This is the description of Closed Card.",
            "Checklist 1",
            "Item 1:pending",
            "This is a comment on a Closed Card.",
        ]
        for text in texts:
            self.assertTrue(text in documents[0].page_content)

        # Check all metadata is present in first Card
        self.assertEqual(
            documents[0].metadata,
            {
                "title": "Closed Card Title",
                "id": "12350aca6952888df7975903",
                "url": "https://trello.com/card/12350aca6952888df7975903",
                "labels": [],
                "list": "Done",
                "closed": True,
                "due_date": "",
            },
            "Metadata of Closed Card Matches.",
        )

        soup = BeautifulSoup(documents[1].page_content, "html.parser")
        self.assertTrue(
            len(soup.find_all()) == 0,
            "There is not markup in Card 2 document content.",
        )

        # Check samples of every field type is present in page content.
        texts = [
            "Card 2",
            "This is the description of Card 2.",
        ]
        for text in texts:
            self.assertTrue(text in documents[1].page_content)

        # Check all metadata is present in second Card
        self.assertEqual(
            documents[1].metadata,
            {
                "title": "Card 2",
                "id": "45650aca6952888df7975903",
                "url": "https://trello.com/card/45650aca6952888df7975903",
                "labels": ["Medium", "Task"],
                "list": "In Progress",
                "closed": False,
                "due_date": "",
            },
            "Metadata of Card 2 Matches.",
        )

        soup = BeautifulSoup(documents[2].page_content, "html.parser")
        self.assertTrue(
            len(soup.find_all()) == 0,
            "There is not markup in Card 2 document content.",
        )

        # Check samples of every field type is present in page content.
        texts = [
            "Camera",
            "camera limit to stage size:complete",
            "Use 'Impulse' Cinemachine feature for camera shake.",
        ]
        for text in texts:
            self.assertTrue(text in documents[2].page_content, text + " is present.")

        # Check all metadata is present in second Card
        self.assertEqual(
            documents[2].metadata,
            {
                "title": "Camera",
                "id": "55550aca6952888df7975903",
                "url": "https://trello.com/card/55550aca6952888df7975903",
                "labels": ["Task"],
                "list": "Selected for Milestone",
                "closed": False,
                "due_date": "",
            },
            "Metadata of Camera Card matches.",
        )

    def test_partial_text_and_metadata(self) -> None:
        """
        Test loading a board cards removing some text and metadata.
        """
        # FIX(review): `extra_metadata` was previously `("list")`, which is
        # just the string "list" (no trailing comma -> not a tuple).  It only
        # worked because `"list" in "list"` is true for substring membership.
        # Pass a real one-element tuple instead.
        trello_loader = TrelloLoader.from_credentials(
            "QA",
            api_key="API_KEY",
            token="API_TOKEN",
            extra_metadata=("list",),
            include_card_name=False,
            include_checklist=False,
            include_comments=False,
        )
        documents = trello_loader.load()

        # Check samples of every field type is present in page content.
        texts = [
            "Closed Card Title",
            "Checklist 1",
            "Item 1:pending",
            "This is a comment on a Closed Card.",
        ]
        for text in texts:
            self.assertFalse(text in documents[0].page_content)

        # Check all metadata is present in first Card
        self.assertEqual(
            documents[0].metadata,
            {
                "title": "Closed Card Title",
                "id": "12350aca6952888df7975903",
                "url": "https://trello.com/card/12350aca6952888df7975903",
                "list": "Done",
            },
            "Metadata of Closed Card Matches.",
        )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_directory_loader.py
from pathlib import Path

import pytest

from langchain_core.documents import Document

from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.document_loaders.directory import DirectoryLoader


class TestDirectoryLoader:
    """Unit tests for ``DirectoryLoader`` over the CSV fixtures in test_docs/csv."""

    # Tests that when multhreading is enabled, multiple documents are read successfully.
    def test_directory_loader_with_multithreading_enabled(self) -> None:
        dir_path = self._get_csv_dir_path()
        loader = DirectoryLoader(
            dir_path, glob="**/*.csv", loader_cls=CSVLoader, use_multithreading=True
        )

        expected_docs = [
            Document(
                page_content="column1: value1",
                metadata={
                    "source": self._get_csv_file_path("test_one_col.csv"),
                    "row": 0,
                },
            ),
            Document(
                page_content="column1: value2",
                metadata={
                    "source": self._get_csv_file_path("test_one_col.csv"),
                    "row": 1,
                },
            ),
            Document(
                page_content="column1: value3",
                metadata={
                    "source": self._get_csv_file_path("test_one_col.csv"),
                    "row": 2,
                },
            ),
            Document(
                page_content="column1: value1\ncolumn2: value2\ncolumn3: value3",
                metadata={
                    "source": self._get_csv_file_path("test_one_row.csv"),
                    "row": 0,
                },
            ),
            Document(
                page_content="column1: value1\ncolumn2: value2\ncolumn3: value3",
                metadata={
                    "source": self._get_csv_file_path("test_nominal.csv"),
                    "row": 0,
                },
            ),
            Document(
                page_content="column1: value4\ncolumn2: value5\ncolumn3: value6",
                metadata={
                    "source": self._get_csv_file_path("test_nominal.csv"),
                    "row": 1,
                },
            ),
            Document(
                page_content="column1: value1\ncolumn2: value2\n"
                "column3: value3\nNone: value4,value5",
                metadata={
                    "source": self._get_csv_file_path("test_none_col.csv"),
                    "row": 0,
                },
            ),
            Document(
                page_content="column1: value6\ncolumn2: value7\n"
                "column3: value8\nNone: value9",
                metadata={
                    "source": self._get_csv_file_path("test_none_col.csv"),
                    "row": 1,
                },
            ),
        ]

        # Multithreading makes completion order nondeterministic; sort both
        # sides by source path before the element-wise comparison.  Python's
        # sort is stable, so per-file row order is preserved.
        loaded_docs = sorted(loader.load(), key=lambda doc: doc.metadata["source"])
        expected_docs = sorted(expected_docs, key=lambda doc: doc.metadata["source"])

        for i, doc in enumerate(loaded_docs):
            assert doc == expected_docs[i]

    # Tests that lazy loading a CSV file with multiple documents is successful.
    def test_directory_loader_lazy_load_single_file_multiple_docs(self) -> None:
        # Setup
        dir_path = self._get_csv_dir_path()
        file_name = "test_nominal.csv"
        file_path = self._get_csv_file_path(file_name)
        expected_docs = [
            Document(
                page_content="column1: value1\ncolumn2: value2\ncolumn3: value3",
                metadata={"source": file_path, "row": 0},
            ),
            Document(
                page_content="column1: value4\ncolumn2: value5\ncolumn3: value6",
                metadata={"source": file_path, "row": 1},
            ),
        ]

        # Assert
        loader = DirectoryLoader(dir_path, glob=file_name, loader_cls=CSVLoader)
        for i, doc in enumerate(loader.lazy_load()):
            assert doc == expected_docs[i]

    # Tests that lazy loading an empty CSV file is handled correctly.
    def test_directory_loader_lazy_load_empty_file(self) -> None:
        # Setup
        dir_path = self._get_csv_dir_path()
        file_name = "test_empty.csv"

        # Assert: the generator must yield nothing at all for an empty file.
        loader = DirectoryLoader(dir_path, glob=file_name, loader_cls=CSVLoader)
        for _ in loader.lazy_load():
            pytest.fail(
                "DirectoryLoader.lazy_load should not yield something for an empty file"
            )

    # Tests that lazy loading multiple CSV files is handled correctly.
    def test_directory_loader_lazy_load_multiple_files(self) -> None:
        # Setup
        dir_path = self._get_csv_dir_path()
        file_name = "test_nominal.csv"
        file_path = self._get_csv_file_path(file_name)
        expected_docs = [
            Document(
                page_content="column1: value1\ncolumn2: value2\ncolumn3: value3",
                metadata={"source": file_path, "row": 0},
            ),
            Document(
                page_content="column1: value4\ncolumn2: value5\ncolumn3: value6",
                metadata={"source": file_path, "row": 1},
            ),
        ]
        file_name = "test_one_col.csv"
        file_path = self._get_csv_file_path(file_name)
        expected_docs += [
            Document(
                page_content="column1: value1",
                metadata={"source": file_path, "row": 0},
            ),
            Document(
                page_content="column1: value2",
                metadata={"source": file_path, "row": 1},
            ),
            Document(
                page_content="column1: value3",
                metadata={"source": file_path, "row": 2},
            ),
        ]
        file_name = "test_one_row.csv"
        file_path = self._get_csv_file_path(file_name)
        expected_docs += [
            Document(
                page_content="column1: value1\ncolumn2: value2\ncolumn3: value3",
                metadata={"source": file_path, "row": 0},
            )
        ]
        file_name = "test_none_col.csv"
        file_path = self._get_csv_file_path(file_name)
        expected_docs += [
            Document(
                page_content="column1: value1\ncolumn2: value2\n"
                "column3: value3\nNone: value4,value5",
                metadata={"source": file_path, "row": 0},
            ),
            Document(
                page_content="column1: value6\ncolumn2: value7\n"
                "column3: value8\nNone: value9",
                metadata={"source": file_path, "row": 1},
            ),
        ]

        # Assert: membership check (not positional) because file traversal
        # order is not guaranteed; the length check ensures nothing is missed.
        loader = DirectoryLoader(dir_path, loader_cls=CSVLoader)
        loaded_docs = []
        for doc in loader.lazy_load():
            assert doc in expected_docs
            loaded_docs.append(doc)
        assert len(loaded_docs) == len(expected_docs)

    # utility functions
    def _get_csv_file_path(self, file_name: str) -> str:
        """Absolute path of a named CSV fixture under test_docs/csv."""
        return str(Path(__file__).resolve().parent / "test_docs" / "csv" / file_name)

    def _get_csv_dir_path(self) -> str:
        """Absolute path of the CSV fixture directory."""
        return str(Path(__file__).resolve().parent / "test_docs" / "csv")
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_bibtex.py
from pathlib import Path

import pytest

from langchain_community.document_loaders.bibtex import BibtexLoader

BIBTEX_EXAMPLE_FILE = Path(__file__).parent / "sample_documents" / "bibtex.bib"


@pytest.mark.requires("fitz", "bibtexparser")
def test_load_success() -> None:
    """A single entry in the sample file yields exactly one document."""
    documents = BibtexLoader(file_path=str(BIBTEX_EXAMPLE_FILE)).load()
    assert len(documents) == 1
    (document,) = documents
    assert document.page_content
    expected_keys = {
        "id",
        "published_year",
        "title",
        "publication",
        "authors",
        "abstract",
    }
    assert set(document.metadata) == expected_keys


@pytest.mark.requires("fitz", "bibtexparser")
def test_load_max_content_chars() -> None:
    """Document content is truncated at max_content_chars."""
    document = BibtexLoader(
        file_path=str(BIBTEX_EXAMPLE_FILE), max_content_chars=10
    ).load()[0]
    assert len(document.page_content) == 10


@pytest.mark.requires("fitz", "bibtexparser")
def test_load_load_extra_metadata() -> None:
    """With load_extra_metadata, extra bibtex fields appear in metadata."""
    document = BibtexLoader(
        file_path=str(BIBTEX_EXAMPLE_FILE), load_extra_metadata=True
    ).load()[0]
    expected_keys = {
        "id",
        "published_year",
        "title",
        "publication",
        "authors",
        "abstract",
        "booktitle",
        "editor",
        "organization",
    }
    assert set(document.metadata) == expected_keys


@pytest.mark.requires("fitz", "bibtexparser")
def test_load_file_pattern() -> None:
    """A file_pattern matching only .json files yields no documents."""
    documents = BibtexLoader(
        file_path=str(BIBTEX_EXAMPLE_FILE), file_pattern=r"[^:]+\.json"
    ).load()
    assert len(documents) == 0
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_rspace_loader.py
import unittest

from langchain_community.document_loaders.rspace import RSpaceLoader


class TestRSpaceLoader(unittest.TestCase):
    """Validation tests for ``RSpaceLoader`` constructor arguments."""

    url: str = "https://community.researchspace.com"
    api_key: str = "myapikey"
    global_id: str = "SD12345"

    def test_valid_arguments(self) -> None:
        """With all three arguments the loader stores them verbatim."""
        loader = RSpaceLoader(
            url=self.url, api_key=self.api_key, global_id=self.global_id
        )
        self.assertEqual(self.url, loader.url)
        self.assertEqual(self.api_key, loader.api_key)
        self.assertEqual(self.global_id, loader.global_id)

    def test_missing_apikey_raises_validation_error(self) -> None:
        """Omitting api_key raises ValueError naming the missing field."""
        with self.assertRaisesRegex(ValueError, r"Did not find api_key"):
            RSpaceLoader(url=self.url, global_id=self.global_id)

    def test_missing_url_raises_validation_error(self) -> None:
        """Omitting url raises ValueError naming the missing field."""
        with self.assertRaisesRegex(ValueError, r"Did not find url"):
            RSpaceLoader(api_key=self.api_key, global_id=self.global_id)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_youtube.py
import pytest from langchain_core.documents import Document from langchain_community.document_loaders import YoutubeLoader from langchain_community.document_loaders.youtube import TranscriptFormat @pytest.mark.parametrize( "youtube_url, expected_video_id", [ ("http://www.youtube.com/watch?v=-wtIMTCHWuI", "-wtIMTCHWuI"), ("http://youtube.com/watch?v=-wtIMTCHWuI", "-wtIMTCHWuI"), ("http://m.youtube.com/watch?v=-wtIMTCHWuI", "-wtIMTCHWuI"), ("http://youtu.be/-wtIMTCHWuI", "-wtIMTCHWuI"), ("https://youtu.be/-wtIMTCHWuI", "-wtIMTCHWuI"), ("https://www.youtube.com/watch?v=lalOy8Mbfdc", "lalOy8Mbfdc"), ("https://m.youtube.com/watch?v=lalOy8Mbfdc", "lalOy8Mbfdc"), ("https://youtube.com/watch?v=lalOy8Mbfdc", "lalOy8Mbfdc"), ("http://youtu.be/lalOy8Mbfdc?t=1", "lalOy8Mbfdc"), ("http://youtu.be/lalOy8Mbfdc?t=1s", "lalOy8Mbfdc"), ("https://youtu.be/lalOy8Mbfdc?t=1", "lalOy8Mbfdc"), ("http://www.youtube-nocookie.com/embed/lalOy8Mbfdc?rel=0", "lalOy8Mbfdc"), ("https://youtu.be/lalOy8Mbfdc?t=1s", "lalOy8Mbfdc"), ("https://www.youtube.com/shorts/cd0Fy92_w_s", "cd0Fy92_w_s"), ], ) def test_video_id_extraction(youtube_url: str, expected_video_id: str) -> None: """Test that the video id is extracted from a youtube url""" assert YoutubeLoader.extract_video_id(youtube_url) == expected_video_id def test__get_transcript_chunks() -> None: test_transcript_pieces = [ {"text": "♪ Hail to the victors valiant ♪", "start": 3.719, "duration": 5.0}, {"text": "♪ Hail to the conquering heroes ♪", "start": 8.733, "duration": 5.0}, {"text": "♪ Hail, hail to Michigan ♪", "start": 14.541, "duration": 5.0}, {"text": "♪ The leaders and best ♪", "start": 19.785, "duration": 5.0}, {"text": "♪ Hail to the victors valiant ♪", "start": 25.661, "duration": 4.763}, {"text": "♪ Hail to the conquering heroes ♪", "start": 30.424, "duration": 5.0}, {"text": "♪ Hail, hail to Michigan ♪", "start": 36.37, "duration": 4.91}, {"text": "♪ The champions of the west ♪", "start": 41.28, "duration": 2.232}, {"text": "♪ Hail 
to the victors valiant ♪", "start": 43.512, "duration": 4.069}, { "text": "♪ Hail to the conquering heroes ♪", "start": 47.581, "duration": 4.487, }, {"text": "♪ Hail, hail to Michigan ♪", "start": 52.068, "duration": 4.173}, {"text": "♪ The leaders and best ♪", "start": 56.241, "duration": 4.542}, {"text": "♪ Hail to victors valiant ♪", "start": 60.783, "duration": 3.944}, { "text": "♪ Hail to the conquering heroes ♪", "start": 64.727, "duration": 4.117, }, {"text": "♪ Hail, hail to Michigan ♪", "start": 68.844, "duration": 3.969}, {"text": "♪ The champions of the west ♪", "start": 72.813, "duration": 4.232}, {"text": "(choir clapping rhythmically)", "start": 77.045, "duration": 3.186}, {"text": "- Go blue!", "start": 80.231, "duration": 0.841}, {"text": "(choir clapping rhythmically)", "start": 81.072, "duration": 3.149}, {"text": "Go blue!", "start": 84.221, "duration": 0.919}, {"text": "♪ It's great to be ♪", "start": 85.14, "duration": 1.887}, { "text": "♪ A Michigan Wolverine ♪\n- Go blue!", "start": 87.027, "duration": 2.07, }, {"text": "♪ It's great to be ♪", "start": 89.097, "duration": 1.922}, { "text": "♪ A Michigan Wolverine ♪\n- Go blue!", "start": 91.019, "duration": 2.137, }, { "text": "♪ It's great to be ♪\n(choir scatting)", "start": 93.156, "duration": 1.92, }, { "text": "♪ a Michigan Wolverine ♪\n(choir scatting)", "start": 95.076, "duration": 2.118, }, { "text": "♪ It's great to be ♪\n(choir scatting)", "start": 97.194, "duration": 1.85, }, { "text": "♪ A Michigan ♪\n(choir scatting)", "start": 99.044, "duration": 1.003, }, {"text": "- Let's go blue!", "start": 100.047, "duration": 1.295}, { "text": "♪ Hail to the victors valiant ♪", "start": 101.342, "duration": 1.831, }, { "text": "♪ Hail to the conquering heroes ♪", "start": 103.173, "duration": 2.21, }, {"text": "♪ Hail, hail to Michigan ♪", "start": 105.383, "duration": 1.964}, {"text": "♪ The leaders and best ♪", "start": 107.347, "duration": 2.21}, { "text": "♪ Hail to the victors valiant 
♪", "start": 109.557, "duration": 1.643, }, { "text": "♪ Hail to the conquering heroes ♪", "start": 111.2, "duration": 2.129, }, {"text": "♪ Hail, hail to Michigan ♪", "start": 113.329, "duration": 2.091}, {"text": "♪ The champions of the west ♪", "start": 115.42, "duration": 2.254}, { "text": "♪ Hail to the victors valiant ♪", "start": 117.674, "duration": 4.039, }, { "text": "♪ Hail to the conquering heroes ♪", "start": 121.713, "duration": 4.103, }, { "text": "♪ Hail to the blue, hail to the blue ♪", "start": 125.816, "duration": 1.978, }, { "text": "♪ Hail to the blue, hail to the blue ♪", "start": 127.794, "duration": 2.095, }, { "text": "♪ Hail to the blue, hail to the blue ♪", "start": 129.889, "duration": 1.932, }, { "text": "♪ Hail to the blue, hail to the blue ♪", "start": 131.821, "duration": 2.091, }, { "text": "♪ Hail to the blue, hail to the blue ♪", "start": 133.912, "duration": 2.109, }, {"text": "♪ Hail to the blue, hail ♪", "start": 136.021, "duration": 3.643}, {"text": "♪ To Michigan ♪", "start": 139.664, "duration": 4.105}, {"text": "♪ The champions of the west ♪", "start": 143.769, "duration": 3.667}, {"text": "♪ Go blue ♪", "start": 154.122, "duration": 2.167}, ] test_transcript_chunks = [ Document( page_content="♪ Hail to the victors valiant ♪ ♪ Hail to the conquering heroes ♪ ♪ Hail, hail to Michigan ♪ ♪ The leaders and best ♪", # noqa: E501 metadata={ "source": "https://www.youtube.com/watch?v=TKCMw0utiak&t=0s", "start_seconds": 0, "start_timestamp": "00:00:00", }, ), Document( page_content="♪ Hail to the victors valiant ♪ ♪ Hail to the conquering heroes ♪ ♪ Hail, hail to Michigan ♪ ♪ The champions of the west ♪ ♪ Hail to the victors valiant ♪ ♪ Hail to the conquering heroes ♪ ♪ Hail, hail to Michigan ♪", # noqa: E501 metadata={ "source": "https://www.youtube.com/watch?v=TKCMw0utiak&t=30s", "start_seconds": 30, "start_timestamp": "00:00:30", }, ), Document( page_content="♪ The leaders and best ♪ ♪ Hail to victors valiant ♪ ♪ Hail to the 
conquering heroes ♪ ♪ Hail, hail to Michigan ♪ ♪ The champions of the west ♪ (choir clapping rhythmically) - Go blue! (choir clapping rhythmically) Go blue! ♪ It's great to be ♪ ♪ A Michigan Wolverine ♪\n- Go blue!", # noqa: E501 metadata={ "source": "https://www.youtube.com/watch?v=TKCMw0utiak&t=60s", "start_seconds": 60, "start_timestamp": "00:01:00", }, ), Document( page_content="♪ It's great to be ♪ ♪ A Michigan Wolverine ♪\n- Go blue! ♪ It's great to be ♪\n(choir scatting) ♪ a Michigan Wolverine ♪\n(choir scatting) ♪ It's great to be ♪\n(choir scatting) ♪ A Michigan ♪\n(choir scatting) - Let's go blue! ♪ Hail to the victors valiant ♪ ♪ Hail to the conquering heroes ♪ ♪ Hail, hail to Michigan ♪ ♪ The leaders and best ♪ ♪ Hail to the victors valiant ♪ ♪ Hail to the conquering heroes ♪ ♪ Hail, hail to Michigan ♪ ♪ The champions of the west ♪", # noqa: E501 metadata={ "source": "https://www.youtube.com/watch?v=TKCMw0utiak&t=90s", "start_seconds": 90, "start_timestamp": "00:01:30", }, ), Document( page_content="♪ Hail to the victors valiant ♪ ♪ Hail to the conquering heroes ♪ ♪ Hail to the blue, hail to the blue ♪ ♪ Hail to the blue, hail to the blue ♪ ♪ Hail to the blue, hail to the blue ♪ ♪ Hail to the blue, hail to the blue ♪ ♪ Hail to the blue, hail to the blue ♪ ♪ Hail to the blue, hail ♪ ♪ To Michigan ♪ ♪ The champions of the west ♪", # noqa: E501 metadata={ "source": "https://www.youtube.com/watch?v=TKCMw0utiak&t=120s", "start_seconds": 120, "start_timestamp": "00:02:00", }, ), Document( page_content="♪ Go blue ♪", metadata={ "source": "https://www.youtube.com/watch?v=TKCMw0utiak&t=150s", "start_seconds": 150, "start_timestamp": "00:02:30", }, ), ] ytl = YoutubeLoader( "TKCMw0utiak", transcript_format=TranscriptFormat.CHUNKS, chunk_size_seconds=30, ) assert ( list(ytl._get_transcript_chunks(test_transcript_pieces)) == test_transcript_chunks )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_onenote.py
import os from typing import Any from unittest.mock import Mock import pytest from _pytest.monkeypatch import MonkeyPatch from langchain_core.documents import Document from pytest_mock import MockerFixture from langchain_community.document_loaders.onenote import OneNoteLoader def test_initialization() -> None: os.environ["MS_GRAPH_CLIENT_ID"] = "CLIENT_ID" os.environ["MS_GRAPH_CLIENT_SECRET"] = "CLIENT_SECRET" loader = OneNoteLoader( notebook_name="test_notebook", section_name="test_section", page_title="test_title", access_token="access_token", ) assert loader.notebook_name == "test_notebook" assert loader.section_name == "test_section" assert loader.page_title == "test_title" assert loader.access_token == "access_token" assert loader._headers == { "Authorization": "Bearer access_token", } @pytest.mark.requires("bs4") def test_load(mocker: MockerFixture) -> None: os.environ["MS_GRAPH_CLIENT_ID"] = "CLIENT_ID" os.environ["MS_GRAPH_CLIENT_SECRET"] = "CLIENT_SECRET" mocker.patch( "requests.get", return_value=mocker.MagicMock(json=lambda: {"value": []}, links=None), ) loader = OneNoteLoader( notebook_name="test_notebook", section_name="test_section", page_title="test_title", access_token="access_token", ) documents = loader.load() assert documents == [] mocker.patch( "langchain_community.document_loaders.onenote.OneNoteLoader._get_page_content", return_value=( "<html><head><title>Test Title</title></head>" "<body><p>Test Content</p></body></html>" ), ) loader = OneNoteLoader( object_ids=["test_id"], access_token="access_token", ) documents = loader.load() assert documents == [ Document( page_content="Test Title\nTest Content", metadata={"title": "Test Title"} ) ] class FakeConfidentialClientApplication(Mock): def get_authorization_request_url(self, *args: Any, **kwargs: Any) -> str: return "fake_authorization_url" @pytest.mark.requires("msal") def test_msal_import(monkeypatch: MonkeyPatch, mocker: MockerFixture) -> None: os.environ["MS_GRAPH_CLIENT_ID"] = "CLIENT_ID" 
os.environ["MS_GRAPH_CLIENT_SECRET"] = "CLIENT_SECRET" monkeypatch.setattr("builtins.input", lambda _: "invalid_url") mocker.patch( "msal.ConfidentialClientApplication", return_value=FakeConfidentialClientApplication(), ) loader = OneNoteLoader( notebook_name="test_notebook", section_name="test_section", page_title="test_title", ) with pytest.raises(IndexError): loader._auth() def test_url() -> None: os.environ["MS_GRAPH_CLIENT_ID"] = "CLIENT_ID" os.environ["MS_GRAPH_CLIENT_SECRET"] = "CLIENT_SECRET" loader = OneNoteLoader( notebook_name="test_notebook", section_name="test_section", page_title="test_title", access_token="access_token", onenote_api_base_url="https://graph.microsoft.com/v1.0/me/onenote", ) assert loader._url == ( "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id" "&$expand=parentNotebook,parentSection" "&$filter=parentNotebook/displayName%20eq%20'test_notebook'" "%20and%20parentSection/displayName%20eq%20'test_section'" "%20and%20title%20eq%20'test_title'" ) loader = OneNoteLoader( notebook_name="test_notebook", section_name="test_section", access_token="access_token", onenote_api_base_url="https://graph.microsoft.com/v1.0/me/onenote", ) assert loader._url == ( "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id" "&$expand=parentNotebook,parentSection" "&$filter=parentNotebook/displayName%20eq%20'test_notebook'" "%20and%20parentSection/displayName%20eq%20'test_section'" ) loader = OneNoteLoader( notebook_name="test_notebook", access_token="access_token", onenote_api_base_url="https://graph.microsoft.com/v1.0/me/onenote", ) assert loader._url == ( "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id" "&$expand=parentNotebook" "&$filter=parentNotebook/displayName%20eq%20'test_notebook'" ) loader = OneNoteLoader( section_name="test_section", access_token="access_token", onenote_api_base_url="https://graph.microsoft.com/v1.0/me/onenote", ) assert loader._url == ( 
"https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id" "&$expand=parentSection" "&$filter=parentSection/displayName%20eq%20'test_section'" ) loader = OneNoteLoader( section_name="test_section", page_title="test_title", access_token="access_token", onenote_api_base_url="https://graph.microsoft.com/v1.0/me/onenote", ) assert loader._url == ( "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id" "&$expand=parentSection" "&$filter=parentSection/displayName%20eq%20'test_section'" "%20and%20title%20eq%20'test_title'" ) loader = OneNoteLoader( page_title="test_title", access_token="access_token", onenote_api_base_url="https://graph.microsoft.com/v1.0/me/onenote", ) assert loader._url == ( "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id" "&$filter=title%20eq%20'test_title'" )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_arcgis_loader.py
from unittest.mock import MagicMock, patch

import pytest

from langchain_community.document_loaders import ArcGISLoader


@pytest.fixture
def arcgis_mocks(mock_feature_layer, mock_gis):  # type: ignore
    # Stub out the entire arcgis package so the loader can be imported and
    # exercised without the real (heavy, licensed) dependency installed.
    sys_modules = {
        "arcgis": MagicMock(),
        "arcgis.features.FeatureLayer": mock_feature_layer,
        "arcgis.gis.GIS": mock_gis,
    }
    with patch.dict("sys.modules", sys_modules):
        yield


@pytest.fixture
def mock_feature_layer():  # type: ignore
    # A feature layer whose query() yields one feature and whose properties
    # include an HTML description (exercises the HTML-stripping path).
    feature_layer = MagicMock()
    feature_layer.query.return_value = [
        MagicMock(as_dict={"attributes": {"field": "value"}})
    ]
    feature_layer.url = "https://example.com/layer_url"
    feature_layer.properties = {
        "description": "<html><body>Some HTML content</body></html>",
        "name": "test",
        "serviceItemId": "testItemId",
    }
    return feature_layer


@pytest.fixture
def mock_gis():  # type: ignore
    gis = MagicMock()
    gis.content.get.return_value = MagicMock(description="Item description")
    return gis


def test_lazy_load(arcgis_mocks, mock_feature_layer, mock_gis):  # type: ignore
    loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
    # Force the no-BeautifulSoup code path.
    loader.BEAUTIFULSOUP = None

    documents = list(loader.lazy_load())

    assert len(documents) == 1
    assert documents[0].metadata["url"] == "https://example.com/layer_url"
    # Add more assertions based on your expected behavior


def test_initialization_with_string_layer(  # type: ignore
    arcgis_mocks, mock_feature_layer, mock_gis
):
    """A string layer URL is resolved through arcgis.features.FeatureLayer."""
    layer_url = "https://example.com/layer_url"

    with patch("arcgis.features.FeatureLayer", return_value=mock_feature_layer):
        loader = ArcGISLoader(layer=layer_url, gis=mock_gis)

    assert loader.url == layer_url


def test_layer_description_provided_by_user(  # type: ignore
    arcgis_mocks, mock_feature_layer, mock_gis
):
    """A user-supplied lyr_desc overrides the layer's own description."""
    custom_description = "Custom Layer Description"
    loader = ArcGISLoader(
        layer=mock_feature_layer, gis=mock_gis, lyr_desc=custom_description
    )

    layer_properties = loader._get_layer_properties(lyr_desc=custom_description)

    assert layer_properties["layer_description"] == custom_description


def test_initialization_without_arcgis(mock_feature_layer, mock_gis):  # type: ignore
    # Simulate arcgis being uninstalled: the loader must fail loudly.
    with patch.dict("sys.modules", {"arcgis": None}):
        with pytest.raises(
            ImportError, match="arcgis is required to use the ArcGIS Loader"
        ):
            ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)


def test_get_layer_properties_with_description(  # type: ignore
    arcgis_mocks, mock_feature_layer, mock_gis
):
    loader = ArcGISLoader(
        layer=mock_feature_layer, gis=mock_gis, lyr_desc="Custom Description"
    )

    props = loader._get_layer_properties("Custom Description")

    assert props["layer_description"] == "Custom Description"


def test_load_method(arcgis_mocks, mock_feature_layer, mock_gis):  # type: ignore
    loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)

    documents = loader.load()

    assert len(documents) == 1


def test_geometry_returned(arcgis_mocks, mock_feature_layer, mock_gis):  # type: ignore
    # Feature with geometry attached: return_geometry=True must surface it.
    mock_feature_layer.query.return_value = [
        MagicMock(
            as_dict={
                "attributes": {"field": "value"},
                "geometry": {"type": "point", "coordinates": [0, 0]},
            }
        )
    ]
    loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, return_geometry=True)

    documents = list(loader.lazy_load())

    assert "geometry" in documents[0].metadata


def test_geometry_not_returned(  # type: ignore
    arcgis_mocks, mock_feature_layer, mock_gis
):
    loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, return_geometry=False)

    documents = list(loader.lazy_load())

    assert "geometry" not in documents[0].metadata
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_couchbase.py
"""Test importing the Couchbase document loader."""


def test_couchbase_import() -> None:
    """Smoke test: the Couchbase loader is importable from the public API.

    A failure here means the loader was removed or renamed in
    ``langchain_community.document_loaders``.
    """
    from langchain_community.document_loaders import CouchbaseLoader  # noqa: F401
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_mediawikidump.py
from pathlib import Path

import pytest

from langchain_community.document_loaders.mediawikidump import MWDumpLoader

# Directory containing the sample XML dump shared by every test below.
PARENT_DIR = Path(__file__).parent / "sample_documents"


@pytest.mark.requires("mwparserfromhell", "mwxml")
def test_loading_flawed_xml() -> None:
    """A malformed dump raises when errors are not suppressed (the default)."""
    loader = MWDumpLoader((PARENT_DIR / "mwtest_current_pages.xml").absolute())
    with pytest.raises(TypeError):
        loader.load()


@pytest.mark.requires("mwparserfromhell", "mwxml")
def test_skipping_errors() -> None:
    """With stop_on_error=False, broken pages are skipped and the rest load."""
    loader = MWDumpLoader(
        file_path=(PARENT_DIR / "mwtest_current_pages.xml").absolute(),
        stop_on_error=False,
    )
    documents = loader.load()
    assert len(documents) == 3


@pytest.mark.requires("mwparserfromhell", "mwxml")
def test_skipping_redirects() -> None:
    """skip_redirects=True drops redirect pages from the results."""
    loader = MWDumpLoader(
        file_path=(PARENT_DIR / "mwtest_current_pages.xml").absolute(),
        skip_redirects=True,
        stop_on_error=False,
    )
    documents = loader.load()
    assert len(documents) == 2


@pytest.mark.requires("mwparserfromhell", "mwxml")
def test_multiple_namespaces() -> None:
    """Only pages in the requested namespaces are loaded."""
    loader = MWDumpLoader(
        file_path=(PARENT_DIR / "mwtest_current_pages.xml").absolute(),
        namespaces=[0, 6],
        skip_redirects=True,
        stop_on_error=False,
    )
    documents = loader.load()
    # Debug output: use a plain loop, not a side-effect list comprehension.
    for doc in documents:
        print(doc)  # noqa: T201
    assert len(documents) == 2
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_psychic.py
from typing import Dict
from unittest.mock import MagicMock, patch

import pytest
from langchain_core.documents import Document

from langchain_community.document_loaders.psychic import PsychicLoader


@pytest.fixture
def mock_psychic():  # type: ignore
    # Patch the psychicapi client class so no real API calls are made.
    with patch("psychicapi.Psychic") as mock_psychic:
        yield mock_psychic


@pytest.fixture
def mock_connector_id():  # type: ignore
    with patch("psychicapi.ConnectorId") as mock_connector_id:
        yield mock_connector_id


@pytest.mark.requires("psychicapi")
class TestPsychicLoader:
    """Unit tests for PsychicLoader against a mocked psychicapi client."""

    # Dummy credentials/ids used throughout.
    MOCK_API_KEY: str = "api_key"
    MOCK_CONNECTOR_ID: str = "notion"
    MOCK_ACCOUNT_ID: str = "account_id"

    def test_psychic_loader_initialization(
        self, mock_psychic: MagicMock, mock_connector_id: MagicMock
    ) -> None:
        """Constructor wires the api key and connector id into the client."""
        PsychicLoader(
            api_key=self.MOCK_API_KEY,
            connector_id=self.MOCK_CONNECTOR_ID,
            account_id=self.MOCK_ACCOUNT_ID,
        )

        mock_psychic.assert_called_once_with(secret_key=self.MOCK_API_KEY)
        mock_connector_id.assert_called_once_with(self.MOCK_CONNECTOR_ID)

    def test_psychic_loader_load_data(self, mock_psychic: MagicMock) -> None:
        """load() turns each raw document from the client into a Document."""
        mock_get_documents_response = MagicMock()
        mock_get_documents_response.documents = [
            self._get_mock_document("123"),
            self._get_mock_document("456"),
        ]
        # No pagination: a single page of results ends the fetch loop.
        mock_get_documents_response.next_page_cursor = None
        mock_psychic.get_documents.return_value = mock_get_documents_response

        psychic_loader = self._get_mock_psychic_loader(mock_psychic)

        documents = psychic_loader.load()

        assert mock_psychic.get_documents.call_count == 1
        assert len(documents) == 2
        assert all(isinstance(doc, Document) for doc in documents)
        assert documents[0].page_content == "Content 123"
        assert documents[1].page_content == "Content 456"

    def _get_mock_psychic_loader(self, mock_psychic: MagicMock) -> PsychicLoader:
        # Build a real loader, then swap in the mocked client instance.
        psychic_loader = PsychicLoader(
            api_key=self.MOCK_API_KEY,
            connector_id=self.MOCK_CONNECTOR_ID,
            account_id=self.MOCK_ACCOUNT_ID,
        )
        psychic_loader.psychic = mock_psychic
        return psychic_loader

    def _get_mock_document(self, uri: str) -> Dict:
        # Minimal raw-document shape as returned by the psychic API.
        return {"uri": f"{uri}", "title": f"Title {uri}", "content": f"Content {uri}"}
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_notebook.py
import json

from pytest_mock import MockerFixture

from langchain_community.document_loaders.notebook import NotebookLoader


def _mock_notebook_read(mocker: MockerFixture, notebook_content: dict) -> None:
    """Patch file reading so NotebookLoader sees ``notebook_content``.

    Patches both ``open`` (the read path) and ``json.load`` (the parse path)
    so no real .ipynb file is required on disk.
    """
    mocker.patch(
        "builtins.open", mocker.mock_open(read_data=json.dumps(notebook_content))
    )
    mocker.patch("json.load", return_value=notebook_content)


def test_initialization() -> None:
    """The loader stores the path it was given."""
    loader = NotebookLoader(path="./testfile.ipynb")
    assert loader.file_path == "./testfile.ipynb"


def test_load_no_outputs(mocker: MockerFixture) -> None:
    """By default, cell outputs are omitted from the page content."""
    mock_notebook_content: dict = {
        "cells": [
            {
                "cell_type": "markdown",
                "metadata": {},
                "source": ["# Test notebook\n", "This is a test notebook."],
                "outputs": [
                    {
                        "name": "stdout",
                        "output_type": "stream",
                        "text": ["Hello World!\n"],
                    }
                ],
            }
        ]
    }
    mocked_cell_type = mock_notebook_content["cells"][0]["cell_type"]
    mocked_source = mock_notebook_content["cells"][0]["source"]

    _mock_notebook_read(mocker, mock_notebook_content)

    loader = NotebookLoader(path="./testfile.ipynb")
    docs = loader.load()

    assert len(docs) == 1
    assert docs[0].page_content == f"'{mocked_cell_type}' cell: '{mocked_source}'\n\n"
    assert docs[0].metadata == {"source": "testfile.ipynb"}


def test_load_with_outputs(mocker: MockerFixture) -> None:
    """With include_outputs=True, the cell output is appended to the content."""
    mock_notebook_content: dict = {
        "cells": [
            {
                "cell_type": "code",
                "metadata": {},
                "source": ["# Test notebook\n", "This is a test notebook."],
                "outputs": [
                    {
                        "name": "stdout",
                        "output_type": "stream",
                        "text": ["Hello World!\n"],
                    }
                ],
            }
        ]
    }
    mocked_cell_type = mock_notebook_content["cells"][0]["cell_type"]
    mocked_source = mock_notebook_content["cells"][0]["source"]
    mocked_output = mock_notebook_content["cells"][0]["outputs"][0]["text"]

    _mock_notebook_read(mocker, mock_notebook_content)

    loader = NotebookLoader(path="./testfile.ipynb", include_outputs=True)
    docs = loader.load()

    assert len(docs) == 1
    expected_content = (
        f"'{mocked_cell_type}' cell: '{mocked_source}'\n"
        f" with output: '{mocked_output}'\n\n"
    )
    assert docs[0].page_content == expected_content
    assert docs[0].metadata == {"source": "testfile.ipynb"}
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_cube_semantic.py
import unittest
from unittest.mock import MagicMock, Mock, patch

from langchain_community.document_loaders import CubeSemanticLoader

# Dotted path of the class under test, used to patch its own helpers.
MODULE_PATH = "langchain_community.document_loaders.cube_semantic.CubeSemanticLoader"


class TestCubeSemanticLoader(unittest.TestCase):
    """Unit tests for CubeSemanticLoader with all HTTP calls mocked."""

    def setUp(self) -> None:
        self.loader = CubeSemanticLoader(
            cube_api_url="http://example.com", cube_api_token="test_token"
        )

    @patch("requests.request")
    def test_get_dimension_values(self, mock_request: MagicMock) -> None:
        """_get_dimension_values extracts the values for one dimension."""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {"data": [{"test_dimension": "value1"}]}
        mock_request.return_value = mock_response

        values = self.loader._get_dimension_values("test_dimension")
        self.assertEqual(values, ["value1"])

    @patch("requests.get")
    @patch(f"{MODULE_PATH}._get_dimension_values")
    def test_load(
        self, mock_get_dimension_values: MagicMock, mock_get: MagicMock
    ) -> None:
        """load() builds one document per dimension of a public view cube."""
        # Mocking the response
        mock_response = Mock()
        mock_response.raise_for_status.return_value = None
        mock_response.json.return_value = {
            "cubes": [
                {
                    "name": "test_cube",
                    "type": "view",
                    "public": True,
                    "measures": [],
                    "dimensions": [
                        {
                            "name": "test_dimension",
                            "type": "string",
                            "title": "Test Title",
                            "description": "Test Description",
                        }
                    ],
                }
            ]
        }
        mock_get.return_value = mock_response
        mock_get_dimension_values.return_value = ["value1", "value2"]

        documents = self.loader.load()

        self.assertEqual(len(documents), 1)
        # Page content is "<title>, <description>"; dimension values land
        # in metadata under "column_values".
        self.assertEqual(documents[0].page_content, "Test Title, Test Description")
        self.assertEqual(documents[0].metadata["column_values"], ["value1", "value2"])


if __name__ == "__main__":
    unittest.main()
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/telegram_channel.json
[ { "sender_id": -1111111, "text": "Hello, world!", "date": "2023-05-15T19:30:49+00:00", "message.id": 1785, "is_reply": false, "reply_to_id": null }, { "sender_id": -1111111, "text": "Telegram is the best!", "date": "2023-05-08T20:17:10+00:00", "message.id": 1784, "is_reply": true, "reply_to_id": 1783 }, { "sender_id": -1111111, "text": "Langchain is great.", "date": "2023-05-03T23:43:33+00:00", "message.id": 1783, "is_reply": true, "reply_to_id": 1782 }, { "sender_id": -1111111, "text": "LLMs are awesome!", "date": "2023-05-03T15:32:25+00:00", "message.id": 1782, "is_reply": false, "reply_to_id": null } ]
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/telegram.json
{ "name": "Grace 🧤", "type": "personal_chat", "id": 2730825451, "messages": [ { "id": 1980499, "type": "message", "date": "2020-01-01T00:00:02", "from": "Henry", "from_id": 4325636679, "text": "It's 2020..." }, { "id": 1980500, "type": "message", "date": "2020-01-01T00:00:04", "from": "Henry", "from_id": 4325636679, "text": "Fireworks!" }, { "id": 1980501, "type": "message", "date": "2020-01-01T00:00:05", "from": "Grace 🧤 🍒", "from_id": 4720225552, "text": "You're a minute late!" } ] }
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs/div_role_main/test.html
<html> <div role="main"> Hello World! </div> </html>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs/index_page/test.html
<html> <main id="main-content"> Websites: <a href="https://langchain.com">Langchain</a> <a href="https://docs.langchain.com">Langchain Docs</a> <a href="https://api.python.langchain.com/en/latest/api_reference.html" >Langchain API Reference</a > </main> </html>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs/nested_html_structure/test.html
<html> <main id="main-content"> Hello <span><em>World</em>!</span> </main> </html>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs/custom/test.html
<html> <article role="main"> Hello World! </article> </html>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/readthedocs/main_id_main_content/test.html
<html> <main id="main-content"> Hello World! </main> </html>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/csv/test_nominal.csv
column1,column2,column3 value1,value2,value3 value4,value5,value6
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/csv/test_one_col.csv
column1 value1 value2 value3
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/csv/test_one_row.csv
column1,column2,column3 value1,value2,value3
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/test_docs/csv/test_none_col.csv
column1,column2,column3 value1,value2,value3,value4,value5 value6,value7,value8,value9
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_filesystem_blob_loader.py
"""Verify that file system blob loader works as expected.""" import os import tempfile from pathlib import Path from typing import Generator import pytest from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader @pytest.fixture def toy_dir() -> Generator[Path, None, None]: """Yield a pre-populated directory to test the blob loader.""" with tempfile.TemporaryDirectory() as temp_dir: # Create test.txt with open(os.path.join(temp_dir, "test.txt"), "w") as test_txt: test_txt.write("This is a test.txt file.") # Create test.html with open(os.path.join(temp_dir, "test.html"), "w") as test_html: test_html.write( "<html><body><h1>This is a test.html file.</h1></body></html>" ) # Create .hidden_file with open(os.path.join(temp_dir, ".hidden_file"), "w") as hidden_file: hidden_file.write("This is a hidden file.") # Create some_dir/nested_file.txt some_dir = os.path.join(temp_dir, "some_dir") os.makedirs(some_dir) with open(os.path.join(some_dir, "nested_file.txt"), "w") as nested_file: nested_file.write("This is a nested_file.txt file.") # Create some_dir/other_dir/more_nested.txt other_dir = os.path.join(some_dir, "other_dir") os.makedirs(other_dir) with open(os.path.join(other_dir, "more_nested.txt"), "w") as nested_file: nested_file.write("This is a more_nested.txt file.") yield Path(temp_dir) _TEST_CASES = [ { "glob": "**/[!.]*", "suffixes": None, "exclude": (), "relative_filenames": [ "test.html", "test.txt", "some_dir/nested_file.txt", "some_dir/other_dir/more_nested.txt", ], }, { "glob": "*", "suffixes": None, "exclude": (), "relative_filenames": ["test.html", "test.txt", ".hidden_file"], }, { "glob": "**/*.html", "suffixes": None, "exclude": (), "relative_filenames": ["test.html"], }, { "glob": "*/*.txt", "suffixes": None, "exclude": (), "relative_filenames": ["some_dir/nested_file.txt"], }, { "glob": "**/*.txt", "suffixes": None, "exclude": (), "relative_filenames": [ "test.txt", "some_dir/nested_file.txt", 
"some_dir/other_dir/more_nested.txt", ], }, { "glob": "**/*", "suffixes": [".txt"], "exclude": (), "relative_filenames": [ "test.txt", "some_dir/nested_file.txt", "some_dir/other_dir/more_nested.txt", ], }, { "glob": "meeeeeeow", "suffixes": None, "exclude": (), "relative_filenames": [], }, { "glob": "*", "suffixes": [".html", ".txt"], "exclude": (), "relative_filenames": ["test.html", "test.txt"], }, # Using exclude patterns { "glob": "**/*", "suffixes": [".txt"], "exclude": ("some_dir/*",), "relative_filenames": ["test.txt", "some_dir/other_dir/more_nested.txt"], }, # Using 2 exclude patterns, one of which is recursive { "glob": "**/*", "suffixes": None, "exclude": ("**/*.txt", ".hidden*"), "relative_filenames": ["test.html"], }, ] @pytest.mark.parametrize("params", _TEST_CASES) def test_file_names_exist(toy_dir: str, params: dict) -> None: """Verify that the file names exist.""" glob_pattern = params["glob"] suffixes = params["suffixes"] exclude = params["exclude"] relative_filenames = params["relative_filenames"] loader = FileSystemBlobLoader( toy_dir, glob=glob_pattern, suffixes=suffixes, exclude=exclude ) blobs = list(loader.yield_blobs()) file_names = sorted(str(blob.path) for blob in blobs) expected_filenames = sorted( str(Path(toy_dir) / relative_filename) for relative_filename in relative_filenames ) assert file_names == expected_filenames assert loader.count_matching_files() == len(relative_filenames) @pytest.mark.requires("tqdm") def test_show_progress(toy_dir: str) -> None: """Verify that file system loader works with a progress bar.""" loader = FileSystemBlobLoader(toy_dir) blobs = list(loader.yield_blobs()) assert len(blobs) == loader.count_matching_files()
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py
from langchain_community.document_loaders.blob_loaders import __all__ def test_public_api() -> None: """Hard-code public API to help determine if we have broken it.""" assert sorted(__all__) == [ "Blob", "BlobLoader", "CloudBlobLoader", "FileSystemBlobLoader", "YoutubeAudioLoader", ]
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_cloud_blob_loader.py
"""Verify that file system blob loader works as expected.""" import os import tempfile from typing import Generator from urllib.parse import urlparse import pytest from langchain_community.document_loaders.blob_loaders import CloudBlobLoader @pytest.fixture def toy_dir() -> Generator[str, None, None]: """Yield a pre-populated directory to test the blob loader.""" with tempfile.TemporaryDirectory() as temp_dir: # Create test.txt with open(os.path.join(temp_dir, "test.txt"), "w") as test_txt: test_txt.write("This is a test.txt file.") # Create test.html with open(os.path.join(temp_dir, "test.html"), "w") as test_html: test_html.write( "<html><body><h1>This is a test.html file.</h1></body></html>" ) # Create .hidden_file with open(os.path.join(temp_dir, ".hidden_file"), "w") as hidden_file: hidden_file.write("This is a hidden file.") # Create some_dir/nested_file.txt some_dir = os.path.join(temp_dir, "some_dir") os.makedirs(some_dir) with open(os.path.join(some_dir, "nested_file.txt"), "w") as nested_file: nested_file.write("This is a nested_file.txt file.") # Create some_dir/other_dir/more_nested.txt other_dir = os.path.join(some_dir, "other_dir") os.makedirs(other_dir) with open(os.path.join(other_dir, "more_nested.txt"), "w") as nested_file: nested_file.write("This is a more_nested.txt file.") yield f"file://{temp_dir}" # @pytest.fixture # @pytest.mark.requires("boto3") # def toy_dir() -> str: # return "s3://ppr-langchain-test" _TEST_CASES = [ { "glob": "**/[!.]*", "suffixes": None, "exclude": (), "relative_filenames": [ "test.html", "test.txt", "some_dir/nested_file.txt", "some_dir/other_dir/more_nested.txt", ], }, { "glob": "*", "suffixes": None, "exclude": (), "relative_filenames": ["test.html", "test.txt", ".hidden_file"], }, { "glob": "**/*.html", "suffixes": None, "exclude": (), "relative_filenames": ["test.html"], }, { "glob": "*/*.txt", "suffixes": None, "exclude": (), "relative_filenames": ["some_dir/nested_file.txt"], }, { "glob": "**/*.txt", "suffixes": 
None, "exclude": (), "relative_filenames": [ "test.txt", "some_dir/nested_file.txt", "some_dir/other_dir/more_nested.txt", ], }, { "glob": "**/*", "suffixes": [".txt"], "exclude": (), "relative_filenames": [ "test.txt", "some_dir/nested_file.txt", "some_dir/other_dir/more_nested.txt", ], }, { "glob": "meeeeeeow", "suffixes": None, "exclude": (), "relative_filenames": [], }, { "glob": "*", "suffixes": [".html", ".txt"], "exclude": (), "relative_filenames": ["test.html", "test.txt"], }, # Using exclude patterns { "glob": "**/*", "suffixes": [".txt"], "exclude": ("some_dir/*",), "relative_filenames": ["test.txt", "some_dir/other_dir/more_nested.txt"], }, # Using 2 exclude patterns, one of which is recursive { "glob": "**/*", "suffixes": None, "exclude": ("**/*.txt", ".hidden*"), "relative_filenames": ["test.html"], }, ] @pytest.mark.requires("cloudpathlib") @pytest.mark.parametrize("params", _TEST_CASES) def test_file_names_exist(toy_dir: str, params: dict) -> None: """Verify that the file names exist.""" glob_pattern = params["glob"] suffixes = params["suffixes"] exclude = params["exclude"] relative_filenames = params["relative_filenames"] loader = CloudBlobLoader( toy_dir, glob=glob_pattern, suffixes=suffixes, exclude=exclude ) blobs = list(loader.yield_blobs()) url_parsed = urlparse(toy_dir) scheme = "" if url_parsed.scheme == "file": scheme = "file://" file_names = sorted(f"{scheme}{blob.path}" for blob in blobs) expected_filenames = sorted( str(toy_dir + "/" + relative_filename) for relative_filename in relative_filenames ) assert file_names == expected_filenames assert loader.count_matching_files() == len(relative_filenames) @pytest.mark.requires("cloudpathlib") def test_show_progress(toy_dir: str) -> None: """Verify that file system loader works with a progress bar.""" loader = CloudBlobLoader(toy_dir) blobs = list(loader.yield_blobs()) assert len(blobs) == loader.count_matching_files()
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/sample_notebook_2.enex
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export4.dtd"> <en-export export-date="20230611T011239Z" application="Evernote" version="10.56.9"> <note> <title>Summer Training Program</title> <created>20221227T015948Z</created> <source>mobile.iphone</source> <note-attributes> <author>Mike McGarry</author> </note-attributes> <content> <![CDATA[<?xml version="1.0" encoding="UTF-8" standalone="no"?> <!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd"><en-note><div><b>Jan - March 2022</b></div></en-note> ]]> </content> </note> </en-export>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/sample_notebook_missingcontenttag.enex
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export4.dtd"> <en-export export-date="20230611T011239Z" application="Evernote" version="10.56.9"> <note> <title>Summer Training Program</title> <created>20221227T015948Z</created> <source>mobile.iphone</source> <note-attributes> <author>Mike McGarry</author> </note-attributes> </note> </en-export>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/sample_notebook_emptynote.enex
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export4.dtd"> <en-export export-date="20230611T011239Z" application="Evernote" version="10.56.9"> <note> <title>Summer Training Program</title> <created>20221227T015948Z</created> <source>mobile.iphone</source> <note-attributes> <author>Mike McGarry</author> </note-attributes> <content> </content> </note> </en-export>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/bibtex.bib
@inproceedings{shen2021layoutparser, title = {LayoutParser: A unified toolkit for deep learning based document image analysis}, author = {Shen, Zejiang and Zhang, Ruochen and Dell, Melissa and Lee, Benjamin Charles Germain and Carlson, Jacob and Li, Weining}, booktitle = {Document Analysis and Recognition--ICDAR 2021: 16th International Conference, Lausanne, Switzerland, September 5--10, 2021, Proceedings, Part I 16}, pages = {131--146}, year = {2021}, organization = {Springer}, editor = {Llad{\'o}s, Josep and Lopresti, Daniel and Uchida, Seiichi}, file = {layout-parser-paper.pdf}, abstract = {{Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. 
We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.", isbn="978-3-030-86549-8}}, }
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/sample_notebook_with_media.enex
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export4.dtd"> <en-export export-date="20230619T022508Z" application="Evernote" version="10.56.9"> <note> <title>Tea Mug Design</title> <created>20180719T085818Z</created> <updated>20230513T110142Z</updated> <note-attributes> <author>Michael McGarry</author> <latitude>43.777825</latitude> <longitude>11.249122222222221</longitude> <source>mobile.iphone</source> </note-attributes> <content> <![CDATA[<?xml version="1.0" encoding="UTF-8" standalone="no"?> <!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd"><en-note><div><br/></div><en-media hash="8ab763800efcb0865f5d55e8a0e43eb2" type="image/jpeg" style="--en-naturalWidth:200; --en-naturalHeight:135;" /><div><br/></div><div>When you pick this mug up with your thumb on top and middle finger through the loop, your ring finger slides into the mug under the loop where it is too hot to touch and burns you.</div><div><br/></div><div>If you try and pick it up with your thumb and index finger you can’t hold the mug. 
</div><div><br/></div></en-note> ]]> </content> <resource> <data encoding="base64"> /9j/4AAQSkZJRgABAQEASABIAAD/4SGyRXhpZgAASUkqAAgAAAALAA8BAgAGAAAAkgAAABABAgAKAAAAmAAAABIBAwABAAAAAQAAABoBBQABAAAAogAAABsBBQABAAAAqgAAACgBAwABAAAAAgAAADEBAgANAAAAsgAAADIBAgAUAAAAwAAAABMCAwABAAAAAQAAAGmHBAABAAAA1AAAACWIBAABAAAAdgYAAKwHAABBcHBsZQBpUGhvbmUgNnMA SAAAAAEAAABIAAAAAQAAAEdJTVAgMi4xMC4yMgAAMjAyMzowNToxOSAxMjoyMzo1NAAgAJqCBQABAAAAWgIAAJ2CBQABAAAAYgIAACKIAwABAAAAAgAAACeIAwABAAAAMgAAAACQBwAEAAAAMDIyMQOQAgAUAAAAagIAAASQAgAUAAAAfgIAAAGRBwAEAAAAAQIDAAGSCgABAAAAkgIAAAKSBQABAAAAmgIAAAOSCgABAAAAogIAAASSCgABAAAA qgIAAAeSAwABAAAAAwAAAAmSAwABAAAAEAAAAAqSBQABAAAAsgIAABSSAwAEAAAAugIAAHySBwBqAwAAwgIAAJGSAgAEAAAANjU1AJKSAgAEAAAANjU1AACgBwAEAAAAMDEwMAGgAwABAAAAAQAAAAKgBAABAAAAwA8AAAOgBAABAAAA0AsAABeiAwABAAAAAgAAAAGjBwABAAAAAQAAAAKkAwABAAAAAAAAAAOkAwABAAAAAAAAAAWkAwABAAAA HQAAAAakAwABAAAAAAAAADKkBQAEAAAALAYAADOkAgAGAAAATAYAADSkAgAjAAAAUgYAAAAAAAABAAAAIQAAAAsAAAAFAAAAMjAxODowNzoxOSAxMDo1NzoyOQAyMDE4OjA3OjE5IDEwOjU3OjI5APkIAADGAQAALx8AALUNAAB/BwAACQIAAAAAAAABAAAAUwAAABQAAAB2DAEH8QL0AkFwcGxlIGlPUwAAAU1NAA4AAQAJAAAAAQAAAAkAAgAH AAACLgAAALwAAwAHAAAAaAAAAuoABAAJAAAAAQAAAAEABQAJAAAAAQAAAKcABgAJAAAAAQAAAKoABwAJAAAAAQAAAAEACAAKAAAAAwAAA1IACQAJAAAAAQAAERMADgAJAAAAAQAAAAAAFAAJAAAAAQAAAAQAFwAJAAAAAQAAAAAAGQAJAAAAAQAAAAAAHwAJAAAAAQAAAAAAAAAAYnBsaXN0MDBPEQIAcQFiAcIARAGZAL0A3ADnANEAygAdAaEB 6AGMAf4AxgGJAVgBHQEYAbYAhAFHAckArQC8AP0AvwBgAT8CWwFlAYABVgEvAd8BrQEMAeMA5gDyAPoA+ACeACoBCAFhAfsAbQEzARIBRAJxAbAAhwCKAJcAuQAiAZABrgCkAJsAwQBgAR4BkgCyANUAxQDLALsAvgC9AMcAGAETAfIAZABHAFABNAGUAJUAhACCAG0AaQBmAHEAsADaAHUArABwAFcANQEhAZkAjwCEAH0AbQBqAGcAcgC0AO8A hQDMAGwAXgCZAJUAmgCLAIIAeABsAGkAZwBzALYA5QDcAJ4AggBnAIwAlQCaAIsAggB1AGwAaQBlAHUAugDHAKkAqgCsAKoAigCPAI4AhQCBAHYAbgBoAGYAdQC4AKEApgDDAMQAwgB9AH8AfABsAHAAcgBuAGYAZABsAIEAhwCXAMAA3ADYAHMAdABtAGIAPQBKAFQAUgBPAFUAXAB9AI4AsQDaAO0AcABwAGEAXwA/ACYAJgApAC8ATQBfAHYA 
iwCmANUA7wBuAGkAXABTAF8AOAAzADoAUQBiAGkAdACGAKcAzQDhAG0AZABdAFUAbABNAFEAWgBiAG4AbwB0AIMAmgC1AMgAVwBZAFwAXgBtAHkAawBwAHUAgAB0AGIAUQBJAEsASwAACAAAAAAAAAIBAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAIMYnBsaXN0MDDUAQIDBAUGBwhVZmxhZ3NVdmFsdWVZdGltZXNjYWxlVWVwb2NoEAETAAEiu4VX CRsSO5rKABAACBEXHSctLzg9AAAAAAAAAQEAAAAAAAAACQAAAAAAAAAAAAAAAAAAAD////RTAAAM5P///mMAApoe///z2QAAHNtTAAAAFAAAAFMAAAAUAAAACwAAAAUAAAALAAAABQAAAEFwcGxlAGlQaG9uZSA2cyBiYWNrIGNhbWVyYSA0LjE1bW0gZi8yLjIAAA8AAQACAAIAAABOAAAAAgAFAAMAAAAwBwAAAwACAAIAAABFAAAABAAFAAMA AABIBwAABQABAAEAAAAAAAAABgAFAAEAAABgBwAABwAFAAMAAABoBwAADAACAAIAAABLAAAADQAFAAEAAACABwAAEAACAAIAAABUAAAAEQAFAAEAAACIBwAAFwACAAIAAABUAAAAGAAFAAEAAACQBwAAHQACAAsAAACYBwAAHwACAAcAAACkBwAAAAAAACsAAAABAAAALgAAAAEAAACxDwAAZAAAAAsAAAABAAAADgAAAAEAAAA0FgAAZAAAAES1 AAB5AwAACAAAAAEAAAA5AAAAAQAAAIkJAABkAAAAAAAAAAEAAADWYAAAVwAAANZgAABXAAAAMjAxODowNzoxOQAAMjAwMC8xAAAIAAABBAABAAAAAAEAAAEBBAABAAAArAAAAAIBAwADAAAAEggAAAMBAwABAAAABgAAAAYBAwABAAAABgAAABUBAwABAAAAAwAAAAECBAABAAAAGAgAAAICBAABAAAAkhkAAAAAAAAIAAgACAD/2P/gABBKRklG AAEBAAABAAEAAP/bAEMACAYGBwYFCAcHBwkJCAoMFA0MCwsMGRITDxQdGh8eHRocHCAkLicgIiwjHBwoNyksMDE0NDQfJzk9ODI8LjM0Mv/bAEMBCQkJDAsMGA0NGDIhHCEyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMv/AABEIAKwBAAMBIgACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQID BAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/ xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV 
1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/AN+JOa0bdelVY1FXYeormps7po0rcdKuuCYmwcHFUoD0rQX7h+ld0NUcc1qee3l1NqgVbuKOYIflDrnFLFEQoRbWMKOgVKsm5ZJWURxDBxwgqzFdS5GGx9OK41Rct2VZorfYJXTK2O76ACo59DuFtnuZJDbInWJGyTXQQSSSx/PIzfU1JeKr2MiNjBHeuiOFjFORm6kr2OF1WK1u LOKztt0QBDSH+99apS6RDdQW8EspVIz1A7Zro4dNeaIeXAz+hVKX/hHdSbn7OET+8zL/AI15VWVSTvHqdsYU0rSZa0VPCWiIr/YjLMv/AC0kANdDH8R9FjG1VZQOwxXIyeGJ3/117Ci+gBqnL4T0/due/mB77AP6irhKut9CpUsKl8V2d6/xI0V42X5uRjtXGa1q0Goap9psWZU8sLn3yf8AGqf/AAj+jR/fubp/Y4FSwWGjwAhfPPP94/41TqT+ 0zJxorWIz+07sLjzmP1NQvqNwQSSp+orREWl54RyPdjQbXS2H3SM/wC01Q6iHFw6oyBqdxzyv5UxtZvTwJcD2rZGm6WxxkjPfJpr6LpLj5ZbhT7Y/rS9oi7w7FGDULlmGZ3P410Onzu20M5P1NZi6FbA/ur2Rf8AeUf0FXINJZImxfB2wdoBK811Ua8E9Wc9WClsV9c8RLaagtordUBz+NVV8QIowIF+ua5q+8N6/FdSXEto8ybiwZJFckfQEmqN xdtBKFlieJsfdZSDUTanJtHbSpQUUtztR4hd3CrGuScDmuzjUvYxFhltvNeQWV/G88WCM7h/OvWEvo47OMlgBtFT7O+5nXSjblLGn3g069Rs8OcEV1guYpYzIjAoeq5ry+9vjNPuU9PugVpWF3N9mKyvjj5W34rajLkTizmq0ua0i34i0PTyz3sUfzg7gE4DH3qvo1zDDbowi2Fsk4HOelX7ZFeDy3l8wHnGaZcRxwRlVjCjGCazVeHM2inB8qUm LLcGWF4xuwwrOtdZuLGOS1Uh4G/gbt9KfY38EcnkXJAY/cfPBrMuk230qg8A8fSrlVUrNFU6drxlsZ8ttqMeqvqFjdbi5G63lJ2kegNdTpOtzKGCFoZR9+Jux/rWSmAvSl2YYPjkDAOcEU1U01FOmug9KtQnkVAKmjqKe50SNOA9K0Y2+Wsu3PStGLpXoUzjqI88vb2KHULhC3KyMMD606HU1JAjidj9KrarGqa3eA/89m/nU1qyKR0rKN7nS4x5 U7HS6PFeXpzsWKPuTya6i30y2XIly+ByWNcvpmtpbyhWxn0Peuq/tqC4i2phcjuelTCvGTcZM4a8ZJ3SLmLWOaJEjCxkZOB1rF166Bn2xnCBemMUXEx2nByKw72ZirNgkDqa35VbQ5XNvcpXNweeayLi6PPWoL/V4UJG7NYNxrC87c/nXLUaN4JmrJcmoxO2awf7WBfDMQPXGalTU4y+3DEE8N0rllY3SZurK1SCRzXNPqro5UdKUazKO1TZDszq BI4qRZ2HeuYi1mRmxsz+NWV1gAgMpz3A7UrINTpo5iR1HA9asR3PQZrBt7lpziNGY+i81q22nX8+Ctu+D3PFLkTHexqR3Lrgh+D6GnS29pqSeVd2scqnocYP5iprLw3eOA0sqRr+db1vb6VpWGlmE8vZVqoYebd1oL2qWx5ZrvhyPw5qlvKkUssDHeo25OB2OK6C08Q6LdKv2iOdSOPLdWAFaPiy5a6vY1eMIij5QORiufbyl6dfpTqTdObijvSn 
XpxlJFvXL6x2Qvp9oWY8NgnAFR22rwRICbCdm79aosSx4Bx9KR5fLX72D6Gs3VkzSNGyty3NlfEvlcpYT4/3TVu38UQzny7m2kRDxkqa5RtVEQIY1Tl1iM5G6iNPm3FONvsnaXul294nm2c+D1AzkVz7ahfaVdeXqUDtAeBMFJx9TWPFrb2774pip+tdDpnjG2uAIb+NVzxvByD/AIVfLKGu5i4to0oLyGdA0UisCM9asB8jrTZdB0/VIRLaSeU5 5SSPsaamlalZxBZwJscCRO/4dqqM4vYwkmizUked1MFSIea0i9TpaL9uelacJ4rKgPStODpXdSZyVEeU+Lbs2viW6QZ5bd+ZrKTU5T0YAepNbvjrTUk8TvJhvmjU9awo9PRe1clSoozaPZw+ElUpxl5Gqtw0tnDKGyRlSR/n3qWLVriE8OfzqGKMJp+0dnJ/QVWauSbvK5x16XJNxNyLxRcRAZYn8asr4sjkx9ptYph0xIgYfrXKP0qAmkl2OSSR 1j6loE7EyaXEuf7qComj8LzHBscfTj+VcqSfWp7Nd025j8qjcaLPuTZHSSad4XtDHJ9k+c8hXYkfiDVuW48NT2xhlsoBkY3RoFI/EVw93cyT3DOzHrx7CoDIx71SUu4mkdetj4PU/NbyEf75P9aeP+EPh+7p7P8A7zVxZdvU0wu3cmjlfcDuP7S8MRf6vSYD/vRhv50n/CSaRD/qNMtk/wB2FRXDknHWjJqlELI7k+NWUYhjCD2GKZ/wlV5OeHYf jXHxjNaFsvStYolpHSLqtzMBukY/U1ftXeR1LHJrFtl6Vv2S4IrrgZSRozeI7S12w3EYdlA4IBpP+Et0wJxajPsgrjdWmH9pS8A44zWe8pPR/wAq46tS8metRy9uCbe53zeNbdP9Xbt+VQNr2la2r2t5bBWZSEl2jKmuD8yQNnefzrVeIeVFcoMZwainqxVsN7G0kzNuYMO8UhG5SQee4rLnhaMkHBHrWtqcR+2u4P8ArPn/AD5rNkjk28AlayXu ux691UgpWM2ZXHK9PSqZd1bcGINXJo3UnBOPSqzjPUHNdMZHHUpo0tK8R3mmSho5WTnnB+U/UV6JovxEt5I9t2GifH315U15A2VPIyKElZPuORTlSjPU5ZRPdM05W5quXNIH5rNMTRqwNWtbHisG2k5FaaX0FrHvmkVF9zXdSa6nNUg3ojlvHMWNUgcD76Y/LFc2kdbviPU4tWvEeJSEiBCk9Tn/APVWKA3QV52IknUbifT4KEoUIxktSXbi0mA9 jVBqvEskL5OcqRVBulZXuefmFP8AeX7kb9KrtU7GoG61SPJkhhqTJjtWI6ucfhUZpbhsKqegqjMqHqabTqaaoQmeaQil+lIaYhDSUppAaYE8RrTthwKzIRkitW3HStIhY17MZIrftxtUn0WsOzHzCugtlL/IvVhiuqGxEkcTezsb24OAQXP86qHOORW5q3h68tJnkRfPgY5WWMZBzWX9iumYAQSZP+ya82fNzan0tKcHBOLKqnnmumliMWkxKw52 jP5U3SvC88kgnvV8uFTnaerVa16VAAinvV09LtnBjK0ZtQjqYV1teRTwT5a/yquzKgwSBUN1KWmJ7AAA/SqzMGGD3rCcbybPWovkppeRNIIW64qpLBA46flTWJHAPHaomY00mhuSe6KlxbqucciqDw8/LWux3KQeRVOWEjkcit4TaOSrST1SPQV8QTEc2wH/AAKo5vEU0fS3H51Txn6+lHlg9Rkelc/tGdzwVLsSjxDfTcIxRf8AZGDTkmmmbdMz 
MfVjk0xVA6AY9hUo7fzFDqNmlOhGGyHY4pQMUtLj/wDVUNmyQEZUj1rOk4zWlVC4XaxFJM4Mwh7qZVaomqVqiatUzwZoRFBfnoOTUEzF2LHqTU7fLH7tVZ6pGLREaTvTjTT1qyBpNJSmkNMQU3vS0dTTGixAK1bYdKzYeMCtS2HSrgOxs2Y5Faskz29u0sZIZBnI61mWY5Fac3y2Uh9q6b2i2VTp+0mo9zI0vxXe2NuYFdvKP8JFaK+LzgZPNZgi Rv4R+VNMKg/cX8q891JWPYeUwvoy/c+I2mH7pZGYjn0rFuFuLht78n0z0q6qAcAY9gKNtRzM6aOX06Tu9WZItefnU4+lVprbDEoMj0roNvHtTGgQ9VpqWljpdM5d4sdRioGTBrqmso3JyoNVzo6Ena4AHWn6EezZzYQseBThAT2roV0dQN27injTYkGSpOfejUagAXpn8DS4OcHr2NLkD6U3dzjt2rmudxIB+dPUc8Dmog2OCfxp+/tRcCUDj2o/ lUfmfnRvouMm+pqldj5qnD1Dc8qDST1ObFw56TsUmpm3J9u9OJ5pG+Vcdz1rVM+bmiKT5iTVZ+tWSahfmriznkiDvSHrT8U0itDMjNNNSHFNNMQ2nKKQCnCncpIswitW1HSsqHqK1bY4xWkCrG3Z9quX8gSyC92NU7Vgq5JqC9u/PcBT8i8CtKslGFjswNJyqqXRDVbBqdcNxVDPSpUYjGfyrjie+2W9n/6hQY8H69qdEwIwT1/hFThR0JwDzgd6 rlQXZX2c9if5Umzr39ateWAPmyF9B1NBTjLZAA4A6mjlHzFTZ6U4Lgg9hU+zALMCPQUu3AJYHPYUcorkWP4z+ApdoT5j8zHt6VJt24OPnPSlx5Rzglz19qqwjnC/PtRuI+lMyBxSE4ODXn3OolB7HpTgeeag3Y4pd2KLgWNwAx3pN9QhjSg1LYEpkwOtVZrj3ps0npVQnzHVfU4ppGc56E4lCLvfv0ppkD8hs1Vvn2zvGOiHbj6VmNeeS4y2Ofyr eKuePWoKS50bTGojVJb5iOcEU9btc81okzz502WT0qNsYpn2lD/EKRpVPQiqRi4MQ0lIXX1ppdQfvVRKg+xJQDUXmr60CQdqDaNJl2E4NacEgC5HaseEjOWqaS8LrsjxsHBAPNax7m8KF3qa/wBtZhsBwPSpN2R7+9Y0MhDgHcvs1aUb4Qfwj8wazqJ31PXoKMVaJbU5OM/j1FPVucA8etQehPHpinDLAHP0A61CRuXYZSMhT171eibcSEOSf4iO lZKtk7iQB0x3q9bsXXBYKo9eCTWiJZeTDEhAHbP3jwBT9o3cDfJn14FMUs4wgCqvU9DUi5PyQrk/xMKuxFxjJtPOWk9j0pCpQ5O5pT29Km4UhY1LOep680hURBduWlPqc4oaDmGBSh6M0p/T+tKR5GCQzOevfFSBVhUPkmU9j0H404BI1DyFi5NNILnEbgRSZNFB9q8m52gKdTR0pw5oAdTJZMDApWcAVVdsmhK4m7CSNmoQ4WRW9Dmnk5HvUDc8 dxVoxkrk+tRASJcpylwN49ieorm7hdxJrrIYzf6PNbEZmgPmRj27j9Sa5uePnGMVtB2OTlunHsZYuJbc8cr6Gp49SifhvkPoaSWLPUVSlts9q6FZ7nDUpyWxrrOjchhTt49a5028iHKMR9DThLcp/Ex/Gq5F0Zg21ujod49aN49a577Tcf7X50okuXPVqfILn8jfMyL1YU0XiE4XJNZUNpNJgnJBrVtLBgDtGQO1UopG0IyfQuozbPmODngipo2d 
JFlAVgOfzpY4GjB67j1B6Gp44kGMZV/XtQ5JHZGk2rMdGu47jgk87cdKvxHaOSQT2NVkQ4wyBueWFWk3bSFO761jKVzrhHlViwpxyw5H92pc8biA3pVdTg45Vj27VMOD8xyfUVKZpYnXHVgD/smrMB2su/he6mqqKNoLHdnsetToN43s3Ho3arRLNFWaXAKhU9D6VMGzmONCB71Uh3StsVtqr2PQ1bWQ7fKjbBPX39ga1RmyYYiby4ixfH3jxTgB bgYYtIR09KYp8lCvBkPA3c4qaMeSPMnGWI4V+eaozbBFEAE7vudug705Iw4M00gGTjmliRpW82YfIP4T0/Chg1xIQEwg6DkinYVzgKO9KelJmvFPSAHnpTugpO1MZsUC2GyNUDHuKeTUbHBqkQxuc/WmNhvapDxyBUTcjI61SJLOmXQs9Rhlb7udr57qeD/Oo9dsPsepSIq/upPnjPbaef8A61Vzhj71uEf2v4dO4/6TZZ+rR9f8atMwqLlkpdNm cjJDzg81XeD2rUMXam+Rz0q1IJUrmQ1sTyBTfspJ6Vsi2yfSpFtvbirVQz+rpmKll6rmrUVhjDY/CtdbZVxtGamWBRyKpVClh0ijFaDg4x6ir0cO45A2MO/rUoj55+92qRQx4YfL6inzGyppDJQJCuVwPUdqUIw4wGHcip/JcAfKdh5z60eXgYjPU9/5UmylFESrjPltz1walXC/e+Vzxmm4UHkbZKeMxoQx3jrUNlJEijaMff8A51IhUc5+Y1Eg UfNkhj096lBxkyLSuFizHjBMi4I9KswgSkKQGHWqsRJIYHI9PSp0f58J8hA4PrWkWJotqRkqgyvoetW0aOJVjTl8Zwe1VBJtUblG7ufSp0+UFn+cAcYrZMyki0sUcKtLI+OeFbuaZuM8u5m2D0bpimxyfaD83KjqvpTJJAshijHHv3qmyFHUuLIZmSJGIQd+xq40ggTyo5Mc5LJyBVFZUgiXauGP5CrdpGrZeUsM/wB2qiyJJbvY8yGqWJHFymKV dRsnbC3CGs06An9ym/8ACPp/cFeTel3J+u1f5UaxvLftMv50NLGf41/Osj/hHlJ+5R/wjgP8P60Xp9w+vT6xNXehH31/OkyCfvD86zl8NSY+UuPo1PHhq4GCryj6OaOan/MH11/y/iXDgHqMfWmsh6joarf8I1dEcyzf9/DTh4evRx9pnA/66Gjnp/zD+uf3SXyyw4GDWhpFybC/jkYZjJ2yD1U9f0rNGgXiji7l/FzSjR7xTn7XJ/31T54dxPFQ krOLNXWtK+xXx2cwyDfGw6EVn+TgcVv6fDJqGlnS7y5xKpzbzHsfQ+3SsqXSdQtpGjluNrjqCg/wpucbXuFLGRS5ZJ3RWWLHB60/YR1HFSLZXOcG4B99oqUWNwes6kfQUe1h3Nli6XmQCM/w04ISOOtTixnHAmGPpSNZXKn5ZQT9BTVWHcPrdIZ5RJAbOR0xUoiIXkZU0z7NeDrKD+Aqykc+AHII9MVftodxfW6TIwrBQEOVHH4U0gYJXhu9WBau B8rAH1qN7Kf7yyDP0pe2gP61SRCflwJACR0IpVXZ827r61Wm0+/cn/ScZ/2R/hVOXRL6XrdzH2DkUvaQ6szeNj0TNTzoCSZGRcep6VG2sWEJG66TArFfw1KxzJvY+pbNMPh7b/yzqlOn3MnjZ9Imw3ijTY+Fd290H/16aPGliqkCKdjnj5R/jWT/AGEAP9X+lJ/YuP8Aln+lWqtMzeKrPsbUfjizR8+ROQeSMD/Gnnx3aHP+jTDnjgf41iDR+Puf 
pR/ZPH3P0qvbQJ+sVu50I8e2GFxbT5/iOB/jQ/jqwI4gnOOmQP8AGue/sr/Y/Sl/sof3P0p+3gL6xVNxPHsKtu+zSt7ZFWj8RoGbH2GRUx/Cwrmxpf8A0zpf7KJ/gpqvFA61V9TqvKiPSn+Sg9KkCAdqcFBFeJcY2OCI9QBUjQRY5p8cSkc5p20YqbiIkhXscVKtuD/FzSqoz1NPxjkUMQG2bHUfhTPszHqalErqODUDzOR1xSVwEa3I6jNRGDj7 tKZGJzuNJ5zkctVq4hEhAPpV2a9spYki1KRUYcJMTg/j61QaRlUkHmuV1iV5b0lj2row8HKVr6HLi63sqfMlqb+qu2nDfHAZoT92ZeUP41hf2rdzyhI9qbjgYH+NM03V7yxkCxSZjJ5RuQa7K70axuNOh1LyRHOSCRHwv5V3KjCPQ8l4mrVd1KyOcFjfuR5l2QT2FZ18bm1nMf2hzgdc1r5Z1adnbcj4UZ4FZ+vDF4P92op/FZl4mPLT5k3f1ZRT UbtD8s7fjzWnY61cNcRxylSpOCcVh1JGxVgR1BraVOD3RxwxNWL0kz0BBlQcDmpRBu6CorYboVzVtYlHrXlNWdj6VO6uRfY89f50Gxbt/OrAjGep/OpNg2jk0rsZS+xsO2aje0H93mr8iADIJFQEnOM0DuUjaL3U/lULWhzwp/KtIqKXYu3pVILmYLPjlTSiyGeVNX9opGGKtCuUfsKHsad/Z6YrRiiV+ualNug9aYrmUNPU04aaPWtNYUz3p6wo PWtFEXMf/9n/4Q9maHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/PiA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA0LjQuMC1FeGl2MiI+IDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3 dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+IDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiIHhtbG5zOmlwdGNFeHQ9Imh0dHA6Ly9pcHRjLm9yZy9zdGQvSXB0YzR4bXBFeHQvMjAwOC0wMi0yOS8iIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIiB4bWxuczpzdEV2dD0iaHR0cDov L25zLmFkb2JlLmNvbS94YXAvMS4wL3NUeXBlL1Jlc291cmNlRXZlbnQjIiB4bWxuczpwbHVzPSJodHRwOi8vbnMudXNlcGx1cy5vcmcvbGRmL3htcC8xLjAvIiB4bWxuczpHSU1QPSJodHRwOi8vd3d3LmdpbXAub3JnL3htcC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6eG1wPSJodHRwOi8v bnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bXBNTTpEb2N1bWVudElEPSJnaW1wOmRvY2lkOmdpbXA6NmU2ZjA1YWYtMmIxNi00MzVhLWIyOTUtOWEyYWMzZDE1MzFmIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOjgwNmY5ODJhLTk4M2YtNGVmYi04OWI0LTk1OTQ2ZGFjMDUzMiIgeG1wTU06T3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlk 
OjU5ZDY1NTllLTY3MjUtNDQwZC1iNGUyLWJlMjAzMzk3ZDdkYiIgR0lNUDpBUEk9IjIuMCIgR0lNUDpQbGF0Zm9ybT0iTWFjIE9TIiBHSU1QOlRpbWVTdGFtcD0iMTY4NDQ2MzA0MTM1OTkxNCIgR0lNUDpWZXJzaW9uPSIyLjEwLjIyIiBkYzpGb3JtYXQ9ImltYWdlL2pwZWciIHhtcDpDcmVhdG9yVG9vbD0iR0lNUCAyLjEwIj4gPGlwdGNF eHQ6TG9jYXRpb25DcmVhdGVkPiA8cmRmOkJhZy8+IDwvaXB0Y0V4dDpMb2NhdGlvbkNyZWF0ZWQ+IDxpcHRjRXh0OkxvY2F0aW9uU2hvd24+IDxyZGY6QmFnLz4gPC9pcHRjRXh0OkxvY2F0aW9uU2hvd24+IDxpcHRjRXh0OkFydHdvcmtPck9iamVjdD4gPHJkZjpCYWcvPiA8L2lwdGNFeHQ6QXJ0d29ya09yT2JqZWN0PiA8aXB0Y0V4dDpS ZWdpc3RyeUlkPiA8cmRmOkJhZy8+IDwvaXB0Y0V4dDpSZWdpc3RyeUlkPiA8eG1wTU06SGlzdG9yeT4gPHJkZjpTZXE+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6Y2hhbmdlZD0iLyIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDplODdhNDhhZi1mZmYxLTRlOGUtYTllYi1kOTFjM2IwNmFkNGQiIHN0RXZ0OnNvZnR3 YXJlQWdlbnQ9IkdpbXAgMi4xMCAoTWFjIE9TKSIgc3RFdnQ6d2hlbj0iMjAyMy0wNS0xM1QyMTowMDozNysxMDowMCIvPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0ic2F2ZWQiIHN0RXZ0OmNoYW5nZWQ9Ii8iIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6Nzc1NmU4NTktY2M3ZS00OGE3LTlkZWUtOGU5NjcyNTkyM2YzIiBzdEV2dDpzb2Z0 d2FyZUFnZW50PSJHaW1wIDIuMTAgKE1hYyBPUykiIHN0RXZ0OndoZW49IjIwMjMtMDUtMTlUMTI6MjQ6MDErMTA6MDAiLz4gPC9yZGY6U2VxPiA8L3htcE1NOkhpc3Rvcnk+IDxwbHVzOkltYWdlU3VwcGxpZXI+IDxyZGY6U2VxLz4gPC9wbHVzOkltYWdlU3VwcGxpZXI+IDxwbHVzOkltYWdlQ3JlYXRvcj4gPHJkZjpTZXEvPiA8L3BsdXM6 SW1hZ2VDcmVhdG9yPiA8cGx1czpDb3B5cmlnaHRPd25lcj4gPHJkZjpTZXEvPiA8L3BsdXM6Q29weXJpZ2h0T3duZXI+IDxwbHVzOkxpY2Vuc29yPiA8cmRmOlNlcS8+IDwvcGx1czpMaWNlbnNvcj4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg 
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg 
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgPD94cGFja2V0IGVuZD0idyI/Pv/iArBJQ0NfUFJPRklMRQABAQAAAqBsY21zBDAAAG1udHJSR0IgWFlaIAfnAAUAEwACABcAImFjc3BBUFBMAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAD21gABAAAAANMtbGNtcwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADWRlc2MAAAEgAAAAQGNwcnQAAAFgAAAANnd0cHQAAAGYAAAAFGNoYWQAAAGsAAAALHJYWVoAAAHYAAAAFGJYWVoAAAHsAAAAFGdYWVoAAAIAAAAAFHJUUkMAAAIUAAAAIGdUUkMAAAIUAAAAIGJUUkMAAAIUAAAAIGNo cm0AAAI0AAAAJGRtbmQAAAJYAAAAJGRtZGQAAAJ8AAAAJG1sdWMAAAAAAAAAAQAAAAxlblVTAAAAJAAAABwARwBJAE0AUAAgAGIAdQBpAGwAdAAtAGkAbgAgAHMAUgBHAEJtbHVjAAAAAAAAAAEAAAAMZW5VUwAAABoAAAAcAFAAdQBiAGwAaQBjACAARABvAG0AYQBpAG4AAFhZWiAAAAAAAAD21gABAAAAANMtc2YzMgAAAAAAAQxCAAAF3v// 8yUAAAeTAAD9kP//+6H///2iAAAD3AAAwG5YWVogAAAAAAAAb6AAADj1AAADkFhZWiAAAAAAAAAknwAAD4QAALbEWFlaIAAAAAAAAGKXAAC3hwAAGNlwYXJhAAAAAAADAAAAAmZmAADypwAADVkAABPQAAAKW2Nocm0AAAAAAAMAAAAAo9cAAFR8AABMzQAAmZoAACZnAAAPXG1sdWMAAAAAAAAAAQAAAAxlblVTAAAACAAAABwARwBJAE0AUG1s dWMAAAAAAAAAAQAAAAxlblVTAAAACAAAABwAcwBSAEcAQv/bAEMAEAsMDgwKEA4NDhIREBMYKBoYFhYYMSMlHSg6Mz08OTM4N0BIXE5ARFdFNzhQbVFXX2JnaGc+TXF5cGR4XGVnY//bAEMBERISGBUYLxoaL2NCOEJjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY//CABEIAIcAyAMBEQACEQEDEQH/ 
xAAZAAADAQEBAAAAAAAAAAAAAAABAgADBAX/xAAYAQEBAQEBAAAAAAAAAAAAAAAAAQIDBP/aAAwDAQACEAMQAAAB6MdNtZ23ni5JW1MM6z5669Hs5xRFENqdPTny8uzL3zm+41nNK1Y50dZON6bztrPHNVnRc3PSRkucggykJBrBvnuvUvLFrozGy59Ba1hmn3nbWeDO5PRnNumOaXjzrDNGbBGHNDoBrXLjQaANZ0kcNzo2+ptvHm53jNa88uy1 mUoGsRSSGn1H1NJpcbEc7tyrnrFc+hmdQdTbeeHHfmx2eeXNlbAESgRBrazo1Bntz47MwbnLPbK5z1hbn3cjZtvHNnvy8u9fPm5rZIoKBEGt7OnTHn3xz02vNbM8d1sSs7z9OFDdJnpShyzvFWakQVEAc206dQY1Z1HM7oIAB056QwVIGMr5wyESyBUAc306dMuXsEFYAtgCDGyEJATK+auVZSyBUQy9GnRbjy9ZsYliJDWHPcQSgCa4ByFi2BAR K1u2j57jOzWlMEBBTk59IoJCXKsZ6wWCgsCSytdNdvNvKRq1RqiCcPHpJBVUW5rnm1EvMowQLVo0y6Z6aBGrRHpkg1wcOkRCWAZnHUysS4WyHXaa1aM00MrDWPWlhQjWefw7SQoLAA0uMGhZDDq1pDEEYYfUdGoGlnl8dElBERoyt0ArBsISBABciw2EKStZlwzEQSIjezKagXIseaeUVmipAWqJJY6edI0QSAgpUx7ecWNjrzb4A9Dl6tJSogUA WQQ0a//EACgQAAIBAwMEAQQDAAAAAAAAAAABAhESEwMxEDAhICIjBBQyQjNAQf/aAAgBAQABBQJCESq3aywku1qFqw0z7tE9XI7mXMuYpMuSS1amRn6wnQnbMh2H3Um5xg3HhCJfmhdzE2S0oxKRR2Li8uLjsNRlF6I1KL70/ZM3L7RcoRqfyRSINUbJslIuKl5ei5FREYSZrKNiVDd9iqRlIwR7LhCNd01FMuaMrMqZ8TLNIpo0t0j4SukZIoys vbLklfpmSBqUGuFJxNP6nlH1Efeg9+X5Iiam5Hd7UKD8Ea+4/B+SImr+RpxJFSvDMiMqMr5fRQiJkVfjHPinRfRQhH+05oWlhb10IW39ZCK1672uKlfFFRdd94srQuKlSpcLheC6cd5KjKFCgoiiU6O5v0Je0ShQtKdLfjcviXxLkVXhDdxo+hVGSJmiZ0Z0Z0Z0fcIsMZjMSMSMKMSMaLblaUR6nqIoWmMxIxmMsLCwsLOi3RZKmM7crbxpxQoU P//EACQRAAIBAwQCAwEBAAAAAAAAAAABERICEDAxISADE0BhIkFR/9oACAEDAQE/Aesi5KT1lBTBBBAyCCMNCkYtyCO0FRWey4l96jfMEd1thrTksfJGJZLxHW3YjEELStn+H6P0WOVyNEdvEuCB6njXGLtjx7kEDXRnhxcudTx7YvfEHjJJw0UMoF41m/UUrYquFa2K2NK/bUWxC6RoXbD0kude5aMFtsfAaIIIIIIIFwLX/o8T2j4DxBBSUitK SPhL/MQQRrVoqRUiV0uQudGUVorRWisrKyrMEEEZ2JZUez7Pb9lf2VMqfwm4KyCFm18af//EACARAAICAgIDAQEAAAAAAAAAAAARARIgAhAwITFAUGD/2gAIAQIBAT8B5kRGhsWI2LSPCMEaknokfMYMnVyUK52keEyPGOJ41nrqTCgY+UPGcHI5H0bcyPmuG/dtxBIx4wb923viCcWMt3z/ABUz+WsF8zGP8ZSIXwIWVixYtJaS8lpLDHI5PJ5H 
IxlixYsWGMY+pYz1/wD/xAAmEAABAgQGAgMBAAAAAAAAAAAAATERIQIQIDAiMkBBcZFhoWCB/9oACAEBAAY/ArzOjoVPsSJpoNpKQ9nHJrghZrQVSa/3AtpWmbTanrAyDIQgaavZNCSYIK2JSeaiDreRqI0VGpMiaDKdjD1HZtJUpZzUbTYRpbBqwJ4xQ4acCORLPhUkUGIJ+w8cH4yPJDkpV74Tjjj4IKykMmY9mUZRlGU25Pyh2OOOPxdX0RSa DZ3/xAAmEAACAQMDBQADAQEAAAAAAAABABExECFRQSBhcYGhkTCx0fDx/9oACAEBAAE/IQuCeAkA4kI0FMnJDsggg1yp8BICNwi0AAhg9QnUS7UEGzQSZF7SBIIiJ0Y0GCWqjWYbuKsMJNELnuWd4zCTAlIZElOyeUkaK7R8ZWwP8HP+TOMB0cPqUOakEyIEZ7uSSPaTDCSkFRIkHiTrsjc4OYsymSg1SLHVgLAYMiaCXYIHV3wM1SU/ZIyGr1ft mZR5QBEBISaLoiSPIKHgfPTSGTgoFVhR706B5ZgTI+WgwOsuukBsfli19yUUm8IPTCKskjwEv+YTo/iExUbaWDKekF2A82FoiaEB4x4HELx4AaJLt6o0aFKJoG0oNsT2bV3FWriOMmJ0BKvdNgmUHoFg2LLoTmvFt+BoTKFBTog/92cQ/EgmqYJFotHCi5/CyASYaC8EgWLHkaXP4EokygsMMMMWllm0pLulPMUJpWCDw/bHIlwDq4G2DNpZszpR O4KPb7No29v64kpalAyUd8MdWGrHVyo0IsFglhHSjWlGqrk0HAmxTGJpgUpCqEhNmdkDgGEXCPSM9kZ6BB8A+g/AMBWii6EEAwxcIQj00dEZxsg0Clh0D0X16D6yNWrDCWgFIYqhhhhhAdntYiqA8sK7nBO7SQCFTHRhox04kvcYiBJpAzVxP9MFS+2Z1UBtPzaZ7lIGpP38tY8OGRo4QELjiXepkQDOBLQmqE5Avhjox0tFkIXY9j//2gAMAwEA AgADAAAAEJp5ZA8p8bGC547xEafdBSHo/wBTVJ8qH/8A50IlY+TbHeRgfpUY2kdAD99ECjpDgVQTmOlwgUIb1RjO37nHkGUv2Fgv/wABEuoLyYydQem3MH3AcEu9lwBA8OJe/qnRlo5MgWcp+0gWGJBnRjZbTg7jb49CabFZZJzvl0P6CcQihOxFafWKaRJFwVr+fnDug2sls9tfwu0A1qEpGfZFiRor+R//xAAfEQEBAQABBQEBAQAAAAAAAAAB ABEQMSEgQWEwUXH/2gAIAQMBAT8QZnjZzD2sWk2/3wAsWIWl7WbMve9xZMFbtiZRMkx0klRgy2Snu2922222sdnY/ogxpdtvURib39y28Mzis27axBBZZZ4YkVk6hQw4H2cMTOGbE7+hJI4sss8MksCQdVq93+pcuohmJJvDMSrEMcn8ki0vBndNy5JP7w8A48ZKT8mPEyO13kXeS37vZ7wZ04PuZ/HId5WnVlbLnhnGWcnYzP4HWAC+HOWLLPIa oTP4bA5238mzdJJPLIT0j7/f7LdWKYqVahRSCLwed8l4e3+oSJYu3GkO9II13g8+n+3Tp4PJ07XU0mPhUEYsg8enGZdOM8uxmcpMss82zLpJne+t976X0tLtwmadSQNLLPDOUOrI+5v5WP5YsSX1YWFixYsWca9DfaWdWSTEG9J9b6Sr1sLCyyyyz8sCyn5CUWP4T140D+W3/8QAHxEAAwACAwEBAQEAAAAAAAAAAQARECEgMTBBUWFx/9oACAEC 
AQE/EAgoaJQZSfUoIEkl+B/g0e28AIAxHppPULJCCAdggmg9IwCe0MBIQssBIYxjC7dER+2h6wYlEdoORgENp0KhIYlJS3jEEcXTRdoFf5aHaCD1gIe7UhBI6L/fBTT+NLvEQhj6kMQmwhApAPaSGxgIShanvgOIQhuwY7OowAIORh8+YQhPAd13YxCCwhV8Sb6BCQsKZ8bG1IzcXmOYQnZd8KxnK+IQkwXAYxjPEHwCDG3KeAwAzkBg85yGAhgL TGMOVTwHmMAoPAVJa3j3jt78a1rWt5hD29vesVNfjCzI8o00zgC003xoqQZ1ipj/AJSBttptpprh75bYXaBSgX5pJKMDD4xAf//EACYQAQACAgEDAwUBAQAAAAAAAAEAESExQVFhcRCBkaGxwdHhIPD/2gAIAQEAAT8QgZpL0mbARlFXmiFUDy7fqASlpZmoV3FY1crPU038w6h949RW53cafyQD+kBpHiMS/khhAHLBkBe5qWaPhGs5azFx2xBg aLE3LapTs6RkvRncFYDSV8ovgejD7n59DmkdDcIvLFweyWrBYNsoGzJdE1yeS/eL4PsgxwPgQ7HxLrkJQpTyGHN7YPtFiHznT9blK5eD8n6gWoum8PvBd0QBboiXtXgFzQV9WzCQeelyxURzxEECPJ6BF6DQsFmDwSsbDux4ApoI5pwNkVkzBXh8y7uDtUvzctVDpRHNQawlR/N4FzOvXMKs1WpiAgUOLpCUlW25i0H5QskR0UpH68s1B22y/EtK Na/6D1axlOoZtQF5g4IUw6siKVgeBH2g2eA5gVHZ+oZpcAVwHa9z9T8yfzHSwdV+Z821JSYDsTBMO7AnnZcxWFYoVh8YmwD2duSVZwkq1qGVicjmaLx0H3PR5ijJGQX7sJxN3fMY+lQHBl8y2N/4jUETplpZu5kOFiVrwfc/kVGsQ7ggtlMVHOPRmjmJIorIUSiHtGO4LF6itPWP+Y1DiBso1WHrGIWUWHqyt7x+YnBFs0GUOdQX9MUUM98QDVD1 N/MSlSrywIcjGM0n2jv0v05/wDsaQOFc1BI1vun8w6RRjCgjEOsNkSJM8VKleQe9QPUwxnT0jH/Iw4IxIXfSc30JWJUU2S2s95Zz9NQLq/mBCHpczUZrMGIkY+h6oMuCbQGFu/o+6bQp4lZ7/aPqL9AhLGFfj1MfVZh5LKlNQZtXMuOv2IZzx1TErF3XVdsTGTHBeZTfXy1KVQ45b36XLly6yx4n3CWqWVdku5iHmKdZTrKRdXEcB8Wblh1g2WQf iVOc81O6nhnBBb/5JWULXlrUAqHHNJQmcHtuX/i5igBG1T4f7Lp5x3h13lBdCdtE4AqKxL1PbiXtBsd9YKar20xe70ZeeL7z+rZZQQrbzMhAdTAAmB23ANrB9YdQaD059RfQDRLxXzDVyU+mPSdmINyy/LDA7PrKhWT7Q91dJ2Nw6H1h2wd9QdqINWVaDkUJkpEM94Cps4v5CqtpqN+t1FuOYx+s4BPmNPufaN54wPSAQTZnrAHJnrNu/WYbKepA eS+5C3A2HWU64OGUrOKiELxXPDFdW0PiLgGhnOmK5b3cywJh1mPX4YIfel5ia8xFZofMxoizJ7xjRvg6PswiaSvThDo1Bcai1jJCyg2HWeXzClw1W4l7phHYL4FiGhniAa+lMFAHtA8AnOv3Eeu3qVAPqjP+x6THA3Fe0v6TwQLr7QLJ9Uo8loK9HvE1KU4VHVHmLPL2WZMF7sbAzNvtCGm+8358plGq+Ik4Ig4ge08J4w7SeMB5gN6meSB2hXIj 2/SMC05lKMhdRdWDhqH7iY6JbpPIx9QisFNsNI6PoxZsgYz9UA/1E1f3RB4jfEqOpU4iOieKHan/2Q== </data> <mime>image/jpeg</mime> <resource-attributes> <file-name>IMG_4686v3.JPG</file-name> 
<source-url>en-cache://tokenKey%3D%22AuthToken%3AUser%3A113979823%22+fff281d8-1e2b-214a-61f5-a152ca30a105+8ab763800efcb0865f5d55e8a0e43eb2+https://www.evernote.com/shard/s612/res/a5e074a6-0ed8-9df5-4843-a375d7c8d257</source-url> </resource-attributes> </resource> </note> </en-export>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/sample_hugging_face_dataset.py
from typing import Any, Generator, List, Tuple

import datasets


class SampleHuggingface(datasets.GeneratorBasedBuilder):
    """Sample huggingface dataset with two different versions for testing."""

    # Two named configs sharing one version; each yields a different example,
    # letting tests exercise config selection by name.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="v1",
            version=datasets.Version("1.0.0"),
            description="Sample v1 description",
        ),
        datasets.BuilderConfig(
            name="v2",
            version=datasets.Version("1.0.0"),
            description="Sample v2 description",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Describe the feature schema shared by both configs."""
        nested_records = datasets.features.Sequence(
            {
                "dict_text": datasets.Value("string"),
                "dict_int": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description="Sample Huggingface dataset",
            features=datasets.Features(
                {
                    "split": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "list": datasets.features.Sequence(datasets.Value("string")),
                    "dict": nested_records,
                }
            ),
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Produce a train and a test split for the active config.

        Args:
            dl_manager (`DownloadManager`): Download helper required by the
                datasets API; unused here because examples are generated
                in memory.
        """
        return [
            datasets.SplitGenerator(
                name=split_enum,
                gen_kwargs={"split": split_str, "name": self.config.name},
            )
            for split_enum, split_str in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(
        self, split: str, name: str
    ) -> Generator[Tuple[int, object], Any, None]:
        """Yield the single (key, example) pair for the requested config.

        Args:
            split (`string`): Split to process
            name (`string`): Name of dataset, as defined in the BuilderConfig
        """
        if name == "v1":
            example = {
                "split": split,
                "text": "This is text in version 1",
                "list": ["List item 1", "List item 2", "List item 3"],
                "dict": [
                    {"dict_text": "Object text 1", "dict_int": "1"},
                    # "0": same runtime value the original spelled as str(000).
                    {"dict_text": "Object text 2", "dict_int": "0"},
                ],
            }
            yield 1, example
        elif name == "v2":
            example = {
                "split": split,
                "text": "This is text in version 2",
                "list": ["Hello", "Bonjour", "Hola"],
                "dict": [
                    {"dict_text": "Hello world!", "dict_int": "2"},
                    {"dict_text": "langchain is cool", "dict_int": "123"},
                ],
            }
            yield 2, example
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/sample_notebook_missingmetadata.enex
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export4.dtd"> <en-export export-date="20230611T011239Z" application="Evernote" version="10.56.9"> <note> <content> I only have content, no metadata </content> </note> </en-export>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/mwtest_current_pages.xml
<mediawiki xmlns="http://www.mediawiki.org/xml/export-0.11/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.mediawiki.org/xml/export-0.11/ http://www.mediawiki.org/xml/export-0.11.xsd" version="0.11" xml:lang="en"> <siteinfo> <sitename>Text Wiki</sitename> <dbname>test123</dbname> <base>http://control.fandom.com/wiki/Control_Wiki</base> <generator>MediaWiki 1.37.3</generator> <case>first-letter</case> <namespaces> <namespace key="-2" case="first-letter">Media</namespace> <namespace key="-1" case="first-letter">Special</namespace> <namespace key="0" case="first-letter" /> <namespace key="1" case="first-letter">Talk</namespace> <namespace key="2" case="first-letter">User</namespace> <namespace key="3" case="first-letter">User talk</namespace> <namespace key="4" case="first-letter">Text Wiki</namespace> <namespace key="5" case="first-letter">Text Wiki talk</namespace> <namespace key="6" case="first-letter">File</namespace> <namespace key="7" case="first-letter">File talk</namespace> <namespace key="8" case="first-letter">MediaWiki</namespace> <namespace key="9" case="first-letter">MediaWiki talk</namespace> <namespace key="10" case="first-letter">Template</namespace> <namespace key="11" case="first-letter">Template talk</namespace> <namespace key="12" case="first-letter">Help</namespace> <namespace key="13" case="first-letter">Help talk</namespace> <namespace key="14" case="first-letter">Category</namespace> <namespace key="15" case="first-letter">Category talk</namespace> <namespace key="110" case="first-letter">Forum</namespace> <namespace key="111" case="first-letter">Forum talk</namespace> <namespace key="420" case="first-letter">GeoJson</namespace> <namespace key="421" case="first-letter">GeoJson talk</namespace> <namespace key="500" case="first-letter">User blog</namespace> <namespace key="501" case="first-letter">User blog comment</namespace> <namespace key="502" case="first-letter">Blog</namespace> <namespace key="503" 
case="first-letter">Blog talk</namespace> <namespace key="710" case="first-letter">TimedText</namespace> <namespace key="711" case="first-letter">TimedText talk</namespace> <namespace key="828" case="first-letter">Module</namespace> <namespace key="829" case="first-letter">Module talk</namespace> <namespace key="1200" case="first-letter">Message Wall</namespace> <namespace key="1201" case="first-letter">Thread</namespace> <namespace key="1202" case="first-letter">Message Wall Greeting</namespace> <namespace key="2000" case="first-letter">Board</namespace> <namespace key="2001" case="first-letter">Board Thread</namespace> <namespace key="2002" case="first-letter">Topic</namespace> <namespace key="2900" case="first-letter">Map</namespace> <namespace key="2901" case="first-letter">Map talk</namespace> </namespaces> </siteinfo> <page> <title>Whiskers the Cat</title> <ns>0</ns> <id>190</id> <revision> <id>14802</id> <parentid>14312</parentid> <timestamp>2022-04-30T04:37:40Z</timestamp> <contributor> <username>Test user</username> <id>47482455</id> </contributor> <minor/> <comment>/* External links */Unicode+Fixes</comment> <model>wikitext</model> <format>text/x-wiki</format> <text bytes="233" sha1="qa5cny8ozb0vw4ahvoxevpe0u8f03lw" xml:space="preserve"> {{Standard Animal |title = Whiskers the Cat |image =whiskers.jpg |full_name = Sir Whiskers Whiskerington Whiskey Wiskerton III |birth = May 9, 2018&lt;br&gt;Portland, Maine, U.S. |age = {{Age|2018|5|09}} |gender = Male |nationality = American |occupation = Cat }} '''Whiskers the Cat '''is an American cat. == Gallery == &lt;gallery widths="200" spacing="small" position="left" captionalign="left" hideaddbutton="true"&gt; whiskers-on-the-floor.jpg|Whiskers is known for taking long naps &lt;/gallery&gt; == Appearance and Personality == Whiskers is a medium-sized cat with a soft and fluffy coat. Its fur is predominantly white, with patches of orange and gray, giving it a unique and eye-catching appearance. 
But what truly sets Whiskers apart are its long, elegant whiskers that frame its adorable face. Aside from its adorable appearance, Whiskers is known for its friendly and sociable personality. This cat is extremely affectionate and loves to curl up on laps for a cozy cuddle. It enjoys playing with toys, chasing laser pointers, and exploring its surroundings with curiosity. == Popularity == Whiskers rose to fame through social media platforms, where its adorable pictures and videos quickly went viral. Many internet users were captivated by its cuteness and endearing antics, leading to a dedicated following of fans and admirers. == Legacy == Whiskers has become an iconic internet sensation, symbolizing the charm and playfulness of cats. It has inspired countless memes, fan art, and even merchandise. Whiskers continues to bring joy and happiness to people around the world, reminding us of the beauty and companionship that pets can bring into our lives. == References == {{Reflist}} == External links == * [[Wikipedia:Cat]] on Wikipedia {{WikiNavbox}} [[Category:Cats]]</text> <sha1>qa5cny8ozb0vw4ahvoxevpe0u8f03lw</sha1> </revision> </page> <page> <title>File:TestPics.jpg</title> <ns>6</ns> <id>123</id> <revision> <id>14331</id> <parentid>14802</parentid> <timestamp>2023-04-17T16:39:12Z</timestamp> <contributor> <username>user</username> <id>123456</id> </contributor> <minor/> <comment>user moved page [[File:920x920.jpg]] to [[File:test.jpg]] for reasons</comment> <model>wikitext</model> <format>text/x-wiki</format> <text bytes="25" sha1="1diptt6cjtef34rewxvcmnduax281kz40p" xml:space="preserve"> [[Category:Test images]]</text> <sha1>1diptt6cjter6axvcmnduax281kz40p</sha1> </revision> </page> <page> <title>Gallagher</title> <ns>0</ns> <id>789</id> <revision> <id></id> <parentid>14312</parentid> <timestamp>2022-04-30T04:37:40Z</timestamp> <contributor> <username>Test User</username> <id>474112455</id> </contributor> <minor/> <comment>/* External links 
*/Unicode+Fixes</comment> <model>wikitext</model> <format>text/x-wiki</format> <text bytes="942" sha1="qa5cny8ozb0vw4ahvoxevpe0u8f03lw" xml:space="preserve"> {{Standard Animal |title = Gallagher |image =gallagher-hockey.jpg |full_name = Gallager Golden |birth = November 13, 1994&lt;br&gt;Boston, Massachusetts, U.S. |age = {{Age|1994|11|13}} |gender = Male |nationality = American |occupation = Professional Hockey Player }} == Gallagher (Golden Retriever) == [[File:Gallagher the Golden Retriever.jpg|thumb|Gallagher, the talented golden retriever]] '''Gallagher''' is a golden retriever who gained widespread attention as a professional hockey player for the Boston Bruins during the 1997-1998 season. This remarkable canine athlete captivated audiences with its exceptional skills on the ice and became an icon in the world of sports. == Career == Gallagher's journey to becoming a professional hockey player began when it was discovered by Bruins' scouts during a charity event. Impressed by its agility, speed, and natural talent for puck handling, the team decided to sign Gallagher as an honorary member. During the 1997-1998 season, Gallagher proved to be a remarkable asset to the Bruins. Although unconventional, its ability to navigate the rink, steal the puck, and assist in scoring goals astonished both teammates and opponents alike. Despite the challenges of being a dog in a human-dominated sport, Gallagher's determination and love for the game were unmatched. == Statistics == Throughout the 1997-1998 season, Gallagher's statistics were nothing short of extraordinary: - Goals: 25 - Assists: 40 - Penalties: 10 These impressive numbers placed Gallagher among the league's top performers and solidified its status as an unforgettable player. == Legacy == Gallagher's incredible achievements as a golden retriever hockey player left an indelible mark on the sport. 
It inspired a generation of fans and players alike, proving that dedication, teamwork, and a little bit of canine charm can overcome any obstacle. While Gallagher's hockey career was limited to a single season, its impact transcended the game. The dog's popularity skyrocketed, leading to numerous endorsements, appearances in movies and commercials, and even a line of dog-themed hockey merchandise. == References == {{Reflist}} [[Category: Animals]] </text> <sha1>qa5cny9y86ftfuyyuoxevpe0u8f03lw</sha1> </revision> </page> <page> <title>Sir Whiskers Whiskerington Whiskey Wiskerton III</title> <ns>0</ns> <id>101</id> <redirect title="Whiskers" /> <revision> <id>455</id> <timestamp>2018-10-17T04:10:06Z</timestamp> <contributor> <username>Test User</username> <id>35032394</id> </contributor> <comment>User moved page [[Sir Whiskers Whiskerington Whiskey Wiskerton III]] to [[Whiskers]]</comment> <model>wikitext</model> <format>text/x-wiki</format> <text bytes="22" sha1="e854tjjb9a233fsdfs6bp1ve4v8g2qnkma" xml:space="preserve">#REDIRECT [[Whiskers]]</text> <sha1>e854tjjb9a233fsdfs6bp1ve4v8g2qnkma</sha1> </revision> </page> </mediawiki>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/sample_notebook.enex
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export4.dtd"> <en-export export-date="20230611T011239Z" application="Evernote" version="10.56.9"> <note> <title>Test</title> <created>20230511T011217Z</created> <updated>20240714T011228Z</updated> <note-attributes> <author>Michael McGarry</author> </note-attributes> <content> <![CDATA[<?xml version="1.0" encoding="UTF-8" standalone="no"?> <!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd"><en-note><div>abc</div></en-note> ]]> </content> </note> <note> <title>Summer Training Program</title> <created>20221227T015948Z</created> <note-attributes> <author>Mike McGarry</author> <source>mobile.iphone</source> </note-attributes> <content> <![CDATA[<?xml version="1.0" encoding="UTF-8" standalone="no"?> <!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd"><en-note><div><b>Jan - March 2022</b></div></en-note> ]]> </content> </note> </en-export>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/empty_export.enex
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE en-export SYSTEM "http://xml.evernote.com/pub/evernote-export4.dtd"> <en-export export-date="20230613T110102Z" application="Evernote" version="10.56.9"> </en-export>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/obsidian/bad_frontmatter.md
--- anArray: one - two - three tags: 'onetag', 'twotag' ] --- A document with frontmatter that isn't valid.
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/obsidian/no_frontmatter.md
### Description #recipes #dessert #cookies A document with HR elements that might trip up a front matter parser: --- ### Ingredients - 3/4 cup (170g) **unsalted butter**, slightly softened to room temperature. - 1 and 1/2 cups (180g) **confectioners’ sugar** ---
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/obsidian/tags_and_frontmatter.md
--- aFloat: 13.12345 anInt: 15 aBool: true aString: string value anArray: - one - two - three aDict: dictId1: '58417' dictId2: 1500 tags: [ 'onetag', 'twotag' ] --- # Tags ()#notatag #12345 #read something #tagWithCases - #tag-with-dash #tag_with_underscore #tag/with/nesting # Dataview Here is some data in a [dataview1:: a value] line. Here is even more data in a (dataview2:: another value) line. dataview3:: more data notdataview4: this is not a field notdataview5: this is not a field # Text content https://example.com/blog/#not-a-tag
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/obsidian/template_var_frontmatter.md
--- aString: {{var}} anArray: - element - {{varElement}} aDict: dictId1: 'val' dictId2: '{{varVal}}' tags: [ 'tag', '{{varTag}}' ] --- Frontmatter contains template variables.
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/obsidian/no_metadata.md
A markdown document with no additional metadata.
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/sample_documents/obsidian/frontmatter.md
--- tags: journal/entry, obsidian --- No other content than the frontmatter.
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/loaders/vendors/test_docugami.py
"""Test DocugamiLoader.""" from pathlib import Path import pytest from langchain_community.document_loaders import DocugamiLoader DOCUGAMI_XML_PATH = Path(__file__).parent / "test_data" / "docugami-example.xml" @pytest.mark.requires("dgml_utils") def test_docugami_loader_local() -> None: """Test DocugamiLoader.""" loader = DocugamiLoader(file_paths=[DOCUGAMI_XML_PATH]) # type: ignore[call-arg] docs = loader.load() assert len(docs) == 25 assert "/docset:DisclosingParty" in docs[1].metadata["xpath"] assert "h1" in docs[1].metadata["structure"] assert "DisclosingParty" in docs[1].metadata["tag"] assert docs[1].page_content.startswith("Disclosing") def test_docugami_initialization() -> None: """Test correct initialization in remote mode.""" DocugamiLoader( access_token="test", docset_id="123", document_ids=None, file_paths=None )
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/loaders/vendors
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/loaders/vendors/test_data/docugami-example.xml
<?xml version="1.0" encoding="utf-8"?> <dg:chunk cp:version="2.10.10.0.1699162341377-69.0" xmlns:docset="http://www.docugami.com/2021/dgml/TaqiTest20231103/NDA" xmlns:addedChunks="http://www.docugami.com/2021/dgml/TaqiTest20231103/NDA/addedChunks" xmlns:dg="http://www.docugami.com/2021/dgml" xmlns:dgc="http://www.docugami.com/2021/dgml/docugami/contracts" xmlns:dgm="http://www.docugami.com/2021/dgml/docugami/medical" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:cp="http://classifyprocess.com/2018/07/"> <docset:MUTUALNON-DISCLOSUREAGREEMENT-section> <dg:chunk structure="h1">NON-DISCLOSURE AGREEMENT </dg:chunk> <docset:MUTUALNON-DISCLOSUREAGREEMENT structure="div"> This Non-Disclosure Agreement ("Agreement") is entered into as of <docset:EffectiveDate>November 4, 2023 </docset:EffectiveDate>("Effective Date"), by and between: </docset:MUTUALNON-DISCLOSUREAGREEMENT> </docset:MUTUALNON-DISCLOSUREAGREEMENT-section> <docset:DisclosingParty-section> <dg:chunk structure="h1"> Disclosing Party: </dg:chunk> <docset:DisclosingParty structure="div"><docset:PrincipalPlaceofBusiness>Widget Corp.</docset:PrincipalPlaceofBusiness>, a <dgc:USState>Delaware </dgc:USState>corporation with its principal place of business at <docset:PrincipalPlaceofBusiness><docset:PrincipalPlaceofBusiness> <docset:WidgetCorpAddress>123 </docset:WidgetCorpAddress> <docset:PrincipalPlaceofBusiness>Innovation Drive</docset:PrincipalPlaceofBusiness> </docset:PrincipalPlaceofBusiness> , <docset:PrincipalPlaceofBusiness>Techville</docset:PrincipalPlaceofBusiness>, <dgc:USState> Delaware</dgc:USState>, <docset:PrincipalPlaceofBusiness>12345 </docset:PrincipalPlaceofBusiness></docset:PrincipalPlaceofBusiness> ("<dgc:Org> <docset:CompanyName>Widget </docset:CompanyName> <docset:CorporateName>Corp.</docset:CorporateName> </dgc:Org>") </docset:DisclosingParty> </docset:DisclosingParty-section> <dg:chunk> <docset:ReceivingParty-section> <dg:chunk 
structure="h1"> Receiving Party: </dg:chunk> <docset:ReceivingParty structure="div"> <dg:chunk structure="p"><docset:RecipientName>Jane Doe</docset:RecipientName>, an individual residing at <docset:RecipientAddress><docset:RecipientAddress> <docset:RecipientAddress>456 </docset:RecipientAddress> <docset:RecipientAddress>Privacy Lane</docset:RecipientAddress> </docset:RecipientAddress> , <docset:RecipientAddress>Safetown</docset:RecipientAddress>, <dgc:USState> California</dgc:USState>, <docset:RecipientAddress>67890 </docset:RecipientAddress></docset:RecipientAddress> ("Recipient") </dg:chunk> <dg:chunk> <dg:chunk structure="p"> (collectively referred to as the "Parties"). </dg:chunk> <dg:chunk> <docset:ConfidentialityObligations structure="ol" style="list-style-type: decimal; boundingBox:{left: 300.0; top: 936.0; width: 30.0; height: 1881.0; page: 1;}; boundingBox:{left: 300.0; top: 309.0; width: 30.0; height: 777.0; page: 2;}; "> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 936.0; width: 30.0; height: 45.0; page: 1;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 936.0; width: 48.0; height: 45.0; page: 1;}; "> 1. </dg:chunk> <docset:DefinitionofConfidentialInformation-section> <dg:chunk structure="h1">Definition of <dg:chunk>Confidential Information </dg:chunk></dg:chunk> <docset:DefinitionofConfidentialInformation structure="div">For purposes of this Agreement, "<dg:chunk>Confidential Information</dg:chunk>" shall include all information or material that has or could have commercial value or other utility in the business in which Disclosing Party is engaged. If <dg:chunk>Confidential Information </dg:chunk>is in written form, the <dg:chunk>Disclosing Party </dg:chunk>shall label or stamp the materials with the word "Confidential" or some similar warning. 
If <dg:chunk>Confidential Information </dg:chunk>is transmitted orally, the <dg:chunk>Disclosing Party </dg:chunk>shall promptly provide writing indicating that such oral communication constituted <dg:chunk>Confidential Information</dg:chunk> . </docset:DefinitionofConfidentialInformation> </docset:DefinitionofConfidentialInformation-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 1428.0; width: 30.0; height: 48.0; page: 1;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 1428.0; width: 48.0; height: 48.0; page: 1;}; "> 2. </dg:chunk> <docset:ExclusionsFromConfidentialInformation-section> <dg:chunk structure="h1">Exclusions from <dg:chunk>Confidential Information </dg:chunk></dg:chunk> <docset:ExclusionsFromConfidentialInformation structure="div">Recipient's obligations under this Agreement do not extend to information that is: (a) publicly known at the time of disclosure or subsequently becomes publicly known through no fault of the Recipient; (b) discovered or created by the Recipient before disclosure by <dg:chunk>Disclosing Party</dg:chunk>; (c) learned by the Recipient through legitimate means other than from the <dg:chunk>Disclosing Party </dg:chunk>or Disclosing Party's representatives; or (d) is disclosed by Recipient with Disclosing Party's prior written approval. </docset:ExclusionsFromConfidentialInformation> </docset:ExclusionsFromConfidentialInformation-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 1866.0; width: 30.0; height: 45.0; page: 1;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 1866.0; width: 48.0; height: 45.0; page: 1;}; "> 3. 
</dg:chunk> <docset:ObligationsofReceivingParty-section> <dg:chunk structure="h1">Obligations of Receiving Party </dg:chunk> <docset:ObligationsofReceivingParty structure="div">Recipient shall hold and maintain the <dg:chunk>Confidential Information </dg:chunk>in strictest confidence for the sole and exclusive benefit of the <dg:chunk>Disclosing Party</dg:chunk>. Recipient shall carefully restrict access to <dg:chunk>Confidential Information </dg:chunk>to employees, contractors, and third parties as is reasonably required and shall require those persons to sign nondisclosure restrictions at least as protective as those in this Agreement. </docset:ObligationsofReceivingParty> </docset:ObligationsofReceivingParty-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 2244.0; width: 30.0; height: 48.0; page: 1;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 2244.0; width: 48.0; height: 48.0; page: 1;}; "> 4. </dg:chunk> <docset:TimePeriods-section> <dg:chunk structure="h1">Time Periods </dg:chunk> <docset:TimePeriods structure="div">The nondisclosure provisions of this Agreement shall survive the termination of this Agreement and Recipient's duty to hold <dg:chunk>Confidential Information </dg:chunk>in confidence shall remain in effect until the <dg:chunk>Confidential Information </dg:chunk>no longer qualifies as a trade secret or until <dg:chunk>Disclosing Party </dg:chunk>sends Recipient written notice releasing Recipient from this Agreement, whichever occurs first. </docset:TimePeriods> </docset:TimePeriods-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 2565.0; width: 30.0; height: 48.0; page: 1;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 2565.0; width: 48.0; height: 48.0; page: 1;}; "> 5. 
</dg:chunk> <docset:Relationships-section> <dg:chunk structure="h1">Relationships </dg:chunk> <docset:Relationships structure="div">Nothing contained in this Agreement shall be deemed to constitute either party a partner, joint venture, or employee of the other party for any purpose. </docset:Relationships> </docset:Relationships-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 2772.0; width: 30.0; height: 45.0; page: 1;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 2772.0; width: 48.0; height: 45.0; page: 1;}; "> 6. </dg:chunk> <docset:Severability-section> <dg:chunk structure="h1">Severability </dg:chunk> <docset:Severability structure="div">If a court finds any provision of this Agreement invalid or unenforceable, the remainder of this Agreement shall be interpreted so as best to effect the intent of the parties. </docset:Severability> </docset:Severability-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 309.0; width: 30.0; height: 45.0; page: 2;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 309.0; width: 48.0; height: 45.0; page: 2;}; "> 7. </dg:chunk> <docset:Integration-section> <dg:chunk structure="h1">Integration </dg:chunk> <docset:Integration structure="div">This Agreement expresses the complete understanding of the parties with respect to the subject matter and supersedes all prior proposals, agreements, representations, and understandings. This Agreement may not be amended except in writing signed by both parties. </docset:Integration> </docset:Integration-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 573.0; width: 30.0; height: 45.0; page: 2;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 573.0; width: 48.0; height: 45.0; page: 2;}; "> 8. 
</dg:chunk> <docset:Waiver-section> <dg:chunk structure="h1">Waiver </dg:chunk> <docset:Waiver structure="div">The failure to exercise any right provided in this Agreement shall not be a waiver of prior or subsequent rights. </docset:Waiver> </docset:Waiver-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 720.0; width: 30.0; height: 48.0; page: 2;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 720.0; width: 48.0; height: 48.0; page: 2;}; "> 9. </dg:chunk> <docset:NoticeofImmunity-section> <dg:chunk structure="h1">Notice of Immunity </dg:chunk> <docset:NoticeofImmunity structure="div">Employee is provided notice that an individual shall not be held criminally or civilly liable under any federal or state trade secret law for the disclosure of a trade secret that is made (i) in confidence to a federal, state, or local government official, either directly or indirectly, or to an attorney; and (ii) solely for the purpose of reporting or investigating a suspected violation of law. </docset:NoticeofImmunity> </docset:NoticeofImmunity-section> </dg:chunk> <dg:chunk structure="li" style="boundingBox:{left: 300.0; top: 1041.0; width: 30.0; height: 45.0; page: 2;}; "> <dg:chunk structure="lim" style="boundingBox:{left: 300.0; top: 1041.0; width: 81.0; height: 45.0; page: 2;}; "> 10. 
</dg:chunk> <dg:chunk>Table of <dg:chunk>Authorized Disclosures </dg:chunk> </dg:chunk> </dg:chunk> </docset:ConfidentialityObligations> <dg:chunk> <docset:AuthorizedRecipients structure="p">The following table outlines individuals who are authorized to receive <dg:chunk>Confidential Information</dg:chunk>, their role, and the purpose of disclosure: </docset:AuthorizedRecipients> <docset:TableofAuthorizedDisclosures> <xhtml:table structure="table" style="boundingBox:{left: 300.0; top: 1272.0; width: 2040.0; height: 372.0; page: 2;}; "> <xhtml:tbody structure="tbody" style="boundingBox:{left: 300.0; top: 1272.0; width: 2040.0; height: 372.0; page: 2;}; "> <xhtml:tr structure="tr" style="boundingBox:{left: 300.0; top: 1272.0; width: 2040.0; height: 93.0; page: 2;}; "> <xhtml:td structure="td" style="boundingBox:{left: 300.0; top: 1272.0; width: 603.0; height: 93.0; page: 2;}; "> <dg:chunk>Authorized Individual </dg:chunk> </xhtml:td> <xhtml:td structure="td" style="boundingBox:{left: 924.0; top: 1272.0; width: 114.0; height: 93.0; page: 2;}; "> Role </xhtml:td> <xhtml:td structure="td" style="boundingBox:{left: 1338.0; top: 1272.0; width: 1002.0; height: 93.0; page: 2;}; ">Purpose of Disclosure </xhtml:td> </xhtml:tr> <xhtml:tr structure="tr" style="boundingBox:{left: 300.0; top: 1365.0; width: 2040.0; height: 93.0; page: 2;}; "> <xhtml:td structure="td" style="boundingBox:{left: 300.0; top: 1365.0; width: 603.0; height: 93.0; page: 2;}; "> <docset:AuthorizedIndividualJohnSmith> <docset:Name>John Smith </docset:Name> </docset:AuthorizedIndividualJohnSmith> </xhtml:td> <xhtml:td structure="td" style="boundingBox:{left: 903.0; top: 1365.0; width: 435.0; height: 93.0; page: 2;}; "> <docset:JohnSmithRole> <docset:ProjectManagerName>Project Manager </docset:ProjectManagerName> </docset:JohnSmithRole> </xhtml:td> <xhtml:td structure="td" style="boundingBox:{left: 1338.0; top: 1365.0; width: 1002.0; height: 93.0; page: 2;}; "> <docset:JohnSmithPurposeofDisclosure> <dg:chunk 
structure="p">Oversee project to which the NDA relates </dg:chunk> </docset:JohnSmithPurposeofDisclosure> </xhtml:td> </xhtml:tr> <xhtml:tr structure="tr" style="boundingBox:{left: 300.0; top: 1458.0; width: 2040.0; height: 93.0; page: 2;}; "> <xhtml:td structure="td" style="boundingBox:{left: 300.0; top: 1458.0; width: 603.0; height: 93.0; page: 2;}; "> <docset:AuthorizedIndividualLisaWhite> <docset:Author>Lisa White </docset:Author> </docset:AuthorizedIndividualLisaWhite> </xhtml:td> <xhtml:td structure="td" style="boundingBox:{left: 903.0; top: 1458.0; width: 435.0; height: 93.0; page: 2;}; "> <docset:LisaWhiteRole> <dg:chunk>Lead Developer </dg:chunk> </docset:LisaWhiteRole> </xhtml:td> <xhtml:td structure="td" style="boundingBox:{left: 1338.0; top: 1458.0; width: 1002.0; height: 93.0; page: 2;}; "> <docset:LisaWhitePurposeofDisclosure>Software development and analysis </docset:LisaWhitePurposeofDisclosure> </xhtml:td> </xhtml:tr> <xhtml:tr structure="tr" style="boundingBox:{left: 300.0; top: 1551.0; width: 2040.0; height: 93.0; page: 2;}; "> <xhtml:td structure="td" style="boundingBox:{left: 300.0; top: 1551.0; width: 603.0; height: 93.0; page: 2;}; "> <docset:AuthorizedIndividualMichaelBrown> <docset:Name>Michael Brown </docset:Name> </docset:AuthorizedIndividualMichaelBrown> </xhtml:td> <xhtml:td structure="td" style="boundingBox:{left: 903.0; top: 1551.0; width: 435.0; height: 93.0; page: 2;}; "> <docset:MichaelBrownRole> <dg:chunk>Financial <docset:FinancialAnalyst> Analyst </docset:FinancialAnalyst></dg:chunk> </docset:MichaelBrownRole> </xhtml:td> <xhtml:td structure="td" style="boundingBox:{left: 1338.0; top: 1551.0; width: 1002.0; height: 93.0; page: 2;}; "> <docset:MichaelBrownPurposeofDisclosure>Financial analysis and reporting </docset:MichaelBrownPurposeofDisclosure> </xhtml:td> </xhtml:tr> </xhtml:tbody> </xhtml:table> </docset:TableofAuthorizedDisclosures> </dg:chunk> </dg:chunk> </dg:chunk> </docset:ReceivingParty> 
</docset:ReceivingParty-section> <docset:INWITNESSWHEREOF-section> <dg:chunk structure="h1"> IN <dg:chunk>WITNESS WHEREOF</dg:chunk>, </dg:chunk> <docset:INWITNESSWHEREOF structure="div">the Parties have executed this Non-Disclosure Agreement as of the <dg:chunk>Effective Date </dg:chunk>first above written. </docset:INWITNESSWHEREOF> </docset:INWITNESSWHEREOF-section> </dg:chunk> <docset:WidgetCorp-section> <dg:chunk structure="h1"> <docset:CompanyName>Widget Corp. </docset:CompanyName> </dg:chunk> <docset:By-section structure="div"> <dg:chunk structure="h1"> By: </dg:chunk> <docset:By structure="div">_____________________________ </docset:By> </docset:By-section> </docset:WidgetCorp-section> <dg:chunk structure="h1"> Name: <docset:Name>Alan Black </docset:Name></dg:chunk> <dg:chunk> <dg:chunk structure="h1"> Title: <docset:ChiefExecutiveOfficer>Chief Executive Officer </docset:ChiefExecutiveOfficer></dg:chunk> <docset:Date-section structure="div"> <dg:chunk structure="h1"> Date: </dg:chunk> <docset:Date structure="div">___________________________ </docset:Date> </docset:Date-section> </dg:chunk> <docset:Recipient-section> <dg:chunk structure="h1"> Recipient </dg:chunk> <docset:By-section structure="div"> <dg:chunk structure="h1"> By: </dg:chunk> <docset:By structure="div">_____________________________ </docset:By> </docset:By-section> </docset:Recipient-section> <docset:NameJaneDoe-section> <dg:chunk structure="h1"> Name: <docset:Name>Jane Doe </docset:Name></dg:chunk> <docset:Date-section structure="div"> <dg:chunk structure="h1"> Date: </dg:chunk> <docset:Date structure="div">___________________________</docset:Date> </docset:Date-section> </docset:NameJaneDoe-section> </dg:chunk>
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/test_public_api.py
from langchain_community.document_loaders.parsers import __all__

# Snapshot of the names the parsers package is expected to export.
# Any drift from this set is a breaking change to the public API.
_EXPECTED_PARSERS = frozenset(
    [
        "AzureAIDocumentIntelligenceParser",
        "BS4HTMLParser",
        "DocAIParser",
        "GrobidParser",
        "LanguageParser",
        "OpenAIWhisperParser",
        "PyPDFParser",
        "PDFMinerParser",
        "PyMuPDFParser",
        "PyPDFium2Parser",
        "PDFPlumberParser",
        "VsdxParser",
    ]
)


def test_parsers_public_api_correct() -> None:
    """Test public API of parsers for breaking changes."""
    assert set(__all__) == _EXPECTED_PARSERS
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/test_vsdx_parser.py
"""Tests for the VSDX parsers.""" from pathlib import Path from typing import Iterator import pytest from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob from langchain_community.document_loaders.parsers import VsdxParser _THIS_DIR = Path(__file__).parents[3] _EXAMPLES_DIR = _THIS_DIR / "examples" # Paths to test VSDX file FAKE_FILE = _EXAMPLES_DIR / "fake.vsdx" def _assert_with_parser(parser: BaseBlobParser, splits_by_page: bool = True) -> None: """Standard tests to verify that the given parser works. Args: parser (BaseBlobParser): The parser to test. splits_by_page (bool): Whether the parser splits by page or not by default. """ blob = Blob.from_path(FAKE_FILE) doc_generator = parser.lazy_parse(blob) assert isinstance(doc_generator, Iterator) docs = list(doc_generator) if splits_by_page: assert len(docs) == 14 else: assert len(docs) == 1 # Test is imprecise since the parsers yield different parse information depending # on configuration. Each parser seems to yield a slightly different result # for this page! assert "This is a title" in docs[0].page_content metadata = docs[0].metadata assert metadata["source"] == str(FAKE_FILE) if splits_by_page: assert int(metadata["page"]) == 0 @pytest.mark.requires("xmltodict") def test_vsdx_parser() -> None: """Test the VSDX parser.""" _assert_with_parser(VsdxParser())
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/test_pdf_parsers.py
"""Tests for the various PDF parsers.""" from pathlib import Path from typing import Iterator import pytest from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob from langchain_community.document_loaders.parsers.pdf import ( PDFMinerParser, PyMuPDFParser, PyPDFium2Parser, PyPDFParser, ) _THIS_DIR = Path(__file__).parents[3] _EXAMPLES_DIR = _THIS_DIR / "examples" # Paths to test PDF files HELLO_PDF = _EXAMPLES_DIR / "hello.pdf" LAYOUT_PARSER_PAPER_PDF = _EXAMPLES_DIR / "layout-parser-paper.pdf" def _assert_with_parser(parser: BaseBlobParser, splits_by_page: bool = True) -> None: """Standard tests to verify that the given parser works. Args: parser (BaseBlobParser): The parser to test. splits_by_page (bool): Whether the parser splits by page or not by default. """ blob = Blob.from_path(HELLO_PDF) doc_generator = parser.lazy_parse(blob) assert isinstance(doc_generator, Iterator) docs = list(doc_generator) assert len(docs) == 1 page_content = docs[0].page_content assert isinstance(page_content, str) # The different parsers return different amount of whitespace, so using # startswith instead of equals. assert docs[0].page_content.startswith("Hello world!") blob = Blob.from_path(LAYOUT_PARSER_PAPER_PDF) doc_generator = parser.lazy_parse(blob) assert isinstance(doc_generator, Iterator) docs = list(doc_generator) if splits_by_page: assert len(docs) == 16 else: assert len(docs) == 1 # Test is imprecise since the parsers yield different parse information depending # on configuration. Each parser seems to yield a slightly different result # for this page! 
assert "LayoutParser" in docs[0].page_content metadata = docs[0].metadata assert metadata["source"] == str(LAYOUT_PARSER_PAPER_PDF) if splits_by_page: assert int(metadata["page"]) == 0 @pytest.mark.requires("pypdf") def test_pypdf_parser() -> None: """Test PyPDF parser.""" _assert_with_parser(PyPDFParser()) @pytest.mark.requires("pdfminer") def test_pdfminer_parser() -> None: """Test PDFMiner parser.""" # Does not follow defaults to split by page. _assert_with_parser(PDFMinerParser(), splits_by_page=False) @pytest.mark.requires("fitz") # package is PyMuPDF def test_pymupdf_loader() -> None: """Test PyMuPDF loader.""" _assert_with_parser(PyMuPDFParser()) @pytest.mark.requires("pypdfium2") def test_pypdfium2_parser() -> None: """Test PyPDFium2 parser.""" # Does not follow defaults to split by page. _assert_with_parser(PyPDFium2Parser())
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/test_html_parsers.py
"""Tests for the HTML parsers.""" from pathlib import Path import pytest from langchain_community.document_loaders.blob_loaders import Blob from langchain_community.document_loaders.parsers.html import BS4HTMLParser HERE = Path(__file__).parent EXAMPLES = HERE.parent.parent.parent / "integration_tests" / "examples" @pytest.mark.requires("bs4", "lxml") def test_bs_html_loader() -> None: """Test unstructured loader.""" file_path = EXAMPLES / "example.html" blob = Blob.from_path(file_path) parser = BS4HTMLParser(get_text_separator="|") docs = list(parser.lazy_parse(blob)) assert isinstance(docs, list) assert len(docs) == 1 metadata = docs[0].metadata content = docs[0].page_content assert metadata["title"] == "Chew dad's slippers" assert metadata["source"] == str(file_path) assert content[:2] == "\n|"
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/test_doc_intelligence.py
"""Tests for the Google Cloud DocAI parser.""" from unittest.mock import MagicMock, patch import pytest from langchain_community.document_loaders.parsers import ( AzureAIDocumentIntelligenceParser, ) @pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence") @patch("azure.ai.documentintelligence.DocumentIntelligenceClient") @patch("azure.core.credentials.AzureKeyCredential") def test_doc_intelligence(mock_credential: MagicMock, mock_client: MagicMock) -> None: endpoint = "endpoint" key = "key" parser = AzureAIDocumentIntelligenceParser(api_endpoint=endpoint, api_key=key) mock_credential.assert_called_once_with(key) mock_client.assert_called_once_with( endpoint=endpoint, credential=mock_credential(), headers={ "x-ms-useragent": "langchain-parser/1.0.0", }, features=None, ) assert parser.client == mock_client() assert parser.api_model == "prebuilt-layout" assert parser.mode == "markdown" @pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence") @patch("azure.ai.documentintelligence.DocumentIntelligenceClient") @patch("azure.core.credentials.AzureKeyCredential") def test_doc_intelligence_with_analysis_features( mock_credential: MagicMock, mock_client: MagicMock ) -> None: endpoint = "endpoint" key = "key" analysis_features = ["ocrHighResolution", "barcodes"] parser = AzureAIDocumentIntelligenceParser( api_endpoint=endpoint, api_key=key, analysis_features=analysis_features ) mock_credential.assert_called_once_with(key) mock_client.assert_called_once_with( endpoint=endpoint, credential=mock_credential(), headers={ "x-ms-useragent": "langchain-parser/1.0.0", }, features=analysis_features, ) assert parser.client == mock_client() assert parser.api_model == "prebuilt-layout" assert parser.mode == "markdown" with pytest.raises(ValueError): analysis_features = ["invalid"] parser = AzureAIDocumentIntelligenceParser( api_endpoint=endpoint, api_key=key, analysis_features=analysis_features )
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/test_generic.py
"""Module to test generic parsers.""" from typing import Iterator import pytest from langchain_core.documents import Document from langchain_community.document_loaders.base import BaseBlobParser from langchain_community.document_loaders.blob_loaders import Blob from langchain_community.document_loaders.parsers.generic import MimeTypeBasedParser class TestMimeBasedParser: """Test mime based parser.""" def test_without_fallback_parser(self) -> None: class FirstCharParser(BaseBlobParser): def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Extract the first character of a blob.""" yield Document(page_content=blob.as_string()[0]) class SecondCharParser(BaseBlobParser): def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Extract the second character of a blob.""" yield Document(page_content=blob.as_string()[1]) parser = MimeTypeBasedParser( handlers={ "text/plain": FirstCharParser(), "text/html": SecondCharParser(), }, ) blob = Blob(data=b"Hello World", mimetype="text/plain") docs = parser.parse(blob) assert len(docs) == 1 doc = docs[0] assert doc.page_content == "H" # Check text/html handler. blob = Blob(data=b"Hello World", mimetype="text/html") docs = parser.parse(blob) assert len(docs) == 1 doc = docs[0] assert doc.page_content == "e" blob = Blob(data=b"Hello World", mimetype="text/csv") with pytest.raises(ValueError, match="Unsupported mime type"): # Check that the fallback parser is used when the mimetype is not found. 
parser.parse(blob) def test_with_fallback_parser(self) -> None: class FirstCharParser(BaseBlobParser): def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Extract the first character of a blob.""" yield Document(page_content=blob.as_string()[0]) class SecondCharParser(BaseBlobParser): def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Extract the second character of a blob.""" yield Document(page_content=blob.as_string()[1]) class ThirdCharParser(BaseBlobParser): def lazy_parse(self, blob: Blob) -> Iterator[Document]: """Extract the third character of a blob.""" yield Document(page_content=blob.as_string()[2]) parser = MimeTypeBasedParser( handlers={ "text/plain": FirstCharParser(), "text/html": SecondCharParser(), }, fallback_parser=ThirdCharParser(), ) blob = Blob(data=b"Hello World", mimetype="text/plain") docs = parser.parse(blob) assert len(docs) == 1 doc = docs[0] assert doc.page_content == "H" # Check text/html handler. blob = Blob(data=b"Hello World", mimetype="text/html") docs = parser.parse(blob) assert len(docs) == 1 doc = docs[0] assert doc.page_content == "e" # Check that the fallback parser is used when the mimetype is not found. blob = Blob(data=b"Hello World", mimetype="text/csv") docs = parser.parse(blob) assert len(docs) == 1 doc = docs[0] assert doc.page_content == "l"
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/test_azure_whisper_parser.py
"""Tests for the Azure OpenAI Whisper parser.""" from pathlib import Path from typing import Any from unittest.mock import Mock, patch import pytest from langchain_core.documents import Document from langchain_core.documents.base import Blob from langchain_community.document_loaders.parsers.audio import AzureOpenAIWhisperParser _THIS_DIR = Path(__file__).parents[3] _EXAMPLES_DIR = _THIS_DIR / "examples" AUDIO_M4A = _EXAMPLES_DIR / "hello_world.m4a" @pytest.mark.requires("openai") @patch("openai.AzureOpenAI") def test_azure_openai_whisper(mock_client: Mock) -> None: endpoint = "endpoint" key = "key" version = "115" name = "model" parser = AzureOpenAIWhisperParser( api_key=key, azure_endpoint=endpoint, api_version=version, deployment_name=name ) mock_client.assert_called_once_with( api_key=key, azure_endpoint=endpoint, api_version=version, max_retries=3, azure_ad_token=None, ) assert parser._client == mock_client() @pytest.mark.requires("openai") def test_is_openai_v1_lazy_parse(mocker: Any) -> None: endpoint = "endpoint" key = "key" version = "115" name = "model" mock_blob = mocker.Mock(spec=Blob) mock_blob.path = AUDIO_M4A mock_blob.source = "test_source" mock_openai_client = mocker.Mock() mock_openai_client.audio.transcriptions.create.return_value = mocker.Mock() mock_openai_client.audio.transcriptions.create.return_value.text = ( "Transcribed text" ) mocker.patch("langchain_community.utils.openai.is_openai_v1", return_value=True) parser = AzureOpenAIWhisperParser( api_key=key, azure_endpoint=endpoint, api_version=version, deployment_name=name ) parser._client = mock_openai_client result = list(parser.lazy_parse(mock_blob)) assert len(result) == 1 assert isinstance(result[0], Document) assert result[0].page_content == "Transcribed text" assert result[0].metadata["source"] == "test_source" @pytest.mark.requires("openai") def test_is_not_openai_v1_lazy_parse(mocker: Any) -> None: endpoint = "endpoint" key = "key" version = "115" name = "model" mock_blob = 
mocker.Mock(spec=Blob) mock_blob.path = AUDIO_M4A mock_blob.source = "test_source" mock_openai_client = mocker.Mock() mock_openai_client.audio.transcriptions.create.return_value = mocker.Mock() mock_openai_client.audio.transcriptions.create.return_value.text = ( "Transcribed text" ) mocker.patch("langchain_community.utils.openai.is_openai_v1", return_value=False) parser = AzureOpenAIWhisperParser( api_key=key, azure_endpoint=endpoint, api_version=version, deployment_name=name ) parser._client = mock_openai_client result = list(parser.lazy_parse(mock_blob)) assert len(result) == 1 assert isinstance(result[0], Document) assert result[0].page_content == "Transcribed text" assert result[0].metadata["source"] == "test_source"
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_typescript.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.typescript import (
    TypeScriptSegmenter,
)


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestTypeScriptSegmenter(unittest.TestCase):
    """Behavioral tests for the TypeScript code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """function foo(): number
{
    return 1;
}

class Autumn
{
    leafCount = 45;
    reduceTemperature(desiredTemperature: number): number {
        return desiredTemperature * 0.6;
    }
}

interface Season
{
    change(): void;
}

enum Colors
{
    Green = 'green',
    Red = 'red',
}
"""

        self.expected_simplified_code = """// Code for: function foo(): number

// Code for: class Autumn

// Code for: interface Season

// Code for: enum Colors"""

        self.expected_extracted_code = [
            "function foo(): number\n{\n    return 1;\n}",
            "class Autumn\n{\n    leafCount = 45;\n    "
            "reduceTemperature(desiredTemperature: number): number {\n        "
            "return desiredTemperature * 0.6;\n    }\n}",
            "interface Season\n{\n    change(): void;\n}",
            "enum Colors\n{\n    Green = 'green',\n    Red = 'red',\n}",
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable TypeScript and rejects gibberish."""
        self.assertTrue(TypeScriptSegmenter("let a;").is_valid())
        self.assertFalse(TypeScriptSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim."""
        segmenter = TypeScriptSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '// Code for:' placeholder comments."""
        segmenter = TypeScriptSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_scala.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.scala import ScalaSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestScalaSegmenter(unittest.TestCase):
    """Behavioral tests for the Scala code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """def foo() {
    return 1
}

object T {
    def baz() {
        val x = 1
    }
}

class S() {

}

trait T {
    def P(x: Any): Boolean
}"""

        self.expected_simplified_code = """// Code for: def foo() {

// Code for: object T {

// Code for: class S() {

// Code for: trait T {"""

        self.expected_extracted_code = [
            "def foo() {\n    return 1\n}",
            "object T {\n    def baz() {\n        val x = 1\n    }\n}",
            "class S() {\n\n}",
            "trait T {\n    def P(x: Any): Boolean\n}",
        ]

    def test_is_valid(self) -> None:
        """is_valid rejects malformed input."""
        # NOTE(review): both inputs are asserted invalid — confirm "val x"
        # failing validation is intended and not a grammar quirk.
        self.assertFalse(ScalaSegmenter("val x").is_valid())
        self.assertFalse(ScalaSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim."""
        segmenter = ScalaSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '// Code for:' placeholder comments."""
        segmenter = ScalaSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_go.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.go import GoSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestGoSegmenter(unittest.TestCase):
    """Behavioral tests for the Go code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """func foo(a int) int {
    return a;
}

type T struct {
    a int
    b bool
    c string
}

type S interface {
    bar() float64
}
"""

        self.expected_simplified_code = """// Code for: func foo(a int) int {

// Code for: type T struct {

// Code for: type S interface {"""

        self.expected_extracted_code = [
            "func foo(a int) int {\n    return a;\n}",
            "type T struct {\n    a int\n    b bool\n    c string\n}",
            "type S interface {\n    bar() float64\n}",
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable Go and rejects gibberish."""
        self.assertTrue(GoSegmenter("var a int;").is_valid())
        self.assertFalse(GoSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim."""
        segmenter = GoSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '// Code for:' placeholder comments."""
        segmenter = GoSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_cpp.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.cpp import CPPSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestCPPSegmenter(unittest.TestCase):
    """Behavioral tests for the C++ code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """int foo() {
    return 1;
}

class T {
    auto bar() const -> int;
    template<class U>
    void baz(U) {
    }
};

struct S {
};

union U {
};

auto T::bar() const -> int {
    return 1;
}"""

        self.expected_simplified_code = """// Code for: int foo() {

// Code for: class T {

// Code for: struct S {

// Code for: union U {

// Code for: auto T::bar() const -> int {"""

        # Note: extracted bodies drop the trailing ';' after class/struct/union.
        self.expected_extracted_code = [
            "int foo() {\n    return 1;\n}",
            "class T {\n    auto bar() const -> int;\n    "
            "template<class U>\n    void baz(U) {\n    }\n}",
            "struct S {\n}",
            "union U {\n}",
            "auto T::bar() const -> int {\n    return 1;\n}",
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable C++ and rejects gibberish."""
        self.assertTrue(CPPSegmenter("int a;").is_valid())
        self.assertFalse(CPPSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim."""
        segmenter = CPPSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '// Code for:' placeholder comments."""
        segmenter = CPPSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_ruby.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.ruby import RubySegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestRubySegmenter(unittest.TestCase):
    """Behavioral tests for the Ruby code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """def foo
  i = 0
end

module M
  def hi
    i = 2
  end
end

class T
  def bar
    j = 1
  end
end"""

        self.expected_simplified_code = """# Code for: def foo

# Code for: module M

# Code for: class T"""

        self.expected_extracted_code = [
            "def foo\n  i = 0\nend",
            "module M\n  def hi\n    i = 2\n  end\nend",
            "class T\n  def bar\n    j = 1\n  end\nend",
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable Ruby and rejects gibberish."""
        self.assertTrue(RubySegmenter("def a; end").is_valid())
        self.assertFalse(RubySegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim."""
        segmenter = RubySegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '# Code for:' placeholder comments."""
        segmenter = RubySegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_php.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.php import PHPSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestPHPSegmenter(unittest.TestCase):
    """Behavioral tests for the PHP code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """<?php
namespace foo;
class Hello {
    public function __construct() { }
}
function hello() {
    echo "Hello World!";
}
interface Human {
    public function breath();
}
trait Foo { }
enum Color
{
    case Red;
    case Blue;
}"""

        # The leading '<?php' tag is preserved in the simplified output.
        self.expected_simplified_code = """<?php
// Code for: namespace foo;

// Code for: class Hello {

// Code for: function hello() {

// Code for: interface Human {

// Code for: trait Foo { }

// Code for: enum Color"""

        self.expected_extracted_code = [
            "namespace foo;",
            "class Hello {\n    public function __construct() { }\n}",
            'function hello() {\n    echo "Hello World!";\n}',
            "interface Human {\n    public function breath();\n}",
            "trait Foo { }",
            "enum Color\n{\n    case Red;\n    case Blue;\n}",
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable PHP and rejects gibberish."""
        self.assertTrue(PHPSegmenter("<?php $a = 0;").is_valid())
        self.assertFalse(PHPSegmenter("<?php a ?b}+ c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim."""
        segmenter = PHPSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '// Code for:' placeholder comments."""
        segmenter = PHPSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_rust.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.rust import RustSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestRustSegmenter(unittest.TestCase):
    """Behavioral tests for the Rust code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """fn foo() -> i32 {
    return 1;
}

struct T {
    a: i32,
    b: bool,
    c: String
}

trait S {
    fn bar() -> Self
}
"""

        self.expected_simplified_code = """// Code for: fn foo() -> i32 {

// Code for: struct T {

// Code for: trait S {"""

        self.expected_extracted_code = [
            "fn foo() -> i32 {\n    return 1;\n}",
            "struct T {\n    a: i32,\n    b: bool,\n    c: String\n}",
            "trait S {\n    fn bar() -> Self\n}",
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable Rust and rejects gibberish."""
        self.assertTrue(RustSegmenter("let a: i32;").is_valid())
        self.assertFalse(RustSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim."""
        segmenter = RustSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '// Code for:' placeholder comments."""
        segmenter = RustSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_javascript.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.javascript import (
    JavaScriptSegmenter,
)


@pytest.mark.requires("esprima")
class TestJavaScriptSegmenter(unittest.TestCase):
    """Behavioral tests for the JavaScript code segmenter (esprima-based)."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """const os = require('os');

function hello(text) {
    console.log(text);
}

class Simple {
    constructor() {
        this.a = 1;
    }
}

hello("Hello!");"""

        # Non-definition statements (require, call) are kept as-is.
        self.expected_simplified_code = """const os = require('os');

// Code for: function hello(text) {

// Code for: class Simple {

hello("Hello!");"""

        self.expected_extracted_code = [
            "function hello(text) {\n    console.log(text);\n}",
            "class Simple {\n    constructor() {\n        this.a = 1;\n    }\n}",
        ]

    def test_extract_functions_classes(self) -> None:
        """Each function/class definition is extracted verbatim."""
        segmenter = JavaScriptSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '// Code for:' placeholder comments."""
        segmenter = JavaScriptSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_cobol.py
from langchain_community.document_loaders.parsers.language.cobol import CobolSegmenter

# Minimal COBOL program exercising both the data and procedure divisions.
EXAMPLE_CODE = """
IDENTIFICATION DIVISION.
PROGRAM-ID. SampleProgram.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 SAMPLE-VAR PIC X(20) VALUE 'Sample Value'.

PROCEDURE DIVISION.
A000-INITIALIZE-PARA.
    DISPLAY 'Initialization Paragraph'.
    MOVE 'New Value' TO SAMPLE-VAR.

A100-PROCESS-PARA.
    DISPLAY SAMPLE-VAR.
    STOP RUN.
"""


def test_extract_functions_classes() -> None:
    """Test that functions and classes are extracted correctly."""
    expected_paragraphs = [
        "A000-INITIALIZE-PARA.\n    "
        "DISPLAY 'Initialization Paragraph'.\n    "
        "MOVE 'New Value' TO SAMPLE-VAR.",
        "A100-PROCESS-PARA.\n    DISPLAY SAMPLE-VAR.\n    STOP RUN.",
    ]
    assert CobolSegmenter(EXAMPLE_CODE).extract_functions_classes() == (
        expected_paragraphs
    )


def test_simplify_code() -> None:
    """Test that code is simplified correctly."""
    # Paragraph bodies are replaced with an OMITTED CODE marker while the
    # division/section headers and paragraph names remain visible.
    expected_lines = [
        "IDENTIFICATION DIVISION.",
        "PROGRAM-ID. SampleProgram.",
        "DATA DIVISION.",
        "WORKING-STORAGE SECTION.",
        "* OMITTED CODE *",
        "PROCEDURE DIVISION.",
        "A000-INITIALIZE-PARA.",
        "* OMITTED CODE *",
        "A100-PROCESS-PARA.",
        "* OMITTED CODE *",
        "",
    ]
    expected_simplified_code = "\n".join(expected_lines)
    simplified = CobolSegmenter(EXAMPLE_CODE).simplify_code()
    assert simplified.strip() == expected_simplified_code.strip()
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_c.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.c import CSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestCSegmenter(unittest.TestCase):
    """Behavioral tests for the C code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """int main() {
    return 0;
}

struct S {
};

union U {
};

enum Evens {
    Two = 2,
    Four = 4
};"""

        self.expected_simplified_code = """// Code for: int main() {

// Code for: struct S {

// Code for: union U {

// Code for: enum Evens {"""

        # Note: extracted bodies drop the trailing ';' after struct/union/enum.
        self.expected_extracted_code = [
            "int main() {\n    return 0;\n}",
            "struct S {\n}",
            "union U {\n}",
            "enum Evens {\n    Two = 2,\n    Four = 4\n}",
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable C and rejects gibberish."""
        self.assertTrue(CSegmenter("int a;").is_valid())
        self.assertFalse(CSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim."""
        segmenter = CSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Definitions collapse to '// Code for:' placeholder comments."""
        segmenter = CSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_lua.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.lua import LuaSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestLuaSegmenter(unittest.TestCase):
    """Behavioral tests for the Lua code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """function F()
    print("Hello")
end

local function G()
    print("Goodbye")
end"""

        self.expected_simplified_code = """-- Code for: function F()

-- Code for: local function G()"""

        self.expected_extracted_code = [
            'function F()\n    print("Hello")\nend',
            'local function G()\n    print("Goodbye")\nend',
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable Lua and rejects gibberish."""
        self.assertTrue(LuaSegmenter("local a").is_valid())
        self.assertFalse(LuaSegmenter("a b c 1 2 3").is_valid())

    # TODO: Investigate flakey-ness.
    @pytest.mark.skip(
        reason=(
            "Flakey. To be investigated. See "
            "https://github.com/langchain-ai/langchain/actions/runs/7907779756/job/21585580650."
        )
    )
    def test_extract_functions_classes(self) -> None:
        """Each top-level definition is extracted verbatim (currently skipped)."""
        segmenter = LuaSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    # TODO: Investigate flakey-ness.
    @pytest.mark.skip(
        reason=(
            "Flakey. To be investigated. See "
            "https://github.com/langchain-ai/langchain/actions/runs/7923203031/job/21632416298?pr=17599 "  # noqa: E501
            "and https://github.com/langchain-ai/langchain/actions/runs/7923784089/job/2163420864."
        )
    )
    def test_simplify_code(self) -> None:
        """Definitions collapse to '-- Code for:' placeholder comments (currently skipped)."""
        segmenter = LuaSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_perl.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.perl import PerlSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestPerlSegmenter(unittest.TestCase):
    """Behavioral tests for the Perl code segmenter."""

    def setUp(self) -> None:
        """Define the shared sample source and the outputs the segmenter must produce."""
        self.example_code = """sub Hello {
    print "Hello, World!";
}

sub new {
    my $class = shift;
    my $self = {};
    bless $self, $class;
    return $self;
}"""

        self.expected_simplified_code = """# Code for: sub Hello {

# Code for: sub new {"""

        self.expected_extracted_code = [
            'sub Hello {\n    print "Hello, World!";\n}',
            "sub new {\n    my $class = shift;\n    my $self = {};\n    "
            "bless $self, $class;\n    return $self;\n}",
        ]

    def test_is_valid(self) -> None:
        """is_valid accepts parsable Perl and rejects gibberish."""
        self.assertTrue(PerlSegmenter("$age = 25;").is_valid())
        self.assertFalse(PerlSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """Each subroutine is extracted verbatim."""
        segmenter = PerlSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Subroutines collapse to '# Code for:' placeholder comments."""
        segmenter = PerlSegmenter(self.example_code)
        simplified_code = segmenter.simplify_code()
        self.assertEqual(simplified_code, self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_kotlin.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.kotlin import KotlinSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestKotlinSegmenter(unittest.TestCase):
    """Tests for the Kotlin source-code segmenter."""

    def setUp(self) -> None:
        # Sample program: a function, a class, an interface, and an enum.
        self.example_code = """fun foo(a: Int): Int {
    return a
}

class T {
    var a: Int = 0
    var b: Boolean = false
    var c: String = ""
}

interface S {
    fun bar(): Double
}

enum class P {
    A,
    B,
    C
}
"""

        # Each top-level declaration is reduced to a stub comment.
        self.expected_simplified_code = """// Code for: fun foo(a: Int): Int {

// Code for: class T {

// Code for: interface S {

// Code for: enum class P {"""

        # Each top-level declaration is returned verbatim.
        self.expected_extracted_code = [
            "fun foo(a: Int): Int {\n    return a\n}",
            "class T {\n    var a: Int = 0\n    var b: Boolean = false\n    "
            'var c: String = ""\n}',
            "interface S {\n    fun bar(): Double\n}",
            "enum class P {\n    A,\n    B,\n    C\n}",
        ]

    def test_is_valid(self) -> None:
        """Valid Kotlin is accepted; token soup is rejected."""
        self.assertTrue(KotlinSegmenter("val a: Int = 5").is_valid())
        self.assertFalse(KotlinSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """All four declarations are extracted, in source order."""
        segmenter = KotlinSegmenter(self.example_code)
        self.assertEqual(
            segmenter.extract_functions_classes(), self.expected_extracted_code
        )

    def test_simplify_code(self) -> None:
        """Declaration bodies collapse to '// Code for:' stubs."""
        segmenter = KotlinSegmenter(self.example_code)
        self.assertEqual(segmenter.simplify_code(), self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_elixir.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.elixir import ElixirSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestElixirSegmenter(unittest.TestCase):
    """Tests for the Elixir source-code segmenter."""

    def setUp(self) -> None:
        # Sample program: a @doc attribute, a bare function, and a module
        # containing a public and a private function.
        self.example_code = """@doc "some comment"
def foo do
  i = 0
end

defmodule M do
  def hi do
    i = 2
  end

  defp wave do
    :ok
  end
end"""

        # Each top-level construct is reduced to a stub comment.
        self.expected_simplified_code = """# Code for: @doc "some comment"

# Code for: def foo do

# Code for: defmodule M do"""

        # Each top-level construct is returned verbatim.
        self.expected_extracted_code = [
            '@doc "some comment"',
            "def foo do\n  i = 0\nend",
            "defmodule M do\n"
            "  def hi do\n"
            "    i = 2\n"
            "  end\n\n"
            "  defp wave do\n"
            "    :ok\n"
            "  end\n"
            "end",
        ]

    def test_is_valid(self) -> None:
        """Valid Elixir is accepted; token soup is rejected."""
        self.assertTrue(ElixirSegmenter("def a do; end").is_valid())
        self.assertFalse(ElixirSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """All three constructs are extracted, in source order."""
        segmenter = ElixirSegmenter(self.example_code)
        extracted_code = segmenter.extract_functions_classes()
        self.assertEqual(len(extracted_code), 3)
        self.assertEqual(extracted_code, self.expected_extracted_code)

    def test_simplify_code(self) -> None:
        """Construct bodies collapse to '# Code for:' stubs."""
        segmenter = ElixirSegmenter(self.example_code)
        self.assertEqual(segmenter.simplify_code(), self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_java.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.java import JavaSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestJavaSegmenter(unittest.TestCase):
    """Tests for the Java source-code segmenter."""

    def setUp(self) -> None:
        # Sample program: a class, an interface, and an enum.
        self.example_code = """class Hello
{
    public static void main(String[] args)
    {
        System.out.println("Hello, world.");
    }
}

interface Human
{
    void breathe();
}

enum Tens
{
    TEN,
    TWENTY
}
"""

        # Each top-level declaration is reduced to a stub comment.
        self.expected_simplified_code = """// Code for: class Hello

// Code for: interface Human

// Code for: enum Tens"""

        # Each top-level declaration is returned verbatim.
        self.expected_extracted_code = [
            "class Hello\n{\n    "
            "public static void main(String[] args)\n    {\n        "
            'System.out.println("Hello, world.");\n    }\n}',
            "interface Human\n{\n    void breathe();\n}",
            "enum Tens\n{\n    TEN,\n    TWENTY\n}",
        ]

    def test_is_valid(self) -> None:
        """Valid Java is accepted; token soup is rejected."""
        self.assertTrue(JavaSegmenter("int a;").is_valid())
        self.assertFalse(JavaSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """All three declarations are extracted, in source order."""
        segmenter = JavaSegmenter(self.example_code)
        self.assertEqual(
            segmenter.extract_functions_classes(), self.expected_extracted_code
        )

    def test_simplify_code(self) -> None:
        """Declaration bodies collapse to '// Code for:' stubs."""
        segmenter = JavaSegmenter(self.example_code)
        self.assertEqual(segmenter.simplify_code(), self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_csharp.py
import unittest

import pytest

from langchain_community.document_loaders.parsers.language.csharp import CSharpSegmenter


@pytest.mark.requires("tree_sitter", "tree_sitter_languages")
class TestCSharpSegmenter(unittest.TestCase):
    """Tests for the C# source-code segmenter."""

    def setUp(self) -> None:
        # Sample program covering every declaration kind the segmenter
        # handles: namespace, class, interface, enum, struct, record.
        self.example_code = """namespace World
{
}

class Hello
{
    static void Main(string []args)
    {
        System.Console.WriteLine("Hello, world.");
    }
}

interface Human
{
    void breathe();
}

enum Tens
{
    Ten = 10,
    Twenty = 20
}

struct T
{
}

record Person(string FirstName, string LastName, string Id)
{
    internal string Id { get; init; } = Id;
}"""

        # Each top-level declaration is reduced to a stub comment.
        self.expected_simplified_code = """// Code for: namespace World

// Code for: class Hello

// Code for: interface Human

// Code for: enum Tens

// Code for: struct T

// Code for: record Person(string FirstName, string LastName, string Id)"""

        # Each top-level declaration is returned verbatim.
        self.expected_extracted_code = [
            "namespace World\n{\n}",
            "class Hello\n{\n    static void Main(string []args)\n    {\n        "
            'System.Console.WriteLine("Hello, world.");\n    }\n}',
            "interface Human\n{\n    void breathe();\n}",
            "enum Tens\n{\n    Ten = 10,\n    Twenty = 20\n}",
            "struct T\n{\n}",
            "record Person(string FirstName, string LastName, string Id)\n{\n    "
            "internal string Id { get; init; } = Id;\n}",
        ]

    def test_is_valid(self) -> None:
        """Valid C# is accepted; token soup is rejected."""
        self.assertTrue(CSharpSegmenter("int a;").is_valid())
        self.assertFalse(CSharpSegmenter("a b c 1 2 3").is_valid())

    def test_extract_functions_classes(self) -> None:
        """All six declarations are extracted, in source order."""
        segmenter = CSharpSegmenter(self.example_code)
        self.assertEqual(
            segmenter.extract_functions_classes(), self.expected_extracted_code
        )

    def test_simplify_code(self) -> None:
        """Declaration bodies collapse to '// Code for:' stubs."""
        segmenter = CSharpSegmenter(self.example_code)
        self.assertEqual(segmenter.simplify_code(), self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers
lc_public_repos/langchain/libs/community/tests/unit_tests/document_loaders/parsers/language/test_python.py
import unittest

from langchain_community.document_loaders.parsers.language.python import PythonSegmenter


class TestPythonSegmenter(unittest.TestCase):
    """Tests for the Python source-code segmenter."""

    def setUp(self) -> None:
        # Sample program: an import, a function, a class, and a call.
        self.example_code = """import os

def hello(text):
    print(text)

class Simple:
    def __init__(self):
        self.a = 1

hello("Hello!")"""

        # Only the function and class are stubbed; other statements survive.
        self.expected_simplified_code = """import os

# Code for: def hello(text):

# Code for: class Simple:

hello("Hello!")"""

        # The function and the class are returned verbatim.
        self.expected_extracted_code = [
            "def hello(text):\n    print(text)",
            "class Simple:\n    def __init__(self):\n        self.a = 1",
        ]

    def test_extract_functions_classes(self) -> None:
        """The function and class are extracted, in source order."""
        segmenter = PythonSegmenter(self.example_code)
        self.assertEqual(
            segmenter.extract_functions_classes(), self.expected_extracted_code
        )

    def test_simplify_code(self) -> None:
        """Definitions collapse to '# Code for:' stubs; other code remains."""
        segmenter = PythonSegmenter(self.example_code)
        self.assertEqual(segmenter.simplify_code(), self.expected_simplified_code)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/chat_message_histories/test_sql.py
from pathlib import Path
from typing import Any, AsyncGenerator, Generator, List, Tuple

import pytest
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from sqlalchemy import Column, Integer, Text

try:
    from sqlalchemy.orm import DeclarativeBase

    class Base(DeclarativeBase):
        pass

except ImportError:
    # for sqlalchemy < 2
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()  # type:ignore

from langchain_community.chat_message_histories import SQLChatMessageHistory
from langchain_community.chat_message_histories.sql import DefaultMessageConverter


@pytest.fixture()
def con_str(tmp_path: Path) -> str:
    """Sync sqlite connection string backed by a per-test temp file."""
    db_file = tmp_path / "db.sqlite3"
    return f"sqlite:///{db_file}"


@pytest.fixture()
def acon_str(tmp_path: Path) -> str:
    """Async (aiosqlite) connection string backed by a per-test temp file."""
    db_file = tmp_path / "adb.sqlite3"
    return f"sqlite+aiosqlite:///{db_file}"


@pytest.fixture()
def sql_histories(
    con_str: str,
) -> Generator[Tuple[SQLChatMessageHistory, SQLChatMessageHistory], None, None]:
    """Yield two histories (sessions '123' and '456') sharing one table."""
    message_history = SQLChatMessageHistory(
        session_id="123", connection=con_str, table_name="test_table"
    )
    # Create history for other session
    other_history = SQLChatMessageHistory(
        session_id="456", connection=con_str, table_name="test_table"
    )

    yield message_history, other_history
    # Leave the shared table empty for the next test.
    message_history.clear()
    other_history.clear()


@pytest.fixture()
async def asql_histories(
    acon_str: str,
) -> AsyncGenerator[Tuple[SQLChatMessageHistory, SQLChatMessageHistory], None]:
    """Async variant of ``sql_histories`` using the aiosqlite driver."""
    message_history = SQLChatMessageHistory(
        session_id="123",
        connection=acon_str,
        table_name="test_table",
        async_mode=True,
        engine_args={"echo": False},
    )
    # Create history for other session
    other_history = SQLChatMessageHistory(
        session_id="456",
        connection=acon_str,
        table_name="test_table",
        async_mode=True,
        engine_args={"echo": False},
    )

    yield message_history, other_history
    await message_history.aclear()
    await other_history.aclear()


def test_add_messages(
    sql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory],
) -> None:
    """Messages written via add_messages come back typed and in order."""
    sql_history, other_history = sql_histories
    sql_history.add_messages(
        [HumanMessage(content="Hello!"), AIMessage(content="Hi there!")]
    )

    messages = sql_history.messages
    assert len(messages) == 2
    assert isinstance(messages[0], HumanMessage)
    assert isinstance(messages[1], AIMessage)
    assert messages[0].content == "Hello!"
    assert messages[1].content == "Hi there!"


@pytest.mark.requires("aiosqlite")
async def test_async_add_messages(
    asql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory],
) -> None:
    """Async counterpart of ``test_add_messages``."""
    sql_history, other_history = asql_histories
    await sql_history.aadd_messages(
        [HumanMessage(content="Hello!"), AIMessage(content="Hi there!")]
    )

    messages = await sql_history.aget_messages()
    assert len(messages) == 2
    assert isinstance(messages[0], HumanMessage)
    assert isinstance(messages[1], AIMessage)
    assert messages[0].content == "Hello!"
    assert messages[1].content == "Hi there!"


def test_multiple_sessions(
    sql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory],
) -> None:
    """Each session sees only its own messages even in a shared table."""
    sql_history, other_history = sql_histories
    sql_history.add_messages(
        [
            HumanMessage(content="Hello!"),
            AIMessage(content="Hi there!"),
            HumanMessage(content="Whats cracking?"),
        ]
    )

    # Ensure the messages are added correctly in the first session
    messages = sql_history.messages
    assert len(messages) == 3, "waat"
    assert messages[0].content == "Hello!"
    assert messages[1].content == "Hi there!"
    assert messages[2].content == "Whats cracking?"

    # second session
    other_history.add_messages([HumanMessage(content="Hellox")])
    assert len(other_history.messages) == 1
    messages = sql_history.messages
    assert len(messages) == 3
    assert other_history.messages[0].content == "Hellox"
    assert messages[0].content == "Hello!"
    assert messages[1].content == "Hi there!"
    assert messages[2].content == "Whats cracking?"
@pytest.mark.requires("aiosqlite")
async def test_async_multiple_sessions(
    asql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory],
) -> None:
    """Async variant: each session sees only its own messages."""
    sql_history, other_history = asql_histories
    await sql_history.aadd_messages(
        [
            HumanMessage(content="Hello!"),
            AIMessage(content="Hi there!"),
            HumanMessage(content="Whats cracking?"),
        ]
    )

    # Ensure the messages are added correctly in the first session
    messages: List[BaseMessage] = await sql_history.aget_messages()
    assert len(messages) == 3, "waat"
    assert messages[0].content == "Hello!"
    assert messages[1].content == "Hi there!"
    assert messages[2].content == "Whats cracking?"

    # second session
    await other_history.aadd_messages([HumanMessage(content="Hellox")])
    messages = await sql_history.aget_messages()
    assert len(await other_history.aget_messages()) == 1
    assert len(messages) == 3
    assert (await other_history.aget_messages())[0].content == "Hellox"
    assert messages[0].content == "Hello!"
    assert messages[1].content == "Hi there!"
    assert messages[2].content == "Whats cracking?"
def test_clear_messages(
    sql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory],
) -> None:
    """clear() empties one session without touching the other."""
    sql_history, other_history = sql_histories
    sql_history.add_messages(
        [HumanMessage(content="Hello!"), AIMessage(content="Hi there!")]
    )
    assert len(sql_history.messages) == 2
    # Now create another history with different session id
    other_history.add_messages([HumanMessage(content="Hellox")])
    assert len(other_history.messages) == 1
    assert len(sql_history.messages) == 2
    # Now clear the first history
    sql_history.clear()
    assert len(sql_history.messages) == 0
    assert len(other_history.messages) == 1


@pytest.mark.requires("aiosqlite")
async def test_async_clear_messages(
    asql_histories: Tuple[SQLChatMessageHistory, SQLChatMessageHistory],
) -> None:
    """Async variant: aclear() empties one session, not the other."""
    sql_history, other_history = asql_histories
    await sql_history.aadd_messages(
        [HumanMessage(content="Hello!"), AIMessage(content="Hi there!")]
    )
    assert len(await sql_history.aget_messages()) == 2
    # Now create another history with different session id
    await other_history.aadd_messages([HumanMessage(content="Hellox")])
    assert len(await other_history.aget_messages()) == 1
    assert len(await sql_history.aget_messages()) == 2
    # Now clear the first history
    await sql_history.aclear()
    assert len(await sql_history.aget_messages()) == 0
    assert len(await other_history.aget_messages()) == 1


def test_model_no_session_id_field_error(con_str: str) -> None:
    """A custom model without a session-id column is rejected."""

    class Model(Base):
        __tablename__ = "test_table"
        id = Column(Integer, primary_key=True)
        test_field = Column(Text)

    class CustomMessageConverter(DefaultMessageConverter):
        def get_sql_model_class(self) -> Any:
            return Model

    with pytest.raises(ValueError):
        SQLChatMessageHistory(
            "test",
            con_str,
            custom_message_converter=CustomMessageConverter("test_table"),
        )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/chat_message_histories/test_imports.py
from langchain_community.chat_message_histories import __all__, _module_lookup

# Curated public API surface; update deliberately when adding integrations.
EXPECTED_ALL = [
    "AstraDBChatMessageHistory",
    "CassandraChatMessageHistory",
    "ChatMessageHistory",
    "CosmosDBChatMessageHistory",
    "DynamoDBChatMessageHistory",
    "ElasticsearchChatMessageHistory",
    "FileChatMessageHistory",
    "FirestoreChatMessageHistory",
    "MomentoChatMessageHistory",
    "MongoDBChatMessageHistory",
    "Neo4jChatMessageHistory",
    "PostgresChatMessageHistory",
    "RedisChatMessageHistory",
    "RocksetChatMessageHistory",
    "SQLChatMessageHistory",
    "SingleStoreDBChatMessageHistory",
    "StreamlitChatMessageHistory",
    "TiDBChatMessageHistory",
    "UpstashRedisChatMessageHistory",
    "XataChatMessageHistory",
    "ZepChatMessageHistory",
    "ZepCloudChatMessageHistory",
    "KafkaChatMessageHistory",
]


def test_all_imports() -> None:
    """Test that __all__ is correctly set."""
    # Both the declared API and the lazy-import table must match the list.
    assert set(__all__) == set(EXPECTED_ALL)
    assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/chat_message_histories/test_file_chat_message_history.py
import tempfile
from pathlib import Path
from typing import Generator

import pytest
from langchain_core.messages import AIMessage, HumanMessage

from langchain_community.chat_message_histories import FileChatMessageHistory


@pytest.fixture
def file_chat_message_history() -> Generator[FileChatMessageHistory, None, None]:
    """History persisted to a JSON file inside a throwaway directory."""
    with tempfile.TemporaryDirectory() as temp_dir:
        file_path = Path(temp_dir) / "test_chat_history.json"
        yield FileChatMessageHistory(str(file_path))


def test_add_messages(file_chat_message_history: FileChatMessageHistory) -> None:
    """User and AI messages round-trip with correct types and order."""
    file_chat_message_history.add_user_message("Hello!")
    file_chat_message_history.add_ai_message("Hi there!")

    messages = file_chat_message_history.messages
    assert len(messages) == 2
    assert isinstance(messages[0], HumanMessage)
    assert isinstance(messages[1], AIMessage)
    assert messages[0].content == "Hello!"
    assert messages[1].content == "Hi there!"


def test_clear_messages(file_chat_message_history: FileChatMessageHistory) -> None:
    """clear() empties the persisted history."""
    file_chat_message_history.add_user_message("Hello!")
    file_chat_message_history.add_ai_message("Hi there!")
    file_chat_message_history.clear()
    assert len(file_chat_message_history.messages) == 0


def test_multiple_sessions(file_chat_message_history: FileChatMessageHistory) -> None:
    """A second handle on the same file sees the earlier conversation."""
    # First session
    file_chat_message_history.add_user_message("Hello, AI!")
    file_chat_message_history.add_ai_message("Hello, how can I help you?")
    file_chat_message_history.add_user_message("Tell me a joke.")
    file_chat_message_history.add_ai_message(
        "Why did the chicken cross the road? To get to the other side!"
    )

    expected_content = "Why did the chicken cross the road? To get to the other side!"

    # Ensure the messages are added correctly in the first session
    messages = file_chat_message_history.messages
    assert len(messages) == 4
    assert messages[0].content == "Hello, AI!"
    assert messages[1].content == "Hello, how can I help you?"
    assert messages[2].content == "Tell me a joke."
    assert messages[3].content == expected_content

    # Second session (reinitialize FileChatMessageHistory)
    second_session_chat_message_history = FileChatMessageHistory(
        file_path=str(file_chat_message_history.file_path)
    )

    # Ensure the history is maintained in the second session
    messages = second_session_chat_message_history.messages
    assert len(messages) == 4
    assert messages[0].content == "Hello, AI!"
    assert messages[1].content == "Hello, how can I help you?"
    assert messages[2].content == "Tell me a joke."
    assert messages[3].content == expected_content
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/chat_message_histories/__init__.py
"""Unit tests for chat_message_history modules"""
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/test_imports.py
from langchain_community.callbacks import __all__, _module_lookup

# Curated public API surface; update deliberately when adding integrations.
EXPECTED_ALL = [
    "AimCallbackHandler",
    "ArgillaCallbackHandler",
    "ArizeCallbackHandler",
    "PromptLayerCallbackHandler",
    "ArthurCallbackHandler",
    "ClearMLCallbackHandler",
    "CometCallbackHandler",
    "ContextCallbackHandler",
    "HumanApprovalCallbackHandler",
    "InfinoCallbackHandler",
    "MlflowCallbackHandler",
    "LLMonitorCallbackHandler",
    "OpenAICallbackHandler",
    "LLMThoughtLabeler",
    "StreamlitCallbackHandler",
    "WandbCallbackHandler",
    "WhyLabsCallbackHandler",
    "get_openai_callback",
    "wandb_tracing_enabled",
    "FlyteCallbackHandler",
    "SageMakerCallbackHandler",
    "LabelStudioCallbackHandler",
    "TrubricsCallbackHandler",
    "FiddlerCallbackHandler",
    "UpTrainCallbackHandler",
    "UpstashRatelimitError",
    "UpstashRatelimitHandler",
]


def test_all_imports() -> None:
    """Test that __all__ is correctly set."""
    # Both the declared API and the lazy-import table must match the list.
    assert set(__all__) == set(EXPECTED_ALL)
    assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/test_openai_info.py
from unittest.mock import MagicMock
from uuid import uuid4

import numpy as np
import pytest
from langchain_core.outputs import LLMResult
from langchain_core.utils.pydantic import get_fields

from langchain_community.callbacks import OpenAICallbackHandler
from langchain_community.llms.openai import BaseOpenAI


@pytest.fixture
def handler() -> OpenAICallbackHandler:
    """Fresh handler so per-test counters start at zero."""
    return OpenAICallbackHandler()


def _llm_result(model_name: str, prompt: int, completion: int) -> LLMResult:
    """Build a minimal LLMResult carrying only token usage + model name."""
    return LLMResult(
        generations=[],
        llm_output={
            "token_usage": {
                "prompt_tokens": prompt,
                "completion_tokens": completion,
                "total_tokens": prompt + completion,
            },
            "model_name": model_name,
        },
    )


def test_on_llm_end(handler: OpenAICallbackHandler) -> None:
    """The default OpenAI model accrues tokens and a nonzero cost."""
    default_model = get_fields(BaseOpenAI)["model_name"].default
    handler.on_llm_end(_llm_result(default_model, 2, 1))
    assert handler.successful_requests == 1
    assert handler.total_tokens == 3
    assert handler.prompt_tokens == 2
    assert handler.completion_tokens == 1
    assert handler.total_cost > 0


def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
    """Unknown model names accrue tokens but no cost."""
    handler.on_llm_end(_llm_result("foo-bar", 2, 1))
    assert handler.total_cost == 0


@pytest.mark.parametrize(
    "model_name, expected_cost",
    [
        ("ada:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.0032),
        ("babbage:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.0048),
        ("curie:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.024),
        ("davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.24),
        ("ft:babbage-002:your-org:custom-model-name:1abcdefg", 0.0032),
        ("ft:davinci-002:your-org:custom-model-name:1abcdefg", 0.024),
        ("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.009),
        ("babbage-002.ft-0123456789abcdefghijklmnopqrstuv", 0.0008),
        ("davinci-002.ft-0123456789abcdefghijklmnopqrstuv", 0.004),
        ("gpt-35-turbo-0613.ft-0123456789abcdefghijklmnopqrstuv", 0.0035),
    ],
)
def test_on_llm_end_finetuned_model(
    handler: OpenAICallbackHandler, model_name: str, expected_cost: float
) -> None:
    """Fine-tuned model name formats map to their expected prices."""
    handler.on_llm_end(_llm_result(model_name, 1000, 1000))
    # Float comparison: prices are sums of two per-1k rates.
    assert np.isclose(handler.total_cost, expected_cost)


@pytest.mark.parametrize(
    "model_name,expected_cost",
    [
        ("gpt-35-turbo", 0.0035),
        ("gpt-35-turbo-0301", 0.004),
        (
            "gpt-35-turbo-0613",
            0.0035,
        ),
        (
            "gpt-35-turbo-16k-0613",
            0.007,
        ),
        (
            "gpt-35-turbo-16k",
            0.007,
        ),
        ("gpt-4", 0.09),
        ("gpt-4-0314", 0.09),
        ("gpt-4-0613", 0.09),
        ("gpt-4-32k", 0.18),
        ("gpt-4-32k-0314", 0.18),
        ("gpt-4-32k-0613", 0.18),
    ],
)
def test_on_llm_end_azure_openai(
    handler: OpenAICallbackHandler, model_name: str, expected_cost: float
) -> None:
    """Azure OpenAI deployment names map to their expected prices."""
    handler.on_llm_end(_llm_result(model_name, 1000, 1000))
    assert handler.total_cost == expected_cost


@pytest.mark.parametrize(
    "model_name", ["gpt-35-turbo-16k-0301", "gpt-4-0301", "gpt-4-32k-0301"]
)
def test_on_llm_end_no_cost_invalid_model(
    handler: OpenAICallbackHandler, model_name: str
) -> None:
    """Nonexistent model/version combinations cost nothing."""
    handler.on_llm_end(_llm_result(model_name, 1000, 1000))
    assert handler.total_cost == 0


def test_on_retry_works(handler: OpenAICallbackHandler) -> None:
    """on_retry accepts a retry state and run_id without raising."""
    handler.on_retry(MagicMock(), run_id=uuid4())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/fake_callback_handler.py
"""A fake callback handler for testing purposes.""" from itertools import chain from typing import Any, Dict, List, Optional, Union from uuid import UUID from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler from langchain_core.messages import BaseMessage from pydantic import BaseModel class BaseFakeCallbackHandler(BaseModel): """Base fake callback handler for testing.""" starts: int = 0 ends: int = 0 errors: int = 0 text: int = 0 ignore_llm_: bool = False ignore_chain_: bool = False ignore_agent_: bool = False ignore_retriever_: bool = False ignore_chat_model_: bool = False # to allow for similar callback handlers that are not technically equal fake_id: Union[str, None] = None # add finer-grained counters for easier debugging of failing tests chain_starts: int = 0 chain_ends: int = 0 llm_starts: int = 0 llm_ends: int = 0 llm_streams: int = 0 tool_starts: int = 0 tool_ends: int = 0 agent_actions: int = 0 agent_ends: int = 0 chat_model_starts: int = 0 retriever_starts: int = 0 retriever_ends: int = 0 retriever_errors: int = 0 retries: int = 0 class BaseFakeCallbackHandlerMixin(BaseFakeCallbackHandler): """Base fake callback handler mixin for testing.""" def on_llm_start_common(self) -> None: self.llm_starts += 1 self.starts += 1 def on_llm_end_common(self) -> None: self.llm_ends += 1 self.ends += 1 def on_llm_error_common(self) -> None: self.errors += 1 def on_llm_new_token_common(self) -> None: self.llm_streams += 1 def on_retry_common(self) -> None: self.retries += 1 def on_chain_start_common(self) -> None: self.chain_starts += 1 self.starts += 1 def on_chain_end_common(self) -> None: self.chain_ends += 1 self.ends += 1 def on_chain_error_common(self) -> None: self.errors += 1 def on_tool_start_common(self) -> None: self.tool_starts += 1 self.starts += 1 def on_tool_end_common(self) -> None: self.tool_ends += 1 self.ends += 1 def on_tool_error_common(self) -> None: self.errors += 1 def on_agent_action_common(self) -> None: 
self.agent_actions += 1 self.starts += 1 def on_agent_finish_common(self) -> None: self.agent_ends += 1 self.ends += 1 def on_chat_model_start_common(self) -> None: self.chat_model_starts += 1 self.starts += 1 def on_text_common(self) -> None: self.text += 1 def on_retriever_start_common(self) -> None: self.starts += 1 self.retriever_starts += 1 def on_retriever_end_common(self) -> None: self.ends += 1 self.retriever_ends += 1 def on_retriever_error_common(self) -> None: self.errors += 1 self.retriever_errors += 1 class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin): """Fake callback handler for testing.""" @property def ignore_llm(self) -> bool: """Whether to ignore LLM callbacks.""" return self.ignore_llm_ @property def ignore_chain(self) -> bool: """Whether to ignore chain callbacks.""" return self.ignore_chain_ @property def ignore_agent(self) -> bool: """Whether to ignore agent callbacks.""" return self.ignore_agent_ @property def ignore_retriever(self) -> bool: """Whether to ignore retriever callbacks.""" return self.ignore_retriever_ def on_llm_start( self, *args: Any, **kwargs: Any, ) -> Any: self.on_llm_start_common() def on_llm_new_token( self, *args: Any, **kwargs: Any, ) -> Any: self.on_llm_new_token_common() def on_llm_end( self, *args: Any, **kwargs: Any, ) -> Any: self.on_llm_end_common() def on_llm_error( self, *args: Any, **kwargs: Any, ) -> Any: self.on_llm_error_common() def on_retry( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retry_common() def on_chain_start( self, *args: Any, **kwargs: Any, ) -> Any: self.on_chain_start_common() def on_chain_end( self, *args: Any, **kwargs: Any, ) -> Any: self.on_chain_end_common() def on_chain_error( self, *args: Any, **kwargs: Any, ) -> Any: self.on_chain_error_common() def on_tool_start( self, *args: Any, **kwargs: Any, ) -> Any: self.on_tool_start_common() def on_tool_end( self, *args: Any, **kwargs: Any, ) -> Any: self.on_tool_end_common() def on_tool_error( self, *args: 
Any, **kwargs: Any, ) -> Any: self.on_tool_error_common() def on_agent_action( self, *args: Any, **kwargs: Any, ) -> Any: self.on_agent_action_common() def on_agent_finish( self, *args: Any, **kwargs: Any, ) -> Any: self.on_agent_finish_common() def on_text( self, *args: Any, **kwargs: Any, ) -> Any: self.on_text_common() def on_retriever_start( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retriever_start_common() def on_retriever_end( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retriever_end_common() def on_retriever_error( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retriever_error_common() def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler": # type: ignore return self class FakeCallbackHandlerWithChatStart(FakeCallbackHandler): def on_chat_model_start( self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> Any: assert all(isinstance(m, BaseMessage) for m in chain(*messages)) self.on_chat_model_start_common() class FakeAsyncCallbackHandler(AsyncCallbackHandler, BaseFakeCallbackHandlerMixin): """Fake async callback handler for testing.""" @property def ignore_llm(self) -> bool: """Whether to ignore LLM callbacks.""" return self.ignore_llm_ @property def ignore_chain(self) -> bool: """Whether to ignore chain callbacks.""" return self.ignore_chain_ @property def ignore_agent(self) -> bool: """Whether to ignore agent callbacks.""" return self.ignore_agent_ async def on_retry( self, *args: Any, **kwargs: Any, ) -> Any: self.on_retry_common() async def on_llm_start( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_start_common() async def on_llm_new_token( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_new_token_common() async def on_llm_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_end_common() async def on_llm_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_llm_error_common() async def on_chain_start( self, 
*args: Any, **kwargs: Any, ) -> None: self.on_chain_start_common() async def on_chain_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_chain_end_common() async def on_chain_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_chain_error_common() async def on_tool_start( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_start_common() async def on_tool_end( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_end_common() async def on_tool_error( self, *args: Any, **kwargs: Any, ) -> None: self.on_tool_error_common() async def on_agent_action( self, *args: Any, **kwargs: Any, ) -> None: self.on_agent_action_common() async def on_agent_finish( self, *args: Any, **kwargs: Any, ) -> None: self.on_agent_finish_common() async def on_text( self, *args: Any, **kwargs: Any, ) -> None: self.on_text_common() def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler": # type: ignore return self
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/test_streamlit_callback.py
import builtins import unittest from typing import Any from unittest import mock from unittest.mock import MagicMock from langchain_community.callbacks.streamlit import StreamlitCallbackHandler class TestImport(unittest.TestCase): """Test the StreamlitCallbackHandler 'auto-updating' API""" def setUp(self) -> None: self.builtins_import = builtins.__import__ def tearDown(self) -> None: builtins.__import__ = self.builtins_import @mock.patch( "langchain_community.callbacks.streamlit._InternalStreamlitCallbackHandler" ) def test_create_internal_handler(self, mock_internal_handler: Any) -> None: """If we're using a Streamlit that does not expose its own StreamlitCallbackHandler, use our own implementation. """ def external_import_error( name: str, globals: Any, locals: Any, fromlist: Any, level: int ) -> Any: if name == "streamlit.external.langchain": raise ImportError return self.builtins_import(name, globals, locals, fromlist, level) builtins.__import__ = external_import_error # type: ignore[assignment] parent_container = MagicMock() thought_labeler = MagicMock() StreamlitCallbackHandler( parent_container, max_thought_containers=1, expand_new_thoughts=True, collapse_completed_thoughts=False, thought_labeler=thought_labeler, ) # Our internal handler should be created mock_internal_handler.assert_called_once_with( parent_container, max_thought_containers=1, expand_new_thoughts=True, collapse_completed_thoughts=False, thought_labeler=thought_labeler, ) def test_create_external_handler(self) -> None: """If we're using a Streamlit that *does* expose its own callback handler, delegate to that implementation. 
""" mock_streamlit_module = MagicMock() def external_import_success( name: str, globals: Any, locals: Any, fromlist: Any, level: int ) -> Any: if name == "streamlit.external.langchain": return mock_streamlit_module return self.builtins_import(name, globals, locals, fromlist, level) builtins.__import__ = external_import_success # type: ignore[assignment] parent_container = MagicMock() thought_labeler = MagicMock() StreamlitCallbackHandler( parent_container, max_thought_containers=1, expand_new_thoughts=True, collapse_completed_thoughts=False, thought_labeler=thought_labeler, ) # Streamlit's handler should be created mock_streamlit_module.StreamlitCallbackHandler.assert_called_once_with( parent_container, max_thought_containers=1, expand_new_thoughts=True, collapse_completed_thoughts=False, thought_labeler=thought_labeler, )
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/test_upstash_ratelimit_callback.py
import logging from typing import Any from unittest.mock import create_autospec import pytest from langchain_core.outputs import LLMResult from langchain_community.callbacks import UpstashRatelimitError, UpstashRatelimitHandler logger = logging.getLogger(__name__) try: from upstash_ratelimit import Ratelimit, Response except ImportError: Ratelimit, Response = None, None # Fixtures @pytest.fixture def request_ratelimit() -> Ratelimit: ratelimit = create_autospec(Ratelimit) response = Response(allowed=True, limit=10, remaining=10, reset=10000) ratelimit.limit.return_value = response return ratelimit @pytest.fixture def token_ratelimit() -> Ratelimit: ratelimit = create_autospec(Ratelimit) response = Response(allowed=True, limit=1000, remaining=1000, reset=10000) ratelimit.limit.return_value = response ratelimit.get_remaining.return_value = 1000 return ratelimit @pytest.fixture def handler_with_both_limits( request_ratelimit: Ratelimit, token_ratelimit: Ratelimit ) -> UpstashRatelimitHandler: return UpstashRatelimitHandler( identifier="user123", token_ratelimit=token_ratelimit, request_ratelimit=request_ratelimit, include_output_tokens=False, ) # Tests @pytest.mark.requires("upstash_ratelimit") def test_init_no_limits() -> None: with pytest.raises(ValueError): UpstashRatelimitHandler(identifier="user123") @pytest.mark.requires("upstash_ratelimit") def test_init_request_limit_only(request_ratelimit: Ratelimit) -> None: handler = UpstashRatelimitHandler( identifier="user123", request_ratelimit=request_ratelimit ) assert handler.request_ratelimit is not None assert handler.token_ratelimit is None @pytest.mark.requires("upstash_ratelimit") def test_init_token_limit_only(token_ratelimit: Ratelimit) -> None: handler = UpstashRatelimitHandler( identifier="user123", token_ratelimit=token_ratelimit ) assert handler.token_ratelimit is not None assert handler.request_ratelimit is None @pytest.mark.requires("upstash_ratelimit") def 
test_on_chain_start_request_limit(handler_with_both_limits: Any) -> None: handler_with_both_limits.on_chain_start(serialized={}, inputs={}) handler_with_both_limits.request_ratelimit.limit.assert_called_once_with("user123") handler_with_both_limits.token_ratelimit.limit.assert_not_called() @pytest.mark.requires("upstash_ratelimit") def test_on_chain_start_request_limit_reached(request_ratelimit: Any) -> None: request_ratelimit.limit.return_value = Response( allowed=False, limit=10, remaining=0, reset=10000 ) handler = UpstashRatelimitHandler( identifier="user123", token_ratelimit=None, request_ratelimit=request_ratelimit ) with pytest.raises(UpstashRatelimitError): handler.on_chain_start(serialized={}, inputs={}) @pytest.mark.requires("upstash_ratelimit") def test_on_llm_start_token_limit_reached(token_ratelimit: Any) -> None: token_ratelimit.get_remaining.return_value = 0 handler = UpstashRatelimitHandler( identifier="user123", token_ratelimit=token_ratelimit, request_ratelimit=None ) with pytest.raises(UpstashRatelimitError): handler.on_llm_start(serialized={}, prompts=["test"]) @pytest.mark.requires("upstash_ratelimit") def test_on_llm_start_token_limit_reached_negative(token_ratelimit: Any) -> None: token_ratelimit.get_remaining.return_value = -10 handler = UpstashRatelimitHandler( identifier="user123", token_ratelimit=token_ratelimit, request_ratelimit=None ) with pytest.raises(UpstashRatelimitError): handler.on_llm_start(serialized={}, prompts=["test"]) @pytest.mark.requires("upstash_ratelimit") def test_on_llm_end_with_token_limit(handler_with_both_limits: Any) -> None: response = LLMResult( generations=[], llm_output={ "token_usage": { "prompt_tokens": 2, "completion_tokens": 3, "total_tokens": 5, } }, ) handler_with_both_limits.on_llm_end(response) handler_with_both_limits.token_ratelimit.limit.assert_called_once_with("user123", 2) @pytest.mark.requires("upstash_ratelimit") def test_on_llm_end_with_token_limit_include_output_tokens( token_ratelimit: Any, ) 
-> None: handler = UpstashRatelimitHandler( identifier="user123", token_ratelimit=token_ratelimit, request_ratelimit=None, include_output_tokens=True, ) response = LLMResult( generations=[], llm_output={ "token_usage": { "prompt_tokens": 2, "completion_tokens": 3, "total_tokens": 5, } }, ) handler.on_llm_end(response) token_ratelimit.limit.assert_called_once_with("user123", 5) @pytest.mark.requires("upstash_ratelimit") def test_on_llm_end_without_token_usage(handler_with_both_limits: Any) -> None: response = LLMResult(generations=[], llm_output={}) with pytest.raises(ValueError): handler_with_both_limits.on_llm_end(response) @pytest.mark.requires("upstash_ratelimit") def test_reset_handler(handler_with_both_limits: Any) -> None: new_handler = handler_with_both_limits.reset(identifier="user456") assert new_handler.identifier == "user456" assert not new_handler._checked @pytest.mark.requires("upstash_ratelimit") def test_reset_handler_no_new_identifier(handler_with_both_limits: Any) -> None: new_handler = handler_with_both_limits.reset() assert new_handler.identifier == "user123" assert not new_handler._checked @pytest.mark.requires("upstash_ratelimit") def test_on_chain_start_called_once(handler_with_both_limits: Any) -> None: handler_with_both_limits.on_chain_start(serialized={}, inputs={}) handler_with_both_limits.on_chain_start(serialized={}, inputs={}) assert handler_with_both_limits.request_ratelimit.limit.call_count == 1 @pytest.mark.requires("upstash_ratelimit") def test_on_chain_start_reset_checked(handler_with_both_limits: Any) -> None: handler_with_both_limits.on_chain_start(serialized={}, inputs={}) new_handler = handler_with_both_limits.reset(identifier="user456") new_handler.on_chain_start(serialized={}, inputs={}) # becomes two because the mock object is kept in reset assert new_handler.request_ratelimit.limit.call_count == 2 @pytest.mark.requires("upstash_ratelimit") def test_on_llm_start_no_token_limit(request_ratelimit: Any) -> None: handler = 
UpstashRatelimitHandler( identifier="user123", token_ratelimit=None, request_ratelimit=request_ratelimit ) handler.on_llm_start(serialized={}, prompts=["test"]) assert request_ratelimit.limit.call_count == 0 @pytest.mark.requires("upstash_ratelimit") def test_on_llm_start_token_limit(handler_with_both_limits: Any) -> None: handler_with_both_limits.on_llm_start(serialized={}, prompts=["test"]) assert handler_with_both_limits.token_ratelimit.get_remaining.call_count == 1 @pytest.mark.requires("upstash_ratelimit") def test_full_chain_with_both_limits(handler_with_both_limits: Any) -> None: handler_with_both_limits.on_chain_start(serialized={}, inputs={}) handler_with_both_limits.on_chain_start(serialized={}, inputs={}) assert handler_with_both_limits.request_ratelimit.limit.call_count == 1 assert handler_with_both_limits.token_ratelimit.limit.call_count == 0 assert handler_with_both_limits.token_ratelimit.get_remaining.call_count == 0 handler_with_both_limits.on_llm_start(serialized={}, prompts=["test"]) assert handler_with_both_limits.request_ratelimit.limit.call_count == 1 assert handler_with_both_limits.token_ratelimit.limit.call_count == 0 assert handler_with_both_limits.token_ratelimit.get_remaining.call_count == 1 response = LLMResult( generations=[], llm_output={ "token_usage": { "prompt_tokens": 2, "completion_tokens": 3, "total_tokens": 5, } }, ) handler_with_both_limits.on_llm_end(response) assert handler_with_both_limits.request_ratelimit.limit.call_count == 1 assert handler_with_both_limits.token_ratelimit.limit.call_count == 1 assert handler_with_both_limits.token_ratelimit.get_remaining.call_count == 1
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/test_callback_manager.py
"""Test CallbackManager.""" from unittest.mock import patch import pytest from langchain_core.callbacks.manager import CallbackManager, trace_as_chain_group from langchain_core.outputs import LLMResult from langchain_core.tracers.langchain import LangChainTracer, wait_for_all_tracers from langchain_core.utils.pydantic import get_fields from langsmith import utils as ls_utils from langchain_community.callbacks import get_openai_callback from langchain_community.callbacks.manager import get_bedrock_anthropic_callback from langchain_community.llms.openai import BaseOpenAI def test_callback_manager_configure_context_vars( monkeypatch: pytest.MonkeyPatch, ) -> None: """Test callback manager configuration.""" ls_utils.get_env_var.cache_clear() ls_utils.get_tracer_project.cache_clear() monkeypatch.setenv("LANGCHAIN_TRACING_V2", "true") monkeypatch.setenv("LANGCHAIN_TRACING", "false") monkeypatch.setenv("LANGCHAIN_API_KEY", "foo") with patch.object(LangChainTracer, "_update_run_single"): with patch.object(LangChainTracer, "_persist_run_single"): with trace_as_chain_group("test") as group_manager: assert len(group_manager.handlers) == 1 tracer = group_manager.handlers[0] assert isinstance(tracer, LangChainTracer) with get_openai_callback() as cb: # This is a new empty callback handler assert cb.successful_requests == 0 assert cb.total_tokens == 0 # configure adds this openai cb but doesn't modify the group manager mngr = CallbackManager.configure(group_manager) assert mngr.handlers == [tracer, cb] assert group_manager.handlers == [tracer] response = LLMResult( generations=[], llm_output={ "token_usage": { "prompt_tokens": 2, "completion_tokens": 1, "total_tokens": 3, }, "model_name": get_fields(BaseOpenAI)["model_name"].default, }, ) mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response) # The callback handler has been updated assert cb.successful_requests == 1 assert cb.total_tokens == 3 assert cb.prompt_tokens == 2 assert cb.completion_tokens == 1 assert cb.total_cost 
> 0 with get_openai_callback() as cb: # This is a new empty callback handler assert cb.successful_requests == 0 assert cb.total_tokens == 0 # configure adds this openai cb but doesn't modify the group manager mngr = CallbackManager.configure(group_manager) assert mngr.handlers == [tracer, cb] assert group_manager.handlers == [tracer] response = LLMResult( generations=[], llm_output={ "token_usage": { "prompt_tokens": 2, "completion_tokens": 1, "total_tokens": 3, }, "model_name": get_fields(BaseOpenAI)["model_name"].default, }, ) mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response) # The callback handler has been updated assert cb.successful_requests == 1 assert cb.total_tokens == 3 assert cb.prompt_tokens == 2 assert cb.completion_tokens == 1 assert cb.total_cost > 0 with get_bedrock_anthropic_callback() as cb: # This is a new empty callback handler assert cb.successful_requests == 0 assert cb.total_tokens == 0 # configure adds this bedrock anthropic cb, # but doesn't modify the group manager mngr = CallbackManager.configure(group_manager) assert mngr.handlers == [tracer, cb] assert group_manager.handlers == [tracer] response = LLMResult( generations=[], llm_output={ "usage": { "prompt_tokens": 2, "completion_tokens": 1, "total_tokens": 3, }, "model_id": "anthropic.claude-instant-v1", }, ) mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response) # The callback handler has been updated assert cb.successful_requests == 1 assert cb.total_tokens == 3 assert cb.prompt_tokens == 2 assert cb.completion_tokens == 1 assert cb.total_cost > 0 wait_for_all_tracers() assert LangChainTracer._persist_run_single.call_count == 4 # type: ignore
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/__init__.py
"""Tests for correct functioning of callbacks."""
0
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/tracers/__init__.py
"""Tests for correct functioning of tracers."""
0
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks
lc_public_repos/langchain/libs/community/tests/unit_tests/callbacks/tracers/test_comet.py
import uuid from types import SimpleNamespace from unittest import mock from langchain_core.outputs import LLMResult from langchain_community.callbacks.tracers import comet def test_comet_tracer__trace_chain_with_single_span__happyflow() -> None: # Setup mocks chain_module_mock = mock.Mock() chain_instance_mock = mock.Mock() chain_module_mock.Chain.return_value = chain_instance_mock span_module_mock = mock.Mock() span_instance_mock = mock.MagicMock() span_instance_mock.__api__start__ = mock.Mock() span_instance_mock.__api__end__ = mock.Mock() span_module_mock.Span.return_value = span_instance_mock experiment_info_module_mock = mock.Mock() experiment_info_module_mock.get.return_value = "the-experiment-info" chain_api_module_mock = mock.Mock() comet_ml_api_mock = SimpleNamespace( chain=chain_module_mock, span=span_module_mock, experiment_info=experiment_info_module_mock, chain_api=chain_api_module_mock, flush="not-used-in-this-test", ) # Create tracer with mock.patch.object( comet, "import_comet_llm_api", return_value=comet_ml_api_mock ): tracer = comet.CometTracer() run_id_1 = uuid.UUID("9d878ab3-e5ca-4218-aef6-44cbdc90160a") run_id_2 = uuid.UUID("4f31216e-7c26-4027-a5fd-0bbf9ace17dc") # Parent run tracer.on_chain_start( {"name": "chain-input"}, {"input": "chain-input-prompt"}, parent_run_id=None, run_id=run_id_1, ) # Check that chain was created chain_module_mock.Chain.assert_called_once_with( inputs={"input": "chain-input-prompt"}, metadata=None, experiment_info="the-experiment-info", ) # Child run tracer.on_llm_start( {"name": "span-input"}, ["span-input-prompt"], parent_run_id=run_id_1, run_id=run_id_2, ) # Check that Span was created and attached to chain span_module_mock.Span.assert_called_once_with( inputs={"prompts": ["span-input-prompt"]}, category=mock.ANY, metadata=mock.ANY, name=mock.ANY, ) span_instance_mock.__api__start__(chain_instance_mock) # Child run end tracer.on_llm_end( LLMResult(generations=[], llm_output={"span-output-key": 
"span-output-value"}), run_id=run_id_2, ) # Check that Span outputs are set and span is ended span_instance_mock.set_outputs.assert_called_once() actual_span_outputs = span_instance_mock.set_outputs.call_args[1]["outputs"] assert { "llm_output": {"span-output-key": "span-output-value"}, "generations": [], }.items() <= actual_span_outputs.items() span_instance_mock.__api__end__() # Parent run end tracer.on_chain_end({"chain-output-key": "chain-output-value"}, run_id=run_id_1) # Check that chain outputs are set and chain is logged chain_instance_mock.set_outputs.assert_called_once() actual_chain_outputs = chain_instance_mock.set_outputs.call_args[1]["outputs"] assert ("chain-output-key", "chain-output-value") in actual_chain_outputs.items() chain_api_module_mock.log_chain.assert_called_once_with(chain_instance_mock)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/docstore/test_arbitrary_fn.py
from langchain_core.documents import Document from langchain_community.docstore.arbitrary_fn import DocstoreFn def test_document_found() -> None: # we use a dict here for simiplicity, but this could be any function # including a remote lookup dummy_dict = {"foo": Document(page_content="bar")} docstore = DocstoreFn(lambda x: dummy_dict[x]) output = docstore.search("foo") assert isinstance(output, Document) assert output.page_content == "bar"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/docstore/test_imports.py
from langchain_community.docstore import __all__, _module_lookup EXPECTED_ALL = ["DocstoreFn", "InMemoryDocstore", "Wikipedia"] def test_all_imports() -> None: assert set(__all__) == set(EXPECTED_ALL) assert set(__all__) == set(_module_lookup.keys())
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/docstore/__init__.py
"""Test functionality related to the docstore objects."""
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/docstore/test_inmemory.py
"""Test in memory docstore.""" import pytest from langchain_core.documents import Document from langchain_community.docstore.in_memory import InMemoryDocstore def test_document_found() -> None: """Test document found.""" _dict = {"foo": Document(page_content="bar")} docstore = InMemoryDocstore(_dict) output = docstore.search("foo") assert isinstance(output, Document) assert output.page_content == "bar" def test_document_not_found() -> None: """Test when document is not found.""" _dict = {"foo": Document(page_content="bar")} docstore = InMemoryDocstore(_dict) output = docstore.search("bar") assert output == "ID bar not found." def test_adding_document() -> None: """Test that documents are added correctly.""" _dict = {"foo": Document(page_content="bar")} docstore = InMemoryDocstore(_dict) new_dict = {"bar": Document(page_content="foo")} docstore.add(new_dict) # Test that you can find new document. foo_output = docstore.search("bar") assert isinstance(foo_output, Document) assert foo_output.page_content == "foo" # Test that old document is the same. bar_output = docstore.search("foo") assert isinstance(bar_output, Document) assert bar_output.page_content == "bar" def test_adding_document_already_exists() -> None: """Test that error is raised if document id already exists.""" _dict = {"foo": Document(page_content="bar")} docstore = InMemoryDocstore(_dict) new_dict = {"foo": Document(page_content="foo")} # Test that error is raised. with pytest.raises(ValueError): docstore.add(new_dict) # Test that old document is the same. bar_output = docstore.search("foo") assert isinstance(bar_output, Document) assert bar_output.page_content == "bar" def test_default_dict_value_in_constructor() -> None: """Test proper functioning if no _dict is provided to the constructor.""" docstore = InMemoryDocstore() docstore.add({"foo": Document(page_content="bar")}) output = docstore.search("foo") assert isinstance(output, Document) assert output.page_content == "bar"
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/imports/test_langchain_proxy_imports.py
"""Verify proxy imports from langchain to community are behaving as expected.""" def test_all_proxy_llms_are_llm_subclasses() -> None: """Simple test to make sure all things are subclasses of BaseLLM.""" from langchain import llms from langchain_core.language_models import BaseLLM for cls in llms.__all__: assert issubclass(getattr(llms, cls), BaseLLM) def test_vectorstores() -> None: """Simple test to make sure all things can be imported.""" from langchain import vectorstores from langchain_core.vectorstores import VectorStore for cls in vectorstores.__all__: if cls not in [ "AlibabaCloudOpenSearchSettings", "ClickhouseSettings", "MyScaleSettings", "AzureCosmosDBVectorSearch", ]: assert issubclass(getattr(vectorstores, cls), VectorStore)
0
lc_public_repos/langchain/libs/community/tests/unit_tests
lc_public_repos/langchain/libs/community/tests/unit_tests/chains/test_graph_qa.py
import pathlib from typing import Any, Dict, List import pandas as pd from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory from langchain_core.prompts import PromptTemplate from langchain_community.chains.graph_qa.cypher import ( GraphCypherQAChain, construct_schema, extract_cypher, ) from langchain_community.chains.graph_qa.cypher_utils import ( CypherQueryCorrector, Schema, ) from langchain_community.graphs.graph_document import GraphDocument from langchain_community.graphs.graph_store import GraphStore from tests.unit_tests.llms.fake_llm import FakeLLM class FakeGraphStore(GraphStore): @property def get_schema(self) -> str: """Returns the schema of the Graph database""" return "" @property def get_structured_schema(self) -> Dict[str, Any]: """Returns the schema of the Graph database""" return {} def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]: """Query the graph.""" return [] def refresh_schema(self) -> None: """Refreshes the graph schema information.""" pass def add_graph_documents( self, graph_documents: List[GraphDocument], include_source: bool = False ) -> None: """Take GraphDocument as input as uses it to construct a graph.""" pass def test_graph_cypher_qa_chain_prompt_selection_1() -> None: # Pass prompts directly. No kwargs is specified. 
qa_prompt_template = "QA Prompt" cypher_prompt_template = "Cypher Prompt" qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[]) chain = GraphCypherQAChain.from_llm( llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, qa_prompt=qa_prompt, cypher_prompt=cypher_prompt, allow_dangerous_requests=True, ) assert chain.qa_chain.prompt == qa_prompt # type: ignore[union-attr] assert chain.cypher_generation_chain.prompt == cypher_prompt def test_graph_cypher_qa_chain_prompt_selection_2() -> None: # Default case. Pass nothing chain = GraphCypherQAChain.from_llm( llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, allow_dangerous_requests=True, ) assert chain.qa_chain.prompt == CYPHER_QA_PROMPT # type: ignore[union-attr] assert chain.cypher_generation_chain.prompt == CYPHER_GENERATION_PROMPT def test_graph_cypher_qa_chain_prompt_selection_3() -> None: # Pass non-prompt args only to sub-chains via kwargs memory = ConversationBufferMemory(memory_key="chat_history") readonlymemory = ReadOnlySharedMemory(memory=memory) chain = GraphCypherQAChain.from_llm( llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, cypher_llm_kwargs={"memory": readonlymemory}, qa_llm_kwargs={"memory": readonlymemory}, allow_dangerous_requests=True, ) assert chain.qa_chain.prompt == CYPHER_QA_PROMPT # type: ignore[union-attr] assert chain.cypher_generation_chain.prompt == CYPHER_GENERATION_PROMPT def test_graph_cypher_qa_chain_prompt_selection_4() -> None: # Pass prompt, non-prompt args to subchains via kwargs qa_prompt_template = "QA Prompt" cypher_prompt_template = "Cypher Prompt" memory = ConversationBufferMemory(memory_key="chat_history") readonlymemory = ReadOnlySharedMemory(memory=memory) qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = 
PromptTemplate(template=cypher_prompt_template, input_variables=[]) chain = GraphCypherQAChain.from_llm( llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, cypher_llm_kwargs={"prompt": cypher_prompt, "memory": readonlymemory}, qa_llm_kwargs={"prompt": qa_prompt, "memory": readonlymemory}, allow_dangerous_requests=True, ) assert chain.qa_chain.prompt == qa_prompt # type: ignore[union-attr] assert chain.cypher_generation_chain.prompt == cypher_prompt def test_graph_cypher_qa_chain_prompt_selection_5() -> None: # Can't pass both prompt and kwargs at the same time qa_prompt_template = "QA Prompt" cypher_prompt_template = "Cypher Prompt" memory = ConversationBufferMemory(memory_key="chat_history") readonlymemory = ReadOnlySharedMemory(memory=memory) qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[]) try: GraphCypherQAChain.from_llm( llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, qa_prompt=qa_prompt, cypher_prompt=cypher_prompt, cypher_llm_kwargs={"memory": readonlymemory}, qa_llm_kwargs={"memory": readonlymemory}, allow_dangerous_requests=True, ) assert False except ValueError: assert True def test_graph_cypher_qa_chain() -> None: template = """You are a nice chatbot having a conversation with a human. 
Schema: {schema} Previous conversation: {chat_history} New human question: {question} Response:""" prompt = PromptTemplate( input_variables=["schema", "question", "chat_history"], template=template ) memory = ConversationBufferMemory(memory_key="chat_history") readonlymemory = ReadOnlySharedMemory(memory=memory) prompt1 = ( "You are a nice chatbot having a conversation with a human.\n\n " "Schema:\n Node properties are the following:\n\nRelationship " "properties are the following:\n\nThe relationships are the " "following:\n\n\n " "Previous conversation:\n \n\n New human question: " "Test question\n Response:" ) prompt2 = ( "You are a nice chatbot having a conversation with a human.\n\n " "Schema:\n Node properties are the following:\n\nRelationship " "properties are the following:\n\nThe relationships are the " "following:\n\n\n " "Previous conversation:\n Human: Test question\nAI: foo\n\n " "New human question: Test new question\n Response:" ) llm = FakeLLM(queries={prompt1: "answer1", prompt2: "answer2"}) chain = GraphCypherQAChain.from_llm( cypher_llm=llm, qa_llm=FakeLLM(), graph=FakeGraphStore(), verbose=True, return_intermediate_steps=False, cypher_llm_kwargs={"prompt": prompt, "memory": readonlymemory}, memory=memory, allow_dangerous_requests=True, ) chain.run("Test question") chain.run("Test new question") # If we get here without a key error, that means memory # was used properly to create prompts. assert True def test_no_backticks() -> None: """Test if there are no backticks, so the original text should be returned.""" query = "MATCH (n) RETURN n" output = extract_cypher(query) assert output == query def test_backticks() -> None: """Test if there are backticks. 
Query from within backticks should be returned.""" query = "You can use the following query: ```MATCH (n) RETURN n```" output = extract_cypher(query) assert output == "MATCH (n) RETURN n" def test_exclude_types() -> None: structured_schema = { "node_props": { "Movie": [{"property": "title", "type": "STRING"}], "Actor": [{"property": "name", "type": "STRING"}], "Person": [{"property": "name", "type": "STRING"}], }, "rel_props": {}, "relationships": [ {"start": "Actor", "end": "Movie", "type": "ACTED_IN"}, {"start": "Person", "end": "Movie", "type": "DIRECTED"}, ], } exclude_types = ["Person", "DIRECTED"] output = construct_schema(structured_schema, [], exclude_types) expected_schema = ( "Node properties are the following:\n" "Movie {title: STRING},Actor {name: STRING}\n" "Relationship properties are the following:\n\n" "The relationships are the following:\n" "(:Actor)-[:ACTED_IN]->(:Movie)" ) assert output == expected_schema def test_include_types() -> None: structured_schema = { "node_props": { "Movie": [{"property": "title", "type": "STRING"}], "Actor": [{"property": "name", "type": "STRING"}], "Person": [{"property": "name", "type": "STRING"}], }, "rel_props": {}, "relationships": [ {"start": "Actor", "end": "Movie", "type": "ACTED_IN"}, {"start": "Person", "end": "Movie", "type": "DIRECTED"}, ], } include_types = ["Movie", "Actor", "ACTED_IN"] output = construct_schema(structured_schema, include_types, []) expected_schema = ( "Node properties are the following:\n" "Movie {title: STRING},Actor {name: STRING}\n" "Relationship properties are the following:\n\n" "The relationships are the following:\n" "(:Actor)-[:ACTED_IN]->(:Movie)" ) assert output == expected_schema def test_include_types2() -> None: structured_schema = { "node_props": { "Movie": [{"property": "title", "type": "STRING"}], "Actor": [{"property": "name", "type": "STRING"}], "Person": [{"property": "name", "type": "STRING"}], }, "rel_props": {}, "relationships": [ {"start": "Actor", "end": "Movie", 
"type": "ACTED_IN"}, {"start": "Person", "end": "Movie", "type": "DIRECTED"}, ], } include_types = ["Movie", "Actor"] output = construct_schema(structured_schema, include_types, []) expected_schema = ( "Node properties are the following:\n" "Movie {title: STRING},Actor {name: STRING}\n" "Relationship properties are the following:\n\n" "The relationships are the following:\n" ) assert output == expected_schema def test_include_types3() -> None: structured_schema = { "node_props": { "Movie": [{"property": "title", "type": "STRING"}], "Actor": [{"property": "name", "type": "STRING"}], "Person": [{"property": "name", "type": "STRING"}], }, "rel_props": {}, "relationships": [ {"start": "Actor", "end": "Movie", "type": "ACTED_IN"}, {"start": "Person", "end": "Movie", "type": "DIRECTED"}, ], } include_types = ["Movie", "Actor", "ACTED_IN"] output = construct_schema(structured_schema, include_types, []) expected_schema = ( "Node properties are the following:\n" "Movie {title: STRING},Actor {name: STRING}\n" "Relationship properties are the following:\n\n" "The relationships are the following:\n" "(:Actor)-[:ACTED_IN]->(:Movie)" ) assert output == expected_schema HERE = pathlib.Path(__file__).parent UNIT_TESTS_ROOT = HERE.parent def test_validating_cypher_statements() -> None: cypher_file = str(UNIT_TESTS_ROOT / "data/cypher_corrector.csv") examples = pd.read_csv(cypher_file) examples.fillna("", inplace=True) for _, row in examples.iterrows(): schema = load_schemas(row["schema"]) corrector = CypherQueryCorrector(schema) assert corrector(row["statement"]) == row["correct_query"] def load_schemas(str_schemas: str) -> List[Schema]: """ Args: str_schemas: string of schemas """ values = str_schemas.replace("(", "").replace(")", "").split(",") schemas = [] for i in range(len(values) // 3): schemas.append( Schema( values[i * 3].strip(), values[i * 3 + 1].strip(), values[i * 3 + 2].strip(), ) ) return schemas