method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_llm_type
return 'chat_glm'
@property def _llm_type(self) ->str: return 'chat_glm'
null
test_search_call
"""Test that call gives the correct answer from search.""" search = GoogleSerperAPIWrapper() output = search.run("What was Obama's first name?") assert 'Barack Hussein Obama II' in output
def test_search_call() ->None: """Test that call gives the correct answer from search.""" search = GoogleSerperAPIWrapper() output = search.run("What was Obama's first name?") assert 'Barack Hussein Obama II' in output
Test that call gives the correct answer from search.
_get_python_function_required_args
"""Get the required arguments for a Python function.""" spec = inspect.getfullargspec(function) required = spec.args[:-len(spec.defaults)] if spec.defaults else spec.args required += [k for k in spec.kwonlyargs if k not in (spec.kwonlydefaults or {}) ] is_class = type(function) is type if is_class and required[0] =...
def _get_python_function_required_args(function: Callable) ->List[str]: """Get the required arguments for a Python function.""" spec = inspect.getfullargspec(function) required = spec.args[:-len(spec.defaults)] if spec.defaults else spec.args required += [k for k in spec.kwonlyargs if k not in (spec. ...
Get the required arguments for a Python function.
__init__
"""Initialize the IUGU resource. Args: resource: The name of the resource to fetch. api_token: The IUGU API token to use. """ self.resource = resource api_token = api_token or get_from_env('api_token', 'IUGU_API_TOKEN') self.headers = {'Authorization': f'Bearer {api_token}'}
def __init__(self, resource: str, api_token: Optional[str]=None) ->None: """Initialize the IUGU resource. Args: resource: The name of the resource to fetch. api_token: The IUGU API token to use. """ self.resource = resource api_token = api_token or get_from_env('api_...
Initialize the IUGU resource. Args: resource: The name of the resource to fetch. api_token: The IUGU API token to use.
test_with_include_parameter
"""Test end to end construction and include parameter.""" texts = ['hello bagel', 'this is langchain'] docsearch = Bagel.from_texts(cluster_name='testing', texts=texts) output = docsearch.get(include=['embeddings']) assert output['embeddings'] is not None output = docsearch.get() assert output['embeddings'] is None doc...
def test_with_include_parameter() ->None: """Test end to end construction and include parameter.""" texts = ['hello bagel', 'this is langchain'] docsearch = Bagel.from_texts(cluster_name='testing', texts=texts) output = docsearch.get(include=['embeddings']) assert output['embeddings'] is not None ...
Test end to end construction and include parameter.
mset
"""Set the values for the given keys.""" encoded_pairs = [(self.key_encoder(key), self.value_serializer(value)) for key, value in key_value_pairs] self.store.mset(encoded_pairs)
def mset(self, key_value_pairs: Sequence[Tuple[K, V]]) ->None: """Set the values for the given keys.""" encoded_pairs = [(self.key_encoder(key), self.value_serializer(value)) for key, value in key_value_pairs] self.store.mset(encoded_pairs)
Set the values for the given keys.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
test_fireworks_streaming
"""Test stream completion.""" generator = llm.stream("Who's the best quarterback in the NFL?") assert isinstance(generator, Generator) for token in generator: assert isinstance(token, str)
@pytest.mark.scheduled def test_fireworks_streaming(llm: Fireworks) ->None: """Test stream completion.""" generator = llm.stream("Who's the best quarterback in the NFL?") assert isinstance(generator, Generator) for token in generator: assert isinstance(token, str)
Test stream completion.
run
"""Run Places search and get k number of places that exists that match.""" search_results = self.google_map_client.places(query)['results'] num_to_return = len(search_results) places = [] if num_to_return == 0: return 'Google Places did not find any places that match the description' num_to_return = num_to_return i...
def run(self, query: str) ->str: """Run Places search and get k number of places that exists that match.""" search_results = self.google_map_client.places(query)['results'] num_to_return = len(search_results) places = [] if num_to_return == 0: return ( 'Google Places did not find...
Run Places search and get k number of places that exists that match.
_auth
"""Authenticates the OneDrive API client Returns: The authenticated Account object. """ try: from O365 import Account, FileSystemTokenBackend except ImportError: raise ImportError( 'O365 package not found, please install it with `pip install o365`') if self.auth_with_token: ...
def _auth(self) ->Account: """Authenticates the OneDrive API client Returns: The authenticated Account object. """ try: from O365 import Account, FileSystemTokenBackend except ImportError: raise ImportError( 'O365 package not found, please install it ...
Authenticates the OneDrive API client Returns: The authenticated Account object.
_import_arcee
from langchain_community.llms.arcee import Arcee return Arcee
def _import_arcee() ->Any: from langchain_community.llms.arcee import Arcee return Arcee
null
_load_rapidfuzz
""" Load the RapidFuzz library. Raises: ImportError: If the rapidfuzz library is not installed. Returns: Any: The rapidfuzz.distance module. """ try: import rapidfuzz except ImportError: raise ImportError( 'Please install the rapidfuzz library to use the FuzzyMatchStrin...
def _load_rapidfuzz() ->Any: """ Load the RapidFuzz library. Raises: ImportError: If the rapidfuzz library is not installed. Returns: Any: The rapidfuzz.distance module. """ try: import rapidfuzz except ImportError: raise ImportError( 'Please ins...
Load the RapidFuzz library. Raises: ImportError: If the rapidfuzz library is not installed. Returns: Any: The rapidfuzz.distance module.
_create_engine
return sqlalchemy.create_engine(url=self.connection_string, **self.engine_args)
def _create_engine(self) ->sqlalchemy.engine.Engine: return sqlalchemy.create_engine(url=self.connection_string, **self. engine_args)
null
test_simple_memory
"""Test SimpleMemory.""" memory = SimpleMemory(memories={'baz': 'foo'}) output = memory.load_memory_variables({}) assert output == {'baz': 'foo'} assert ['baz'] == memory.memory_variables
def test_simple_memory() ->None: """Test SimpleMemory.""" memory = SimpleMemory(memories={'baz': 'foo'}) output = memory.load_memory_variables({}) assert output == {'baz': 'foo'} assert ['baz'] == memory.memory_variables
Test SimpleMemory.
_call
if self.streaming: completion = '' for chunk in self._stream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completion params = self._convert_prompt_msg_params(prompt, **kwargs) response = self.client.chat(params) return response.get('choice', {}).get('message', {}).get('conte...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: if self.streaming: completion = '' for chunk in self._stream(prompt, stop, run_manager, **kwargs): completion += chunk.text return completi...
null
from_texts
""" Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from(texts, embeddi...
@classmethod def from_texts(cls: Type[PGVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str= _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy =DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]]=None, pre_delete_collection: ...
Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable.
test_importable_all
for path in glob.glob('../experimental/langchain_experimental/*'): relative_path = Path(path).parts[-1] if relative_path.endswith('.typed'): continue module_name = relative_path.split('.')[0] module = importlib.import_module('langchain_experimental.' + module_name) all_ = getattr(module, '__...
def test_importable_all() ->None: for path in glob.glob('../experimental/langchain_experimental/*'): relative_path = Path(path).parts[-1] if relative_path.endswith('.typed'): continue module_name = relative_path.split('.')[0] module = importlib.import_module('langchain_ex...
null
test_hybrid_score_normalization
"""Test if we can get two 1.0 documents with RRF""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(['foo'], text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url= u...
def test_hybrid_score_normalization() ->None: """Test if we can get two 1.0 documents with RRF""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(['foo'], text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_embe...
Test if we can get two 1.0 documents with RRF
_validate_prompt
if DOCUMENTS_KEY not in prompt.input_variables: raise ValueError( f'Prompt must accept {DOCUMENTS_KEY} as an input variable. Received prompt with input variables: {prompt.input_variables}' )
def _validate_prompt(prompt: BasePromptTemplate) ->None: if DOCUMENTS_KEY not in prompt.input_variables: raise ValueError( f'Prompt must accept {DOCUMENTS_KEY} as an input variable. Received prompt with input variables: {prompt.input_variables}' )
null
_import_requests_tool_RequestsGetTool
from langchain_community.tools.requests.tool import RequestsGetTool return RequestsGetTool
def _import_requests_tool_RequestsGetTool() ->Any: from langchain_community.tools.requests.tool import RequestsGetTool return RequestsGetTool
null
delete
"""DELETE the URL and return the text.""" return requests.delete(url, headers=self.headers, auth=self.auth, **kwargs)
def delete(self, url: str, **kwargs: Any) ->requests.Response: """DELETE the URL and return the text.""" return requests.delete(url, headers=self.headers, auth=self.auth, **kwargs)
DELETE the URL and return the text.
_create_action_request
data = self._create_action_payload(instructions, params, preview_only) return Request('POST', self._create_action_url(action_id), json=data)
def _create_action_request(self, action_id: str, instructions: str, params: Optional[Dict]=None, preview_only=False) ->Request: data = self._create_action_payload(instructions, params, preview_only) return Request('POST', self._create_action_url(action_id), json=data)
null
test_large_batches
documents = ['foo bar' for _ in range(0, 251)] model_uscentral1 = VertexAIEmbeddings(location='us-central1') model_asianortheast1 = VertexAIEmbeddings(location='asia-northeast1') model_uscentral1.embed_documents(documents) model_asianortheast1.embed_documents(documents) assert model_uscentral1.instance['batch_size'] >=...
def test_large_batches() ->None: documents = ['foo bar' for _ in range(0, 251)] model_uscentral1 = VertexAIEmbeddings(location='us-central1') model_asianortheast1 = VertexAIEmbeddings(location='asia-northeast1') model_uscentral1.embed_documents(documents) model_asianortheast1.embed_documents(documen...
null
test_character_text_splitter_long
"""Test splitting by character count on long words.""" text = 'foo bar baz a a' splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=1) output = splitter.split_text(text) expected_output = ['foo', 'bar', 'baz', 'a a'] assert output == expected_output
def test_character_text_splitter_long() ->None: """Test splitting by character count on long words.""" text = 'foo bar baz a a' splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=1) output = splitter.split_text(text) expected_output = ['foo', 'bar', 'baz', 'a a'] ...
Test splitting by character count on long words.
evaluation_name
return 'Contextual Accuracy'
@property def evaluation_name(self) ->str: return 'Contextual Accuracy'
null
test_amazontextract_loader_failures
two_page_pdf = str(Path(__file__).parent.parent / 'examples/multi-page-forms-sample-2-page.pdf') loader = AmazonTextractPDFLoader(two_page_pdf) with pytest.raises(ValueError): loader.load()
@pytest.mark.skip(reason='Requires AWS credentials to run') def test_amazontextract_loader_failures() ->None: two_page_pdf = str(Path(__file__).parent.parent / 'examples/multi-page-forms-sample-2-page.pdf') loader = AmazonTextractPDFLoader(two_page_pdf) with pytest.raises(ValueError): loader...
null
_create_message_dicts
params = self._client_params if stop is not None: if 'stop' in params: raise ValueError('`stop` found in both the input and default params.') params['stop'] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional [List[str]]) ->Tuple[List[Dict[str, Any]], Dict[str, Any]]: params = self._client_params if stop is not None: if 'stop' in params: raise ValueError( '`stop` found in both the input and default par...
null
_format_func
self._validate_func(func) return f'${func.value}'
def _format_func(self, func: Union[Operator, Comparator]) ->str: self._validate_func(func) return f'${func.value}'
null
test_aleph_alpha_call
"""Test valid call to cohere.""" llm = AlephAlpha(maximum_tokens=10) output = llm('Say foo:') assert isinstance(output, str)
def test_aleph_alpha_call() ->None: """Test valid call to cohere.""" llm = AlephAlpha(maximum_tokens=10) output = llm('Say foo:') assert isinstance(output, str)
Test valid call to cohere.
index_exists
"""Verifies if the specified index name during instance construction exists on the collection Returns: Returns True on success and False if no such index exists on the collection """ cursor = self._collection.list_indexes() index_name = self._index_name for res in curs...
def index_exists(self) ->bool: """Verifies if the specified index name during instance construction exists on the collection Returns: Returns True on success and False if no such index exists on the collection """ cursor = self._collection.list_indexes() in...
Verifies if the specified index name during instance construction exists on the collection Returns: Returns True on success and False if no such index exists on the collection
lazy_parse
"""Lazily parse the blob.""" import pypdf with blob.as_bytes_io() as pdf_file_obj: pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password) yield from [Document(page_content=page.extract_text() + self. _extract_images_from_page(page), metadata={'source': blob.source, 'page': page_numbe...
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Lazily parse the blob.""" import pypdf with blob.as_bytes_io() as pdf_file_obj: pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password) yield from [Document(page_content=page.extract_text() + self. _extract_imag...
Lazily parse the blob.
test_gpt_router_call_incorrect_model
"""Test invalid modelName""" anthropic_claude = GPTRouterModel(name='model_does_not_exist', provider_name='anthropic') chat = GPTRouter(models_priority_list=[anthropic_claude]) message = HumanMessage(content='Hello World') with pytest.raises(Exception): chat([message])
def test_gpt_router_call_incorrect_model() ->None: """Test invalid modelName""" anthropic_claude = GPTRouterModel(name='model_does_not_exist', provider_name='anthropic') chat = GPTRouter(models_priority_list=[anthropic_claude]) message = HumanMessage(content='Hello World') with pytest.raises...
Test invalid modelName
embeddings
"""Access the query embedding object if available.""" logger.debug( f'{Embeddings.__name__} is not implemented for {self.__class__.__name__}') return None
@property def embeddings(self) ->Optional[Embeddings]: """Access the query embedding object if available.""" logger.debug( f'{Embeddings.__name__} is not implemented for {self.__class__.__name__}' ) return None
Access the query embedding object if available.
_call
"""Call to Banana endpoint.""" try: from banana_dev import Client except ImportError: raise ImportError( 'Could not import banana-dev python package. Please install it with `pip install banana-dev`.' ) params = self.model_kwargs or {} params = {**params, **kwargs} api_key = self.banana_api_key m...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call to Banana endpoint.""" try: from banana_dev import Client except ImportError: raise ImportError( 'Could not import banana-dev pytho...
Call to Banana endpoint.
_stream
self._load_model(self.model_name) invocation_params = self._get_invocation_params(**kwargs, prompt=[[prompt]]) stop_words = stop if stop is not None else self.stop inputs = self._generate_inputs(stream=True, **invocation_params) outputs = self._generate_outputs() result_queue = self._invoke_triton(self.model_name, inpu...
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: self._load_model(self.model_name) invocation_params = self._get_invocation_params(**kwargs, prompt=[[prompt]] ) stop_words = stop ...
null
_run
try: result = self.client.chat_postMessage(channel=channel, text=message) output = 'Message sent: ' + str(result) return output except Exception as e: return 'Error creating conversation: {}'.format(e)
def _run(self, message: str, channel: str, run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: try: result = self.client.chat_postMessage(channel=channel, text=message) output = 'Message sent: ' + str(result) return output except Exception as e: return 'Error creat...
null
_stream_response_to_generation_chunk
"""Convert a stream response to a generation chunk.""" if not stream_response['results']: return GenerationChunk(text='') return GenerationChunk(text=stream_response['results'][0]['generated_text'], generation_info=dict(finish_reason=stream_response['results'][0].get( 'stop_reason', None), llm_output={'gene...
def _stream_response_to_generation_chunk(self, stream_response: Dict[str, Any] ) ->GenerationChunk: """Convert a stream response to a generation chunk.""" if not stream_response['results']: return GenerationChunk(text='') return GenerationChunk(text=stream_response['results'][0][ 'genera...
Convert a stream response to a generation chunk.
__getattr__
if name == 'AI21': return _import_ai21() elif name == 'AlephAlpha': return _import_aleph_alpha() elif name == 'AmazonAPIGateway': return _import_amazon_api_gateway() elif name == 'Anthropic': return _import_anthropic() elif name == 'Anyscale': return _import_anyscale() elif name == 'Aphrodite': ...
def __getattr__(name: str) ->Any: if name == 'AI21': return _import_ai21() elif name == 'AlephAlpha': return _import_aleph_alpha() elif name == 'AmazonAPIGateway': return _import_amazon_api_gateway() elif name == 'Anthropic': return _import_anthropic() elif name == 'A...
null
__validate_distance_strategy
if distance_strategy not in [DistanceStrategy.COSINE, DistanceStrategy. MAX_INNER_PRODUCT, DistanceStrategy.MAX_INNER_PRODUCT]: raise ValueError(f'Distance strategy {distance_strategy} not implemented.')
@staticmethod def __validate_distance_strategy(distance_strategy: DistanceStrategy) ->None: if distance_strategy not in [DistanceStrategy.COSINE, DistanceStrategy. MAX_INNER_PRODUCT, DistanceStrategy.MAX_INNER_PRODUCT]: raise ValueError( f'Distance strategy {distance_strategy} not implem...
null
query
if similarity is DistanceStrategy.COSINE: similarityAlgo = ( f"cosineSimilarity(params.query_vector, '{vector_query_field}') + 1.0") elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE: similarityAlgo = ( f"1 / (1 + l2norm(params.query_vector, '{vector_query_field}'))") elif similarity is Dis...
def query(self, query_vector: Union[List[float], None], query: Union[str, None], k: int, fetch_k: int, vector_query_field: str, text_field: str, filter: Union[List[dict], None], similarity: Union[DistanceStrategy, None] ) ->Dict: if similarity is DistanceStrategy.COSINE: similarityAlgo = ( ...
null
plan
"""Given input, decide what to do."""
@abstractmethod def plan(self, inputs: dict, callbacks: Callbacks=None, **kwargs: Any) ->Plan: """Given input, decide what to do."""
Given input, decide what to do.
__init__
super().__init__(**kwargs) self.tot_controller.c = self.c
def __init__(self, **kwargs: Any): super().__init__(**kwargs) self.tot_controller.c = self.c
null
_embed_query
"""Embed query text. Used to provide backward compatibility with `embedding_function` argument. Args: query: Query text. Returns: List of floats representing the query embedding. """ if self.embeddings is not None: embedding = self.embeddings.embed_query(qu...
def _embed_query(self, query: str) ->List[float]: """Embed query text. Used to provide backward compatibility with `embedding_function` argument. Args: query: Query text. Returns: List of floats representing the query embedding. """ if self.embeddings i...
Embed query text. Used to provide backward compatibility with `embedding_function` argument. Args: query: Query text. Returns: List of floats representing the query embedding.
test_json_schema_evaluator_evaluation_name
assert json_schema_evaluator.evaluation_name == 'json_schema_validation'
@pytest.mark.requires('jsonschema') def test_json_schema_evaluator_evaluation_name(json_schema_evaluator: JsonSchemaEvaluator) ->None: assert json_schema_evaluator.evaluation_name == 'json_schema_validation'
null
test_multiple_messages
"""Tests multiple messages works.""" chat = VolcEngineMaasChat() message = HumanMessage(content='Hi, how are you?') response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 1 ...
def test_multiple_messages() ->None: """Tests multiple messages works.""" chat = VolcEngineMaasChat() message = HumanMessage(content='Hi, how are you?') response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generatio...
Tests multiple messages works.
_get_tracer_project
run_tree = get_run_tree_context() return getattr(run_tree, 'session_name', getattr(tracing_v2_callback_var. get(), 'project', str(ls_utils.get_tracer_project())))
def _get_tracer_project() ->str: run_tree = get_run_tree_context() return getattr(run_tree, 'session_name', getattr( tracing_v2_callback_var.get(), 'project', str(ls_utils. get_tracer_project())))
null
test_custom_index_add_documents
"""This test checks the construction of a custom ElasticSearch index using the 'add_documents'.""" from elasticsearch import Elasticsearch index_name = f'custom_index_{uuid.uuid4().hex}' elastic_vector_search = ElasticVectorSearch(embedding=embedding_openai, elasticsearch_url=elasticsearch_url, index_name=i...
@pytest.mark.vcr(ignore_localhost=True) def test_custom_index_add_documents(self, documents: List[Document], embedding_openai: OpenAIEmbeddings, elasticsearch_url: str) ->None: """This test checks the construction of a custom ElasticSearch index using the 'add_documents'.""" from elasticsearch impor...
This test checks the construction of a custom ElasticSearch index using the 'add_documents'.
construct_instance
try: import qdrant_client except ImportError: raise ValueError( 'Could not import qdrant-client python package. Please install it with `pip install qdrant-client`.' ) from grpc import RpcError from qdrant_client.http import models as rest from qdrant_client.http.exceptions import UnexpectedRespo...
@classmethod def construct_instance(cls: Type[Qdrant], texts: List[str], embedding: Embeddings, location: Optional[str]=None, url: Optional[str]=None, port: Optional[int]=6333, grpc_port: int=6334, prefer_grpc: bool=False, https: Optional[bool]=None, api_key: Optional[str]=None, prefix: Optional[str] =N...
null
exception
"""Add an Exception element to the container and return its index.""" kwargs = {'exception': exception} new_dg = self._get_dg(index).exception(**kwargs) record = ChildRecord(ChildType.EXCEPTION, kwargs, new_dg) return self._add_record(record, index)
def exception(self, exception: BaseException, *, index: Optional[int]=None ) ->int: """Add an Exception element to the container and return its index.""" kwargs = {'exception': exception} new_dg = self._get_dg(index).exception(**kwargs) record = ChildRecord(ChildType.EXCEPTION, kwargs, new_dg) r...
Add an Exception element to the container and return its index.
_call
if self.sequential_responses: return self._get_next_response_in_sequence if self.queries is not None: return self.queries[prompt] if stop is None: return 'foo' else: return 'bar'
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: if self.sequential_responses: return self._get_next_response_in_sequence if self.queries is not None: return self.queries[prompt] if stop is None: ...
null
__init__
super().__init__(mapper=mapper, **kwargs)
def __init__(self, mapper: RunnableParallel[Dict[str, Any]], **kwargs: Any ) ->None: super().__init__(mapper=mapper, **kwargs)
null
test_maximal_marginal_relevance
query_embedding = np.array([1, 0]) embedding_list = [[3 ** 0.5, 1], [1, 1], [1, 2 + 3 ** 0.5]] expected = [0, 2] actual = maximal_marginal_relevance(query_embedding, embedding_list, lambda_mult=25 / 71, k=2) assert expected == actual expected = [0, 1] actual = maximal_marginal_relevance(query_embedding, embedding_l...
def test_maximal_marginal_relevance() ->None: query_embedding = np.array([1, 0]) embedding_list = [[3 ** 0.5, 1], [1, 1], [1, 2 + 3 ** 0.5]] expected = [0, 2] actual = maximal_marginal_relevance(query_embedding, embedding_list, lambda_mult=25 / 71, k=2) assert expected == actual expected...
null
_get_schema_type_for_array
from openapi_pydantic import Reference, Schema items = schema.items if isinstance(items, Schema): schema_type = APIProperty._cast_schema_list_type(items) elif isinstance(items, Reference): ref_name = items.ref.split('/')[-1] schema_type = ref_name else: raise ValueError(f'Unsupported array items: {items...
@staticmethod def _get_schema_type_for_array(schema: Schema) ->Optional[Union[str, Tuple[ str, ...]]]: from openapi_pydantic import Reference, Schema items = schema.items if isinstance(items, Schema): schema_type = APIProperty._cast_schema_list_type(items) elif isinstance(items, Reference): ...
null
test_google_palm_embedding_query
"""Test Google PaLM embeddings.""" document = 'foo bar' embedding = GooglePalmEmbeddings() output = embedding.embed_query(document) assert len(output) == 768
def test_google_palm_embedding_query() ->None: """Test Google PaLM embeddings.""" document = 'foo bar' embedding = GooglePalmEmbeddings() output = embedding.embed_query(document) assert len(output) == 768
Test Google PaLM embeddings.
test_singlestoredb_euclidean_distance
"""Test adding a new document""" table_name = 'test_singlestoredb_euclidean_distance' drop(table_name) docsearch = SingleStoreDB.from_texts(texts, FakeEmbeddings(), distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name= table_name, host=TEST_SINGLESTOREDB_URL) docsearch.add_texts(['foo']) output = d...
@pytest.mark.skipif(not singlestoredb_installed, reason= 'singlestoredb not installed') def test_singlestoredb_euclidean_distance(texts: List[str]) ->None: """Test adding a new document""" table_name = 'test_singlestoredb_euclidean_distance' drop(table_name) docsearch = SingleStoreDB.from_texts(text...
Test adding a new document
test_unstructured_email_loader_with_attachments
file_path = Path(__file__).parent.parent / 'examples/fake-email-attachment.eml' loader = UnstructuredEmailLoader(str(file_path), mode='elements', process_attachments=True) docs = loader.load() assert docs[-1].page_content == 'Hey this is a fake attachment!' assert docs[-1].metadata['filename'] == 'fake-attachment.t...
def test_unstructured_email_loader_with_attachments() ->None: file_path = Path(__file__ ).parent.parent / 'examples/fake-email-attachment.eml' loader = UnstructuredEmailLoader(str(file_path), mode='elements', process_attachments=True) docs = loader.load() assert docs[-1].page_content == ...
null
test_load
os.environ['MS_GRAPH_CLIENT_ID'] = 'CLIENT_ID' os.environ['MS_GRAPH_CLIENT_SECRET'] = 'CLIENT_SECRET' mocker.patch('requests.get', return_value=mocker.MagicMock(json=lambda : { 'value': []}, links=None)) loader = OneNoteLoader(notebook_name='test_notebook', section_name= 'test_section', page_title='test_title',...
@pytest.mark.requires('bs4') def test_load(mocker: MockerFixture) ->None: os.environ['MS_GRAPH_CLIENT_ID'] = 'CLIENT_ID' os.environ['MS_GRAPH_CLIENT_SECRET'] = 'CLIENT_SECRET' mocker.patch('requests.get', return_value=mocker.MagicMock(json=lambda : {'value': []}, links=None)) loader = OneNoteLoa...
null
__init__
super().__init__() self.directory_path = Path(path) if isinstance(path, str) else path
def __init__(self, path: Union[str, Path]) ->None: super().__init__() self.directory_path = Path(path) if isinstance(path, str) else path
null
with_retry
"""Create a new Runnable that retries the original runnable on exceptions. Args: retry_if_exception_type: A tuple of exception types to retry on wait_exponential_jitter: Whether to add jitter to the wait time between retries stop_after_at...
def with_retry(self, *, retry_if_exception_type: Tuple[Type[BaseException], ...]=(Exception,), wait_exponential_jitter: bool=True, stop_after_attempt: int=3) ->Runnable[Input, Output]: """Create a new Runnable that retries the original runnable on exceptions. Args: retry_if_exception_ty...
Create a new Runnable that retries the original runnable on exceptions. Args: retry_if_exception_type: A tuple of exception types to retry on wait_exponential_jitter: Whether to add jitter to the wait time between retries stop_after_attempt: The maximum number of attempts to ma...
search_results_with_query_embedding
return_count = 2 return [gen_mock_zep_document(collection_name='test_collection', embedding_dimensions=VECTOR_DIMS) for _ in range(return_count) ], gen_vector()
@pytest.fixture def search_results_with_query_embedding() ->Tuple[List['ZepDocument'], List [float]]: return_count = 2 return [gen_mock_zep_document(collection_name='test_collection', embedding_dimensions=VECTOR_DIMS) for _ in range(return_count) ], gen_vector()
null
create
""" create the vector store on the backend database Args: metadata_str (str): columns and their types Returns: True if successful; False if not successful """ podstore = self._pod + '.' + self._store """ source column is required. v:text column is...
def create(self, metadata_str: str, text_size: int) ->None: """ create the vector store on the backend database Args: metadata_str (str): columns and their types Returns: True if successful; False if not successful """ podstore = self._pod + '.' + self._s...
create the vector store on the backend database Args: metadata_str (str): columns and their types Returns: True if successful; False if not successful
_default_knn_query
knn: Dict = {'field': self.vector_query_field, 'k': k, 'num_candidates': num_candidates} if query_vector and not model_id: knn['query_vector'] = query_vector elif query and model_id: knn['query_vector_builder'] = {'text_embedding': {'model_id': model_id, 'model_text': query}} else: raise ValueEr...
def _default_knn_query(self, query_vector: Optional[List[float]]=None, query: Optional[str]=None, model_id: Optional[str]=None, k: Optional[ int]=10, num_candidates: Optional[int]=10) ->Dict: knn: Dict = {'field': self.vector_query_field, 'k': k, 'num_candidates': num_candidates} if query_vector...
null
cosine_similarity
"""Row-wise cosine similarity between two equal-width matrices.""" if len(X) == 0 or len(Y) == 0: return np.array([]) X = np.array(X) Y = np.array(Y) if X.shape[1] != Y.shape[1]: raise ValueError( f'Number of columns in X and Y must be the same. X has shape {X.shape} and Y has shape {Y.shape}.' ...
def cosine_similarity(X: Matrix, Y: Matrix) ->np.ndarray: """Row-wise cosine similarity between two equal-width matrices.""" if len(X) == 0 or len(Y) == 0: return np.array([]) X = np.array(X) Y = np.array(Y) if X.shape[1] != Y.shape[1]: raise ValueError( f'Number of colum...
Row-wise cosine similarity between two equal-width matrices.
test_minimax_call_successful
"""Test valid call to minimax.""" llm = Minimax() output = llm( 'A chain is a serial assembly of connected pieces, called links, typically made of metal, with an overall character similar to that of a rope in that it is flexible and curved in compression but linear, rigid, and load-bearing in...
def test_minimax_call_successful() ->None: """Test valid call to minimax.""" llm = Minimax() output = llm( 'A chain is a serial assembly of connected pieces, called links, typically made of metal, with an overall character similar to that of a rope in that it is flexible and curved in...
Test valid call to minimax.
create_demo_server_configurable
return create_demo_server(config_keys=['configurable'])
def create_demo_server_configurable():
    """Build a demo server that exposes the ``configurable`` config key."""
    exposed_keys = ['configurable']
    return create_demo_server(config_keys=exposed_keys)
null
raise_deprecation
if 'llm' in values: warnings.warn( 'Directly instantiating an LLMCheckerChain with an llm is deprecated. Please instantiate with question_to_checked_assertions_chain or using the from_llm class method.' ) if 'question_to_checked_assertions_chain' not in values and values['llm' ] is not N...
@root_validator(pre=True) def raise_deprecation(cls, values: Dict) ->Dict: if 'llm' in values: warnings.warn( 'Directly instantiating an LLMCheckerChain with an llm is deprecated. Please instantiate with question_to_checked_assertions_chain or using the from_llm class method.' ) ...
null
from_texts
index = create_index(texts, embeddings) return cls(embeddings=embeddings, index=index, texts=texts, metadatas= metadatas, **kwargs)
@classmethod
def from_texts(cls, texts: List[str], embeddings: Embeddings, metadatas:
        Optional[List[dict]]=None, **kwargs: Any) -> SVMRetriever:
    """Build an SVMRetriever from raw texts.

    Args:
        texts: The documents to index.
        embeddings: Embedding model used to build the index.
        metadatas: Optional per-document metadata.

    Returns:
        A retriever backed by an SVM index over ``texts``.
    """
    svm_index = create_index(texts, embeddings)
    return cls(
        embeddings=embeddings,
        index=svm_index,
        texts=texts,
        metadatas=metadatas,
        **kwargs,
    )
null
_llm_type
"""Return type of llm.""" return 'fake'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'fake'
Return type of llm.
parse_issues
""" Extracts title and number from each Issue and puts them in a dictionary Parameters: issues(List[Issue]): A list of Github Issue objects Returns: List[dict]: A dictionary of issue titles and numbers """ parsed = [] for issue in issues: title = issue.title ...
def parse_issues(self, issues: List[Issue]) ->List[dict]: """ Extracts title and number from each Issue and puts them in a dictionary Parameters: issues(List[Issue]): A list of Github Issue objects Returns: List[dict]: A dictionary of issue titles and numbers ...
Extracts title and number from each Issue and puts them in a dictionary Parameters: issues(List[Issue]): A list of Github Issue objects Returns: List[dict]: A dictionary of issue titles and numbers
img_prompt_func
""" GPT-4V prompt for image analysis. :param data_dict: A dict with images and a user-provided question. :param num_images: Number of images to include in the prompt. :return: A list containing message objects for each image and the text prompt. """ messages = [] if data_dict['context']['images']: ...
def img_prompt_func(data_dict, num_images=1): """ GPT-4V prompt for image analysis. :param data_dict: A dict with images and a user-provided question. :param num_images: Number of images to include in the prompt. :return: A list containing message objects for each image and the text prompt. """...
GPT-4V prompt for image analysis. :param data_dict: A dict with images and a user-provided question. :param num_images: Number of images to include in the prompt. :return: A list containing message objects for each image and the text prompt.
add_texts
"""Insert text data into TencentVectorDB.""" texts = list(texts) try: embeddings = self.embedding_func.embed_documents(texts) except NotImplementedError: embeddings = [self.embedding_func.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug('Nothing to insert, skipping.') return [] pks: ...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, timeout: Optional[int]=None, batch_size: int=1000, **kwargs: Any ) ->List[str]: """Insert text data into TencentVectorDB.""" texts = list(texts) try: embeddings = self.embedding_func.embed_documents(texts) e...
Insert text data into TencentVectorDB.
_log_trace_from_run
"""Logs a LangChain Run to W*B as a W&B Trace.""" self._ensure_run() root_span = self.run_processor.process_span(run) model_dict = self.run_processor.process_model(run) if root_span is None: return model_trace = self._trace_tree.WBTraceTree(root_span=root_span, model_dict= model_dict) if self._wandb.run is not ...
def _log_trace_from_run(self, run: Run) ->None: """Logs a LangChain Run to W*B as a W&B Trace.""" self._ensure_run() root_span = self.run_processor.process_span(run) model_dict = self.run_processor.process_model(run) if root_span is None: return model_trace = self._trace_tree.WBTraceTree...
Logs a LangChain Run to W*B as a W&B Trace.
from_chain_type
"""Load chain from chain type.""" _chain_type_kwargs = chain_type_kwargs or {} combine_documents_chain = load_qa_chain(llm, chain_type=chain_type, ** _chain_type_kwargs) return cls(combine_documents_chain=combine_documents_chain, **kwargs)
@classmethod
def from_chain_type(cls, llm: BaseLanguageModel, chain_type: str='stuff',
        chain_type_kwargs: Optional[dict]=None, **kwargs: Any) -> BaseRetrievalQA:
    """Load chain from chain type.

    Args:
        llm: Language model powering the QA chain.
        chain_type: Name of the documents-combining strategy (default ``stuff``).
        chain_type_kwargs: Extra keyword arguments for the loaded QA chain.

    Returns:
        A retrieval-QA chain wrapping the loaded combine-documents chain.
    """
    qa_kwargs = chain_type_kwargs or {}
    docs_chain = load_qa_chain(llm, chain_type=chain_type, **qa_kwargs)
    return cls(combine_documents_chain=docs_chain, **kwargs)
Load chain from chain type.
prep_prompts
"""Prepare prompts from inputs.""" stop = None if len(input_list) == 0: return [], stop if 'stop' in input_list[0]: stop = input_list[0]['stop'] prompts = [] for inputs in input_list: selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} prompt = self.prompt.format_prompt(**selected_inpu...
def prep_prompts(self, input_list: List[Dict[str, Any]], run_manager: Optional[CallbackManagerForChainRun]=None) ->Tuple[List[PromptValue], Optional[List[str]]]: """Prepare prompts from inputs.""" stop = None if len(input_list) == 0: return [], stop if 'stop' in input_list[0]: st...
Prepare prompts from inputs.
query_with_sources
"""Query the vectorstore and get back sources.""" llm = llm or OpenAI(temperature=0) retriever_kwargs = retriever_kwargs or {} chain = RetrievalQAWithSourcesChain.from_chain_type(llm, retriever=self. vectorstore.as_retriever(**retriever_kwargs), **kwargs) return chain({chain.question_key: question})
def query_with_sources(self, question: str, llm: Optional[BaseLanguageModel
        ]=None, retriever_kwargs: Optional[Dict[str, Any]]=None, **kwargs: Any
        ) -> dict:
    """Query the vectorstore and return the answer along with its sources."""
    language_model = llm or OpenAI(temperature=0)
    retriever = self.vectorstore.as_retriever(**(retriever_kwargs or {}))
    chain = RetrievalQAWithSourcesChain.from_chain_type(
        language_model, retriever=retriever, **kwargs)
    return chain({chain.question_key: question})
Query the vectorstore and get back sources.
test_vertexai_instantiation
if model_name: model = ChatVertexAI(model_name=model_name) else: model = ChatVertexAI() assert model._llm_type == 'vertexai' try: assert model.model_name == model.client._model_id except AttributeError: assert model.model_name == model.client._model_name.split('/')[-1]
@pytest.mark.parametrize('model_name', model_names_to_test)
def test_vertexai_instantiation(model_name: str) -> None:
    """ChatVertexAI instantiates and reports a consistent model name."""
    model = ChatVertexAI(model_name=model_name) if model_name else ChatVertexAI()
    assert model._llm_type == 'vertexai'
    try:
        assert model.model_name == model.client._model_id
    except AttributeError:
        # Newer client versions expose a fully-qualified _model_name instead.
        assert model.model_name == model.client._model_name.split('/')[-1]
null
deprecated
"""Decorator to mark a function, a class, or a property as deprecated. When deprecating a classmethod, a staticmethod, or a property, the ``@deprecated`` decorator should go *under* ``@classmethod`` and ``@staticmethod`` (i.e., `deprecated` should directly decorate the underlying callable), but *over* ...
def deprecated(since: str, *, message: str='', name: str='', alternative: str='', pending: bool=False, obj_type: str='', addendum: str='', removal: str='') ->Callable[[T], T]: """Decorator to mark a function, a class, or a property as deprecated. When deprecating a classmethod, a staticmethod, or a pro...
Decorator to mark a function, a class, or a property as deprecated. When deprecating a classmethod, a staticmethod, or a property, the ``@deprecated`` decorator should go *under* ``@classmethod`` and ``@staticmethod`` (i.e., `deprecated` should directly decorate the underlying callable), but *over* ``@property``. Whe...
get_triples
"""Get all triples in the graph.""" return [(u, v, d['relation']) for u, v, d in self._graph.edges(data=True)]
def get_triples(self) -> List[Tuple[str, str, str]]:
    """Return every edge of the graph as a (source, target, relation) triple."""
    triples = []
    for source, target, attrs in self._graph.edges(data=True):
        triples.append((source, target, attrs['relation']))
    return triples
Get all triples in the graph.
_make_id
return f'{_hash(prompt)}#{_hash(llm_string)}'
@staticmethod
def _make_id(prompt: str, llm_string: str) -> str:
    """Build a deterministic cache id from the hashed prompt and llm config."""
    return '{}#{}'.format(_hash(prompt), _hash(llm_string))
null
_call
"""Call out to Anthropic's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python ...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call out to Anthropic's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when...
Call out to Anthropic's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python prompt = "What are the biggest risks facing humanity?" p...
_get_diffbot_data
"""Get Diffbot file from Diffbot REST API.""" diffbot_url = self._diffbot_api_url('article') params = {'token': self.api_token, 'url': url} response = requests.get(diffbot_url, params=params, timeout=10) return response.json() if response.ok else {}
def _get_diffbot_data(self, url: str) -> Any:
    """Fetch article data for ``url`` from the Diffbot REST API.

    Returns the parsed JSON payload, or an empty dict on a non-2xx response.
    """
    endpoint = self._diffbot_api_url('article')
    query_params = {'token': self.api_token, 'url': url}
    response = requests.get(endpoint, params=query_params, timeout=10)
    if not response.ok:
        return {}
    return response.json()
Get Diffbot file from Diffbot REST API.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'output']
@classmethod
def get_lc_namespace(cls) -> List[str]:
    """Namespace path used when (de)serializing this langchain object."""
    namespace = ['langchain', 'schema', 'output']
    return namespace
Get the namespace of the langchain object.
parse
"""Returns the input text with no changes.""" return text
def parse(self, text: str) -> str:
    """Identity parser: hand back ``text`` unchanged."""
    return text
Returns the input text with no changes.
on_llm_start
self.on_llm_start_common()
def on_llm_start(self, *args: Any, **kwargs: Any) -> Any:
    """Handle an LLM-start event by delegating to the shared handler.

    All positional and keyword arguments are accepted but ignored.
    """
    self.on_llm_start_common()
null
test_timescalevector
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True) output = docsearch.similarity_search('foo', k=1) a...
def test_timescalevector() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=T...
Test end to end construction and search.
test_parse_json_with_python_dict
parsed = parse_json_markdown(JSON_WITH_PYTHON_DICT) assert parsed == {'action': 'Final Answer', 'action_input': {'foo': 'bar', 'bar': 'foo'}}
def test_parse_json_with_python_dict() -> None:
    """parse_json_markdown copes with python-dict-style (single-quoted) JSON."""
    expected = {
        'action': 'Final Answer',
        'action_input': {'foo': 'bar', 'bar': 'foo'},
    }
    assert parse_json_markdown(JSON_WITH_PYTHON_DICT) == expected
null
parse
"""Parse text into agent action/finish."""
@abstractmethod
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
    """Parse text into agent action/finish.

    Args:
        text: Raw model output to interpret.

    Returns:
        An ``AgentAction`` or an ``AgentFinish`` parsed from the text.
    """
Parse text into agent action/finish.
test_embed_query_different_lengths
"""Test embedding queries of different lengths.""" model = GoogleGenerativeAIEmbeddings(model=_MODEL) result = model.embed_query(query) assert len(result) == 768
@pytest.mark.parametrize('query', ['Hi',
    'This is a longer query string to test the embedding functionality of the model against the pickle rick?'
    ])
def test_embed_query_different_lengths(query: str) -> None:
    """Test embedding queries of different lengths."""
    embedder = GoogleGenerativeAIEmbeddings(model=_MODEL)
    embedding = embedder.embed_query(query)
    assert len(embedding) == 768
Test embedding queries of different lengths.
load
"""Load data into document objects.""" return list(self.lazy_load())
def load(self) ->List[Document]: """Load data into document objects.""" return list(self.lazy_load())
Load data into document objects.
test_bedrock_streaming
"""Test streaming tokens from OpenAI.""" for token in chat.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
@pytest.mark.scheduled
def test_bedrock_streaming(chat: BedrockChat) -> None:
    """Each streamed chunk's content should be a plain string."""
    for chunk in chat.stream("I'm Pickle Rick"):
        assert isinstance(chunk.content, str)
Test streaming tokens from OpenAI.
test_run_kwargs
"""Test run method with kwargs.""" chain = FakeChain(the_input_keys=['foo', 'bar']) output = chain.run(foo='bar', bar='foo') assert output == 'baz'
def test_run_kwargs() -> None:
    """Chain.run accepts its inputs as keyword arguments."""
    chain = FakeChain(the_input_keys=['foo', 'bar'])
    result = chain.run(foo='bar', bar='foo')
    assert result == 'baz'
Test run method with kwargs.
test_partial_with_chat_prompts
prompt_a = ChatPromptTemplate(input_variables=['foo'], messages=[ MessagesPlaceholder(variable_name='foo')]) prompt_b = ChatPromptTemplate.from_template('jim {bar}') pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_a, pipeline_prompts=[('foo', prompt_b)]) assert pipeline_prompt.input_variables == ['...
def test_partial_with_chat_prompts() ->None: prompt_a = ChatPromptTemplate(input_variables=['foo'], messages=[ MessagesPlaceholder(variable_name='foo')]) prompt_b = ChatPromptTemplate.from_template('jim {bar}') pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_a, pipeline_prompts=...
null
test_elasticsearch_with_relevance_score
"""Test to make sure the relevance score is scaled to 0-1.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] embeddings = FakeEmbeddings() docsearch = ElasticsearchStore.from_texts(index_name=index_name, texts= texts, embedding=embeddings, metadatas=metadatas, ** elastic...
def test_elasticsearch_with_relevance_score(self, elasticsearch_connection: dict, index_name: str) ->None: """Test to make sure the relevance score is scaled to 0-1.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] embeddings = FakeEmbeddings() docsearch...
Test to make sure the relevance score is scaled to 0-1.
test_shell_input_validation
shell_input = ShellInput(commands=test_commands) assert isinstance(shell_input.commands, list) assert len(shell_input.commands) == 2 with warnings.catch_warnings(record=True) as w: ShellInput(commands=test_commands) assert len(w) == 1 assert str(w[-1].message ) == 'The shell tool has no safeguards b...
def test_shell_input_validation() ->None: shell_input = ShellInput(commands=test_commands) assert isinstance(shell_input.commands, list) assert len(shell_input.commands) == 2 with warnings.catch_warnings(record=True) as w: ShellInput(commands=test_commands) assert len(w) == 1 ass...
null
replace_file
try: content = source.read_text() except UnicodeDecodeError: return new_content = find_and_replace(content, replacements) if new_content != content: source.write_text(new_content)
def replace_file(source: Path, replacements: Dict[str, str]) -> None:
    """Apply text replacements to ``source`` in place, skipping binary files."""
    try:
        original = source.read_text()
    except UnicodeDecodeError:
        # Not valid text (likely binary); leave the file untouched.
        return
    updated = find_and_replace(original, replacements)
    if updated == original:
        return
    source.write_text(updated)
null
lazy_parse
"""Load documents from a blob.""" mimetype = blob.mimetype if mimetype is None: raise ValueError(f'{blob} does not have a mimetype.') if mimetype in self.handlers: handler = self.handlers[mimetype] yield from handler.lazy_parse(blob) elif self.fallback_parser is not None: yield from self.fallback_parser...
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Load documents from a blob.""" mimetype = blob.mimetype if mimetype is None: raise ValueError(f'{blob} does not have a mimetype.') if mimetype in self.handlers: handler = self.handlers[mimetype] yield from handler.lazy_par...
Load documents from a blob.
test_find_all_links_single
htmls = ["href='foobar.com'", 'href="foobar.com"', '<div><a class="blah" href="foobar.com">hullo</a></div>'] actual = [find_all_links(html) for html in htmls] assert actual == [['foobar.com']] * 3
def test_find_all_links_single() -> None:
    """A lone href, in any quoting style, yields exactly one link."""
    samples = [
        "href='foobar.com'",
        'href="foobar.com"',
        '<div><a class="blah" href="foobar.com">hullo</a></div>',
    ]
    actual = [find_all_links(html) for html in samples]
    assert actual == [['foobar.com']] * 3
null
test_singlestoredb_new_vector
"""Test adding a new document""" table_name = 'test_singlestoredb_new_vector' drop(table_name) docsearch = SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name=table_name, host=TEST_SINGLESTOREDB_URL) docsearch.add_texts(['foo']) output = docsearch.similarity_search('foo', k=2) assert output == TE...
@pytest.mark.skipif(not singlestoredb_installed, reason= 'singlestoredb not installed') def test_singlestoredb_new_vector(texts: List[str]) ->None: """Test adding a new document""" table_name = 'test_singlestoredb_new_vector' drop(table_name) docsearch = SingleStoreDB.from_texts(texts, NormilizedFak...
Test adding a new document
test_transform_empty_html
bs_transformer = BeautifulSoupTransformer() empty_html = '<html></html>' documents = [Document(page_content=empty_html)] docs_transformed = bs_transformer.transform_documents(documents) assert docs_transformed[0].page_content == ''
@pytest.mark.requires('bs4')
def test_transform_empty_html() -> None:
    """Transforming an empty HTML document yields empty page content."""
    transformer = BeautifulSoupTransformer()
    source_docs = [Document(page_content='<html></html>')]
    transformed = transformer.transform_documents(source_docs)
    assert transformed[0].page_content == ''
null
worker
old_stdout = sys.stdout sys.stdout = mystdout = StringIO() try: exec(command, globals, locals) sys.stdout = old_stdout queue.put(mystdout.getvalue()) except Exception as e: sys.stdout = old_stdout queue.put(repr(e))
@classmethod
def worker(cls, command: str, globals: Optional[Dict], locals: Optional[
        Dict], queue: multiprocessing.Queue) -> None:
    """Execute ``command`` capturing stdout, and report the result via ``queue``.

    On success the captured stdout text is put on the queue; if the command
    raises an ``Exception``, its ``repr`` is put instead.

    Args:
        command: Python source to execute with ``exec``.
        globals: Globals mapping for ``exec`` (may be None).
        locals: Locals mapping for ``exec`` (may be None).
        queue: Queue used to hand the result back to the parent process.
    """
    old_stdout = sys.stdout
    sys.stdout = captured = StringIO()
    try:
        exec(command, globals, locals)
    except Exception as e:
        result = repr(e)
    else:
        result = captured.getvalue()
    finally:
        # Always restore stdout, even if a BaseException (e.g.
        # KeyboardInterrupt) escapes exec — the original left it redirected.
        sys.stdout = old_stdout
    queue.put(result)
null