method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
get_format_instructions
if self.pydantic_object is None: return 'Return a JSON object.' else: schema = self.pydantic_object.schema() reduced_schema = schema if 'title' in reduced_schema: del reduced_schema['title'] if 'type' in reduced_schema: del reduced_schema['type'] schema_str = json.dumps(reduced_schema) return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
def get_format_instructions(self) ->str: if self.pydantic_object is None: return 'Return a JSON object.' else: schema = self.pydantic_object.schema() reduced_schema = schema if 'title' in reduced_schema: del reduced_schema['title'] if 'type' in reduced_schema: del reduced_schema['type'] schema_str = json.dumps(reduced_schema) return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
is_gemini
"""Returns whether a model is belongs to a Gemini family or not.""" return _is_gemini_model(self.model_name)
@property def is_gemini(self) ->bool: """Returns whether a model is belongs to a Gemini family or not.""" return _is_gemini_model(self.model_name)
Returns whether a model is belongs to a Gemini family or not.
_stream
"""Call the IBM watsonx.ai inference endpoint which then streams the response. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. run_manager: Optional callback manager. Returns: The iterator which yields generation chunks. Example: .. code-block:: python response = watsonx_llm.stream("What is a molecule") for chunk in response: print(chunk, end='') """ if stop: if self.params: self.params.update({'stop_sequences': stop}) else: self.params = {'stop_sequences': stop} for stream_resp in self.watsonx_model.generate_text_stream(prompt=prompt, raw_response=True, params=self.params): chunk = self._stream_response_to_generation_chunk(stream_resp) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: """Call the IBM watsonx.ai inference endpoint which then streams the response. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. run_manager: Optional callback manager. Returns: The iterator which yields generation chunks. Example: .. code-block:: python response = watsonx_llm.stream("What is a molecule") for chunk in response: print(chunk, end='') """ if stop: if self.params: self.params.update({'stop_sequences': stop}) else: self.params = {'stop_sequences': stop} for stream_resp in self.watsonx_model.generate_text_stream(prompt= prompt, raw_response=True, params=self.params): chunk = self._stream_response_to_generation_chunk(stream_resp) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk)
Call the IBM watsonx.ai inference endpoint which then streams the response. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. run_manager: Optional callback manager. Returns: The iterator which yields generation chunks. Example: .. code-block:: python response = watsonx_llm.stream("What is a molecule") for chunk in response: print(chunk, end='')
delete
"""Delete by vector IDs. Args: ids: List of ids to delete. """ self._collection.delete(ids=ids)
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->None: """Delete by vector IDs. Args: ids: List of ids to delete. """ self._collection.delete(ids=ids)
Delete by vector IDs. Args: ids: List of ids to delete.
on_tool_start
"""Do nothing when tool starts.""" pass
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, ** kwargs: Any) ->None: """Do nothing when tool starts.""" pass
Do nothing when tool starts.
test_multiple_msg
human_msg = HumanMessage(content='human', additional_kwargs={'key': 'value'}) ai_msg = AIMessage(content='ai') sys_msg = SystemMessage(content='sys') msgs = [human_msg, ai_msg, sys_msg] assert messages_from_dict(messages_to_dict(msgs)) == msgs
def test_multiple_msg() ->None: human_msg = HumanMessage(content='human', additional_kwargs={'key': 'value'}) ai_msg = AIMessage(content='ai') sys_msg = SystemMessage(content='sys') msgs = [human_msg, ai_msg, sys_msg] assert messages_from_dict(messages_to_dict(msgs)) == msgs
null
embeddings
"""Access the query embedding object if available.""" return self._embedding
@property def embeddings(self) ->Optional[Embeddings]: """Access the query embedding object if available.""" return self._embedding
Access the query embedding object if available.
_build_query_sql
"""Builds Rockset SQL query to query similar vectors to query_vector""" q_embedding_str = ','.join(map(str, query_embedding)) distance_str = ( f'{distance_func.value}({self._embedding_key}, [{q_embedding_str}]) as dist' ) where_str = f'WHERE {where_str}\n' if where_str else '' return f"""SELECT * EXCEPT({self._embedding_key}), {distance_str} FROM {self._workspace}.{self._collection_name} {where_str}ORDER BY dist {distance_func.order_by()} LIMIT {str(k)} """
def _build_query_sql(self, query_embedding: List[float], distance_func: DistanceFunction, k: int=4, where_str: Optional[str]=None) ->str: """Builds Rockset SQL query to query similar vectors to query_vector""" q_embedding_str = ','.join(map(str, query_embedding)) distance_str = ( f'{distance_func.value}({self._embedding_key}, [{q_embedding_str}]) as dist' ) where_str = f'WHERE {where_str}\n' if where_str else '' return f"""SELECT * EXCEPT({self._embedding_key}), {distance_str} FROM {self._workspace}.{self._collection_name} {where_str}ORDER BY dist {distance_func.order_by()} LIMIT {str(k)} """
Builds Rockset SQL query to query similar vectors to query_vector
similarity_search_with_score_by_vector
try: from timescale_vector import client except ImportError: raise ImportError( 'Could not import timescale_vector python package. Please install it with `pip install timescale-vector`.' ) results = self.sync_client.search(embedding, limit=k, filter=filter, predicates=predicates, uuid_time_filter=self.date_to_range_filter(**kwargs) ) docs = [(Document(page_content=result[client.SEARCH_RESULT_CONTENTS_IDX], metadata=result[client.SEARCH_RESULT_METADATA_IDX]), result[client. SEARCH_RESULT_DISTANCE_IDX]) for result in results] return docs
def similarity_search_with_score_by_vector(self, embedding: Optional[List[ float]], k: int=4, filter: Optional[Union[dict, list]]=None, predicates: Optional[Predicates]=None, **kwargs: Any) ->List[Tuple[Document, float]]: try: from timescale_vector import client except ImportError: raise ImportError( 'Could not import timescale_vector python package. Please install it with `pip install timescale-vector`.' ) results = self.sync_client.search(embedding, limit=k, filter=filter, predicates=predicates, uuid_time_filter=self.date_to_range_filter( **kwargs)) docs = [(Document(page_content=result[client.SEARCH_RESULT_CONTENTS_IDX ], metadata=result[client.SEARCH_RESULT_METADATA_IDX]), result[ client.SEARCH_RESULT_DISTANCE_IDX]) for result in results] return docs
null
test_neo4jvector_special_character
"""Test removing lucene.""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url= url, username=username, password=password, pre_delete_collection=True, search_type=SearchType.HYBRID) output = docsearch.similarity_search( 'It is the end of the world. Take shelter!', k=1) assert output == [Document(page_content= 'It is the end of the world. Take shelter!', metadata={})] drop_vector_indexes(docsearch)
def test_neo4jvector_special_character() ->None: """Test removing lucene.""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url=url, username=username, password=password, pre_delete_collection=True, search_type=SearchType.HYBRID) output = docsearch.similarity_search( 'It is the end of the world. Take shelter!', k=1) assert output == [Document(page_content= 'It is the end of the world. Take shelter!', metadata={})] drop_vector_indexes(docsearch)
Test removing lucene.
get_pinecone_index
"""Return a Pinecone Index instance. Args: index_name: Name of the index to use. pool_threads: Number of threads to use for index upsert. Returns: Pinecone Index instance.""" try: import pinecone except ImportError: raise ValueError( 'Could not import pinecone python package. Please install it with `pip install pinecone-client`.' ) indexes = pinecone.list_indexes() if index_name in indexes: index = pinecone.Index(index_name, pool_threads=pool_threads) elif len(indexes) == 0: raise ValueError( "No active indexes found in your Pinecone project, are you sure you're using the right Pinecone API key and Environment? Please double check your Pinecone dashboard." ) else: raise ValueError( f"Index '{index_name}' not found in your Pinecone project. Did you mean one of the following indexes: {', '.join(indexes)}" ) return index
@classmethod def get_pinecone_index(cls, index_name: Optional[str], pool_threads: int=4 ) ->Index: """Return a Pinecone Index instance. Args: index_name: Name of the index to use. pool_threads: Number of threads to use for index upsert. Returns: Pinecone Index instance.""" try: import pinecone except ImportError: raise ValueError( 'Could not import pinecone python package. Please install it with `pip install pinecone-client`.' ) indexes = pinecone.list_indexes() if index_name in indexes: index = pinecone.Index(index_name, pool_threads=pool_threads) elif len(indexes) == 0: raise ValueError( "No active indexes found in your Pinecone project, are you sure you're using the right Pinecone API key and Environment? Please double check your Pinecone dashboard." ) else: raise ValueError( f"Index '{index_name}' not found in your Pinecone project. Did you mean one of the following indexes: {', '.join(indexes)}" ) return index
Return a Pinecone Index instance. Args: index_name: Name of the index to use. pool_threads: Number of threads to use for index upsert. Returns: Pinecone Index instance.
lazy_load
"""Download a selected dataset lazily. Returns: an iterator of Documents. """ return (self.sample_to_document_function(s) for s in self.dataset.take(self .load_max_docs) if self.sample_to_document_function is not None)
def lazy_load(self) ->Iterator[Document]: """Download a selected dataset lazily. Returns: an iterator of Documents. """ return (self.sample_to_document_function(s) for s in self.dataset.take( self.load_max_docs) if self.sample_to_document_function is not None)
Download a selected dataset lazily. Returns: an iterator of Documents.
_transform_prompt
"""Transform prompt.""" if self.inject_instruction_format: prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=prompt) return prompt
def _transform_prompt(self, prompt: str) ->str: """Transform prompt.""" if self.inject_instruction_format: prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=prompt) return prompt
Transform prompt.
test_action_w_namespace_w_some_emb
str1 = 'test1' str2 = 'test2' str3 = 'test3' encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [{'test_namespace': str1}, {'test_namespace': encoded_str2}, { 'test_namespace': encoded_str3}] assert base.embed([{'test_namespace': str1}, {'test_namespace': base.Embed( str2)}, {'test_namespace': base.Embed(str3)}], MockEncoder()) == expected expected_embed_and_keep = [{'test_namespace': str1}, {'test_namespace': str2 + ' ' + encoded_str2}, {'test_namespace': str3 + ' ' + encoded_str3}] assert base.embed([{'test_namespace': str1}, {'test_namespace': base. EmbedAndKeep(str2)}, {'test_namespace': base.EmbedAndKeep(str3)}], MockEncoder()) == expected_embed_and_keep
@pytest.mark.requires('vowpal_wabbit_next') def test_action_w_namespace_w_some_emb() ->None: str1 = 'test1' str2 = 'test2' str3 = 'test3' encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [{'test_namespace': str1}, {'test_namespace': encoded_str2}, {'test_namespace': encoded_str3}] assert base.embed([{'test_namespace': str1}, {'test_namespace': base. Embed(str2)}, {'test_namespace': base.Embed(str3)}], MockEncoder() ) == expected expected_embed_and_keep = [{'test_namespace': str1}, {'test_namespace': str2 + ' ' + encoded_str2}, {'test_namespace': str3 + ' ' + encoded_str3}] assert base.embed([{'test_namespace': str1}, {'test_namespace': base. EmbedAndKeep(str2)}, {'test_namespace': base.EmbedAndKeep(str3)}], MockEncoder()) == expected_embed_and_keep
null
__init__
""" Create an AstraDB vector store object. See class docstring for help. """ try: from astrapy.db import AstraDB as LibAstraDB from astrapy.db import AstraDBCollection as LibAstraDBCollection except (ImportError, ModuleNotFoundError): raise ImportError( 'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.' ) if astra_db_client is not None: if token is not None or api_endpoint is not None: raise ValueError( "You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'." ) self.embedding = embedding self.collection_name = collection_name self.token = token self.api_endpoint = api_endpoint self.namespace = namespace self.batch_size: int = batch_size or DEFAULT_BATCH_SIZE self.bulk_insert_batch_concurrency: int = (bulk_insert_batch_concurrency or DEFAULT_BULK_INSERT_BATCH_CONCURRENCY) self.bulk_insert_overwrite_concurrency: int = ( bulk_insert_overwrite_concurrency or DEFAULT_BULK_INSERT_OVERWRITE_CONCURRENCY) self.bulk_delete_concurrency: int = (bulk_delete_concurrency or DEFAULT_BULK_DELETE_CONCURRENCY) self._embedding_dimension: Optional[int] = None self.metric = metric if astra_db_client is not None: self.astra_db = astra_db_client else: self.astra_db = LibAstraDB(token=self.token, api_endpoint=self. api_endpoint, namespace=self.namespace) if not pre_delete_collection: self._provision_collection() else: self.clear() self.collection = LibAstraDBCollection(collection_name=self.collection_name, astra_db=self.astra_db)
def __init__(self, *, embedding: Embeddings, collection_name: str, token: Optional[str]=None, api_endpoint: Optional[str]=None, astra_db_client: Optional[Any]=None, namespace: Optional[str]=None, metric: Optional[str ]=None, batch_size: Optional[int]=None, bulk_insert_batch_concurrency: Optional[int]=None, bulk_insert_overwrite_concurrency: Optional[int]= None, bulk_delete_concurrency: Optional[int]=None, pre_delete_collection: bool=False) ->None: """ Create an AstraDB vector store object. See class docstring for help. """ try: from astrapy.db import AstraDB as LibAstraDB from astrapy.db import AstraDBCollection as LibAstraDBCollection except (ImportError, ModuleNotFoundError): raise ImportError( 'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.' ) if astra_db_client is not None: if token is not None or api_endpoint is not None: raise ValueError( "You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'." ) self.embedding = embedding self.collection_name = collection_name self.token = token self.api_endpoint = api_endpoint self.namespace = namespace self.batch_size: int = batch_size or DEFAULT_BATCH_SIZE self.bulk_insert_batch_concurrency: int = ( bulk_insert_batch_concurrency or DEFAULT_BULK_INSERT_BATCH_CONCURRENCY) self.bulk_insert_overwrite_concurrency: int = ( bulk_insert_overwrite_concurrency or DEFAULT_BULK_INSERT_OVERWRITE_CONCURRENCY) self.bulk_delete_concurrency: int = (bulk_delete_concurrency or DEFAULT_BULK_DELETE_CONCURRENCY) self._embedding_dimension: Optional[int] = None self.metric = metric if astra_db_client is not None: self.astra_db = astra_db_client else: self.astra_db = LibAstraDB(token=self.token, api_endpoint=self. api_endpoint, namespace=self.namespace) if not pre_delete_collection: self._provision_collection() else: self.clear() self.collection = LibAstraDBCollection(collection_name=self. collection_name, astra_db=self.astra_db)
Create an AstraDB vector store object. See class docstring for help.
_import_sklearn
from langchain_community.vectorstores.sklearn import SKLearnVectorStore return SKLearnVectorStore
def _import_sklearn() ->Any: from langchain_community.vectorstores.sklearn import SKLearnVectorStore return SKLearnVectorStore
null
test_qdrant_add_texts_stores_ids
"""Test end to end Qdrant.add_texts stores provided ids.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest ids = ['fa38d572-4c31-4579-aedc-1960d79df6df', 'cdc1aa36-d6ab-4fb2-8a94-56674fd27484'] client = QdrantClient(':memory:') collection_name = uuid.uuid4().hex client.recreate_collection(collection_name, vectors_config=rest. VectorParams(size=10, distance=rest.Distance.COSINE)) vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings()) returned_ids = vec_store.add_texts(['abc', 'def'], ids=ids, batch_size= batch_size) assert all(first == second for first, second in zip(ids, returned_ids)) assert 2 == client.count(collection_name).count stored_ids = [point.id for point in client.scroll(collection_name)[0]] assert set(ids) == set(stored_ids)
@pytest.mark.parametrize('batch_size', [1, 64]) def test_qdrant_add_texts_stores_ids(batch_size: int) ->None: """Test end to end Qdrant.add_texts stores provided ids.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest ids = ['fa38d572-4c31-4579-aedc-1960d79df6df', 'cdc1aa36-d6ab-4fb2-8a94-56674fd27484'] client = QdrantClient(':memory:') collection_name = uuid.uuid4().hex client.recreate_collection(collection_name, vectors_config=rest. VectorParams(size=10, distance=rest.Distance.COSINE)) vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings()) returned_ids = vec_store.add_texts(['abc', 'def'], ids=ids, batch_size= batch_size) assert all(first == second for first, second in zip(ids, returned_ids)) assert 2 == client.count(collection_name).count stored_ids = [point.id for point in client.scroll(collection_name)[0]] assert set(ids) == set(stored_ids)
Test end to end Qdrant.add_texts stores provided ids.
set_llm_cache
"""Set a new LLM cache, overwriting the previous value, if any.""" import langchain with warnings.catch_warnings(): warnings.filterwarnings('ignore', message= 'Importing llm_cache from langchain root module is no longer supported' ) langchain.llm_cache = value global _llm_cache _llm_cache = value
def set_llm_cache(value: Optional['BaseCache']) ->None: """Set a new LLM cache, overwriting the previous value, if any.""" import langchain with warnings.catch_warnings(): warnings.filterwarnings('ignore', message= 'Importing llm_cache from langchain root module is no longer supported' ) langchain.llm_cache = value global _llm_cache _llm_cache = value
Set a new LLM cache, overwriting the previous value, if any.
on_agent_action
"""Do nothing when agent takes a specific action.""" pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any: """Do nothing when agent takes a specific action.""" pass
Do nothing when agent takes a specific action.
run
return self._run(commands)
def run(self, commands: List[str]) ->str: return self._run(commands)
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
test_imports
"""Test that you can import all top level things okay.""" from langchain_community.chat_models import ChatOpenAI from langchain_community.document_loaders import BSHTMLLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.llms import OpenAI from langchain_community.utilities import SearchApiAPIWrapper, SerpAPIWrapper from langchain_community.vectorstores import FAISS from langchain_core.prompts import BasePromptTemplate from langchain.agents import OpenAIFunctionsAgent from langchain.callbacks import OpenAICallbackHandler from langchain.chains import LLMChain from langchain.retrievers import VespaRetriever from langchain.tools import DuckDuckGoSearchResults
def test_imports() ->None: """Test that you can import all top level things okay.""" from langchain_community.chat_models import ChatOpenAI from langchain_community.document_loaders import BSHTMLLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.llms import OpenAI from langchain_community.utilities import SearchApiAPIWrapper, SerpAPIWrapper from langchain_community.vectorstores import FAISS from langchain_core.prompts import BasePromptTemplate from langchain.agents import OpenAIFunctionsAgent from langchain.callbacks import OpenAICallbackHandler from langchain.chains import LLMChain from langchain.retrievers import VespaRetriever from langchain.tools import DuckDuckGoSearchResults
Test that you can import all top level things okay.
extract_functions_classes
def extract_func(elements: List[str], start_idx: int, end_idx: int) ->None: elements.append(self._extract_code(start_idx, end_idx)) return self._process_lines(extract_func)
def extract_functions_classes(self) ->List[str]: def extract_func(elements: List[str], start_idx: int, end_idx: int) ->None: elements.append(self._extract_code(start_idx, end_idx)) return self._process_lines(extract_func)
null
test_context_w_namespace_w_emb
str1 = 'test' encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) expected = [{'test_namespace': encoded_str1}] assert base.embed({'test_namespace': base.Embed(str1)}, MockEncoder() ) == expected expected_embed_and_keep = [{'test_namespace': str1 + ' ' + encoded_str1}] assert base.embed({'test_namespace': base.EmbedAndKeep(str1)}, MockEncoder() ) == expected_embed_and_keep
@pytest.mark.requires('vowpal_wabbit_next') def test_context_w_namespace_w_emb() ->None: str1 = 'test' encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) expected = [{'test_namespace': encoded_str1}] assert base.embed({'test_namespace': base.Embed(str1)}, MockEncoder() ) == expected expected_embed_and_keep = [{'test_namespace': str1 + ' ' + encoded_str1}] assert base.embed({'test_namespace': base.EmbedAndKeep(str1)}, MockEncoder()) == expected_embed_and_keep
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
get_time
"""Get the current server time as a high resolution timestamp! It's important to get this from the server to ensure a monotonic clock, otherwise there may be data loss when cleaning up old documents! Returns: The current server time as a float timestamp. """
@abstractmethod def get_time(self) ->float: """Get the current server time as a high resolution timestamp! It's important to get this from the server to ensure a monotonic clock, otherwise there may be data loss when cleaning up old documents! Returns: The current server time as a float timestamp. """
Get the current server time as a high resolution timestamp! It's important to get this from the server to ensure a monotonic clock, otherwise there may be data loss when cleaning up old documents! Returns: The current server time as a float timestamp.
get_schema
"""Returns the schema of the FalkorDB database""" return self.schema
@property def get_schema(self) ->str: """Returns the schema of the FalkorDB database""" return self.schema
Returns the schema of the FalkorDB database
_import_tigris
from langchain_community.vectorstores.tigris import Tigris return Tigris
def _import_tigris() ->Any: from langchain_community.vectorstores.tigris import Tigris return Tigris
null
__init__
"""Load a list of URLs using Selenium and unstructured.""" try: import selenium except ImportError: raise ImportError( 'selenium package not found, please install it with `pip install selenium`' ) try: import unstructured except ImportError: raise ImportError( 'unstructured package not found, please install it with `pip install unstructured`' ) self.urls = urls self.continue_on_failure = continue_on_failure self.browser = browser self.binary_location = binary_location self.executable_path = executable_path self.headless = headless self.arguments = arguments
def __init__(self, urls: List[str], continue_on_failure: bool=True, browser: Literal['chrome', 'firefox']='chrome', binary_location: Optional[str]= None, executable_path: Optional[str]=None, headless: bool=True, arguments: List[str]=[]): """Load a list of URLs using Selenium and unstructured.""" try: import selenium except ImportError: raise ImportError( 'selenium package not found, please install it with `pip install selenium`' ) try: import unstructured except ImportError: raise ImportError( 'unstructured package not found, please install it with `pip install unstructured`' ) self.urls = urls self.continue_on_failure = continue_on_failure self.browser = browser self.binary_location = binary_location self.executable_path = executable_path self.headless = headless self.arguments = arguments
Load a list of URLs using Selenium and unstructured.
_identifying_params
"""Get the identifying parameters.""" return {**{'endpoint_url': self.endpoint_url}, **self._default_params}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{'endpoint_url': self.endpoint_url}, **self._default_params}
Get the identifying parameters.
similarity_search
"""Run similarity search with TimescaleVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self._embed_query(query) return self.similarity_search_by_vector(embedding=embedding, k=k, filter= filter, predicates=predicates, **kwargs)
def similarity_search(self, query: str, k: int=4, filter: Optional[Union[ dict, list]]=None, predicates: Optional[Predicates]=None, **kwargs: Any ) ->List[Document]: """Run similarity search with TimescaleVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self._embed_query(query) return self.similarity_search_by_vector(embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs)
Run similarity search with TimescaleVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query.
_parse_prediction
if isinstance(prediction, str): return prediction if self.result_arg: try: return prediction[self.result_arg] except KeyError: if isinstance(prediction, str): error_desc = ( f'Provided non-None `result_arg` (result_arg={self.result_arg}). But got prediction of type {type(prediction)} instead of dict. Most probably, youneed to set `result_arg=None` during VertexAIModelGarden initialization.' ) raise ValueError(error_desc) else: raise ValueError(f'{self.result_arg} key not found in prediction!') return prediction
def _parse_prediction(self, prediction: Any) ->str: if isinstance(prediction, str): return prediction if self.result_arg: try: return prediction[self.result_arg] except KeyError: if isinstance(prediction, str): error_desc = ( f'Provided non-None `result_arg` (result_arg={self.result_arg}). But got prediction of type {type(prediction)} instead of dict. Most probably, youneed to set `result_arg=None` during VertexAIModelGarden initialization.' ) raise ValueError(error_desc) else: raise ValueError( f'{self.result_arg} key not found in prediction!') return prediction
null
_default_params
"""Get the default parameters for calling Petals API.""" normal_params = {'temperature': self.temperature, 'max_new_tokens': self. max_new_tokens, 'top_p': self.top_p, 'top_k': self.top_k, 'do_sample': self.do_sample, 'max_length': self.max_length} return {**normal_params, **self.model_kwargs}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling Petals API.""" normal_params = {'temperature': self.temperature, 'max_new_tokens': self.max_new_tokens, 'top_p': self.top_p, 'top_k': self.top_k, 'do_sample': self.do_sample, 'max_length': self.max_length} return {**normal_params, **self.model_kwargs}
Get the default parameters for calling Petals API.
__getitem__
...
@overload def __getitem__(self, item: int) ->AsyncIterator[T]: ...
null
test_batch_iterate
"""Test batching function.""" assert list(batch_iterate(input_size, input_iterable)) == expected_output
@pytest.mark.parametrize('input_size, input_iterable, expected_output', [(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]), (3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]), (1, [100, 200, 300], [[100], [200], [300]]), (4, [ ], [])]) def test_batch_iterate(input_size: int, input_iterable: List[str], expected_output: List[str]) ->None: """Test batching function.""" assert list(batch_iterate(input_size, input_iterable)) == expected_output
Test batching function.
_import_arxiv_tool
from langchain_community.tools.arxiv.tool import ArxivQueryRun return ArxivQueryRun
def _import_arxiv_tool() ->Any: from langchain_community.tools.arxiv.tool import ArxivQueryRun return ArxivQueryRun
null
test_psychic_loader_initialization
PsychicLoader(api_key=self.MOCK_API_KEY, connector_id=self. MOCK_CONNECTOR_ID, account_id=self.MOCK_ACCOUNT_ID) mock_psychic.assert_called_once_with(secret_key=self.MOCK_API_KEY) mock_connector_id.assert_called_once_with(self.MOCK_CONNECTOR_ID)
def test_psychic_loader_initialization(self, mock_psychic: MagicMock, mock_connector_id: MagicMock) ->None: PsychicLoader(api_key=self.MOCK_API_KEY, connector_id=self. MOCK_CONNECTOR_ID, account_id=self.MOCK_ACCOUNT_ID) mock_psychic.assert_called_once_with(secret_key=self.MOCK_API_KEY) mock_connector_id.assert_called_once_with(self.MOCK_CONNECTOR_ID)
null
extract_tags
""" Extract specific tags from a given HTML content. Args: html_content: The original HTML content string. tags: A list of tags to be extracted from the HTML. Returns: A string combining the content of the extracted tags. """ from bs4 import BeautifulSoup soup = BeautifulSoup(html_content, 'html.parser') text_parts: List[str] = [] for element in soup.find_all(): if element.name in tags: text_parts += get_navigable_strings(element) element.decompose() return ' '.join(text_parts)
@staticmethod def extract_tags(html_content: str, tags: List[str]) ->str: """ Extract specific tags from a given HTML content. Args: html_content: The original HTML content string. tags: A list of tags to be extracted from the HTML. Returns: A string combining the content of the extracted tags. """ from bs4 import BeautifulSoup soup = BeautifulSoup(html_content, 'html.parser') text_parts: List[str] = [] for element in soup.find_all(): if element.name in tags: text_parts += get_navigable_strings(element) element.decompose() return ' '.join(text_parts)
Extract specific tags from a given HTML content. Args: html_content: The original HTML content string. tags: A list of tags to be extracted from the HTML. Returns: A string combining the content of the extracted tags.
__deepcopy__
return self
def __deepcopy__(self, memo: dict) -> 'FakeAsyncCallbackHandler':
    """Deep-copy hook that deliberately returns the same instance.

    The fake handler records calls for test assertions, so copies must share
    state; ``memo`` is accepted only to satisfy the copy protocol.
    """
    return self
null
test_lancedb
import lancedb embeddings = FakeEmbeddings() db = lancedb.connect('/tmp/lancedb') texts = ['text 1', 'text 2', 'item 3'] vectors = embeddings.embed_documents(texts) table = db.create_table('my_table', data=[{'vector': vectors[idx], 'id': text, 'text': text} for idx, text in enumerate(texts)], mode='overwrite') store = LanceDB(table, embeddings) result = store.similarity_search('text 1') result_texts = [doc.page_content for doc in result] assert 'text 1' in result_texts
def test_lancedb() -> None:
    """End to end: build a LanceDB table from fake embeddings and search it."""
    import lancedb
    embeddings = FakeEmbeddings()
    connection = lancedb.connect('/tmp/lancedb')
    texts = ['text 1', 'text 2', 'item 3']
    vectors = embeddings.embed_documents(texts)
    rows = [
        {'vector': vector, 'id': text, 'text': text}
        for vector, text in zip(vectors, texts)
    ]
    table = connection.create_table('my_table', data=rows, mode='overwrite')
    store = LanceDB(table, embeddings)
    hits = store.similarity_search('text 1')
    assert 'text 1' in [doc.page_content for doc in hits]
null
_load_env
dotenv_path = os.path.join(PROJECT_DIR, 'tests', 'integration_tests', '.env') if os.path.exists(dotenv_path): from dotenv import load_dotenv load_dotenv(dotenv_path)
def _load_env() -> None:
    """Load integration-test environment variables from a .env file, if present."""
    dotenv_path = os.path.join(PROJECT_DIR, 'tests', 'integration_tests', '.env')
    if not os.path.exists(dotenv_path):
        return
    # Import lazily so python-dotenv stays an optional test dependency.
    from dotenv import load_dotenv
    load_dotenv(dotenv_path)
null
test_multiple_ToSelectFrom_throws
llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder())) actions = ['0', '1', '2'] with pytest.raises(ValueError): chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain. ToSelectFrom(actions), another_action=rl_chain.ToSelectFrom(actions))
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_multiple_ToSelectFrom_throws() -> None:
    """Passing two ToSelectFrom arguments to the chain must raise ValueError."""
    llm, PROMPT = setup()
    embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder())
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, feature_embedder=embedder)
    candidates = ['0', '1', '2']
    with pytest.raises(ValueError):
        chain.run(
            User=rl_chain.BasedOn('Context'),
            action=rl_chain.ToSelectFrom(candidates),
            another_action=rl_chain.ToSelectFrom(candidates),
        )
null
_fetch_mime_types
"""Return a dict of supported file types to corresponding mime types.""" return fetch_mime_types(self._file_types)
@property
def _fetch_mime_types(self) -> Dict[str, str]:
    """Return a dict of supported file types to corresponding mime types."""
    # Recomputed on each access from the configured file-type list.
    return fetch_mime_types(self._file_types)
Return a dict of supported file types to corresponding mime types.
__init__
"""Initializes private fields.""" super().__init__(**data) self._client = ArceeWrapper(arcee_api_key=self.arcee_api_key. get_secret_value(), arcee_api_url=self.arcee_api_url, arcee_api_version =self.arcee_api_version, model_kwargs=self.model_kwargs, model_name= self.model) self._client.validate_model_training_status()
def __init__(self, **data: Any) -> None:
    """Initialize the model wrapper and its private Arcee client.

    Args:
        data: Field values forwarded to the pydantic base ``__init__``.
    """
    super().__init__(**data)
    # Build the API client from the validated fields; the api key is a
    # SecretStr, so the raw value must be unwrapped with get_secret_value().
    self._client = ArceeWrapper(
        arcee_api_key=self.arcee_api_key.get_secret_value(),
        arcee_api_url=self.arcee_api_url,
        arcee_api_version=self.arcee_api_version,
        model_kwargs=self.model_kwargs,
        model_name=self.model,
    )
    # Fail fast if the named model is not yet trained/ready to serve.
    self._client.validate_model_training_status()
Initializes private fields.
create_structured_output_runnable
"""Create a runnable that uses an OpenAI function to get a structured output. Args: output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary is passed in, it's assumed to already be a valid JsonSchema. For best results, pydantic.BaseModels should have docstrings describing what the schema represents and descriptions for the parameters. llm: Language model to use, assumed to support the OpenAI function-calling API. prompt: BasePromptTemplate to pass to the model. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. Returns: A runnable sequence that will pass the given function to the model when run. Example: .. code-block:: python from typing import Optional from langchain.chains.openai_functions import create_structured_output_chain from langchain_community.chat_models import ChatOpenAI from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field class Dog(BaseModel): ""\"Identifying information about a dog.""\" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0) prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a world class algorithm for extracting information in structured formats."), ("human", "Use the given format to extract information from the following input: {input}"), ("human", "Tip: Make sure to answer in the correct format"), ] ) chain = create_structured_output_chain(Dog, llm, prompt) chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) # -> Dog(name="Harry", color="brown", fav_food="chicken") """ if isinstance(output_schema, dict): 
function: Any = {'name': 'output_formatter', 'description': 'Output formatter. Should always be used to format your response to the user.' , 'parameters': output_schema} else: class _OutputFormatter(BaseModel): """Output formatter. Should always be used to format your response to the user.""" output: output_schema function = _OutputFormatter output_parser = output_parser or PydanticAttrOutputFunctionsParser( pydantic_schema=_OutputFormatter, attr_name='output') return create_openai_fn_runnable([function], llm, prompt, output_parser= output_parser, **kwargs)
def create_structured_output_runnable(
    output_schema: Union[Dict[str, Any], Type[BaseModel]],
    llm: Runnable,
    prompt: BasePromptTemplate,
    *,
    output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
    **kwargs: Any,
) -> Runnable:
    """Create a runnable that uses an OpenAI function to get a structured output.

    Args:
        output_schema: Either a dictionary or pydantic.BaseModel class. If a
            dictionary is passed in, it's assumed to already be a valid
            JsonSchema. For best results, pydantic.BaseModels should have
            docstrings describing what the schema represents and descriptions
            for the parameters.
        llm: Language model to use, assumed to support the OpenAI
            function-calling API.
        prompt: BasePromptTemplate to pass to the model.
        output_parser: BaseLLMOutputParser to use for parsing model outputs.
            By default will be inferred from the function types. If
            pydantic.BaseModels are passed in, then the OutputParser will try
            to parse outputs using those. Otherwise model outputs will simply
            be parsed as JSON.

    Returns:
        A runnable sequence that will pass the given function to the model
        when run.

    Example:
        .. code-block:: python

            from typing import Optional

            from langchain.chains.openai_functions import create_structured_output_chain
            from langchain_community.chat_models import ChatOpenAI
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.pydantic_v1 import BaseModel, Field


            class Dog(BaseModel):
                \"\"\"Identifying information about a dog.\"\"\"

                name: str = Field(..., description="The dog's name")
                color: str = Field(..., description="The dog's color")
                fav_food: Optional[str] = Field(None, description="The dog's favorite food")


            llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are a world class algorithm for extracting information in structured formats."),
                    ("human", "Use the given format to extract information from the following input: {input}"),
                    ("human", "Tip: Make sure to answer in the correct format"),
                ]
            )
            chain = create_structured_output_chain(Dog, llm, prompt)
            chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
            # -> Dog(name="Harry", color="brown", fav_food="chicken")
    """
    if isinstance(output_schema, dict):
        # A plain dict is treated as ready-made JsonSchema and wrapped in an
        # OpenAI function definition as-is.
        function: Any = {
            'name': 'output_formatter',
            'description': 'Output formatter. Should always be used to format your response to the user.',
            'parameters': output_schema,
        }
    else:
        # For a pydantic model, synthesize a wrapper model with a single
        # `output` field so the caller's schema becomes the function payload.
        class _OutputFormatter(BaseModel):
            """Output formatter. Should always be used to format your response to the user."""
            output: output_schema  # type: ignore

        function = _OutputFormatter
        # Unless the caller supplied a parser, unwrap the `output` attribute
        # from the parsed _OutputFormatter instance.
        output_parser = output_parser or PydanticAttrOutputFunctionsParser(
            pydantic_schema=_OutputFormatter, attr_name='output'
        )
    return create_openai_fn_runnable(
        [function], llm, prompt, output_parser=output_parser, **kwargs
    )
Create a runnable that uses an OpenAI function to get a structured output. Args: output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary is passed in, it's assumed to already be a valid JsonSchema. For best results, pydantic.BaseModels should have docstrings describing what the schema represents and descriptions for the parameters. llm: Language model to use, assumed to support the OpenAI function-calling API. prompt: BasePromptTemplate to pass to the model. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. Returns: A runnable sequence that will pass the given function to the model when run. Example: .. code-block:: python from typing import Optional from langchain.chains.openai_functions import create_structured_output_chain from langchain_community.chat_models import ChatOpenAI from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field class Dog(BaseModel): """Identifying information about a dog.""" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0) prompt = ChatPromptTemplate.from_messages( [ ("system", "You are a world class algorithm for extracting information in structured formats."), ("human", "Use the given format to extract information from the following input: {input}"), ("human", "Tip: Make sure to answer in the correct format"), ] ) chain = create_structured_output_chain(Dog, llm, prompt) chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"}) # -> Dog(name="Harry", color="brown", fav_food="chicken")
input_keys
"""Defines the input keys.""" return self.prompt.input_variables
@property
def input_keys(self) -> List[str]:
    """The input keys this chain expects: the prompt's template variables."""
    variables = self.prompt.input_variables
    return variables
Defines the input keys.
_parse_document_tags
"""Return a set of all tags in within the document.""" if not self.collect_metadata: return set() match = self.TAG_REGEX.findall(content) if not match: return set() return {tag for tag in match}
def _parse_document_tags(self, content: str) -> set:
    """Return the set of all tags found within the document.

    Args:
        content: Raw document text to scan with ``self.TAG_REGEX``.

    Returns:
        A (possibly empty) set of tag strings; always empty when metadata
        collection is disabled.
    """
    if not self.collect_metadata:
        return set()
    # findall already yields every match; building a set directly both
    # deduplicates and makes the former "no match" special case redundant.
    return set(self.TAG_REGEX.findall(content))
Return a set of all tags in within the document.
test_memory_with_message_store
"""Test the memory with a message store.""" message_history = FirestoreChatMessageHistory(collection_name= 'chat_history', session_id='my-test-session', user_id='my-test-user') memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message('This is me, the AI') memory.chat_memory.add_user_message('This is me, the human') message_history = FirestoreChatMessageHistory(collection_name= 'chat_history', session_id='my-test-session', user_id='my-test-user') memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) messages = memory.chat_memory.messages messages_json = json.dumps([message_to_dict(msg) for msg in messages]) assert 'This is me, the AI' in messages_json assert 'This is me, the human' in messages_json memory.chat_memory.clear() assert memory.chat_memory.messages == []
def test_memory_with_message_store() -> None:
    """Test the memory with a message store."""
    def make_memory() -> ConversationBufferMemory:
        # Fresh history object each time, pointing at the same Firestore session.
        history = FirestoreChatMessageHistory(
            collection_name='chat_history',
            session_id='my-test-session',
            user_id='my-test-user',
        )
        return ConversationBufferMemory(memory_key='baz', chat_memory=history, return_messages=True)

    memory = make_memory()
    memory.chat_memory.add_ai_message('This is me, the AI')
    memory.chat_memory.add_user_message('This is me, the human')
    # Re-create the memory to prove the messages round-trip through Firestore.
    memory = make_memory()
    stored = memory.chat_memory.messages
    serialized = json.dumps([message_to_dict(msg) for msg in stored])
    assert 'This is me, the AI' in serialized
    assert 'This is me, the human' in serialized
    memory.chat_memory.clear()
    assert memory.chat_memory.messages == []
Test the memory with a message store.
test_concurrent_language_loader_for_python_with_parser_threshold
"""Test Python ConcurrentLoader with parser enabled and below threshold.""" file_path = Path(__file__).parent.parent.parent / 'examples' loader = ConcurrentLoader.from_filesystem(file_path, glob='hello_world.py', parser=LanguageParser(language='python', parser_threshold=1000)) docs = loader.load() assert len(docs) == 1
def test_concurrent_language_loader_for_python_with_parser_threshold() -> None:
    """Test Python ConcurrentLoader with parser enabled and below threshold."""
    examples_dir = Path(__file__).parent.parent.parent / 'examples'
    parser = LanguageParser(language='python', parser_threshold=1000)
    loader = ConcurrentLoader.from_filesystem(examples_dir, glob='hello_world.py', parser=parser)
    assert len(loader.load()) == 1
Test Python ConcurrentLoader with parser enabled and below threshold.
_score_memories_importance
"""Score the absolute importance of the given memory.""" prompt = PromptTemplate.from_template( 'On the scale of 1 to 10, where 1 is purely mundane' + ' (e.g., brushing teeth, making bed) and 10 is' + ' extremely poignant (e.g., a break up, college' + ' acceptance), rate the likely poignancy of the' + ' following piece of memory. Always answer with only a list of numbers.' + ' If just given one memory still respond in a list.' + ' Memories are separated by semi colans (;)' + '\\Memories: {memory_content}' + '\nRating: ') scores = self.chain(prompt).run(memory_content=memory_content).strip() if self.verbose: logger.info(f'Importance scores: {scores}') scores_list = [float(x) for x in scores.split(';')] return scores_list
def _score_memories_importance(self, memory_content: str) -> List[float]:
    """Score the absolute importance of the given memories.

    Args:
        memory_content: One or more memories separated by semicolons (;).

    Returns:
        One poignancy score (1-10) per memory, in input order.
    """
    # Bug fix: the template previously embedded '\\Memories' (a literal
    # backslash) instead of a newline, and misspelled "semicolons".
    prompt = PromptTemplate.from_template(
        'On the scale of 1 to 10, where 1 is purely mundane'
        ' (e.g., brushing teeth, making bed) and 10 is'
        ' extremely poignant (e.g., a break up, college'
        ' acceptance), rate the likely poignancy of the'
        ' following piece of memory. Always answer with only a list of numbers.'
        ' If just given one memory still respond in a list.'
        ' Memories are separated by semicolons (;)'
        '\nMemories: {memory_content}'
        '\nRating: '
    )
    scores = self.chain(prompt).run(memory_content=memory_content).strip()
    if self.verbose:
        logger.info(f'Importance scores: {scores}')
    # Strip whitespace and skip empty fragments (e.g. a trailing ';') so
    # float() does not raise on well-meaning but untidy model output.
    return [float(part.strip()) for part in scores.split(';') if part.strip()]
Score the absolute importance of the given memory.
_import_scenexplain
from langchain_community.utilities.scenexplain import SceneXplainAPIWrapper return SceneXplainAPIWrapper
def _import_scenexplain() -> Any:
    """Lazily import and return the SceneXplain API wrapper class."""
    from langchain_community.utilities.scenexplain import SceneXplainAPIWrapper
    return SceneXplainAPIWrapper
null
ignore_agent
"""Whether to ignore agent callbacks.""" return self.ignore_agent_
@property
def ignore_agent(self) -> bool:
    """Whether to ignore agent callbacks."""
    # Backed by the trailing-underscore field so the flag can be configured.
    return self.ignore_agent_
Whether to ignore agent callbacks.
messages_to_dict
"""Convert a sequence of Messages to a list of dictionaries. Args: messages: Sequence of messages (as BaseMessages) to convert. Returns: List of messages as dicts. """ return [message_to_dict(m) for m in messages]
def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:
    """Convert a sequence of Messages to a list of dictionaries.

    Args:
        messages: Sequence of messages (as BaseMessages) to convert.

    Returns:
        List of messages as dicts.
    """
    return list(map(message_to_dict, messages))
Convert a sequence of Messages to a list of dictionaries. Args: messages: Sequence of messages (as BaseMessages) to convert. Returns: List of messages as dicts.
clear
"""Clear session memory from DB""" self.blob_history.clear_session_id(self.session_id)
def clear(self) -> None:
    """Clear session memory from DB"""
    # Drops every message stored under this history's session id.
    self.blob_history.clear_session_id(self.session_id)
Clear session memory from DB
_import_python_tool_PythonAstREPLTool
raise ImportError( "This tool has been moved to langchain experiment. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imports replacing 'langchain' with 'langchain_experimental'" )
def _import_python_tool_PythonAstREPLTool() -> Any:
    """Unconditionally raise: PythonAstREPLTool now lives in langchain_experimental."""
    message = (
        "This tool has been moved to langchain experiment. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imports replacing 'langchain' with 'langchain_experimental'"
    )
    raise ImportError(message)
null
lc_attributes
return {'you_can_see_me': self.you_can_see_me}
@property
def lc_attributes(self) -> Dict[str, str]:
    """Attributes exposed for serialization alongside this object."""
    return dict(you_can_see_me=self.you_can_see_me)
null
test_self_ask_with_search
"""Test functionality on a prompt.""" question = "What is the hometown of the reigning men's U.S. Open champion?" chain = SelfAskWithSearchChain(llm=OpenAI(temperature=0), search_chain= SearchApiAPIWrapper(), input_key='q', output_key='a') answer = chain.run(question) final_answer = answer.split('\n')[-1] assert final_answer == 'Belgrade, Serbia'
def test_self_ask_with_search() -> None:
    """Test functionality on a prompt."""
    question = "What is the hometown of the reigning men's U.S. Open champion?"
    chain = SelfAskWithSearchChain(
        llm=OpenAI(temperature=0),
        search_chain=SearchApiAPIWrapper(),
        input_key='q',
        output_key='a',
    )
    answer = chain.run(question)
    # Only the final line of the self-ask transcript is the answer.
    assert answer.split('\n')[-1] == 'Belgrade, Serbia'
Test functionality on a prompt.
test_warning
_ = VertexAIEmbeddings() assert len(caplog.records) == 1 record = caplog.records[0] assert record.levelname == 'WARNING' expected_message = ( 'Model_name will become a required arg for VertexAIEmbeddings starting from Feb-01-2024. Currently the default is set to textembedding-gecko@001' ) assert record.message == expected_message
def test_warning(caplog: pytest.LogCaptureFixture) -> None:
    """Instantiating VertexAIEmbeddings without a model name must warn exactly once."""
    _ = VertexAIEmbeddings()
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == 'WARNING'
    assert record.message == (
        'Model_name will become a required arg for VertexAIEmbeddings starting from Feb-01-2024. Currently the default is set to textembedding-gecko@001'
    )
null
_Import
self.fill('import ') interleave(lambda : self.write(', '), self.dispatch, t.names)
def _Import(self, t):
    # Emit an `import a, b, c` statement: start a fresh indented source line,
    # then render each alias node, separated by ", ".
    self.fill('import ')
    interleave(lambda : self.write(', '), self.dispatch, t.names)
null
get_table_names
"""Get names of tables available.""" return self.table_names
def get_table_names(self) -> Iterable[str]:
    """Get names of tables available."""
    names = self.table_names
    return names
Get names of tables available.
build_extra
"""Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get('model_kwargs', {}) for field_name in list(values): if field_name in extra: raise ValueError(f'Found {field_name} supplied twice.') if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.' ) values['model_kwargs'] = extra return values
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
    """Build extra kwargs from additional params that were passed in.

    Any keyword that is not a declared model field is moved into
    ``values['model_kwargs']``; duplicates, and declared fields hidden inside
    ``model_kwargs``, raise ``ValueError``.
    """
    all_required_field_names = get_pydantic_field_names(cls)
    extra = values.get('model_kwargs', {})
    # Iterate over a snapshot of the keys: unknown params are popped as we go.
    for field_name in list(values):
        if field_name in extra:
            raise ValueError(f'Found {field_name} supplied twice.')
        if field_name not in all_required_field_names:
            logger.warning(
                f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
            )
            extra[field_name] = values.pop(field_name)
    # Declared fields must be passed directly, never via model_kwargs.
    invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
    if invalid_model_kwargs:
        raise ValueError(
            f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.'
        )
    values['model_kwargs'] = extra
    return values
Build extra kwargs from additional params that were passed in.
test_timescalevector_from_documents
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts] docsearch = TimescaleVector.from_documents(documents=docs, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'a': 'b'})]
def test_timescalevector_from_documents() -> None:
    """Test end to end construction and search."""
    source_docs = [
        Document(page_content=text, metadata={'a': 'b'})
        for text in ('foo', 'bar', 'baz')
    ]
    store = TimescaleVector.from_documents(
        documents=source_docs,
        collection_name='test_collection',
        embedding=FakeEmbeddingsWithAdaDimension(),
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo', metadata={'a': 'b'})]
Test end to end construction and search.
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding_function is None: raise ValueError( 'For MMR search, you must specify an embedding function oncreation.') embedding = self._embedding_function.embed_query(query) docs = self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k, lambda_mult=lambda_mult, filter=filter, where_document=where_document) return docs
def max_marginal_relevance_search(self, query: str, k: int = DEFAULT_K,
        fetch_k: int = 20, lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        where_document: Optional[Dict[str, str]] = None,
        **kwargs: Any) -> List[Document]:
    """Return docs selected using the maximal marginal relevance.

    Maximal marginal relevance optimizes for similarity to query AND
    diversity among selected documents.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree of
            diversity among the results, with 0 corresponding to maximum
            diversity and 1 to minimum diversity. Defaults to 0.5.
        filter: Filter by metadata. Defaults to None.
        where_document: Filter by document content. Defaults to None.

    Returns:
        List of Documents selected by maximal marginal relevance.

    Raises:
        ValueError: If no embedding function was supplied at creation time.
    """
    if self._embedding_function is None:
        # Fixed error message: previously read "function oncreation."
        raise ValueError(
            'For MMR search, you must specify an embedding function on creation.'
        )
    embedding = self._embedding_function.embed_query(query)
    return self.max_marginal_relevance_search_by_vector(
        embedding, k, fetch_k, lambda_mult=lambda_mult, filter=filter,
        where_document=where_document,
    )
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance.
add_texts
"""Upload texts to NucliaDB""" ids = [] from nuclia.sdk import NucliaResource factory = NucliaResource() for i, text in enumerate(texts): extra: Dict[str, Any] = {'metadata': ''} if metadatas: extra = {'metadata': metadatas[i]} id = factory.create(texts={'text': {'body': text}}, extra=extra, url= self.kb_url, api_key=self._config['TOKEN']) ids.append(id) return ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) -> List[str]:
    """Upload texts to NucliaDB.

    Args:
        texts: Text bodies to create as NucliaDB resources.
        metadatas: Optional per-text metadata, parallel to ``texts``.

    Returns:
        The NucliaDB resource ids of the created texts.
    """
    from nuclia.sdk import NucliaResource
    factory = NucliaResource()
    ids: List[str] = []
    for i, text in enumerate(texts):
        # The extra payload always carries a metadata key, even when empty.
        extra: Dict[str, Any] = {'metadata': metadatas[i] if metadatas else ''}
        # Renamed from `id`, which shadowed the builtin of the same name.
        resource_id = factory.create(
            texts={'text': {'body': text}},
            extra=extra,
            url=self.kb_url,
            api_key=self._config['TOKEN'],
        )
        ids.append(resource_id)
    return ids
Upload texts to NucliaDB
test_bad_action_line
"""Test handling when no action found.""" llm_output = """Thought: I need to search for NBA Thought: Search Action Input: NBA""" with pytest.raises(OutputParserException) as e_info: get_action_and_input(llm_output) assert e_info.value.observation is not None
def test_bad_action_line() -> None:
    """Test handling when no action found."""
    llm_output = """Thought: I need to search for NBA
Thought: Search
Action Input: NBA"""
    with pytest.raises(OutputParserException) as excinfo:
        get_action_and_input(llm_output)
    # The parser must attach an observation for the agent to recover from.
    assert excinfo.value.observation is not None
Test handling when no action found.
app_creation
"""Creates a Python file which will contain your Beam app definition.""" script = textwrap.dedent( """ import beam # The environment your code will run on app = beam.App( name="{name}", cpu={cpu}, memory="{memory}", gpu="{gpu}", python_version="{python_version}", python_packages={python_packages}, ) app.Trigger.RestAPI( inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}}, outputs={{"text": beam.Types.String()}}, handler="run.py:beam_langchain", ) """ ) script_name = 'app.py' with open(script_name, 'w') as file: file.write(script.format(name=self.name, cpu=self.cpu, memory=self. memory, gpu=self.gpu, python_version=self.python_version, python_packages=self.python_packages))
def app_creation(self) -> None:
    """Creates a Python file which will contain your Beam app definition."""
    template = textwrap.dedent(
        """\
    import beam

    # The environment your code will run on
    app = beam.App(
        name="{name}",
        cpu={cpu},
        memory="{memory}",
        gpu="{gpu}",
        python_version="{python_version}",
        python_packages={python_packages},
    )

    app.Trigger.RestAPI(
        inputs={{"prompt": beam.Types.String(), "max_length": beam.Types.String()}},
        outputs={{"text": beam.Types.String()}},
        handler="run.py:beam_langchain",
    )
    """
    )
    rendered = template.format(
        name=self.name,
        cpu=self.cpu,
        memory=self.memory,
        gpu=self.gpu,
        python_version=self.python_version,
        python_packages=self.python_packages,
    )
    # The Beam CLI expects the definition in app.py next to the handler.
    with open('app.py', 'w') as app_file:
        app_file.write(rendered)
Creates a Python file which will contain your Beam app definition.
get_details
return self.details
def get_details(self) -> Any:
    """Accessor for the stored details payload."""
    details = self.details
    return details
null
import_comet_llm_api
"""Import comet_llm api and raise an error if it is not installed.""" try: from comet_llm import experiment_info, flush from comet_llm.chains import api as chain_api from comet_llm.chains import chain, span except ImportError: raise ImportError( 'To use the CometTracer you need to have the `comet_llm>=2.0.0` python package installed. Please install it with `pip install -U comet_llm`' ) return SimpleNamespace(chain=chain, span=span, chain_api=chain_api, experiment_info=experiment_info, flush=flush)
def import_comet_llm_api() -> SimpleNamespace:
    """Import the comet_llm API surface, raising a helpful error when missing."""
    try:
        from comet_llm import experiment_info, flush
        from comet_llm.chains import api as chain_api
        from comet_llm.chains import chain, span
    except ImportError:
        raise ImportError(
            'To use the CometTracer you need to have the `comet_llm>=2.0.0` python package installed. Please install it with `pip install -U comet_llm`'
        )
    # Bundle the imported pieces so callers need a single attribute lookup.
    return SimpleNamespace(
        chain=chain,
        span=span,
        chain_api=chain_api,
        experiment_info=experiment_info,
        flush=flush,
    )
Import comet_llm api and raise an error if it is not installed.
load_ts_git_dataset
json_url = 'https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json' tmp_file = 'ts_git_log.json' temp_dir = tempfile.gettempdir() json_file_path = os.path.join(temp_dir, tmp_file) if not os.path.exists(json_file_path): response = requests.get(json_url) if response.status_code == 200: with open(json_file_path, 'w') as json_file: json_file.write(response.text) else: print( f'Failed to download JSON file. Status code: {response.status_code}' ) loader = JSONLoader(file_path=json_file_path, jq_schema='.commit_history[]', text_content=False, metadata_func=extract_metadata) documents = loader.load() documents = [doc for doc in documents if doc.metadata['date'] is not None] if num_records > 0: documents = documents[:num_records] text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() TimescaleVector.from_documents(embedding=embeddings, ids=[doc.metadata['id' ] for doc in docs], documents=docs, collection_name=collection_name, service_url=service_url, time_partition_interval=partition_interval)
def load_ts_git_dataset(service_url, collection_name='timescale_commits',
    num_records: int=500, partition_interval=timedelta(days=7)):
    """Download the TimescaleDB git-log dataset and load it into TimescaleVector.

    Args:
        service_url: Connection URL for the Timescale service.
        collection_name: Name of the target vector collection.
        num_records: Maximum number of commit records to load; <= 0 loads all.
        partition_interval: Time-partitioning interval for the collection.

    Raises:
        RuntimeError: If the dataset JSON cannot be downloaded.
    """
    json_url = 'https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json'
    tmp_file = 'ts_git_log.json'
    temp_dir = tempfile.gettempdir()
    json_file_path = os.path.join(temp_dir, tmp_file)
    if not os.path.exists(json_file_path):
        response = requests.get(json_url)
        if response.status_code == 200:
            # Write explicitly as UTF-8 so the cached file does not depend
            # on the platform's default encoding.
            with open(json_file_path, 'w', encoding='utf-8') as json_file:
                json_file.write(response.text)
        else:
            # Fail fast: the original code only printed here and then let
            # JSONLoader crash on the missing file with a confusing error.
            raise RuntimeError(
                f'Failed to download JSON file. Status code: {response.status_code}'
                )
    loader = JSONLoader(file_path=json_file_path, jq_schema=
        '.commit_history[]', text_content=False, metadata_func=extract_metadata
        )
    documents = loader.load()
    # Drop records without a commit date; they cannot be time-partitioned.
    documents = [doc for doc in documents if doc.metadata['date'] is not None]
    if num_records > 0:
        documents = documents[:num_records]
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    docs = text_splitter.split_documents(documents)
    embeddings = OpenAIEmbeddings()
    TimescaleVector.from_documents(embedding=embeddings, ids=[doc.metadata[
        'id'] for doc in docs], documents=docs, collection_name=
        collection_name, service_url=service_url, time_partition_interval=
        partition_interval)
null
update_args
for dep_id in task.dep: if dep_id == -1: continue dep_task = self.id_task_map[dep_id] for k, v in task.args.items(): if f'<resource-{dep_id}>' in v: task.args[k] = task.args[k].replace(f'<resource-{dep_id}>', dep_task.result)
def update_args(self, task: Task) ->None:
    """Substitute ``<resource-N>`` placeholders in ``task.args`` with the
    results of the dependency tasks listed in ``task.dep``."""
    for dep_id in task.dep:
        # -1 marks "no dependency"; nothing to substitute for it.
        if dep_id == -1:
            continue
        dep_result = self.id_task_map[dep_id].result
        placeholder = f'<resource-{dep_id}>'
        for arg_name, arg_value in task.args.items():
            if placeholder in arg_value:
                task.args[arg_name] = arg_value.replace(placeholder,
                    dep_result)
null
on_chain_end
"""Do nothing.""" pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None:
    """Ignore chain-end events; this handler takes no action."""
Do nothing.
_call
"""Return `foo` if longer than 10000 words, else `bar`.""" if len(prompt) > 10000: return 'foo' else: return 'bar'
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Return `foo` if the prompt is longer than 10000 characters, else `bar`.

    Note: the threshold is on ``len(prompt)`` — characters, not words.
    ``stop``, ``run_manager`` and ``kwargs`` are accepted for interface
    compatibility and ignored.
    """
    if len(prompt) > 10000:
        return 'foo'
    else:
        return 'bar'
Return `foo` if longer than 10000 words, else `bar`.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'llms', 'openai']
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object.

    Returns:
        The serialization namespace path: ``["langchain", "llms", "openai"]``.
    """
    return ['langchain', 'llms', 'openai']
Get the namespace of the langchain object.
_import_koboldai
from langchain_community.llms.koboldai import KoboldApiLLM return KoboldApiLLM
def _import_koboldai() ->Any:
    """Lazily import and return the KoboldApiLLM class.

    The import lives inside the function so the community module (and its
    dependencies) is only loaded when KoboldApiLLM is actually requested.
    """
    from langchain_community.llms.koboldai import KoboldApiLLM
    return KoboldApiLLM
null
_import_watsonxllm
from langchain_community.llms.watsonxllm import WatsonxLLM return WatsonxLLM
def _import_watsonxllm() ->Any:
    """Lazily import and return the WatsonxLLM class.

    The import lives inside the function so the community module (and its
    dependencies) is only loaded when WatsonxLLM is actually requested.
    """
    from langchain_community.llms.watsonxllm import WatsonxLLM
    return WatsonxLLM
null
test_catches_forbidden_keys
"""Make sure we raise exception on keys that are not allowed; e.g., absolute path""" with pytest.raises(InvalidKeyException): file_store.mset([('/etc', b'value1')]) with pytest.raises(InvalidKeyException): list(file_store.yield_keys('/etc/passwd')) with pytest.raises(InvalidKeyException): file_store.mget(['/etc/passwd']) with pytest.raises(InvalidKeyException): list(file_store.yield_keys('..')) with pytest.raises(InvalidKeyException): file_store.mget(['../etc/passwd']) with pytest.raises(InvalidKeyException): file_store.mset([('../etc', b'value1')]) with pytest.raises(InvalidKeyException): list(file_store.yield_keys('../etc/passwd'))
def test_catches_forbidden_keys(file_store: LocalFileStore) ->None:
    """Make sure we raise exception on keys that are not allowed; e.g., absolute path"""
    # Absolute ('/etc/...') and parent-traversal ('../etc/...') keys must be
    # rejected by every store operation.
    for prefix in ('/etc', '../etc'):
        with pytest.raises(InvalidKeyException):
            file_store.mset([(prefix, b'value1')])
        with pytest.raises(InvalidKeyException):
            file_store.mget([prefix + '/passwd'])
        with pytest.raises(InvalidKeyException):
            list(file_store.yield_keys(prefix + '/passwd'))
    with pytest.raises(InvalidKeyException):
        list(file_store.yield_keys('..'))
Make sure we raise exception on keys that are not allowed; e.g., absolute path
test_chat_google_raises_with_invalid_top_k
pytest.importorskip('google.generativeai') with pytest.raises(ValueError) as e: ChatGooglePalm(google_api_key='fake', top_k=-5) assert 'must be positive' in str(e)
def test_chat_google_raises_with_invalid_top_k() ->None:
    """ChatGooglePalm should reject a non-positive top_k at construction."""
    # Skip the whole test when the optional SDK is not installed.
    pytest.importorskip('google.generativeai')
    with pytest.raises(ValueError) as e:
        ChatGooglePalm(google_api_key='fake', top_k=-5)
    assert 'must be positive' in str(e)
null
test_from_texts_with_scores
"""Test end to end construction and search with scores and IDs.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = Pinecone.from_texts(texts, embedding_openai, index_name= index_name, metadatas=metadatas, namespace=namespace_name) output = docsearch.similarity_search_with_score('foo', k=3, namespace= namespace_name) docs = [o[0] for o in output] scores = [o[1] for o in output] sorted_documents = sorted(docs, key=lambda x: x.metadata['page']) assert sorted_documents == [Document(page_content='foo', metadata={'page': 0.0}), Document(page_content='bar', metadata={'page': 1.0}), Document( page_content='baz', metadata={'page': 2.0})] assert scores[0] > scores[1] > scores[2]
@pytest.mark.vcr()
def test_from_texts_with_scores(self, embedding_openai: OpenAIEmbeddings
    ) ->None:
    """Test end to end construction and search with scores and IDs."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = Pinecone.from_texts(texts, embedding_openai, index_name=
        index_name, metadatas=metadatas, namespace=namespace_name)
    output = docsearch.similarity_search_with_score('foo', k=3, namespace=
        namespace_name)
    docs = [o[0] for o in output]
    scores = [o[1] for o in output]
    # Sort by page metadata so the equality assertion is order-independent.
    sorted_documents = sorted(docs, key=lambda x: x.metadata['page'])
    assert sorted_documents == [Document(page_content='foo', metadata={
        'page': 0.0}), Document(page_content='bar', metadata={'page': 1.0}),
        Document(page_content='baz', metadata={'page': 2.0})]
    # The exact match 'foo' should score highest, then strictly decreasing.
    assert scores[0] > scores[1] > scores[2]
Test end to end construction and search with scores and IDs.
embed
""" Embeds the actions or context using the SentenceTransformer model (or a model that has an `encode` function) Attributes: to_embed: (Union[Union(str, _Embed(str)), Dict, List[Union(str, _Embed(str))], List[Dict]], required) The text to be embedded, either a string, a list of strings or a dictionary or a list of dictionaries. namespace: (str, optional) The default namespace to use when dictionary or list of dictionaries not provided. model: (Any, required) The model to use for embedding Returns: List[Dict[str, str]]: A list of dictionaries where each dictionary has the namespace as the key and the embedded string as the value """ if isinstance(to_embed, _Embed) and isinstance(to_embed.value, str ) or isinstance(to_embed, str): return [embed_string_type(to_embed, model, namespace)] elif isinstance(to_embed, dict): return [embed_dict_type(to_embed, model)] elif isinstance(to_embed, list): return embed_list_type(to_embed, model, namespace) else: raise ValueError('Invalid input format for embedding')
def embed(to_embed: Union[Union[str, _Embed], Dict, List[Union[str, _Embed]
    ], List[Dict]], model: Any, namespace: Optional[str]=None) ->List[Dict[
    str, Union[str, List[str]]]]:
    """Embed actions or context using the supplied model (anything exposing
    an ``encode`` function, e.g. a SentenceTransformer).

    Args:
        to_embed: A string (optionally wrapped in ``_Embed``), a dict, or a
            list of either.
        model: The model to use for embedding.
        namespace: Default namespace used when ``to_embed`` is not already a
            dict or list of dicts.

    Returns:
        A list of dictionaries mapping each namespace to its embedded string.
    """
    # Dispatch on the input shape; string-like values (plain or wrapped in
    # _Embed) come first, then dicts, then lists of either.
    string_like = isinstance(to_embed, str) or (isinstance(to_embed, _Embed
        ) and isinstance(to_embed.value, str))
    if string_like:
        return [embed_string_type(to_embed, model, namespace)]
    if isinstance(to_embed, dict):
        return [embed_dict_type(to_embed, model)]
    if isinstance(to_embed, list):
        return embed_list_type(to_embed, model, namespace)
    raise ValueError('Invalid input format for embedding')
Embeds the actions or context using the SentenceTransformer model (or a model that has an `encode` function) Attributes: to_embed: (Union[Union(str, _Embed(str)), Dict, List[Union(str, _Embed(str))], List[Dict]], required) The text to be embedded, either a string, a list of strings or a dictionary or a list of dictionaries. namespace: (str, optional) The default namespace to use when dictionary or list of dictionaries not provided. model: (Any, required) The model to use for embedding Returns: List[Dict[str, str]]: A list of dictionaries where each dictionary has the namespace as the key and the embedded string as the value
embed_documents
"""Compute doc embeddings using a Bedrock model. Args: texts: The list of texts to embed Returns: List of embeddings, one for each text. """ results = [] for text in texts: response = self._embedding_func(text) results.append(response) return results
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed a batch of documents using a Bedrock model.

    Args:
        texts: The list of texts to embed

    Returns:
        List of embeddings, one for each text.
    """
    # Delegate each text to the single-string embedding helper.
    return [self._embedding_func(text) for text in texts]
Compute doc embeddings using a Bedrock model. Args: texts: The list of texts to embed Returns: List of embeddings, one for each text.
embed_query
"""Compute query embeddings using a Bedrock model. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func(text)
def embed_query(self, text: str) ->List[float]:
    """Embed a single query string using a Bedrock model.

    Args:
        text: The text to embed.

    Returns:
        Embeddings for the text.
    """
    embedding = self._embedding_func(text)
    return embedding
Compute query embeddings using a Bedrock model. Args: text: The text to embed. Returns: Embeddings for the text.
lazy_load
"""Lazy load given path as pages.""" if self.web_path: blob = Blob.from_data(open(self.file_path, 'rb').read(), path=self.web_path ) else: blob = Blob.from_path(self.file_path) yield from self.parser.parse_folder(blob)
def lazy_load(self) ->Iterator[Document]:
    """Lazily parse the underlying file and yield its pages as Documents.

    Yields:
        One Document per page produced by the parser.
    """
    if self.web_path:
        # Read the bytes via a context manager so the file handle is closed
        # (the original left it open), while keeping the web path as the
        # blob's source for downstream metadata.
        with open(self.file_path, 'rb') as f:
            blob = Blob.from_data(f.read(), path=self.web_path)
    else:
        blob = Blob.from_path(self.file_path)
    yield from self.parser.parse_folder(blob)
Lazy load given path as pages.
similarity_search_with_score_by_vector
"""Return docs most similar to embedding vector. Args: embedding (str): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. Returns: List of (Document, score), the most similar to the query vector. """ return [(doc, score) for doc, score, docId in self. similarity_search_with_score_id_by_vector(embedding=embedding, k=k, filter=filter)]
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, filter: Optional[Dict[str, str]]=None) ->List[Tuple[Document,
    float]]:
    """Return the documents closest to the given embedding vector.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter: Optional metadata filter applied to the search.

    Returns:
        List of (Document, score) pairs, most similar to the query vector.
    """
    results = self.similarity_search_with_score_id_by_vector(embedding=
        embedding, k=k, filter=filter)
    # Drop the document id from each (doc, score, doc_id) triple.
    return [(doc, score) for doc, score, _ in results]
Return docs most similar to embedding vector. Args: embedding (str): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. Returns: List of (Document, score), the most similar to the query vector.
__init__
try: from timescale_vector import client except ImportError: raise ImportError( 'Could not import timescale_vector python package. Please install it with `pip install timescale-vector`.' ) self.service_url = service_url self.embedding = embedding self.collection_name = collection_name self.num_dimensions = num_dimensions self._distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self._time_partition_interval = time_partition_interval self.sync_client = client.Sync(self.service_url, self.collection_name, self .num_dimensions, self._distance_strategy.value.lower(), time_partition_interval=self._time_partition_interval, **kwargs) self.async_client = client.Async(self.service_url, self.collection_name, self.num_dimensions, self._distance_strategy.value.lower(), time_partition_interval=self._time_partition_interval, **kwargs) self.__post_init__()
def __init__(self, service_url: str, embedding: Embeddings, collection_name:
    str=_LANGCHAIN_DEFAULT_COLLECTION_NAME, num_dimensions: int=
    ADA_TOKEN_COUNT, distance_strategy: DistanceStrategy=
    DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool=False, logger:
    Optional[logging.Logger]=None, relevance_score_fn: Optional[Callable[[
    float], float]]=None, time_partition_interval: Optional[timedelta]=None,
    **kwargs: Any) ->None:
    """Initialize a TimescaleVector store.

    Args:
        service_url: Timescale service connection URL.
        embedding: Embedding function used for inserts and queries.
        collection_name: Name of the collection (table) to use.
        num_dimensions: Dimensionality of the stored vectors.
        distance_strategy: Distance metric used for similarity search.
        pre_delete_collection: Whether to drop an existing collection first.
        logger: Optional logger; defaults to this module's logger.
        relevance_score_fn: Optional override for relevance scoring.
        time_partition_interval: Optional time-partitioning interval.
        **kwargs: Forwarded to the timescale_vector client constructors.

    Raises:
        ImportError: If the ``timescale_vector`` package is not installed.
    """
    # Imported lazily so the dependency is only required when the store is used.
    try:
        from timescale_vector import client
    except ImportError:
        raise ImportError(
            'Could not import timescale_vector python package. Please install it with `pip install timescale-vector`.'
            )
    self.service_url = service_url
    self.embedding = embedding
    self.collection_name = collection_name
    self.num_dimensions = num_dimensions
    self._distance_strategy = distance_strategy
    self.pre_delete_collection = pre_delete_collection
    self.logger = logger or logging.getLogger(__name__)
    self.override_relevance_score_fn = relevance_score_fn
    self._time_partition_interval = time_partition_interval
    # Sync and async clients share identical collection configuration.
    self.sync_client = client.Sync(self.service_url, self.collection_name,
        self.num_dimensions, self._distance_strategy.value.lower(),
        time_partition_interval=self._time_partition_interval, **kwargs)
    self.async_client = client.Async(self.service_url, self.collection_name,
        self.num_dimensions, self._distance_strategy.value.lower(),
        time_partition_interval=self._time_partition_interval, **kwargs)
    self.__post_init__()
null
parse_dict_through_component
"""Parse a dictionary by creating a component and then turning it back into a dictionary. This helps with two things 1. Extract and format data from a dictionary according to schema 2. Provide a central place to do this in a fault-tolerant way """ try: return asdict(component.from_data(data)) except Exception as e: if fault_tolerant: warning_str = f"""Error encountered while trying to parse {str(data)}: {str(e)} Falling back to returning input data.""" warnings.warn(warning_str) return data else: raise e
def parse_dict_through_component(data: dict, component: Type[Component],
    fault_tolerant: bool=False) ->Dict:
    """Parse a dictionary by creating a component and then turning it back
    into a dictionary.

    This helps with two things
    1. Extract and format data from a dictionary according to schema
    2. Provide a central place to do this in a fault-tolerant way

    Args:
        data: The raw dictionary to parse.
        component: Component class used to interpret the data.
        fault_tolerant: If True, warn and return ``data`` unchanged when
            parsing fails instead of raising.

    Returns:
        The parsed-and-serialized dictionary, or the original ``data`` when
        parsing fails in fault-tolerant mode.
    """
    try:
        return asdict(component.from_data(data))
    except Exception as e:
        if not fault_tolerant:
            # Bare re-raise preserves the original traceback
            # (``raise e`` restarts it here).
            raise
        warning_str = f"""Error encountered while trying to parse {str(data)}: {str(e)}
 Falling back to returning input data."""
        warnings.warn(warning_str)
        return data
Parse a dictionary by creating a component and then turning it back into a dictionary. This helps with two things 1. Extract and format data from a dictionary according to schema 2. Provide a central place to do this in a fault-tolerant way
test_dereference_refs_one_ref
schema = {'type': 'object', 'properties': {'first_name': {'$ref': '#/$defs/name'}}, '$defs': {'name': {'type': 'string'}}} expected = {'type': 'object', 'properties': {'first_name': {'type': 'string'}}, '$defs': {'name': {'type': 'string'}}} actual = dereference_refs(schema) assert actual == expected
def test_dereference_refs_one_ref() ->None:
    """A lone ``$ref`` into ``$defs`` is replaced by the referenced schema,
    and the ``$defs`` section itself is preserved."""
    schema = {'type': 'object', 'properties': {'first_name': {'$ref':
        '#/$defs/name'}}, '$defs': {'name': {'type': 'string'}}}
    expected = {'type': 'object', 'properties': {'first_name': {'type':
        'string'}}, '$defs': {'name': {'type': 'string'}}}
    assert dereference_refs(schema) == expected
null
dict
"""Return dictionary representation of output parser.""" output_parser_dict = super().dict(**kwargs) try: output_parser_dict['_type'] = self._type except NotImplementedError: pass return output_parser_dict
def dict(self, **kwargs: Any) ->Dict:
    """Return dictionary representation of output parser.

    Adds a ``_type`` key when the parser defines one, so the dict can be
    round-tripped by serialization code.
    """
    output_parser_dict = super().dict(**kwargs)
    try:
        # _type may be unimplemented on some parsers; in that case the key
        # is simply omitted rather than failing serialization.
        output_parser_dict['_type'] = self._type
    except NotImplementedError:
        pass
    return output_parser_dict
Return dictionary representation of output parser.
_get_messages
"""Get Chat Messages from inputs. Args: inputs: The input dictionary. Returns: A list of chat messages. Raises: InputFormatError: If the input format is invalid. """ if not inputs: raise InputFormatError('Inputs should not be empty.') if 'messages' in inputs: single_input = inputs['messages'] elif len(inputs) == 1: single_input = next(iter(inputs.values())) else: raise InputFormatError( f"Chat Run expects 'messages' in inputs when example has multiple input keys. Got {inputs}" ) if isinstance(single_input, list) and all(isinstance(i, dict) for i in single_input): raw_messages = [single_input] elif isinstance(single_input, list) and all(isinstance(i, list) for i in single_input): raw_messages = single_input else: raise InputFormatError( f"Chat Run expects List[dict] or List[List[dict]] values for 'messages' key input. Got {inputs}" ) if len(raw_messages) == 1: return messages_from_dict(raw_messages[0]) else: raise InputFormatError( f"Chat Run expects single List[dict] or List[List[dict]] 'messages' input. Got {len(raw_messages)} messages from inputs {inputs}" )
def _get_messages(inputs: Dict[str, Any]) ->List[BaseMessage]:
    """Get Chat Messages from inputs.

    Args:
        inputs: The input dictionary.

    Returns:
        A list of chat messages.
    Raises:
        InputFormatError: If the input format is invalid.
    """
    if not inputs:
        raise InputFormatError('Inputs should not be empty.')
    # Prefer an explicit 'messages' key; otherwise a single-key dict is
    # treated as the messages payload.
    if 'messages' in inputs:
        single_input = inputs['messages']
    elif len(inputs) == 1:
        single_input = next(iter(inputs.values()))
    else:
        raise InputFormatError(
            f"Chat Run expects 'messages' in inputs when example has multiple input keys. Got {inputs}"
            )
    # Normalize: List[dict] (one conversation) vs List[List[dict]].
    if isinstance(single_input, list) and all(isinstance(i, dict) for i in
        single_input):
        raw_messages = [single_input]
    elif isinstance(single_input, list) and all(isinstance(i, list) for i in
        single_input):
        raw_messages = single_input
    else:
        raise InputFormatError(
            f"Chat Run expects List[dict] or List[List[dict]] values for 'messages' key input. Got {inputs}"
            )
    # Only a single conversation is supported per run.
    if len(raw_messages) == 1:
        return messages_from_dict(raw_messages[0])
    else:
        raise InputFormatError(
            f"Chat Run expects single List[dict] or List[List[dict]] 'messages' input. Got {len(raw_messages)} messages from inputs {inputs}"
            )
Get Chat Messages from inputs. Args: inputs: The input dictionary. Returns: A list of chat messages. Raises: InputFormatError: If the input format is invalid.
create
...
@overload
@staticmethod
def create(messages: Sequence[Dict[str, Any]], *, provider: str=
    'ChatOpenAI', stream: Literal[True], **kwargs: Any) ->Iterable:
    # Overload stub: with stream=True the call yields an iterable of chunks.
    ...
null
get_format_instructions
return FORMAT_INSTRUCTIONS
def get_format_instructions(self) ->str:
    """Return the static format instructions for this parser."""
    return FORMAT_INSTRUCTIONS
null
test_generation_chunk
assert GenerationChunk(text='Hello, ') + GenerationChunk(text='world!' ) == GenerationChunk(text='Hello, world!' ), 'GenerationChunk + GenerationChunk should be a GenerationChunk' assert GenerationChunk(text='Hello, ') + GenerationChunk(text='world!', generation_info={'foo': 'bar'}) == GenerationChunk(text='Hello, world!', generation_info={'foo': 'bar'} ), 'GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info' assert GenerationChunk(text='Hello, ') + GenerationChunk(text='world!', generation_info={'foo': 'bar'}) + GenerationChunk(text='!', generation_info={'baz': 'foo'}) == GenerationChunk(text= 'Hello, world!!', generation_info={'foo': 'bar', 'baz': 'foo'} ), 'GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info'
def test_generation_chunk() ->None:
    """Adding GenerationChunks concatenates text and merges generation_info."""
    pair = GenerationChunk(text='Hello, ') + GenerationChunk(text='world!')
    assert pair == GenerationChunk(text='Hello, world!'
        ), 'GenerationChunk + GenerationChunk should be a GenerationChunk'
    pair_with_info = GenerationChunk(text='Hello, ') + GenerationChunk(text
        ='world!', generation_info={'foo': 'bar'})
    assert pair_with_info == GenerationChunk(text='Hello, world!',
        generation_info={'foo': 'bar'}
        ), 'GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info'
    triple = GenerationChunk(text='Hello, ') + GenerationChunk(text=
        'world!', generation_info={'foo': 'bar'}) + GenerationChunk(text=
        '!', generation_info={'baz': 'foo'})
    assert triple == GenerationChunk(text='Hello, world!!',
        generation_info={'foo': 'bar', 'baz': 'foo'}
        ), 'GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info'
null
batch
from langchain_core.beta.runnables.context import config_with_context from langchain_core.callbacks.manager import CallbackManager if not inputs: return [] configs = [config_with_context(c, self.steps) for c in get_config_list( config, len(inputs))] callback_managers = [CallbackManager.configure(inheritable_callbacks=config .get('callbacks'), local_callbacks=None, verbose=False, inheritable_tags=config.get('tags'), local_tags=None, inheritable_metadata=config.get('metadata'), local_metadata=None) for config in configs] run_managers = [cm.on_chain_start(dumpd(self), input, name=config.get( 'run_name') or self.get_name()) for cm, input, config in zip( callback_managers, inputs, configs)] try: if return_exceptions: failed_inputs_map: Dict[int, Exception] = {} for stepidx, step in enumerate(self.steps): remaining_idxs = [i for i in range(len(configs)) if i not in failed_inputs_map] inputs = step.batch([inp for i, inp in zip(remaining_idxs, inputs) if i not in failed_inputs_map], [patch_config( config, callbacks=rm.get_child(f'seq:step:{stepidx + 1}')) for i, (rm, config) in enumerate(zip(run_managers, configs)) if i not in failed_inputs_map], return_exceptions= return_exceptions, **kwargs) for i, inp in zip(remaining_idxs, inputs): if isinstance(inp, Exception): failed_inputs_map[i] = inp inputs = [inp for inp in inputs if not isinstance(inp, Exception)] if len(failed_inputs_map) == len(configs): break inputs_copy = inputs.copy() inputs = [] for i in range(len(configs)): if i in failed_inputs_map: inputs.append(cast(Input, failed_inputs_map[i])) else: inputs.append(inputs_copy.pop(0)) else: for i, step in enumerate(self.steps): inputs = step.batch(inputs, [patch_config(config, callbacks=rm. 
get_child(f'seq:step:{i + 1}')) for rm, config in zip( run_managers, configs)]) except BaseException as e: for rm in run_managers: rm.on_chain_error(e) if return_exceptions: return cast(List[Output], [e for _ in inputs]) else: raise else: first_exception: Optional[Exception] = None for run_manager, out in zip(run_managers, inputs): if isinstance(out, Exception): first_exception = first_exception or out run_manager.on_chain_error(out) else: run_manager.on_chain_end(dumpd(out)) if return_exceptions or first_exception is None: return cast(List[Output], inputs) else: raise first_exception
def batch(self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]]=None, *, return_exceptions: bool=False, **kwargs: Optional[Any]) ->List[Output]: from langchain_core.beta.runnables.context import config_with_context from langchain_core.callbacks.manager import CallbackManager if not inputs: return [] configs = [config_with_context(c, self.steps) for c in get_config_list( config, len(inputs))] callback_managers = [CallbackManager.configure(inheritable_callbacks= config.get('callbacks'), local_callbacks=None, verbose=False, inheritable_tags=config.get('tags'), local_tags=None, inheritable_metadata=config.get('metadata'), local_metadata=None) for config in configs] run_managers = [cm.on_chain_start(dumpd(self), input, name=config.get( 'run_name') or self.get_name()) for cm, input, config in zip( callback_managers, inputs, configs)] try: if return_exceptions: failed_inputs_map: Dict[int, Exception] = {} for stepidx, step in enumerate(self.steps): remaining_idxs = [i for i in range(len(configs)) if i not in failed_inputs_map] inputs = step.batch([inp for i, inp in zip(remaining_idxs, inputs) if i not in failed_inputs_map], [patch_config( config, callbacks=rm.get_child( f'seq:step:{stepidx + 1}')) for i, (rm, config) in enumerate(zip(run_managers, configs)) if i not in failed_inputs_map], return_exceptions=return_exceptions, **kwargs) for i, inp in zip(remaining_idxs, inputs): if isinstance(inp, Exception): failed_inputs_map[i] = inp inputs = [inp for inp in inputs if not isinstance(inp, Exception)] if len(failed_inputs_map) == len(configs): break inputs_copy = inputs.copy() inputs = [] for i in range(len(configs)): if i in failed_inputs_map: inputs.append(cast(Input, failed_inputs_map[i])) else: inputs.append(inputs_copy.pop(0)) else: for i, step in enumerate(self.steps): inputs = step.batch(inputs, [patch_config(config, callbacks =rm.get_child(f'seq:step:{i + 1}')) for rm, config in zip(run_managers, configs)]) except BaseException as 
e: for rm in run_managers: rm.on_chain_error(e) if return_exceptions: return cast(List[Output], [e for _ in inputs]) else: raise else: first_exception: Optional[Exception] = None for run_manager, out in zip(run_managers, inputs): if isinstance(out, Exception): first_exception = first_exception or out run_manager.on_chain_error(out) else: run_manager.on_chain_end(dumpd(out)) if return_exceptions or first_exception is None: return cast(List[Output], inputs) else: raise first_exception
null
raise_deprecation
try: import numexpr except ImportError: raise ImportError( 'LLMMathChain requires the numexpr package. Please install it with `pip install numexpr`.' ) if 'llm' in values: warnings.warn( 'Directly instantiating an LLMMathChain with an llm is deprecated. Please instantiate with llm_chain argument or using the from_llm class method.' ) if 'llm_chain' not in values and values['llm'] is not None: prompt = values.get('prompt', PROMPT) values['llm_chain'] = LLMChain(llm=values['llm'], prompt=prompt) return values
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) ->Dict:
    """Check dependencies and migrate the deprecated ``llm`` argument.

    Raises:
        ImportError: If ``numexpr`` is not installed.
    """
    # numexpr is required to evaluate the math expressions.
    try:
        import numexpr
    except ImportError:
        raise ImportError(
            'LLMMathChain requires the numexpr package. Please install it with `pip install numexpr`.'
            )
    if 'llm' in values:
        warnings.warn(
            'Directly instantiating an LLMMathChain with an llm is deprecated. Please instantiate with llm_chain argument or using the from_llm class method.'
            )
        # Build llm_chain on the caller's behalf for backward compatibility.
        if 'llm_chain' not in values and values['llm'] is not None:
            prompt = values.get('prompt', PROMPT)
            values['llm_chain'] = LLMChain(llm=values['llm'], prompt=prompt)
    return values
null
_type
return 'json-agent'
@property
def _type(self) ->str:
    """Identifier used when (de)serializing this agent."""
    return 'json-agent'
null
_is_gemini_model
return is_gemini_model(self.model_name)
@property
def _is_gemini_model(self) ->bool:
    """Whether the configured model name belongs to the Gemini family."""
    return is_gemini_model(self.model_name)
null
__init__
"""Initialize with dataframe object. Args: data_frame: DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "text". """ self.data_frame = data_frame self.page_content_column = page_content_column
def __init__(self, data_frame: Any, *, page_content_column: str='text'):
    """Initialize with dataframe object.

    Args:
        data_frame: DataFrame object (presumably pandas-compatible —
            confirm against the loader's iteration methods).
        page_content_column: Name of the column containing the page content.
            Defaults to "text".
    """
    self.data_frame = data_frame
    self.page_content_column = page_content_column
Initialize with dataframe object. Args: data_frame: DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "text".
check_queries_required
if values.get('sequential_response') and not queries: raise ValueError( 'queries is required when sequential_response is set to True') return queries
@validator('queries', always=True)
def check_queries_required(cls, queries: Optional[Mapping], values: Mapping
    [str, Any]) ->Optional[Mapping]:
    """Require ``queries`` whenever ``sequential_response`` is enabled."""
    if values.get('sequential_response') and not queries:
        raise ValueError(
            'queries is required when sequential_response is set to True')
    return queries
null
deprecated_function
"""original doc""" return 'This is a deprecated function.'
@deprecated(since='2.0.0', removal='3.0.0', pending=False)
def deprecated_function() ->str:
    """original doc"""
    # NOTE(review): the docstring is intentionally minimal — companion tests
    # presumably assert how @deprecated rewrites "original doc"; do not edit.
    return 'This is a deprecated function.'
original doc
save_context
"""Nothing should be saved or changed, my memory is set in stone.""" pass
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
    """Read-only memory: discard all context updates without side effects."""
Nothing should be saved or changed, my memory is set in stone.