method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
test_scann_local_save_load
"""Test end to end serialization.""" texts = ['foo', 'bar', 'baz'] docsearch = ScaNN.from_texts(texts, FakeEmbeddings()) temp_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S') with tempfile.TemporaryDirectory(suffix='_' + temp_timestamp + '/' ) as temp_folder: docsearch.save_local(temp_folder) ...
def test_scann_local_save_load() ->None: """Test end to end serialization.""" texts = ['foo', 'bar', 'baz'] docsearch = ScaNN.from_texts(texts, FakeEmbeddings()) temp_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S') with tempfile.TemporaryDirectory(suffix='_' + temp_timestamp + '/' ...
Test end to end serialization.
requires_reference
return True
@property def requires_reference(self) ->bool: return True
null
test_split_text_on_tokens
"""Test splitting by tokens per chunk.""" text = 'foo bar baz 123' tokenizer = Tokenizer(chunk_overlap=3, tokens_per_chunk=7, decode=lambda it: ''.join(chr(i) for i in it), encode=lambda it: [ord(c) for c in it]) output = split_text_on_tokens(text=text, tokenizer=tokenizer) expected_output = ['foo bar', 'bar baz', ...
def test_split_text_on_tokens() ->None: """Test splitting by tokens per chunk.""" text = 'foo bar baz 123' tokenizer = Tokenizer(chunk_overlap=3, tokens_per_chunk=7, decode=lambda it: ''.join(chr(i) for i in it), encode=lambda it: [ord(c) for c in it] ) output = split_text_on_tokens(text...
Test splitting by tokens per chunk.
test_get_eth_balance
account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b' loader = EtherscanLoader(account_address, filter='eth_balance') result = loader.load() assert len(result) > 0, 'No transactions returned'
@pytest.mark.skipif(not etherscan_key_set, reason= 'Etherscan API key not provided.') def test_get_eth_balance() ->None: account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b' loader = EtherscanLoader(account_address, filter='eth_balance') result = loader.load() assert len(result) > 0, 'No t...
null
_get_page_content
"""Get page content from OneNote API""" request_url = self.onenote_api_base_url + f'/pages/{page_id}/content' response = requests.get(request_url, headers=self._headers, timeout=10) response.raise_for_status() return response.text
def _get_page_content(self, page_id: str) ->str: """Get page content from OneNote API""" request_url = self.onenote_api_base_url + f'/pages/{page_id}/content' response = requests.get(request_url, headers=self._headers, timeout=10) response.raise_for_status() return response.text
Get page content from OneNote API
from_clickup_api_wrapper
operations: List[Dict] = [{'mode': 'get_task', 'name': 'Get task', 'description': CLICKUP_GET_TASK_PROMPT}, {'mode': 'get_task_attribute', 'name': 'Get task attribute', 'description': CLICKUP_GET_TASK_ATTRIBUTE_PROMPT}, {'mode': 'get_teams', 'name': 'Get Teams', 'description': CLICKUP_GET_ALL_TEAMS_PROM...
@classmethod def from_clickup_api_wrapper(cls, clickup_api_wrapper: ClickupAPIWrapper ) ->'ClickupToolkit': operations: List[Dict] = [{'mode': 'get_task', 'name': 'Get task', 'description': CLICKUP_GET_TASK_PROMPT}, {'mode': 'get_task_attribute', 'name': 'Get task attribute', 'description': ...
null
is_lc_serializable
"""RunnableBranch is serializable if all its branches are serializable.""" return True
@classmethod def is_lc_serializable(cls) ->bool: """RunnableBranch is serializable if all its branches are serializable.""" return True
RunnableBranch is serializable if all its branches are serializable.
setUp
self.host = 'localhost' self.graph = 'test_falkordb' self.port = 6379
def setUp(self) ->None: self.host = 'localhost' self.graph = 'test_falkordb' self.port = 6379
null
test_pwd_command
"""Test correct functionality.""" session = BashProcess() commands = ['pwd'] output = session.run(commands) assert output == subprocess.check_output('pwd', shell=True).decode()
@pytest.mark.skipif(sys.platform.startswith('win'), reason= 'Test not supported on Windows') def test_pwd_command() ->None: """Test correct functionality.""" session = BashProcess() commands = ['pwd'] output = session.run(commands) assert output == subprocess.check_output('pwd', shell=True).deco...
Test correct functionality.
clear
"""Delete session from Xata table.""" while True: r = self._client.data().query(self._table_name, payload={'columns': [ 'id'], 'filter': {'sessionId': self._session_id}}) if r.status_code != 200: raise Exception(f'Error running query: {r.status_code} {r}') ids = [rec['id'] for rec in r['reco...
def clear(self) ->None: """Delete session from Xata table.""" while True: r = self._client.data().query(self._table_name, payload={'columns': ['id'], 'filter': {'sessionId': self._session_id}}) if r.status_code != 200: raise Exception(f'Error running query: {r.status_code...
Delete session from Xata table.
_call_eden_ai
""" Make an API call to the EdenAI service with the specified query parameters. Args: query_params (dict): The parameters to include in the API call. Returns: requests.Response: The response from the EdenAI API call. """ headers = {'Authorization': f'Bearer {se...
def _call_eden_ai(self, query_params: Dict[str, Any]) ->str: """ Make an API call to the EdenAI service with the specified query parameters. Args: query_params (dict): The parameters to include in the API call. Returns: requests.Response: The response from the EdenA...
Make an API call to the EdenAI service with the specified query parameters. Args: query_params (dict): The parameters to include in the API call. Returns: requests.Response: The response from the EdenAI API call.
from_texts
"""Use from components instead.""" raise NotImplementedError( 'This method is not implemented. Instead, you should initialize the class with `MatchingEngine.from_components(...)` and then call `add_texts`' )
@classmethod def from_texts(cls: Type['MatchingEngine'], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, **kwargs: Any ) ->'MatchingEngine': """Use from components instead.""" raise NotImplementedError( 'This method is not implemented. Instead, you should initializ...
Use from components instead.
test_last_message_not_human_message
messages = [HumanMessage(content='usr-msg-1'), AIMessage(content='ai-msg-1')] with pytest.raises(ValueError) as info: model.predict_messages(messages) assert info.value.args[0] == 'last message must be a HumanMessage'
def test_last_message_not_human_message(model: Llama2Chat) ->None: messages = [HumanMessage(content='usr-msg-1'), AIMessage(content= 'ai-msg-1')] with pytest.raises(ValueError) as info: model.predict_messages(messages) assert info.value.args[0] == 'last message must be a HumanMessage'
null
__init__
try: import arcgis except ImportError as e: raise ImportError( 'arcgis is required to use the ArcGIS Loader. Install it with pip or conda.' ) from e try: from bs4 import BeautifulSoup self.BEAUTIFULSOUP = BeautifulSoup except ImportError: warnings.warn('BeautifulSoup not found. HTML ...
def __init__(self, layer: Union[str, arcgis.features.FeatureLayer], gis: Optional[arcgis.gis.GIS]=None, where: str='1=1', out_fields: Optional[ Union[List[str], str]]=None, return_geometry: bool=False, result_record_count: Optional[int]=None, lyr_desc: Optional[str]=None, **kwargs: Any): try: ...
null
test_call
"""Test that call gives the correct answer.""" search = WolframAlphaAPIWrapper() output = search.run('what is 2x+18=x+5?') assert 'x = -13' in output
def test_call() ->None: """Test that call gives the correct answer.""" search = WolframAlphaAPIWrapper() output = search.run('what is 2x+18=x+5?') assert 'x = -13' in output
Test that call gives the correct answer.
_import_symblai_nebula
from langchain_community.llms.symblai_nebula import Nebula return Nebula
def _import_symblai_nebula() ->Any: from langchain_community.llms.symblai_nebula import Nebula return Nebula
null
__eq__
if isinstance(other, RunnableGenerator): if hasattr(self, '_transform') and hasattr(other, '_transform'): return self._transform == other._transform elif hasattr(self, '_atransform') and hasattr(other, '_atransform'): return self._atransform == other._atransform else: return False el...
def __eq__(self, other: Any) ->bool: if isinstance(other, RunnableGenerator): if hasattr(self, '_transform') and hasattr(other, '_transform'): return self._transform == other._transform elif hasattr(self, '_atransform') and hasattr(other, '_atransform'): return self._atransfo...
null
from_llm
"""Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) sparql_generation_select_chain = LLMChain(llm=llm, prompt=sparql_select_prompt) sparql_generation_update_chain = LLMChain(llm=llm, prompt=sparql_update_prompt) sparql_intent_chain = LLMChain(llm=llm, prompt=sparql_intent_prompt) return cls(qa_cha...
@classmethod def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate= SPARQL_QA_PROMPT, sparql_select_prompt: BasePromptTemplate= SPARQL_GENERATION_SELECT_PROMPT, sparql_update_prompt: BasePromptTemplate=SPARQL_GENERATION_UPDATE_PROMPT, sparql_intent_prompt: BasePromptTemplate=SPARQL_...
Initialize from LLM.
_type
return 'react-json-single-input'
@property def _type(self) ->str: return 'react-json-single-input'
null
test_saving_loading_llm
"""Test saving/loading an AzureML Foundation Model LLM.""" save_llm = AzureMLOnlineEndpoint(deployment_name= 'databricks-dolly-v2-12b-4', model_kwargs={'temperature': 0.03, 'top_p': 0.4, 'max_tokens': 200}) save_llm.save(file_path=tmp_path / 'azureml.yaml') loaded_llm = load_llm(tmp_path / 'azureml.yaml') asser...
def test_saving_loading_llm(tmp_path: Path) ->None: """Test saving/loading an AzureML Foundation Model LLM.""" save_llm = AzureMLOnlineEndpoint(deployment_name= 'databricks-dolly-v2-12b-4', model_kwargs={'temperature': 0.03, 'top_p': 0.4, 'max_tokens': 200}) save_llm.save(file_path=tmp_path ...
Test saving/loading an AzureML Foundation Model LLM.
output_keys
"""The keys to extract from the run.""" return ['prediction', 'input']
@property def output_keys(self) ->List[str]: """The keys to extract from the run.""" return ['prediction', 'input']
The keys to extract from the run.
test_symblai_nebula_call
"""Test valid call to Nebula.""" conversation = """Sam: Good morning, team! Let's keep this standup concise. We'll go in the usual order: what you did yesterday, what you plan to do today, and any blockers. Alex, kick us off. Alex: Morning! Yesterday, I wrapped up the UI for the user dashboard. The new chart...
def test_symblai_nebula_call() ->None: """Test valid call to Nebula.""" conversation = """Sam: Good morning, team! Let's keep this standup concise. We'll go in the usual order: what you did yesterday, what you plan to do today, and any blockers. Alex, kick us off. Alex: Morning! Yesterday, I wrapped u...
Test valid call to Nebula.
_default_meta_function
return {'source': meta['loc'], **meta}
def _default_meta_function(meta: dict, _content: Any) ->dict: return {'source': meta['loc'], **meta}
null
visit_structured_query
if structured_query.filter is None: kwargs = {} else: kwargs = {'filter': structured_query.filter.accept(self)} return structured_query.query, kwargs
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[ str, dict]: if structured_query.filter is None: kwargs = {} else: kwargs = {'filter': structured_query.filter.accept(self)} return structured_query.query, kwargs
null
create_retriever
"""Create the Milvus store and retriever.""" values['store'] = Milvus(values['embedding_function'], values[ 'collection_name'], values['connection_args'], values['consistency_level']) values['retriever'] = values['store'].as_retriever(search_kwargs={'param': values['search_params']}) return values
@root_validator(pre=True) def create_retriever(cls, values: Dict) ->Dict: """Create the Milvus store and retriever.""" values['store'] = Milvus(values['embedding_function'], values[ 'collection_name'], values['connection_args'], values[ 'consistency_level']) values['retriever'] = values['sto...
Create the Milvus store and retriever.
_import_zep
from langchain_community.vectorstores.zep import ZepVectorStore return ZepVectorStore
def _import_zep() ->Any: from langchain_community.vectorstores.zep import ZepVectorStore return ZepVectorStore
null
_embedding_vector_column
"""Return the embedding vector column configs as a dictionary. Empty if the index is not a self-managed embedding index. """ index_spec = self._delta_sync_index_spec if self._is_delta_sync_index( ) else self._direct_access_index_spec return next(iter(index_spec.get('embedding_vector_columns') or lis...
def _embedding_vector_column(self) ->dict: """Return the embedding vector column configs as a dictionary. Empty if the index is not a self-managed embedding index. """ index_spec = self._delta_sync_index_spec if self._is_delta_sync_index( ) else self._direct_access_index_spec return ...
Return the embedding vector column configs as a dictionary. Empty if the index is not a self-managed embedding index.
get_input_schema
if self.custom_input_type is not None: return super().get_input_schema(config) return self.bound.get_input_schema(merge_configs(self.config, config))
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[ BaseModel]: if self.custom_input_type is not None: return super().get_input_schema(config) return self.bound.get_input_schema(merge_configs(self.config, config))
null
_import_gmail_GmailCreateDraft
from langchain_community.tools.gmail import GmailCreateDraft return GmailCreateDraft
def _import_gmail_GmailCreateDraft() ->Any: from langchain_community.tools.gmail import GmailCreateDraft return GmailCreateDraft
null
_on_run_create
"""Start a run.""" if self.root_id is None: self.root_id = run.id self.send_stream.send_nowait(RunLogPatch({'op': 'replace', 'path': '', 'value': RunState(id=str(run.id), streamed_output=[], final_output= None, logs={})})) if not self.include_run(run): return with self.lock: self._counte...
def _on_run_create(self, run: Run) ->None: """Start a run.""" if self.root_id is None: self.root_id = run.id self.send_stream.send_nowait(RunLogPatch({'op': 'replace', 'path': '', 'value': RunState(id=str(run.id), streamed_output=[], final_output=None, logs={})})) if ...
Start a run.
validate_environment
"""Dont do anything if client provided externally""" if values.get('client') is not None: return values """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values['credentials_profile_name'] is not None: session = boto3.Session(profi...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Dont do anything if client provided externally""" if values.get('client') is not None: return values """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: ...
Dont do anything if client provided externally
__repr__
if hasattr(self, '_transform'): return f'RunnableGenerator({self._transform.__name__})' elif hasattr(self, '_atransform'): return f'RunnableGenerator({self._atransform.__name__})' else: return 'RunnableGenerator(...)'
def __repr__(self) ->str: if hasattr(self, '_transform'): return f'RunnableGenerator({self._transform.__name__})' elif hasattr(self, '_atransform'): return f'RunnableGenerator({self._atransform.__name__})' else: return 'RunnableGenerator(...)'
null
update_task
""" Update an attribute of a specified task. """ query_dict, error = load_query(query, fault_tolerant=True) if query_dict is None: return {'Error': error} url = f"{DEFAULT_URL}/task/{query_dict['task_id']}" params = {'custom_task_ids': 'true', 'team_id': self.team_id, 'include_subtasks': 'true'}...
def update_task(self, query: str) ->Dict: """ Update an attribute of a specified task. """ query_dict, error = load_query(query, fault_tolerant=True) if query_dict is None: return {'Error': error} url = f"{DEFAULT_URL}/task/{query_dict['task_id']}" params = {'custom_task_ids'...
Update an attribute of a specified task.
on_tool_error
"""Run when tool errors.""" self.metrics['step'] += 1 self.metrics['errors'] += 1
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None: """Run when tool errors.""" self.metrics['step'] += 1 self.metrics['errors'] += 1
Run when tool errors.
__post_init__
""" Initialize the store. """ _engine_args = engine_args or {} if 'pool_recycle' not in _engine_args: _engine_args['pool_recycle'] = 3600 self.engine = create_engine(self.connection_string, **_engine_args) self.create_collection()
def __post_init__(self, engine_args: Optional[dict]=None) ->None: """ Initialize the store. """ _engine_args = engine_args or {} if 'pool_recycle' not in _engine_args: _engine_args['pool_recycle'] = 3600 self.engine = create_engine(self.connection_string, **_engine_args) self...
Initialize the store.
__lt__
"""Create a Numeric less than filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("age") < 18 """ self._set_value(other, self.SUPPORTED_VAL...
def __lt__(self, other: Union[int, float]) ->'RedisFilterExpression': """Create a Numeric less than filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = ...
Create a Numeric less than filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("age") < 18
test_mdelete
store = InMemoryStore() store.mset([('key1', 'value1'), ('key2', 'value2')]) store.mdelete(['key1']) values = store.mget(['key1', 'key2']) assert values == [None, 'value2'] store.mdelete(['key3'])
def test_mdelete() ->None: store = InMemoryStore() store.mset([('key1', 'value1'), ('key2', 'value2')]) store.mdelete(['key1']) values = store.mget(['key1', 'key2']) assert values == [None, 'value2'] store.mdelete(['key3'])
null
_create_retry_decorator
"""Define retry mechanism.""" import fireworks.client errors = [fireworks.client.error.RateLimitError, fireworks.client.error. InternalServerError, fireworks.client.error.BadGatewayError, fireworks. client.error.ServiceUnavailableError] return create_base_retry_decorator(error_types=errors, max_retries=llm. ...
def _create_retry_decorator(llm: Fireworks, *, run_manager: Optional[Union[ AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None) ->Callable[ [Any], Any]: """Define retry mechanism.""" import fireworks.client errors = [fireworks.client.error.RateLimitError, fireworks.client.error ....
Define retry mechanism.
_identifying_params
return {'key': 'fake'}
@property def _identifying_params(self) ->Dict[str, Any]: return {'key': 'fake'}
null
test_llamacpp_invalid_model_kwargs
with pytest.raises(ValueError): LlamaCpp(model_path=get_model(), model_kwargs={'n_ctx': 1024})
def test_llamacpp_invalid_model_kwargs() ->None: with pytest.raises(ValueError): LlamaCpp(model_path=get_model(), model_kwargs={'n_ctx': 1024})
null
__init__
""" Initialize the controller. Args: c: The number of children to explore at each node. """ self.c = c
def __init__(self, c: int=3): """ Initialize the controller. Args: c: The number of children to explore at each node. """ self.c = c
Initialize the controller. Args: c: The number of children to explore at each node.
test_load_success
"""Test that returns one document""" loader = ArxivLoader(query='1605.08386', load_max_docs=2) docs = loader.load() assert len(docs) == 1 print(docs[0].metadata) print(docs[0].page_content) assert_docs(docs)
def test_load_success() ->None: """Test that returns one document""" loader = ArxivLoader(query='1605.08386', load_max_docs=2) docs = loader.load() assert len(docs) == 1 print(docs[0].metadata) print(docs[0].page_content) assert_docs(docs)
Test that returns one document
_validate_spark_df
try: from pyspark.sql import DataFrame as SparkLocalDataFrame return isinstance(df, SparkLocalDataFrame) except ImportError: return False
def _validate_spark_df(df: Any) ->bool: try: from pyspark.sql import DataFrame as SparkLocalDataFrame return isinstance(df, SparkLocalDataFrame) except ImportError: return False
null
test__unique_documents
assert _unique_documents(documents) == expected
@pytest.mark.parametrize('documents,expected', [([], []), ([Document( page_content='foo')], [Document(page_content='foo')]), ([Document( page_content='foo')] * 2, [Document(page_content='foo')]), ([Document( page_content='foo', metadata={'bar': 'baz'})] * 2, [Document( page_content='foo', metadata={'bar...
null
test_imessage_chat_loader_upgrade_osx11
chat_path = pathlib.Path(__file__ ).parent / 'data' / 'imessage_chat_upgrade_osx_11.db' loader = imessage.IMessageChatLoader(str(chat_path)) chat_sessions = list(utils.map_ai_messages(loader.lazy_load(), sender= 'testemail@gmail.com')) assert chat_sessions, 'Chat sessions should not be empty' assert chat_sessio...
def test_imessage_chat_loader_upgrade_osx11() ->None: chat_path = pathlib.Path(__file__ ).parent / 'data' / 'imessage_chat_upgrade_osx_11.db' loader = imessage.IMessageChatLoader(str(chat_path)) chat_sessions = list(utils.map_ai_messages(loader.lazy_load(), sender= 'testemail@gmail.com')) ...
null
close
"""Close the cloud sandbox.""" self._uploaded_files = [] self.session.close()
def close(self) ->None: """Close the cloud sandbox.""" self._uploaded_files = [] self.session.close()
Close the cloud sandbox.
_Assert
self.fill('assert ') self.dispatch(t.test) if t.msg: self.write(', ') self.dispatch(t.msg)
def _Assert(self, t): self.fill('assert ') self.dispatch(t.test) if t.msg: self.write(', ') self.dispatch(t.msg)
null
embed_query
"""Generate query embeddings using FastEmbed. Args: text: The text to embed. Returns: Embeddings for the text. """ query_embeddings: np.ndarray = next(self._model.query_embed(text)) return query_embeddings.tolist()
def embed_query(self, text: str) ->List[float]: """Generate query embeddings using FastEmbed. Args: text: The text to embed. Returns: Embeddings for the text. """ query_embeddings: np.ndarray = next(self._model.query_embed(text)) return query_embeddings.toli...
Generate query embeddings using FastEmbed. Args: text: The text to embed. Returns: Embeddings for the text.
requires_reference
""" This evaluator does not require a reference. """ return True
@property def requires_reference(self) ->bool: """ This evaluator does not require a reference. """ return True
This evaluator does not require a reference.
stream
"""Enables streaming over steps taken to reach final output.""" config = ensure_config(config) iterator = AgentExecutorIterator(self, input, config.get('callbacks'), tags =config.get('tags'), metadata=config.get('metadata'), run_name=config. get('run_name'), yield_actions=True, **kwargs) for step in iterator: ...
def stream(self, input: Union[Dict[str, Any], Any], config: Optional[ RunnableConfig]=None, **kwargs: Any) ->Iterator[AddableDict]: """Enables streaming over steps taken to reach final output.""" config = ensure_config(config) iterator = AgentExecutorIterator(self, input, config.get('callbacks'), ...
Enables streaming over steps taken to reach final output.
_get_documents
"""Fetch content from page and return Documents. Args: soup: BeautifulSoup4 soup object. Returns: List of documents. """ attachments = self._get_attachments(soup) self._download_attachments(attachments) documents = self._load_documents() return documents
def _get_documents(self, soup: Any) ->List[Document]: """Fetch content from page and return Documents. Args: soup: BeautifulSoup4 soup object. Returns: List of documents. """ attachments = self._get_attachments(soup) self._download_attachments(attachments) ...
Fetch content from page and return Documents. Args: soup: BeautifulSoup4 soup object. Returns: List of documents.
_import_ollama
from langchain_community.llms.ollama import Ollama return Ollama
def _import_ollama() ->Any: from langchain_community.llms.ollama import Ollama return Ollama
null
api_passed_via_constructor_fixture
"""Fixture to create an AzureMLChatOnlineEndpoint instance with API key passed from constructor""" azure_chat = AzureMLChatOnlineEndpoint(endpoint_url= 'https://<your-endpoint>.<your_region>.inference.ml.azure.com/score', endpoint_api_key='my-api-key') return azure_chat
@pytest.fixture(scope='class') def api_passed_via_constructor_fixture() ->AzureMLChatOnlineEndpoint: """Fixture to create an AzureMLChatOnlineEndpoint instance with API key passed from constructor""" azure_chat = AzureMLChatOnlineEndpoint(endpoint_url= 'https://<your-endpoint>.<your_region>.inferenc...
Fixture to create an AzureMLChatOnlineEndpoint instance with API key passed from constructor
get_collection
from pymongo import MongoClient test_client: MongoClient = MongoClient(CONNECTION_STRING) return test_client[DB_NAME][COLLECTION_NAME]
def get_collection() ->Any: from pymongo import MongoClient test_client: MongoClient = MongoClient(CONNECTION_STRING) return test_client[DB_NAME][COLLECTION_NAME]
null
_create_space
""" Create VectorStore space Args: dim:dimension of vector Return: code,0 failed for ,1 for success """ space_config = {'name': self.using_table_name, 'partition_num': 1, 'replica_num': 1, 'engine': {'name': 'gamma', 'index_size': 1, 'retrieval_type': 'FLA...
def _create_space(self, dim: int=1024) ->int: """ Create VectorStore space Args: dim:dimension of vector Return: code,0 failed for ,1 for success """ space_config = {'name': self.using_table_name, 'partition_num': 1, 'replica_num': 1, 'engine': {'n...
Create VectorStore space Args: dim:dimension of vector Return: code,0 failed for ,1 for success
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
test_fireworks_batch
"""Test batch tokens from ChatFireworks.""" result = chat.batch(['What is the weather in Redwood City, CA today?', 'What is the weather in Redwood City, CA today?', 'What is the weather in Redwood City, CA today?'], config={ 'max_concurrency': 2}, stop=[',']) for token in result: assert isinstance(token...
@pytest.mark.scheduled def test_fireworks_batch(chat: ChatFireworks) ->None: """Test batch tokens from ChatFireworks.""" result = chat.batch(['What is the weather in Redwood City, CA today?', 'What is the weather in Redwood City, CA today?', 'What is the weather in Redwood City, CA today?'], con...
Test batch tokens from ChatFireworks.
count
""" Count records of a store in jaguardb Args: no args Returns: (int) number of records in pod store """ podstore = self._pod + '.' + self._store q = 'select count() from ' + podstore js = self.run(q) if isinstance(js, list) and len(js) == 0: return 0 jd = json.loads(js[0]) return in...
def count(self) ->int: """ Count records of a store in jaguardb Args: no args Returns: (int) number of records in pod store """ podstore = self._pod + '.' + self._store q = 'select count() from ' + podstore js = self.run(q) if isinstance(js, list) and len(js) == 0: ...
Count records of a store in jaguardb Args: no args Returns: (int) number of records in pod store
evaluation_name
return f'pairwise_embedding_{self.distance_metric.value}_distance'
@property def evaluation_name(self) ->str: return f'pairwise_embedding_{self.distance_metric.value}_distance'
null
validate_environment
"""Validate environment variables.""" together_api_key = convert_to_secret_str(values.get('together_api_key') or os.getenv('TOGETHER_API_KEY') or '') values['together_api_key'] = together_api_key together.api_key = together_api_key.get_secret_value() values['_client'] = together.Together() return values
@root_validator() def validate_environment(cls, values: Dict[str, Any]) ->Dict[str, Any]: """Validate environment variables.""" together_api_key = convert_to_secret_str(values.get('together_api_key') or os.getenv('TOGETHER_API_KEY') or '') values['together_api_key'] = together_api_key together.a...
Validate environment variables.
__init__
"""Initialize with file path.""" self.file_path = file_path super().__init__(mode=mode, **unstructured_kwargs)
def __init__(self, file_path: Union[str, List[str]], mode: str='single', ** unstructured_kwargs: Any): """Initialize with file path.""" self.file_path = file_path super().__init__(mode=mode, **unstructured_kwargs)
Initialize with file path.
get_request_body_for_operation
"""Get the request body for a given operation.""" from openapi_pydantic import Reference request_body = operation.requestBody if isinstance(request_body, Reference): request_body = self._get_root_referenced_request_body(request_body) return request_body
def get_request_body_for_operation(self, operation: Operation) ->Optional[ RequestBody]: """Get the request body for a given operation.""" from openapi_pydantic import Reference request_body = operation.requestBody if isinstance(request_body, Reference): request_body = self._get_root_referen...
Get the request body for a given operation.
add
"""Add more documents."""
@abstractmethod def add(self, texts: Dict[str, Document]) ->None: """Add more documents."""
Add more documents.
drop_index
""" Drop an existing index. Args: index_name (str): Name of the index to drop. Returns: bool: True if the index is dropped successfully. """ try: from tair import Tair as TairClient except ImportError: raise ValueError( 'Could not import tair pyt...
@staticmethod def drop_index(index_name: str='langchain', **kwargs: Any) ->bool: """ Drop an existing index. Args: index_name (str): Name of the index to drop. Returns: bool: True if the index is dropped successfully. """ try: from tair import Ta...
Drop an existing index. Args: index_name (str): Name of the index to drop. Returns: bool: True if the index is dropped successfully.
_generate
"""Call out to Fireworks endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. """ params = {'model': self.model, **self.model_kwargs} sub_prom...
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->LLMResult: """Call out to Fireworks endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optiona...
Call out to Fireworks endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output.
create_json_agent
"""Construct a json agent from an LLM and tools.""" from langchain.agents.agent import AgentExecutor from langchain.agents.mrkl.base import ZeroShotAgent from langchain.chains.llm import LLMChain tools = toolkit.get_tools() prompt_params = {'format_instructions': format_instructions } if format_instructions is not ...
def create_json_agent(llm: BaseLanguageModel, toolkit: JsonToolkit, callback_manager: Optional[BaseCallbackManager]=None, prefix: str= JSON_PREFIX, suffix: str=JSON_SUFFIX, format_instructions: Optional[str ]=None, input_variables: Optional[List[str]]=None, verbose: bool=False, agent_executor_kwargs: Op...
Construct a json agent from an LLM and tools.
messages
"""Retrieve the messages from Neo4j""" query = ( f'MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT*0..{self._window * 2}]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node RETURN {{data:{{content: node....
@property def messages(self) ->List[BaseMessage]: """Retrieve the messages from Neo4j""" query = ( f'MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT*0..{self._window * 2}]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 U...
Retrieve the messages from Neo4j
get_non_abstract_subclasses
to_skip = {AmadeusBaseTool, BaseBrowserTool, GmailBaseTool, O365BaseTool, SlackBaseTool} subclasses = [] for subclass in cls.__subclasses__(): if not getattr(subclass, '__abstract__', None ) and not subclass.__name__.startswith('_' ) and subclass not in to_skip: subclasses.append(subclas...
def get_non_abstract_subclasses(cls: Type[BaseTool]) ->List[Type[BaseTool]]: to_skip = {AmadeusBaseTool, BaseBrowserTool, GmailBaseTool, O365BaseTool, SlackBaseTool} subclasses = [] for subclass in cls.__subclasses__(): if not getattr(subclass, '__abstract__', None ) and not subc...
null
create_vectorstore_router_agent
"""Construct a VectorStore router agent from an LLM and tools. Args: llm (BaseLanguageModel): LLM that will be used by the agent toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores callback_manager (Optional[BaseCallbackMa...
def create_vectorstore_router_agent(llm: BaseLanguageModel, toolkit: VectorStoreRouterToolkit, callback_manager: Optional[ BaseCallbackManager]=None, prefix: str=ROUTER_PREFIX, verbose: bool= False, agent_executor_kwargs: Optional[Dict[str, Any]]=None, **kwargs: Any ) ->AgentExecutor: """Construct a...
Construct a VectorStore router agent from an LLM and tools. Args: llm (BaseLanguageModel): LLM that will be used by the agent toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores callback_manager (Optional[BaseCallbackManager], optional): ...
get_tools
"""Get the tools in the toolkit.""" json_agent_tool = Tool(name='json_explorer', func=self.json_agent.run, description=DESCRIPTION) request_toolkit = RequestsToolkit(requests_wrapper=self.requests_wrapper) return [*request_toolkit.get_tools(), json_agent_tool]
def get_tools(self) ->List[BaseTool]: """Get the tools in the toolkit.""" json_agent_tool = Tool(name='json_explorer', func=self.json_agent.run, description=DESCRIPTION) request_toolkit = RequestsToolkit(requests_wrapper=self.requests_wrapper) return [*request_toolkit.get_tools(), json_agent_too...
Get the tools in the toolkit.
_create_key_encoder
"""Create an encoder for a key.""" return partial(_key_encoder, namespace=namespace)
def _create_key_encoder(namespace: str) ->Callable[[str], str]:
    """Create an encoder for a key.

    Returns a callable that applies ``_key_encoder`` with ``namespace``
    pre-bound, so every key produced through it is scoped to that namespace.
    """
    return partial(_key_encoder, namespace=namespace)
Create an encoder for a key.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None:
    """The module's public API must match the expected export list exactly."""
    exported = set(__all__)
    expected = set(EXPECTED_ALL)
    assert exported == expected
null
_get_wolfram_alpha
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_wolfram_alpha(**kwargs: Any) ->BaseTool:
    """Build the Wolfram Alpha tool, forwarding kwargs to its API wrapper."""
    wrapper = WolframAlphaAPIWrapper(**kwargs)
    return WolframAlphaQueryRun(api_wrapper=wrapper)
null
format_tool_to_openai_function
"""Format tool into the OpenAI function API.""" if tool.args_schema: return convert_pydantic_to_openai_function(tool.args_schema, name=tool. name, description=tool.description) else: return {'name': tool.name, 'description': tool.description, 'parameters': {'properties': {'__arg1': {'title': '__...
def format_tool_to_openai_function(tool: BaseTool) ->FunctionDescription: """Format tool into the OpenAI function API.""" if tool.args_schema: return convert_pydantic_to_openai_function(tool.args_schema, name= tool.name, description=tool.description) else: return {'name': tool.na...
Format tool into the OpenAI function API.
deanonymizer_mapping
"""Return the deanonymizer mapping""" return self._deanonymizer_mapping.data
@property
def deanonymizer_mapping(self) ->MappingDataType:
    """Return the deanonymizer mapping."""
    mapping = self._deanonymizer_mapping
    return mapping.data
Return the deanonymizer mapping
test_initialization
"""Test chat model initialization.""" Chat__ModuleName__()
def test_initialization() ->None:
    """Test chat model initialization.

    Constructing the model with all defaults must not raise.
    """
    # NOTE: __ModuleName__ is a template placeholder, substituted when an
    # integration package is generated from this scaffold.
    Chat__ModuleName__()
Test chat model initialization.
test__convert_dict_to_message_ai
message_dict = {'role': 'assistant', 'content': 'foo'} result = _convert_dict_to_message(message_dict) expected_output = AIMessage(content='foo') assert result == expected_output
def test__convert_dict_to_message_ai() ->None:
    """An 'assistant' role dict should convert to an AIMessage."""
    raw = {'role': 'assistant', 'content': 'foo'}
    converted = _convert_dict_to_message(raw)
    assert converted == AIMessage(content='foo')
null
mock_index
from databricks.vector_search.client import VectorSearchIndex index = MagicMock(spec=VectorSearchIndex) index.describe.return_value = index_details return index
def mock_index(index_details: dict) ->MagicMock:
    """Create a MagicMock standing in for a VectorSearchIndex.

    Its ``describe()`` call returns the supplied ``index_details`` dict.
    """
    from databricks.vector_search.client import VectorSearchIndex
    mocked = MagicMock(spec=VectorSearchIndex)
    mocked.describe.return_value = index_details
    return mocked
null
test_stream
"""Test streaming tokens from Anthropic.""" llm = ChatAnthropicMessages(model_name='claude-instant-1.2') for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
def test_stream() ->None:
    """Test streaming tokens from Anthropic."""
    model = ChatAnthropicMessages(model_name='claude-instant-1.2')
    for chunk in model.stream("I'm Pickle Rick"):
        # Every streamed chunk must carry string content.
        assert isinstance(chunk.content, str)
Test streaming tokens from Anthropic.
test_simple_question
"""Test simple question that should not need python.""" question = 'Which mammal lays the biggest eggs?' output = fake_llm_checker_chain.run(question) assert output == "I still don't know."
def test_simple_question(fake_llm_checker_chain: LLMCheckerChain) ->None:
    """Test simple question that should not need python."""
    result = fake_llm_checker_chain.run('Which mammal lays the biggest eggs?')
    assert result == "I still don't know."
Test simple question that should not need python.
serialize_outputs
if not outputs.get('generations'): raise ValueError('Cannot evaluate LLM Run without generations.') generations: List[Dict] = outputs['generations'] if not generations: raise ValueError('Cannot evaluate LLM run with empty generations.') first_generation: Dict = generations[0] if isinstance(first_generation, lis...
def serialize_outputs(self, outputs: Dict) ->str: if not outputs.get('generations'): raise ValueError('Cannot evaluate LLM Run without generations.') generations: List[Dict] = outputs['generations'] if not generations: raise ValueError('Cannot evaluate LLM run with empty generations.') f...
null
transform
if hasattr(self, 'func'): for output in self._transform_stream_with_config(input, self._transform, self._config(config, self.func), **kwargs): yield output else: raise TypeError( 'Cannot stream a coroutine function synchronously.Use `astream` instead.' )
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig
    ]=None, **kwargs: Optional[Any]) ->Iterator[Output]:
    """Synchronously stream transformed outputs for the input iterator.

    Only valid when this runnable wraps a plain (sync) function; a
    coroutine-backed runnable must be driven with ``astream`` instead.

    Raises:
        TypeError: if the runnable wraps a coroutine function.
    """
    # Guard clause: a coroutine-only runnable has no `func` attribute and
    # cannot be streamed synchronously.
    if not hasattr(self, 'func'):
        # Fixed message: original read "synchronously.Use" (missing space).
        raise TypeError(
            'Cannot stream a coroutine function synchronously. Use `astream` instead.'
            )
    yield from self._transform_stream_with_config(input, self._transform,
        self._config(config, self.func), **kwargs)
null
_import_ainetwork_rule
from langchain_community.tools.ainetwork.rule import AINRuleOps return AINRuleOps
def _import_ainetwork_rule() ->Any:
    """Lazily import and return the AINetwork rule-ops tool class."""
    from langchain_community.tools.ainetwork.rule import AINRuleOps as rule_ops
    return rule_ops
null
buffer_as_messages
"""Exposes the buffer as a list of messages in case return_messages is True.""" return self.chat_memory.messages
@property
def buffer_as_messages(self) ->List[BaseMessage]:
    """Exposes the buffer as a list of messages in case return_messages is True."""
    history = self.chat_memory
    return history.messages
Exposes the buffer as a list of messages in case return_messages is True.
_collapse_chain
if self.collapse_documents_chain is not None: return self.collapse_documents_chain else: return self.combine_documents_chain
@property
def _collapse_chain(self) ->BaseCombineDocumentsChain:
    """Chain used to collapse documents; falls back to the combine chain."""
    collapse = self.collapse_documents_chain
    return self.combine_documents_chain if collapse is None else collapse
null
test_chat_openai_streaming_llm_output_contains_model_name
"""Test llm_output contains model_name.""" chat = ChatOpenAI(max_tokens=10, streaming=True) message = HumanMessage(content='Hello') llm_result = chat.generate([[message]]) assert llm_result.llm_output is not None assert llm_result.llm_output['model_name'] == chat.model_name
def test_chat_openai_streaming_llm_output_contains_model_name() ->None:
    """Test llm_output contains model_name."""
    model = ChatOpenAI(max_tokens=10, streaming=True)
    result = model.generate([[HumanMessage(content='Hello')]])
    output = result.llm_output
    assert output is not None
    assert output['model_name'] == model.model_name
Test llm_output contains model_name.
test_pydantic_output_parser_fail
"""Test PydanticOutputParser where completion result fails schema validation.""" pydantic_parser: PydanticOutputParser[TestModel] = PydanticOutputParser( pydantic_object=TestModel) try: pydantic_parser.parse_folder(DEF_RESULT_FAIL) except OutputParserException as e: print('parse_result:', e) assert 'Fai...
def test_pydantic_output_parser_fail() ->None: """Test PydanticOutputParser where completion result fails schema validation.""" pydantic_parser: PydanticOutputParser[TestModel] = PydanticOutputParser( pydantic_object=TestModel) try: pydantic_parser.parse_folder(DEF_RESULT_FAIL) except Ou...
Test PydanticOutputParser where completion result fails schema validation.
test_redis_cache_chat
from upstash_redis import Redis langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1) llm = FakeChatModel() params = llm.dict() params['stop'] = None with pytest.warns(): llm.predict('foo') langchain.llm_cache.redis.flushall()
@pytest.mark.requires('upstash_redis')
def test_redis_cache_chat() ->None:
    """Exercise the Upstash Redis LLM cache with a fake chat model."""
    from upstash_redis import Redis
    cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1)
    langchain.llm_cache = cache
    llm = FakeChatModel()
    params = llm.dict()
    params['stop'] = None
    with pytest.warns():
        llm.predict('foo')
    # Clean up cache entries written during the test.
    cache.redis.flushall()
null
visit_Attribute
if isinstance(node.ctx, ast.Load): parent = node.value attr_expr = node.attr while isinstance(parent, ast.Attribute): attr_expr = parent.attr + '.' + attr_expr parent = parent.value if isinstance(parent, ast.Name): self.loads.add(parent.id + '.' + attr_expr) self.loads.di...
def visit_Attribute(self, node: ast.Attribute) ->Any: if isinstance(node.ctx, ast.Load): parent = node.value attr_expr = node.attr while isinstance(parent, ast.Attribute): attr_expr = parent.attr + '.' + attr_expr parent = parent.value if isinstance(parent, as...
null
from_llm
logger.warning( 'Using a deprecated class. Please use `from langchain.chains import HypotheticalDocumentEmbedder` instead' ) from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H return H.from_llm(*args, **kwargs)
@classmethod
def from_llm(cls, *args: Any, **kwargs: Any) ->Any:
    """Deprecated shim that forwards to the langchain.chains implementation."""
    logger.warning(
        'Using a deprecated class. Please use `from langchain.chains import HypotheticalDocumentEmbedder` instead'
        )
    from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as target
    return target.from_llm(*args, **kwargs)
null
_extract_code
return '\n'.join(self.source_lines[start_idx:end_idx]).rstrip('\n')
def _extract_code(self, start_idx: int, end_idx: int) ->str:
    """Join source lines in [start_idx, end_idx) and strip trailing newlines."""
    selected = self.source_lines[start_idx:end_idx]
    joined = '\n'.join(selected)
    return joined.rstrip('\n')
null
is_chat_model
"""Check if the language model is a chat model. Args: llm: Language model to check. Returns: True if the language model is a BaseChatModel model, False otherwise. """ return isinstance(llm, BaseChatModel)
def is_chat_model(llm: BaseLanguageModel) ->bool:
    """Check whether *llm* is a chat model.

    Args:
        llm: Language model to check.

    Returns:
        True if ``llm`` is a ``BaseChatModel`` instance, False otherwise.
    """
    return isinstance(llm, BaseChatModel)
Check if the language model is a chat model. Args: llm: Language model to check. Returns: True if the language model is a BaseChatModel model, False otherwise.
load
"""Load all records from FeatureLayer.""" return list(self.lazy_load())
def load(self) ->List[Document]:
    """Load all records from FeatureLayer."""
    records = self.lazy_load()
    return list(records)
Load all records from FeatureLayer.
save
"""Save prompt to file. Args: file_path: path to file. """ raise NotImplementedError()
def save(self, file_path: Union[Path, str]) ->None:
    """Save prompt to file.

    Args:
        file_path: path to file.

    Raises:
        NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError
Save prompt to file. Args: file_path: path to file.
test_conversation_chain_errors_bad_prompt
"""Test that conversation chain raise error with bad prompt.""" llm = FakeLLM() prompt = PromptTemplate(input_variables=[], template='nothing here') with pytest.raises(ValueError): ConversationChain(llm=llm, prompt=prompt)
def test_conversation_chain_errors_bad_prompt() ->None:
    """Test that conversation chain raise error with bad prompt."""
    bad_prompt = PromptTemplate(input_variables=[], template='nothing here')
    with pytest.raises(ValueError):
        ConversationChain(llm=FakeLLM(), prompt=bad_prompt)
Test that conversation chain raise error with bad prompt.
_generate
"""Call out to Ollama's generate endpoint. Args: messages: The list of base messages to pass into the model. stop: Optional list of stop words to use when generating. Returns: Chat generations from the model Example: .. code-block:: python ...
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->ChatResult: """Call out to Ollama's generate endpoint. Args: messages: The list of base messages to pass into the model. ...
Call out to Ollama's generate endpoint. Args: messages: The list of base messages to pass into the model. stop: Optional list of stop words to use when generating. Returns: Chat generations from the model Example: .. code-block:: python response = ollama([ HumanMessage(content="T...
_import_javelin_ai_gateway
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway return JavelinAIGateway
def _import_javelin_ai_gateway() ->Any:
    """Lazily import and return the Javelin AI Gateway LLM class."""
    from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway as gateway
    return gateway
null
test_confluence_loader_when_content_format_and_keep_markdown_format_enabled
mock_confluence.get_all_pages_from_space.return_value = [self. _get_mock_page('123', ContentFormat.VIEW), self._get_mock_page('456', ContentFormat.VIEW)] mock_confluence.get_all_restrictions_for_content.side_effect = [self. _get_mock_page_restrictions('123'), self._get_mock_page_restrictions('456') ] co...
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled( self, mock_confluence: MagicMock) ->None: mock_confluence.get_all_pages_from_space.return_value = [self. _get_mock_page('123', ContentFormat.VIEW), self._get_mock_page( '456', ContentFormat.VIEW)] mock_confluenc...
null
test_agent_iterator_properties_and_setters
"""Test properties and setters of AgentExecutorIterator.""" agent = _get_agent() agent.tags = None agent_iter = agent.iter(inputs='when was langchain made') assert isinstance(agent_iter, AgentExecutorIterator) assert isinstance(agent_iter.inputs, dict) assert isinstance(agent_iter.callbacks, type(None)) assert isinstan...
def test_agent_iterator_properties_and_setters() ->None: """Test properties and setters of AgentExecutorIterator.""" agent = _get_agent() agent.tags = None agent_iter = agent.iter(inputs='when was langchain made') assert isinstance(agent_iter, AgentExecutorIterator) assert isinstance(agent_iter....
Test properties and setters of AgentExecutorIterator.
_llm_type
return 'fake-messages-list-chat-model'
@property
def _llm_type(self) ->str:
    """Identifier string for this fake chat model type."""
    return 'fake-messages-list-chat-model'
null