method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
test_python_loader
"""Test Python loader.""" file_path = Path(__file__).parent.parent / 'examples' / filename loader = PythonLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 metadata = docs[0].metadata assert metadata['source'] == str(file_path)
@pytest.mark.parametrize('filename', ['default-encoding.py', 'non-utf8-encoding.py']) def test_python_loader(filename: str) ->None: """Test Python loader.""" file_path = Path(__file__).parent.parent / 'examples' / filename loader = PythonLoader(str(file_path)) docs = loader.load() assert len(doc...
Test Python loader.
is_lc_serializable
return True
@classmethod def is_lc_serializable(self) ->bool: return True
null
on_agent_action
"""Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.log}) resp.update(self.get_custom_callback_meta()) self.on_agent_action_records.append(resp) self.ac...
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any: """Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.log...
Run on agent action.
_type
return 'react'
@property def _type(self) ->str: return 'react'
null
test_empty
memory = ToTDFSMemory([]) self.assertEqual(self.controller(memory), ())
def test_empty(self) ->None: memory = ToTDFSMemory([]) self.assertEqual(self.controller(memory), ())
null
from_texts
"""Construct Dingo wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Dingo index This is intended to be a quick way to get started. Example: ...
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None, text_key: str ='text', index_name: Optional[str]=None, dimension: int=1024, client: Any=None, host: List[str]=['172.20.31.10:13000'], user: str='root', password:...
Construct Dingo wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Dingo index This is intended to be a quick way to get started. Example: .. code-block:: python from...
test_context_w_namespace_no_emb
expected = [{'test_namespace': 'test'}] assert base.embed({'test_namespace': 'test'}, MockEncoder()) == expected
@pytest.mark.requires('vowpal_wabbit_next') def test_context_w_namespace_no_emb() ->None: expected = [{'test_namespace': 'test'}] assert base.embed({'test_namespace': 'test'}, MockEncoder()) == expected
null
_Try
self.fill('try') self.enter() self.dispatch(t.body) self.leave() for ex in t.handlers: self.dispatch(ex) if t.orelse: self.fill('else') self.enter() self.dispatch(t.orelse) self.leave() if t.finalbody: self.fill('finally') self.enter() self.dispatch(t.finalbody) self.leave()
def _Try(self, t): self.fill('try') self.enter() self.dispatch(t.body) self.leave() for ex in t.handlers: self.dispatch(ex) if t.orelse: self.fill('else') self.enter() self.dispatch(t.orelse) self.leave() if t.finalbody: self.fill('finally') ...
null
_result_as_string
toret = 'No good search result found' if 'answer_box' in result.keys() and 'answer' in result['answer_box'].keys(): toret = result['answer_box']['answer'] elif 'answer_box' in result.keys() and 'snippet' in result['answer_box'].keys( ): toret = result['answer_box']['snippet'] elif 'knowledge_graph' in resul...
@staticmethod def _result_as_string(result: dict) ->str: toret = 'No good search result found' if 'answer_box' in result.keys() and 'answer' in result['answer_box'].keys( ): toret = result['answer_box']['answer'] elif 'answer_box' in result.keys() and 'snippet' in result['answer_box' ...
null
setUp
try: import kuzu except ImportError as e: raise ImportError( 'Cannot import Python package kuzu. Please install it by running `pip install kuzu`.' ) from e self.tmpdir = tempfile.mkdtemp() self.kuzu_database = kuzu.Database(self.tmpdir) self.conn = kuzu.Connection(self.kuzu_database) self.conn.e...
def setUp(self) ->None: try: import kuzu except ImportError as e: raise ImportError( 'Cannot import Python package kuzu. Please install it by running `pip install kuzu`.' ) from e self.tmpdir = tempfile.mkdtemp() self.kuzu_database = kuzu.Database(self.tmpdir) ...
null
test_returns_expected_results
fake_llm = FakeLLM(queries={'text': """The meaning of life CORRECT"""}, sequential_responses=True) chain = chain_cls.from_llm(fake_llm) results = chain.evaluate_strings(prediction='my prediction', reference= 'my reference', input='my input') assert results['score'] == 1
@pytest.mark.parametrize('chain_cls', [QAEvalChain, ContextQAEvalChain, CotQAEvalChain]) def test_returns_expected_results(chain_cls: Type[LLMChain]) ->None: fake_llm = FakeLLM(queries={'text': 'The meaning of life\nCORRECT'}, sequential_responses=True) chain = chain_cls.from_llm(fake_llm) resul...
null
load
pdf_id = self.send_pdf() contents = self.get_processed_pdf(pdf_id) if self.should_clean_pdf: contents = self.clean_pdf(contents) metadata = {'source': self.source, 'file_path': self.source} return [Document(page_content=contents, metadata=metadata)]
def load(self) ->List[Document]: pdf_id = self.send_pdf() contents = self.get_processed_pdf(pdf_id) if self.should_clean_pdf: contents = self.clean_pdf(contents) metadata = {'source': self.source, 'file_path': self.source} return [Document(page_content=contents, metadata=metadata)]
null
test_json_distance_evaluator_evaluate_strings_custom_operator_equal
"""Custom operator that returns 0.5 if strings are different.""" def custom_distance(a: str, b: str) ->float: return 0.5 if a != b else 0.0 evaluator = JsonEditDistanceEvaluator(string_distance=custom_distance) prediction = '{"a": "apple", "b": "banana"}' reference = '{"a": "apple", "b": "berries"}' result = evalua...
@pytest.mark.requires('rapidfuzz') def test_json_distance_evaluator_evaluate_strings_custom_operator_equal( ) ->None: """Custom operator that returns 0.5 if strings are different.""" def custom_distance(a: str, b: str) ->float: return 0.5 if a != b else 0.0 evaluator = JsonEditDistanceEvaluator...
Custom operator that returns 0.5 if strings are different.
seq_naive_rag_scoped
context = ['Hi there!', 'How are you?', "What's your name?"] retriever = RunnableLambda(lambda x: context) prompt = PromptTemplate.from_template('{context} {question}') llm = FakeListLLM(responses=['hello']) scoped = Context.create_scope('a_scope') return Context.setter('input') | {'context': retriever | Context.setter...
def seq_naive_rag_scoped() ->Runnable: context = ['Hi there!', 'How are you?', "What's your name?"] retriever = RunnableLambda(lambda x: context) prompt = PromptTemplate.from_template('{context} {question}') llm = FakeListLLM(responses=['hello']) scoped = Context.create_scope('a_scope') return C...
null
paginate_request
"""Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to 100. Also, due to the Atlassian Python package, we don't get the "next" va...
def paginate_request(self, retrieval_method: Callable, **kwargs: Any) ->List: """Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to ...
Paginate the various methods to retrieve groups of pages. Unfortunately, due to page size, sometimes the Confluence API doesn't match the limit value. If `limit` is >100 confluence seems to cap the response to 100. Also, due to the Atlassian Python package, we don't get the "next" values from the "_links" key because ...
validate_environment
"""Validate that api key and python package exists in environment.""" replicate_api_token = get_from_dict_or_env(values, 'replicate_api_token', 'REPLICATE_API_TOKEN') values['replicate_api_token'] = replicate_api_token return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" replicate_api_token = get_from_dict_or_env(values, 'replicate_api_token', 'REPLICATE_API_TOKEN') values['replicate_api_token'] = replicate_api_token return ...
Validate that api key and python package exists in environment.
build_tree
"""Builds a nested dictionary from a list of runs. :param runs: The list of runs to build the tree from. :return: The nested dictionary representing the langchain Run in a tree structure compatible with WBTraceTree. """ id_to_data = {} child_to_parent = {} for entity in runs: for...
def build_tree(self, runs: List[Dict[str, Any]]) ->Dict[str, Any]: """Builds a nested dictionary from a list of runs. :param runs: The list of runs to build the tree from. :return: The nested dictionary representing the langchain Run in a tree structure compatible with WBTraceTree. ...
Builds a nested dictionary from a list of runs. :param runs: The list of runs to build the tree from. :return: The nested dictionary representing the langchain Run in a tree structure compatible with WBTraceTree.
_llm_type
return 'ernie-bot-chat'
@property def _llm_type(self) ->str: return 'ernie-bot-chat'
null
__ror__
"""Compose this runnable with another object to create a RunnableSequence.""" return RunnableSequence(coerce_to_runnable(other), self)
def __ror__(self, other: Union[Runnable[Other, Any], Callable[[Other], Any], Callable[[Iterator[Other]], Iterator[Any]], Mapping[str, Union[Runnable [Other, Any], Callable[[Other], Any], Any]]]) ->RunnableSerializable[ Other, Output]: """Compose this runnable with another object to create a RunnableSequ...
Compose this runnable with another object to create a RunnableSequence.
on_text_common
self.text += 1
def on_text_common(self) ->None: self.text += 1
null
empty_str_to_none
"""Empty strings are not allowed""" if v == '': return None return v
@validator('*', pre=True) def empty_str_to_none(cls, v: str) ->Union[str, None]: """Empty strings are not allowed""" if v == '': return None return v
Empty strings are not allowed
create_messages
"""Create messages.""" system_message_prompt = SystemMessagePromptTemplate(prompt=PromptTemplate( template="Here's some context: {context}", input_variables=['context'])) human_message_prompt = HumanMessagePromptTemplate(prompt=PromptTemplate( template="Hello {foo}, I'm {bar}. Thanks for the {context}", inp...
def create_messages() ->List[BaseMessagePromptTemplate]: """Create messages.""" system_message_prompt = SystemMessagePromptTemplate(prompt= PromptTemplate(template="Here's some context: {context}", input_variables=['context'])) human_message_prompt = HumanMessagePromptTemplate(prompt=PromptT...
Create messages.
_import_deepsparse
from langchain_community.llms.deepsparse import DeepSparse return DeepSparse
def _import_deepsparse() ->Any: from langchain_community.llms.deepsparse import DeepSparse return DeepSparse
null
fake_llm_summarization_checker_chain
"""Fake LLMCheckerChain for testing.""" queries = {CREATE_ASSERTIONS_PROMPT.format(summary='a'): 'b', CHECK_ASSERTIONS_PROMPT.format(assertions='b'): '- b - True', REVISED_SUMMARY_PROMPT.format(checked_assertions='- b - True', summary= 'a'): 'b', ARE_ALL_TRUE_PROMPT.format(checked_assertions='- b - True'): ...
@pytest.fixture def fake_llm_summarization_checker_chain() ->LLMSummarizationCheckerChain: """Fake LLMCheckerChain for testing.""" queries = {CREATE_ASSERTIONS_PROMPT.format(summary='a'): 'b', CHECK_ASSERTIONS_PROMPT.format(assertions='b'): '- b - True', REVISED_SUMMARY_PROMPT.format(checked_ass...
Fake LLMCheckerChain for testing.
_run
"""Use the tool.""" return str(self.api_wrapper.results(query))
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" return str(self.api_wrapper.results(query))
Use the tool.
search_api
"""Search the API for the query.""" return 'API result'
@tool def search_api(query: str) ->str: """Search the API for the query.""" return 'API result'
Search the API for the query.
embed_documents
"""Call out to Infinity's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = self.client.embed(model=self.model, texts=texts) return embeddings
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Call out to Infinity's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = self.client.embed(model=self.model, texts=tex...
Call out to Infinity's embedding endpoint. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
create_index
"""Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail. """ with self.project.wait_for_project_lock(): return self.project.create_index(**kwargs)
def create_index(self, **kwargs: Any) ->Any: """Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail. """ with self.project.wait_for_project_lock(): return self.project.create_index(**kwargs)
Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail.
test_load_nonexistent_feature
"""Tests that KeyError is thrown for nonexistent feature/key in dataset""" page_content_column = 'langchain' name = 'v2' loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET, page_content_column, name) with pytest.raises(KeyError): loader.load()
@pytest.mark.requires('datasets') @pytest.fixture def test_load_nonexistent_feature() ->None: """Tests that KeyError is thrown for nonexistent feature/key in dataset""" page_content_column = 'langchain' name = 'v2' loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET, page_content_colu...
Tests that KeyError is thrown for nonexistent feature/key in dataset
_load_file_from_ids
"""Load files from a list of IDs.""" if not self.file_ids: raise ValueError('file_ids must be set') docs = [] for file_id in self.file_ids: docs.extend(self._load_file_from_id(file_id)) return docs
def _load_file_from_ids(self) ->List[Document]: """Load files from a list of IDs.""" if not self.file_ids: raise ValueError('file_ids must be set') docs = [] for file_id in self.file_ids: docs.extend(self._load_file_from_id(file_id)) return docs
Load files from a list of IDs.
__getitem__
...
@overload def __getitem__(self, index: slice) ->ChatPromptTemplate: ...
null
test_initialization_ghe
loader = GitHubIssuesLoader(repo='repo', access_token='access_token', github_api_url='https://github.example.com/api/v3') assert loader.repo == 'repo' assert loader.access_token == 'access_token' assert loader.github_api_url == 'https://github.example.com/api/v3' assert loader.headers == {'Accept': 'application/vnd...
def test_initialization_ghe() ->None: loader = GitHubIssuesLoader(repo='repo', access_token='access_token', github_api_url='https://github.example.com/api/v3') assert loader.repo == 'repo' assert loader.access_token == 'access_token' assert loader.github_api_url == 'https://github.example.com/ap...
null
_strip
return text.strip()
def _strip(text: str) ->str: return text.strip()
null
validate_environment
"""Validate that api key and python package exists in environment.""" try: import cohere except ImportError: raise ImportError( 'Could not import cohere python package. Please install it with `pip install cohere`.' ) else: cohere_api_key = get_from_dict_or_env(values, 'cohere_api_key', ...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" try: import cohere except ImportError: raise ImportError( 'Could not import cohere python package. Please install it with `pip install coher...
Validate that api key and python package exists in environment.
test_add_texts_with_metadata
index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = default_databricks_vector_search(index) vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts) metadatas = [{'feat1': str(i), 'feat2': i + 1000} for i in range(len( fake_texts))] added_ids = vectorsearch.add_texts(fake_texts, metadatas=metadatas) index.u...
@pytest.mark.requires('databricks', 'databricks.vector_search') def test_add_texts_with_metadata() ->None: index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = default_databricks_vector_search(index) vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts) metadatas = [{'feat1': str(i), 'feat2': ...
null
test_api_key_masked_when_passed_via_constructor
llm = StochasticAI(stochasticai_api_key='secret-api-key') print(llm.stochasticai_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture ) ->None: llm = StochasticAI(stochasticai_api_key='secret-api-key') print(llm.stochasticai_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
null
dummy_transform
"""Transform a dummy input for tests.""" outputs = inputs outputs['greeting' ] = f"{inputs['first_name']} {inputs['last_name']} says hello" del outputs['first_name'] del outputs['last_name'] return outputs
def dummy_transform(inputs: Dict[str, str]) ->Dict[str, str]: """Transform a dummy input for tests.""" outputs = inputs outputs['greeting' ] = f"{inputs['first_name']} {inputs['last_name']} says hello" del outputs['first_name'] del outputs['last_name'] return outputs
Transform a dummy input for tests.
run
"""Execute a DAX command and return a json representing the results.""" logger.debug('Running command: %s', command) response = requests.post(self.request_url, json=self._create_json_content( command), headers=self.headers, timeout=10) if response.status_code == 403: return ( 'TokenError: Could not logi...
def run(self, command: str) ->Any: """Execute a DAX command and return a json representing the results.""" logger.debug('Running command: %s', command) response = requests.post(self.request_url, json=self. _create_json_content(command), headers=self.headers, timeout=10) if response.status_code =...
Execute a DAX command and return a json representing the results.
test_each
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.' ) + '{question}' first_llm = FakeStreamingListLLM(responses=[ 'first item, second item, third item']) parser = FakeSplitIntoListParser() second_llm = FakeStreamingListLLM(responses=['this', 'is', 'a', 'test']) chain = prompt | first_...
def test_each(snapshot: SnapshotAssertion) ->None: prompt = SystemMessagePromptTemplate.from_template( 'You are a nice assistant.') + '{question}' first_llm = FakeStreamingListLLM(responses=[ 'first item, second item, third item']) parser = FakeSplitIntoListParser() second_llm = FakeStre...
null
run
results = self.results(query, **kwargs) return self._result_as_string(results)
def run(self, query: str, **kwargs: Any) ->str: results = self.results(query, **kwargs) return self._result_as_string(results)
null
test_pickbest_textembedder_w_full_label_w_embed_and_keep
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = rl_chain.stringify_emb...
@pytest.mark.requires('vowpal_wabbit_next') def test_pickbest_textembedder_w_full_label_w_embed_and_keep() ->None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(l...
null
test_openai_batch
"""Test batch tokens from ChatOpenAI.""" llm = ChatOpenAI(max_tokens=10) result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
@pytest.mark.scheduled def test_openai_batch() ->None: """Test batch tokens from ChatOpenAI.""" llm = ChatOpenAI(max_tokens=10) result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
Test batch tokens from ChatOpenAI.
test_partial_functions_json_output_parser
def input_iter(_: Any) ->Iterator[AIMessageChunk]: for token in STREAMED_TOKENS: yield AIMessageChunk(content='', additional_kwargs={'function_call': {'arguments': token}}) chain = input_iter | JsonOutputFunctionsParser() assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON
def test_partial_functions_json_output_parser() ->None: def input_iter(_: Any) ->Iterator[AIMessageChunk]: for token in STREAMED_TOKENS: yield AIMessageChunk(content='', additional_kwargs={ 'function_call': {'arguments': token}}) chain = input_iter | JsonOutputFunctionsParse...
null
__init__
self.llm = llm self.tools = tools self.chat_planner = load_chat_planner(llm) self.response_generator = load_response_generator(llm) self.task_executor: TaskExecutor
def __init__(self, llm: BaseLanguageModel, tools: List[BaseTool]): self.llm = llm self.tools = tools self.chat_planner = load_chat_planner(llm) self.response_generator = load_response_generator(llm) self.task_executor: TaskExecutor
null
__add__
from langchain_core.prompts.chat import ChatPromptTemplate prompt = ChatPromptTemplate(messages=[self]) return prompt + other
def __add__(self, other: Any) ->ChatPromptTemplate: from langchain_core.prompts.chat import ChatPromptTemplate prompt = ChatPromptTemplate(messages=[self]) return prompt + other
null
return_values
"""Return values of the agent.""" return ['output']
@property def return_values(self) ->List[str]: """Return values of the agent.""" return ['output']
Return values of the agent.
_run
"""Use the tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" return self.api_wrapper.run(query)
Use the tool.
__init__
"""Initialize the RedisStore with a Redis connection. Must provide either a Redis client or a redis_url with optional client_kwargs. Args: client: A Redis connection instance redis_url: redis url client_kwargs: Keyword arguments to pass to the Redis client ...
def __init__(self, *, client: Any=None, redis_url: Optional[str]=None, client_kwargs: Optional[dict]=None, ttl: Optional[int]=None, namespace: Optional[str]=None) ->None: """Initialize the RedisStore with a Redis connection. Must provide either a Redis client or a redis_url with optional client_kwa...
Initialize the RedisStore with a Redis connection. Must provide either a Redis client or a redis_url with optional client_kwargs. Args: client: A Redis connection instance redis_url: redis url client_kwargs: Keyword arguments to pass to the Redis client ttl: time to expire keys in seconds if provided,...
__init__
super().__init__(pydantic_object=LineList)
def __init__(self) ->None: super().__init__(pydantic_object=LineList)
null
similarity_search_with_score
"""Return MongoDB documents most similar to the given query and their scores. Uses the knnBeta Operator available in MongoDB Atlas Search. This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of ...
def similarity_search_with_score(self, query: str, k: int=4, pre_filter: Optional[Dict]=None, post_filter_pipeline: Optional[List[Dict]]=None ) ->List[Tuple[Document, float]]: """Return MongoDB documents most similar to the given query and their scores. Uses the knnBeta Operator available in MongoD...
Return MongoDB documents most similar to the given query and their scores. Uses the knnBeta Operator available in MongoDB Atlas Search. This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is no...
require_inference
return False
def require_inference(self) ->bool: return False
null
__str__
if not self._filter and not self._operator: raise ValueError('Improperly initialized RedisFilterExpression') if self._operator: if not isinstance(self._left, RedisFilterExpression) or not isinstance(self ._right, RedisFilterExpression): raise TypeError( 'Improper combination of filte...
def __str__(self) ->str: if not self._filter and not self._operator: raise ValueError('Improperly initialized RedisFilterExpression') if self._operator: if not isinstance(self._left, RedisFilterExpression) or not isinstance( self._right, RedisFilterExpression): raise Type...
null
test_graph_cypher_qa_chain_prompt_selection_1
qa_prompt_template = 'QA Prompt' cypher_prompt_template = 'Cypher Prompt' qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[]) chain = GraphCypherQAChain.from_llm(llm=FakeLLM(), graph=FakeGraphStore(), ver...
def test_graph_cypher_qa_chain_prompt_selection_1() ->None: qa_prompt_template = 'QA Prompt' cypher_prompt_template = 'Cypher Prompt' qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=[]) cypher_prompt = PromptTemplate(template=cypher_prompt_template, input_variables=[]) ...
null
get_knowledge_triplets
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt) buffer_string = get_buffer_string(self.chat_memory.messages[-self.k * 2:], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) output = chain.predict(history=buffer_string, input=input_string, verbose=True) knowledge = parse_triples(outpu...
def get_knowledge_triplets(self, input_string: str) ->List[KnowledgeTriple]: chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt) buffer_string = get_buffer_string(self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) output = chain.pr...
null
flush_tracker
"""Flush the tracker and setup the session. Everything after this will be a new table. Args: name: Name of the performed session so far so it is identifiable langchain_asset: The langchain asset to save. finish: Whether to finish the run. Returns: ...
def flush_tracker(self, langchain_asset: Any=None, task_type: Optional[str] ='inference', workspace: Optional[str]=None, project_name: Optional[str ]='comet-langchain-demo', tags: Optional[Sequence]=None, name: Optional [str]=None, visualizations: Optional[List[str]]=None, complexity_metrics: bool=False...
Flush the tracker and setup the session. Everything after this will be a new table. Args: name: Name of the performed session so far so it is identifiable langchain_asset: The langchain asset to save. finish: Whether to finish the run. Returns: None
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
Get the namespace of the langchain object.
_get_relevant_documents
request = self._prepare_search_request(query, **kwargs) response = self.client.search_documents(request=request) return self._parse_search_response(response=response)
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]: request = self._prepare_search_request(query, **kwargs) response = self.client.search_documents(request=request) return self._parse_search_response(response=response)
null
page_create
try: import json except ImportError: raise ImportError( 'json is not installed. Please install it with `pip install json`') params = json.loads(query) return self.confluence.create_page(**dict(params))
def page_create(self, query: str) ->str: try: import json except ImportError: raise ImportError( 'json is not installed. Please install it with `pip install json`') params = json.loads(query) return self.confluence.create_page(**dict(params))
null
input_keys
"""Get input keys. Input refers to user input here.""" return ['input']
@property def input_keys(self) ->List[str]: """Get input keys. Input refers to user input here.""" return ['input']
Get input keys. Input refers to user input here.
_construct_json_body
"""Constructs the request body as a dictionary (JSON).""" raise NotImplementedError
def _construct_json_body(self, prompt: str, params: dict) ->dict: """Constructs the request body as a dictionary (JSON).""" raise NotImplementedError
Constructs the request body as a dictionary (JSON).
_on_chain_error
crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback(f"{get_colored_text('[chain/error]', color='red')} " + get_bolded_text( f"""[{crumbs}] [{elapsed(run)}] {run_type} run errored with error: """) + f"{try_json_stringify(run.error, '[error]')}")
def _on_chain_error(self, run: Run) ->None: crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/error]', color='red')} " + get_bolded_text( f"""[{crumbs}] [{elapsed(run)}] {run_type} run errored with error: """ ...
null
format
...
@abstractmethod def format(self, event: TEvent) ->str: ...
null
test_exception_handling_callable
expected = 'foo bar' handling = lambda _: expected _tool = _FakeExceptionTool(handle_tool_error=handling) actual = _tool.run({}) assert expected == actual
def test_exception_handling_callable() ->None: expected = 'foo bar' handling = lambda _: expected _tool = _FakeExceptionTool(handle_tool_error=handling) actual = _tool.run({}) assert expected == actual
null
prepare_output
if provider == 'anthropic': response_body = json.loads(response.get('body').read().decode()) return response_body.get('completion') else: response_body = json.loads(response.get('body').read()) if provider == 'ai21': return response_body.get('completions')[0].get('data').get('text') elif provider == 'co...
@classmethod def prepare_output(cls, provider: str, response: Any) ->str: if provider == 'anthropic': response_body = json.loads(response.get('body').read().decode()) return response_body.get('completion') else: response_body = json.loads(response.get('body').read()) if provider == '...
null
predict
return None
def predict(self, event: TEvent) ->Any: return None
null
from_llm
""" Create a QAGenerationChain from a language model. Args: llm: a language model prompt: a prompt template **kwargs: additional arguments Returns: a QAGenerationChain class """ _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) chain = ...
@classmethod def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[ BasePromptTemplate]=None, **kwargs: Any) ->QAGenerationChain: """ Create a QAGenerationChain from a language model. Args: llm: a language model prompt: a prompt template **kwargs: additi...
Create a QAGenerationChain from a language model. Args: llm: a language model prompt: a prompt template **kwargs: additional arguments Returns: a QAGenerationChain class
_load_prompt
"""Load the prompt template from config.""" config = _load_template('template', config) config = _load_output_parser(config) template_format = config.get('template_format', 'f-string') if template_format == 'jinja2': raise ValueError( f"Loading templates with '{template_format}' format is no longer supporte...
def _load_prompt(config: dict) ->PromptTemplate: """Load the prompt template from config.""" config = _load_template('template', config) config = _load_output_parser(config) template_format = config.get('template_format', 'f-string') if template_format == 'jinja2': raise ValueError( ...
Load the prompt template from config.
def get_args():
    """Build and parse this script's command-line arguments.

    Returns an ``argparse.Namespace`` with a single ``docs_dir`` attribute,
    defaulting to the module-level ``_DOCS_DIR`` constant.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--docs_dir',
        type=str,
        default=_DOCS_DIR,
        help='Directory where generated markdown files are stored',
    )
    return arg_parser.parse_args()
def request(self, method: str, url: str, request: Any, timeout=None) -> Any:
    """Send an authenticated JSON request and return the decoded response body.

    Args:
        method: HTTP method name (e.g. ``"GET"``, ``"POST"``).
        url: Fully-qualified endpoint URL.
        request: JSON-serializable request payload.
        timeout: Optional timeout in seconds passed to ``requests``.
            ``None`` (the default) preserves the previous unbounded
            behavior; callers should pass a finite value to avoid
            hanging forever on an unresponsive server.

    Returns:
        The JSON-decoded response body.

    Raises:
        ValueError: If the server responds with a non-2xx status code.
    """
    headers = {'Authorization': f'Bearer {self.api_token}'}
    # Fix: the original call had no timeout, so a stalled server hung the
    # caller indefinitely. ``timeout=None`` keeps old behavior by default.
    response = requests.request(
        method=method, url=url, headers=headers, json=request, timeout=timeout
    )
    if not response.ok:
        raise ValueError(f'HTTP {response.status_code} error: {response.text}')
    return response.json()
@property
def _paths_strict(self) -> Paths:
    """Return the spec's paths, failing loudly when none are present.

    Raises:
        ValueError: If ``self.paths`` is empty or falsy.
    """
    if self.paths:
        return self.paths
    raise ValueError('No paths found in spec')
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from ...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict]]= None, ids: Optional[np.ndarray]=None, **kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas...
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore.
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
    """Do nothing when chain ends.

    Chain-end events are deliberately ignored by this handler.
    """
    return
def version_callback(show_version: bool) -> None:
    """Print the CLI version and exit when ``show_version`` is truthy."""
    if not show_version:
        return
    typer.echo(f'langchain-cli {__version__}')
    raise typer.Exit()
def _import_json_tool_JsonGetValueTool() -> Any:
    """Return the ``JsonGetValueTool`` class via a deferred import.

    The import happens at call time, keeping this module's import cheap.
    """
    from langchain_community.tools.json.tool import JsonGetValueTool
    return JsonGetValueTool
@property
def output_keys(self) -> List[str]:
    """Expect output key.

    :meta private:
    """
    key = self.output_key
    return [key]
def _import_office365_events_search() -> Any:
    """Return the ``O365SearchEvents`` class via a deferred import.

    The import happens at call time, keeping this module's import cheap.
    """
    from langchain_community.tools.office365.events_search import O365SearchEvents
    return O365SearchEvents
@abstractmethod
def to_sql_model(self, message: BaseMessage, session_id: str) -> Any:
    """Convert a BaseMessage instance to a SQLAlchemy model.

    This base declaration only marks the method abstract; concrete
    subclasses must implement the actual mapping.
    """
    raise NotImplementedError
_get_paths
"""Fetch all relative paths in the navbar.""" return [urlparse(loc.text).path for loc in soup.find_all('loc')]
def _get_paths(self, soup: Any) ->List[str]: """Fetch all relative paths in the navbar.""" return [urlparse(loc.text).path for loc in soup.find_all('loc')]
Fetch all relative paths in the navbar.
@classmethod
def get_lc_namespace(cls) -> List[str]:
    """Get the namespace of the langchain object."""
    return 'langchain llms vertexai'.split()
def _index_name(self, llm_string: str) -> str:
    """Return the cache index name (``cache:<hash>``) for *llm_string*."""
    return f'cache:{_hash(llm_string)}'
_parse_chat_history_gemini
from vertexai.preview.generative_models import Content, Image, Part def _convert_to_prompt(part: Union[str, Dict]) ->Part: if isinstance(part, str): return Part.from_text(part) if not isinstance(part, Dict): raise ValueError( f"Message's content is expected to be a dict, got {type(pa...
def _parse_chat_history_gemini(history: List[BaseMessage], project: Optional[str]) ->List['Content']: from vertexai.preview.generative_models import Content, Image, Part def _convert_to_prompt(part: Union[str, Dict]) ->Part: if isinstance(part, str): return Part.from_text(part) ...
null
format_messages
"""Format kwargs into a list of messages. Args: **kwargs: keyword arguments to use for filling in templates in messages. Returns: A list of formatted messages with all template variables filled in. """ examples = self._get_examples(**kwargs) examples = [{k: e[k] for k i...
def format_messages(self, **kwargs: Any) ->List[BaseMessage]: """Format kwargs into a list of messages. Args: **kwargs: keyword arguments to use for filling in templates in messages. Returns: A list of formatted messages with all template variables filled in. """ ...
Format kwargs into a list of messages. Args: **kwargs: keyword arguments to use for filling in templates in messages. Returns: A list of formatted messages with all template variables filled in.
similarity_search_with_relevance_scores
"""Perform a similarity search with Rockset Args: query (str): Text to look up documents similar to. distance_func (DistanceFunction): how to compute distance between two vectors in Rockset. k (int, optional): Top K neighbors to retrieve. Defaults to 4. ...
def similarity_search_with_relevance_scores(self, query: str, k: int=4, distance_func: DistanceFunction=DistanceFunction.COSINE_SIM, where_str: Optional[str]=None, **kwargs: Any) ->List[Tuple[Document, float]]: """Perform a similarity search with Rockset Args: query (str): Text to look ...
Perform a similarity search with Rockset Args: query (str): Text to look up documents similar to. distance_func (DistanceFunction): how to compute distance between two vectors in Rockset. k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): Metadat...
assert_query
expected_query = {'query': {'script_score': {'query': {'bool': {'filter': [ {'term': {'metadata.page': 0}}]}}, 'script': {'source': "cosineSimilarity(params.query_vector, 'vector') + 1.0", 'params': { 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}}}}} assert query_body == expected_query...
def assert_query(query_body: dict, query: str) ->dict: expected_query = {'query': {'script_score': {'query': {'bool': { 'filter': [{'term': {'metadata.page': 0}}]}}, 'script': {'source': "cosineSimilarity(params.query_vector, 'vector') + 1.0", 'params': {'query_vector': [1.0, 1.0, 1.0, 1.0, ...
null
load_evaluator
"""Load the requested evaluation chain specified by a string. Parameters ---------- evaluator : EvaluatorType The type of evaluator to load. llm : BaseLanguageModel, optional The language model to use for evaluation, by default None **kwargs : Any Additional keyword argument...
def load_evaluator(evaluator: EvaluatorType, *, llm: Optional[ BaseLanguageModel]=None, **kwargs: Any) ->Union[Chain, StringEvaluator]: """Load the requested evaluation chain specified by a string. Parameters ---------- evaluator : EvaluatorType The type of evaluator to load. llm : Base...
Load the requested evaluation chain specified by a string. Parameters ---------- evaluator : EvaluatorType The type of evaluator to load. llm : BaseLanguageModel, optional The language model to use for evaluation, by default None **kwargs : Any Additional keyword arguments to pass to the evaluator. Return...
def strip_python_markdown_tags(text: str) -> str:
    """Return the code inside a leading ```python fenced block, if any.

    The fence must start at the very beginning of *text* (``re.match``);
    otherwise *text* is returned unchanged.
    """
    fence = re.compile('```python\\n(.*)```', re.DOTALL)
    match = fence.match(text)
    return match.group(1) if match else text
test_llm_rails_add_documents
"""Test end to end construction and search.""" docsearch: LLMRails = LLMRails() texts1 = ['large language model', 'information retrieval', 'question answering' ] docsearch.add_texts(texts1) output1 = docsearch.similarity_search('large language model', k=1) print(output1) assert len(output1) == 1 assert output1[0].p...
def test_llm_rails_add_documents() ->None: """Test end to end construction and search.""" docsearch: LLMRails = LLMRails() texts1 = ['large language model', 'information retrieval', 'question answering'] docsearch.add_texts(texts1) output1 = docsearch.similarity_search('large language model'...
Test end to end construction and search.
def test_mdelete(redis_client: Redis) -> None:
    """Test that deletion works as expected."""
    store = RedisStore(client=redis_client, ttl=None)
    redis_client.mset({'key1': b'value1', 'key2': b'value2'})
    store.mdelete(['key1', 'key2'])
    # Both keys must be gone after the bulk delete.
    assert redis_client.mget(['key1', 'key2']) == [None, None]
@root_validator(pre=True)
def set_interface(cls, values: dict) -> dict:
    """Populate ``interface`` by authenticating against the configured network.

    Only runs authentication when no interface was supplied; the network
    defaults to ``'testnet'``.
    """
    if not values.get('interface'):
        network = values.get('network', 'testnet')
        values['interface'] = authenticate(network=network)
    return values
def get_operations(self) -> List[dict]:
    """Return the stored list of operations (the same list object)."""
    return self.operations
_parse_messages
results = [] for message in messages: message_id = message['id'] message_data = self.api_resource.users().messages().get(userId='me', format='raw', id=message_id).execute() raw_message = base64.urlsafe_b64decode(message_data['raw']) email_msg = email.message_from_bytes(raw_message) subject =...
def _parse_messages(self, messages: List[Dict[str, Any]]) ->List[Dict[str, Any] ]: results = [] for message in messages: message_id = message['id'] message_data = self.api_resource.users().messages().get(userId='me', format='raw', id=message_id).execute() raw_message = ba...
null
_Raise
self.fill('raise') if not t.exc: assert not t.cause return self.write(' ') self.dispatch(t.exc) if t.cause: self.write(' from ') self.dispatch(t.cause)
def _Raise(self, t): self.fill('raise') if not t.exc: assert not t.cause return self.write(' ') self.dispatch(t.exc) if t.cause: self.write(' from ') self.dispatch(t.cause)
null
test_appx_search_with_boolean_and_lucene_filter_throws_error
"""Test Approximate Search with Boolean and Lucene Filter throws Error.""" boolean_filter_val = {'bool': {'must': [{'term': {'text': 'baz'}}]}} lucene_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH...
def test_appx_search_with_boolean_and_lucene_filter_throws_error() ->None: """Test Approximate Search with Boolean and Lucene Filter throws Error.""" boolean_filter_val = {'bool': {'must': [{'term': {'text': 'baz'}}]}} lucene_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}} docsearch = OpenS...
Test Approximate Search with Boolean and Lucene Filter throws Error.
_type
return 'output_fixing'
@property def _type(self) ->str: return 'output_fixing'
null
def get_name(self, suffix: Optional[str]=None, *, name: Optional[str]=None
    ) -> str:
    """Return this runnable's display name, with an optional suffix.

    Falls back (by truthiness) from the explicit ``name`` argument, to
    ``self.name``, to a generated ``RunnableAssign<...>`` label built
    from the mapper's step keys.
    """
    resolved = name or self.name
    if not resolved:
        resolved = f"RunnableAssign<{','.join(self.mapper.steps.keys())}>"
    return super().get_name(suffix, name=resolved)
_extract_scheme_and_domain
"""Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain """ parsed_uri = urlparse(url) return parsed_uri.scheme, parsed_uri.netloc
def _extract_scheme_and_domain(url: str) ->Tuple[str, str]: """Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain """ parsed_uri = urlparse(url) return parsed_uri.scheme, parsed_uri.netloc
Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain
@classmethod
def setup_class(cls) -> None:
    """Fail fast when the ``YDC_API_KEY`` environment variable is missing."""
    if os.getenv('YDC_API_KEY'):
        return
    raise ValueError('YDC_API_KEY environment variable is not set')
_on_llm_error
"""Process the LLM Run upon error.""" self._process_end_trace(run)
def _on_llm_error(self, run: 'Run') ->None: """Process the LLM Run upon error.""" self._process_end_trace(run)
Process the LLM Run upon error.
def load_comments(self, soup_info: Any) -> List[Document]:
    """Load comments from a HN post.

    Each comment row becomes one Document whose metadata carries the
    page title and this loader's source URL.
    """
    title = soup_info.select_one("tr[id='pagespace']").get('title')
    docs = []
    for comment in soup_info.select("tr[class='athing comtr']"):
        docs.append(Document(page_content=comment.text.strip(), metadata={
            'source': self.web_path, 'title': title}))
    return docs
def test_integration_initialization() -> None:
    """Test chat model initialization."""
    common = {'model': 'gemini-nano', 'google_api_key': '...', 'top_k': 2,
        'top_p': 1, 'temperature': 0.7}
    # Both spellings of the candidate-count option must be accepted.
    ChatGoogleGenerativeAI(n=2, **common)
    ChatGoogleGenerativeAI(candidate_count=2, **common)