method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
is_running
return any(not op.done() for op in operations)
def is_running(self, operations: List['Operation']) ->bool: return any(not op.done() for op in operations)
null
get_relevant_documents
assert isinstance(self, FakeRetrieverV1) return [Document(page_content=query, metadata={'uuid': '1234'})]
def get_relevant_documents(self, query: str) ->List[Document]: assert isinstance(self, FakeRetrieverV1) return [Document(page_content=query, metadata={'uuid': '1234'})]
null
test_promptlayer_chat_openai_multiple_completions
"""Test PromptLayerChatOpenAI wrapper with multiple completions.""" chat = PromptLayerChatOpenAI(max_tokens=10, n=5) message = HumanMessage(content='Hello') response = chat._generate([message]) assert isinstance(response, ChatResult) assert len(response.generations) == 5 for generation in response.generations: asse...
def test_promptlayer_chat_openai_multiple_completions() ->None: """Test PromptLayerChatOpenAI wrapper with multiple completions.""" chat = PromptLayerChatOpenAI(max_tokens=10, n=5) message = HumanMessage(content='Hello') response = chat._generate([message]) assert isinstance(response, ChatResult) ...
Test PromptLayerChatOpenAI wrapper with multiple completions.
_format_message_as_text
if isinstance(message, ChatMessage): message_text = f'\n\n{message.role.capitalize()}: {message.content}' elif isinstance(message, HumanMessage): if message.content[0].get('type') == 'text': message_text = f"[INST] {message.content[0]['text']} [/INST]" elif message.content[0].get('type') == 'image_u...
@deprecated('0.0.3', alternative='_convert_messages_to_ollama_messages') def _format_message_as_text(self, message: BaseMessage) ->str: if isinstance(message, ChatMessage): message_text = f'\n\n{message.role.capitalize()}: {message.content}' elif isinstance(message, HumanMessage): if message.con...
null
resolve_pairwise_criteria
"""Resolve the criteria for the pairwise evaluator. Args: criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional): The criteria to use. Returns: dict: The resolved criteria. """ if criteria is None: _default_criteria = [Criteria.HELPFULNESS, Criteria.RELEVANCE, Cri...
def resolve_pairwise_criteria(criteria: Optional[Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]]]) ->dict: """Resolve the criteria for the pairwise evaluator. Args: criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional): The criteria to use. Returns: dict: The resol...
Resolve the criteria for the pairwise evaluator. Args: criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional): The criteria to use. Returns: dict: The resolved criteria.
_validate_tools
"""Validate that appropriate tools are passed in.""" pass
@classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) ->None: """Validate that appropriate tools are passed in.""" pass
Validate that appropriate tools are passed in.
__getitem__
...
@overload def __getitem__(self, index: int) ->MessageLike: ...
null
atransform
if not hasattr(self, '_atransform'): raise NotImplementedError('This runnable does not support async methods.') return self._atransform_stream_with_config(input, self._atransform, config, **kwargs)
def atransform(self, input: AsyncIterator[Input], config: Optional[ RunnableConfig]=None, **kwargs: Any) ->AsyncIterator[Output]: if not hasattr(self, '_atransform'): raise NotImplementedError( 'This runnable does not support async methods.') return self._atransform_stream_with_config(in...
null
create
""" Create a EmbedchainRetriever from a YAML configuration file. Args: yaml_path: Path to the YAML configuration file. If not provided, a default configuration is used. Returns: An instance of EmbedchainRetriever. """ from embedchain impo...
@classmethod def create(cls, yaml_path: Optional[str]=None) ->EmbedchainRetriever: """ Create a EmbedchainRetriever from a YAML configuration file. Args: yaml_path: Path to the YAML configuration file. If not provided, a default configuration is used. Ret...
Create a EmbedchainRetriever from a YAML configuration file. Args: yaml_path: Path to the YAML configuration file. If not provided, a default configuration is used. Returns: An instance of EmbedchainRetriever.
lazy_load
"""Lazy load from a file path.""" dump = self._load_dump_file() for page in dump.pages: if self.skip_redirects and page.redirect: continue if self.namespaces and page.namespace not in self.namespaces: continue try: yield self._load_single_page_from_dump(page) except Exception as ...
def lazy_load(self) ->Iterator[Document]: """Lazy load from a file path.""" dump = self._load_dump_file() for page in dump.pages: if self.skip_redirects and page.redirect: continue if self.namespaces and page.namespace not in self.namespaces: continue try: ...
Lazy load from a file path.
delete
"""Delete documents from the index. Only support direct-access index. Args: ids: List of ids of documents to delete. Returns: True if successful. """ self._op_require_direct_access_index('delete') if ids is None: raise ValueError('ids must be provided.') se...
def delete(self, ids: Optional[List[Any]]=None, **kwargs: Any) ->Optional[bool ]: """Delete documents from the index. Only support direct-access index. Args: ids: List of ids of documents to delete. Returns: True if successful. """ self._op_require_...
Delete documents from the index. Only support direct-access index. Args: ids: List of ids of documents to delete. Returns: True if successful.
_tools_description
"""Get the description of the agent tools. Returns: str: The description of the agent tools. """ if self.agent_tools is None: return '' return '\n\n'.join([ f"""Tool {i}: {tool.name} Description: {tool.description}""" for i, tool in enumerate(self.agent_tools, 1)])
@property def _tools_description(self) ->str: """Get the description of the agent tools. Returns: str: The description of the agent tools. """ if self.agent_tools is None: return '' return '\n\n'.join([ f'Tool {i}: {tool.name}\nDescription: {tool.description}' fo...
Get the description of the agent tools. Returns: str: The description of the agent tools.
_import_playwright_CurrentWebPageTool
from langchain_community.tools.playwright import CurrentWebPageTool return CurrentWebPageTool
def _import_playwright_CurrentWebPageTool() ->Any: from langchain_community.tools.playwright import CurrentWebPageTool return CurrentWebPageTool
null
parse
lines = re.findall('\\d+\\..*?(?:\\n|$)', text) return LineList(lines=lines)
def parse(self, text: str) ->LineList: lines = re.findall('\\d+\\..*?(?:\\n|$)', text) return LineList(lines=lines)
null
_make_tool
if isinstance(dec_func, Runnable): runnable = dec_func if runnable.input_schema.schema().get('type') != 'object': raise ValueError('Runnable must have an object schema.') async def ainvoke_wrapper(callbacks: Optional[Callbacks]=None, **kwargs: Any) ->Any: return await runnable.ainvo...
def _make_tool(dec_func: Union[Callable, Runnable]) ->BaseTool: if isinstance(dec_func, Runnable): runnable = dec_func if runnable.input_schema.schema().get('type') != 'object': raise ValueError('Runnable must have an object schema.') async def ainvoke_wrapper(callbacks: Optiona...
null
add_node
"""Add a node to the graph and return it.""" node = Node(id=self.next_id(), data=data) self.nodes[node.id] = node return node
def add_node(self, data: Union[Type[BaseModel], RunnableType]) ->Node: """Add a node to the graph and return it.""" node = Node(id=self.next_id(), data=data) self.nodes[node.id] = node return node
Add a node to the graph and return it.
test_visit_structured_query_deep_nesting
query = 'What is the capital of France?' op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.EQ, attribute='name', value='foo'), Operation(operator= Operator.OR, arguments=[Comparison(comparator=Comparator.GT, attribute= 'qty', value=6), Comparison(comparator=Comparator.NIN, a...
def test_visit_structured_query_deep_nesting() ->None: query = 'What is the capital of France?' op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.EQ, attribute='name', value='foo'), Operation(operator= Operator.OR, arguments=[Comparison(comparator=Comparator.GT, ...
null
test_embeddings_redundant_filter
texts = ['What happened to all of my cookies?', 'Where did all of my cookies go?', 'I wish there were better Italian restaurants in my neighborhood.'] docs = [Document(page_content=t) for t in texts] embeddings = OpenAIEmbeddings() redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) actual = red...
def test_embeddings_redundant_filter() ->None: texts = ['What happened to all of my cookies?', 'Where did all of my cookies go?', 'I wish there were better Italian restaurants in my neighborhood.'] docs = [Document(page_content=t) for t in texts] embeddings = OpenAIEmbeddings() redundant...
null
test_clear
""" Test cleanup of data in the store """ self.vectorstore.clear() assert self.vectorstore.count() == 0
def test_clear(self) ->None: """ Test cleanup of data in the store """ self.vectorstore.clear() assert self.vectorstore.count() == 0
Test cleanup of data in the store
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'prompt']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'prompt']
Get the namespace of the langchain object.
_prepare_params
stop_sequences = stop or self.stop params_mapping = {'n': 'candidate_count'} params = {params_mapping.get(k, k): v for k, v in kwargs.items()} params = {**self._default_params, 'stop_sequences': stop_sequences, **params} if stream or self.streaming: params.pop('candidate_count') return params
def _prepare_params(self, stop: Optional[List[str]]=None, stream: bool= False, **kwargs: Any) ->dict: stop_sequences = stop or self.stop params_mapping = {'n': 'candidate_count'} params = {params_mapping.get(k, k): v for k, v in kwargs.items()} params = {**self._default_params, 'stop_sequences': sto...
null
test_api_key_is_string
llm = PipelineAI(pipeline_api_key='secret-api-key') assert isinstance(llm.pipeline_api_key, SecretStr)
def test_api_key_is_string() ->None: llm = PipelineAI(pipeline_api_key='secret-api-key') assert isinstance(llm.pipeline_api_key, SecretStr)
null
embed_documents
text_features = [] for text in texts: tokenized_text = self.tokenizer(text) embeddings_tensor = self.model.encode_text(tokenized_text) norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True) normalized_embeddings_tensor = embeddings_tensor.div(norm) embeddings_list = normalized_embeddings_tensor.squ...
def embed_documents(self, texts: List[str]) ->List[List[float]]: text_features = [] for text in texts: tokenized_text = self.tokenizer(text) embeddings_tensor = self.model.encode_text(tokenized_text) norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True) normalized_embeddings_te...
null
test_chroma_update_document
"""Test the update_document function in the Chroma class.""" embedding = ConsistentFakeEmbeddings() initial_content = 'foo' document_id = 'doc1' original_doc = Document(page_content=initial_content, metadata={'page': '0'}) docsearch = Chroma.from_documents(collection_name='test_collection', documents=[original_doc]...
def test_chroma_update_document() ->None: """Test the update_document function in the Chroma class.""" embedding = ConsistentFakeEmbeddings() initial_content = 'foo' document_id = 'doc1' original_doc = Document(page_content=initial_content, metadata={'page': '0'}) docsearch = Chroma.from...
Test the update_document function in the Chroma class.
test_minimax_call
"""Test valid call to minimax.""" llm = Minimax(max_tokens=10) output = llm('Hello world!') assert isinstance(output, str)
def test_minimax_call() ->None: """Test valid call to minimax.""" llm = Minimax(max_tokens=10) output = llm('Hello world!') assert isinstance(output, str)
Test valid call to minimax.
input_keys
"""Input keys for Hyde's LLM chain.""" return self.llm_chain.input_keys
@property def input_keys(self) ->List[str]: """Input keys for Hyde's LLM chain.""" return self.llm_chain.input_keys
Input keys for Hyde's LLM chain.
_import_clarifai
from langchain_community.vectorstores.clarifai import Clarifai return Clarifai
def _import_clarifai() ->Any: from langchain_community.vectorstores.clarifai import Clarifai return Clarifai
null
OutputType
for cls in self.__class__.__orig_bases__: type_args = get_args(cls) if type_args and len(type_args) == 1: return type_args[0] raise TypeError( f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. Override the OutputType property to specify the output type." )
@property def OutputType(self) ->Type[T]: for cls in self.__class__.__orig_bases__: type_args = get_args(cls) if type_args and len(type_args) == 1: return type_args[0] raise TypeError( f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. Override the Out...
null
_load_refine_chain
initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose) _refine_llm = refine_llm or llm refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose) return RefineDocumentsChain(initial_llm_chain=initial_chain, refine_llm_chain=refine_chain, document_variable_name= document_...
def _load_refine_chain(llm: BaseLanguageModel, question_prompt: BasePromptTemplate=refine_prompts.DEFAULT_TEXT_QA_PROMPT, refine_prompt: BasePromptTemplate=refine_prompts.DEFAULT_REFINE_PROMPT, document_prompt: BasePromptTemplate=refine_prompts.EXAMPLE_PROMPT, document_variable_name: str='context_str', ...
null
test_missing_apikey_raises_validation_error
with self.assertRaises(ValueError) as cm: RSpaceLoader(url=TestRSpaceLoader.url, global_id=TestRSpaceLoader.global_id ) e = cm.exception self.assertRegex(str(e), 'Did not find api_key')
def test_missing_apikey_raises_validation_error(self) ->None: with self.assertRaises(ValueError) as cm: RSpaceLoader(url=TestRSpaceLoader.url, global_id=TestRSpaceLoader. global_id) e = cm.exception self.assertRegex(str(e), 'Did not find api_key')
null
input_keys
"""Return the input keys. :meta private: """
@property @abstractmethod def input_keys(self) ->List[str]: """Return the input keys. :meta private: """
Return the input keys. :meta private:
from_browser
"""Instantiate the toolkit.""" lazy_import_playwright_browsers() return cls(sync_browser=sync_browser, async_browser=async_browser)
@classmethod def from_browser(cls, sync_browser: Optional[SyncBrowser]=None, async_browser: Optional[AsyncBrowser]=None) ->PlayWrightBrowserToolkit: """Instantiate the toolkit.""" lazy_import_playwright_browsers() return cls(sync_browser=sync_browser, async_browser=async_browser)
Instantiate the toolkit.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
on_tool_start
"""Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_tool_start', 'input_str': input_str}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) self.on_tool_start_records.append(resp) self.action_recor...
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, ** kwargs: Any) ->None: """Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_tool_start', 'input_str': input_str}) resp.update(flat...
Run when tool starts running.
score
return self.sum / len(self.queue) if len(self.queue) > 0 else 0
@property def score(self) ->float: return self.sum / len(self.queue) if len(self.queue) > 0 else 0
null
from_texts
"""Create a Zilliz collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collecti...
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str='LangChainCollection', connection_args: Optional[Dict[str, Any]]=None, consistency_level: str= 'Session', index_params: Optional[dict]=None, search_params: Optional[ dict]...
Create a Zilliz collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collection_name (str, optional): Collection name to use. De...
test_initialization
"""Test embedding model initialization.""" TogetherEmbeddings(model='togethercomputer/m2-bert-80M-8k-retrieval')
def test_initialization() ->None: """Test embedding model initialization.""" TogetherEmbeddings(model='togethercomputer/m2-bert-80M-8k-retrieval')
Test embedding model initialization.
set_handlers
"""Set handlers as the only handlers on the callback manager.""" self.handlers = [] self.inheritable_handlers = [] for handler in handlers: self.add_handler(handler, inherit=inherit)
def set_handlers(self, handlers: List[BaseCallbackHandler], inherit: bool=True ) ->None: """Set handlers as the only handlers on the callback manager.""" self.handlers = [] self.inheritable_handlers = [] for handler in handlers: self.add_handler(handler, inherit=inherit)
Set handlers as the only handlers on the callback manager.
_get_document_for_channel
try: from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled except ImportError: raise ImportError( 'You must run`pip install --upgrade youtube-transcript-api` to use the youtube loader' ) channel_id = self._get_channel_id(channel) request = self.youtube_client.search().list(pa...
def _get_document_for_channel(self, channel: str, **kwargs: Any) ->List[ Document]: try: from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled except ImportError: raise ImportError( 'You must run`pip install --upgrade youtube-transcript-api` to use the youtube...
null
test_sentence_transformers_split_text
splitter = SentenceTransformersTokenTextSplitter(model_name= 'sentence-transformers/paraphrase-albert-small-v2') text = 'lorem ipsum' text_chunks = splitter.split_text(text=text) expected_text_chunks = [text] assert expected_text_chunks == text_chunks
def test_sentence_transformers_split_text() ->None: splitter = SentenceTransformersTokenTextSplitter(model_name= 'sentence-transformers/paraphrase-albert-small-v2') text = 'lorem ipsum' text_chunks = splitter.split_text(text=text) expected_text_chunks = [text] assert expected_text_chunks == ...
null
similarity_search_with_score
"""Return Dingo documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_params: Dictionary of argument(s) to filter on metadata Returns: List of Docume...
def similarity_search_with_score(self, query: str, k: int=4, search_params: Optional[dict]=None, timeout: Optional[int]=None, **kwargs: Any) ->List[ Tuple[Document, float]]: """Return Dingo documents most similar to query, along with scores. Args: query: Text to look up documents simila...
Return Dingo documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_params: Dictionary of argument(s) to filter on metadata Returns: List of Documents most similar to the query and score for each
exists
"""Check if the given keys exist in the SQLite database.""" with self._make_session() as session: records = session.query(UpsertionRecord.key).filter(and_( UpsertionRecord.key.in_(keys), UpsertionRecord.namespace == self. namespace)).all() found_keys = set(r.key for r in records) return [(k in found...
def exists(self, keys: Sequence[str]) ->List[bool]: """Check if the given keys exist in the SQLite database.""" with self._make_session() as session: records = session.query(UpsertionRecord.key).filter(and_( UpsertionRecord.key.in_(keys), UpsertionRecord.namespace == self.namespa...
Check if the given keys exist in the SQLite database.
_import_fake
from langchain_community.llms.fake import FakeListLLM return FakeListLLM
def _import_fake() ->Any: from langchain_community.llms.fake import FakeListLLM return FakeListLLM
null
parse
"""Parse the output text. Args: text (str): The output text to parse. Returns: Dict: The parsed output. Raises: ValueError: If the verdict is invalid. """ match = _FIND_DOUBLE_BRACKETS.search(text) if match: verdict = match.group(1) if not matc...
def parse(self, text: str) ->Dict[str, Any]: """Parse the output text. Args: text (str): The output text to parse. Returns: Dict: The parsed output. Raises: ValueError: If the verdict is invalid. """ match = _FIND_DOUBLE_BRACKETS.search(tex...
Parse the output text. Args: text (str): The output text to parse. Returns: Dict: The parsed output. Raises: ValueError: If the verdict is invalid.
__init__
super().__init__() self.flags = flags
def __init__(self, *, flags: int=0, **kwargs: Any): super().__init__() self.flags = flags
null
from_llm
question_to_checked_assertions_chain = ( _load_question_to_checked_assertions_chain(llm, create_draft_answer_prompt, list_assertions_prompt, check_assertions_prompt, revised_answer_prompt)) return cls(question_to_checked_assertions_chain= question_to_checked_assertions_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, create_draft_answer_prompt: PromptTemplate=CREATE_DRAFT_ANSWER_PROMPT, list_assertions_prompt: PromptTemplate=LIST_ASSERTIONS_PROMPT, check_assertions_prompt: PromptTemplate=CHECK_ASSERTIONS_PROMPT, revised_answer_prompt: PromptTemplate=REVISED_ANSW...
null
_get_default_output_parser
"""Get default output parser for this class."""
@classmethod @abstractmethod def _get_default_output_parser(cls, **kwargs: Any) ->AgentOutputParser: """Get default output parser for this class."""
Get default output parser for this class.
fake_retriever_v1_with_kwargs
with pytest.warns(DeprecationWarning, match= 'Retrievers must implement abstract `_get_relevant_documents` method instead of `get_relevant_documents`' ): class FakeRetrieverV1(BaseRetriever): def get_relevant_documents(self, query: str, where_filter: Optional [Dict[str, object]]=None)...
@pytest.fixture def fake_retriever_v1_with_kwargs() ->BaseRetriever: with pytest.warns(DeprecationWarning, match= 'Retrievers must implement abstract `_get_relevant_documents` method instead of `get_relevant_documents`' ): class FakeRetrieverV1(BaseRetriever): def get_relevant...
null
messages
"""Return the messages that correspond to this action.""" return _convert_agent_action_to_messages(self)
@property def messages(self) ->Sequence[BaseMessage]: """Return the messages that correspond to this action.""" return _convert_agent_action_to_messages(self)
Return the messages that correspond to this action.
finish
"""Waits for all asynchronous processes to finish and data to upload. Proxy for `wandb.finish()`. """ self._wandb.finish()
def finish(self) ->None: """Waits for all asynchronous processes to finish and data to upload. Proxy for `wandb.finish()`. """ self._wandb.finish()
Waits for all asynchronous processes to finish and data to upload. Proxy for `wandb.finish()`.
escape_symbol
value = match.group(0) return f'\\{value}'
def escape_symbol(match: re.Match) ->str: value = match.group(0) return f'\\{value}'
null
_create_session_analysis_df
"""Create a dataframe with all the information from the session.""" pd = import_pandas() on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records) on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records) llm_input_prompts_df = on_llm_start_records_df[['step', 'prompts', 'name'] ].dropna(axis=1).rename(...
def _create_session_analysis_df(self) ->Any: """Create a dataframe with all the information from the session.""" pd = import_pandas() on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records) on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records) llm_input_prompts_df = on_llm_start_re...
Create a dataframe with all the information from the session.
_create_subset_model
"""Create a pydantic model with only a subset of model's fields.""" fields = {} for field_name in field_names: field = model.__fields__[field_name] fields[field_name] = field.outer_type_, field.field_info return create_model(name, **fields)
def _create_subset_model(name: str, model: BaseModel, field_names: list ) ->Type[BaseModel]: """Create a pydantic model with only a subset of model's fields.""" fields = {} for field_name in field_names: field = model.__fields__[field_name] fields[field_name] = field.outer_type_, field.f...
Create a pydantic model with only a subset of model's fields.
test_bagel
"""Test from_texts""" texts = ['hello bagel', 'hello langchain'] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts) output = txt_search.similarity_search('hello bagel', k=1) assert output == [Document(page_content='hello bagel')] txt_search.delete_cluster()
def test_bagel() ->None: """Test from_texts""" texts = ['hello bagel', 'hello langchain'] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts) output = txt_search.similarity_search('hello bagel', k=1) assert output == [Document(page_content='hello bagel')] txt_search.delete_cluster...
Test from_texts
_evaluate_strings
"""Evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): the LLM or chain prediction to evaluate. reference (Optional[str], optional): the reference label to evaluate against. input (Optional[str], optional): the input to co...
def _evaluate_strings(self, *, prediction: str, reference: Optional[str]= None, input: Optional[str]=None, callbacks: Callbacks=None, include_run_info: bool=False, **kwargs: Any) ->dict: """Evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): the LLM ...
Evaluate Chain or LLM output, based on optional input and label. Args: prediction (str): the LLM or chain prediction to evaluate. reference (Optional[str], optional): the reference label to evaluate against. input (Optional[str], optional): the input to consider during evaluation callbacks (Cal...
load
"""Load given path as pages.""" return list(self.lazy_load())
def load(self) ->List[Document]: """Load given path as pages.""" return list(self.lazy_load())
Load given path as pages.
dict
"""Return dictionary representation of prompt.""" prompt_dict = super().dict(**kwargs) try: prompt_dict['_type'] = self._prompt_type except NotImplementedError: pass return prompt_dict
def dict(self, **kwargs: Any) ->Dict: """Return dictionary representation of prompt.""" prompt_dict = super().dict(**kwargs) try: prompt_dict['_type'] = self._prompt_type except NotImplementedError: pass return prompt_dict
Return dictionary representation of prompt.
_run
"""Use the Steam-WebAPI tool.""" return self.api_wrapper.run(self.mode, query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the Steam-WebAPI tool.""" return self.api_wrapper.run(self.mode, query)
Use the Steam-WebAPI tool.
similarity_search_with_score
"""Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. any other querying variable in VectaraQueryConfig like: - lambda_val: lexical match paramete...
def similarity_search_with_score(self, query: str, **kwargs: Any) ->List[Tuple [Document, float]]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. ...
Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. any other querying variable in VectaraQueryConfig like: - lambda_val: lexical match parameter for hybrid search. - filter: filter s...
test_load_returns_full_set_of_metadata
"""Test that returns several docs""" api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True) docs = api_client.load('ChatGPT') assert len(docs) == 1 for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata).issuperset({'Published', 'Title', 'Authors', ...
def test_load_returns_full_set_of_metadata() ->None: """Test that returns several docs""" api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True) docs = api_client.load('ChatGPT') assert len(docs) == 1 for doc in docs: assert doc.page_content assert doc.metadata ...
Test that returns several docs
is_lc_serializable
"""Return whether this model can be serialized by Langchain.""" return True
@classmethod def is_lc_serializable(cls) ->bool: """Return whether this model can be serialized by Langchain.""" return True
Return whether this model can be serialized by Langchain.
__init__
try: from xinference.client import RESTfulClient except ImportError as e: raise ImportError( 'Could not import RESTfulClient from xinference. Please install it with `pip install xinference`.' ) from e model_kwargs = model_kwargs or {} super().__init__(**{'server_url': server_url, 'model_uid': mo...
def __init__(self, server_url: Optional[str]=None, model_uid: Optional[str] =None, **model_kwargs: Any): try: from xinference.client import RESTfulClient except ImportError as e: raise ImportError( 'Could not import RESTfulClient from xinference. Please install it with `pip insta...
null
similarity_search_with_score
""" Return Jaguar documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 3. lambda_val: lexical match parameter for hybrid search. where: the where clause in sele...
def similarity_search_with_score(self, query: str, k: int=3, fetch_k: int=- 1, where: Optional[str]=None, args: Optional[str]=None, metadatas: Optional[List[str]]=None, **kwargs: Any) ->List[Tuple[Document, float]]: """ Return Jaguar documents most similar to query, along with scores. Args: ...
Return Jaguar documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 3. lambda_val: lexical match parameter for hybrid search. where: the where clause in select similarity. For example a where can be "r...
add_message
"""Append the message to the Zep memory history""" from zep_python import Memory, Message zep_message = Message(content=message.content, role=message.type, metadata= metadata) zep_memory = Memory(messages=[zep_message]) self.zep_client.memory.add_memory(self.session_id, zep_memory)
def add_message(self, message: BaseMessage, metadata: Optional[Dict[str, Any]]=None) ->None: """Append the message to the Zep memory history""" from zep_python import Memory, Message zep_message = Message(content=message.content, role=message.type, metadata=metadata) zep_memory = Memory(mess...
Append the message to the Zep memory history
_get_num_tokens
return _get_language_model(self.llm).get_num_tokens(text)
def _get_num_tokens(self, text: str) ->int: return _get_language_model(self.llm).get_num_tokens(text)
null
_bing_search_results
headers = {'Ocp-Apim-Subscription-Key': self.bing_subscription_key} params = {'q': search_term, 'count': count, 'textDecorations': True, 'textFormat': 'HTML', **self.search_kwargs} response = requests.get(self.bing_search_url, headers=headers, params=params) response.raise_for_status() search_results = response.jso...
def _bing_search_results(self, search_term: str, count: int) ->List[dict]: headers = {'Ocp-Apim-Subscription-Key': self.bing_subscription_key} params = {'q': search_term, 'count': count, 'textDecorations': True, 'textFormat': 'HTML', **self.search_kwargs} response = requests.get(self.bing_search_url...
null
test_sitemap_block_does_not_exists
"""Test sitemap loader.""" loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml', blocksize=1000000, blocknum=15) with pytest.raises(ValueError, match= 'Selected sitemap does not contain enough blocks for given blocknum'): loader.load()
def test_sitemap_block_does_not_exists() ->None: """Test sitemap loader.""" loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml', blocksize=1000000, blocknum=15) with pytest.raises(ValueError, match= 'Selected sitemap does not contain enough blocks for given blocknum'): ...
Test sitemap loader.
predict_and_parse
"""Call predict and then parse the results.""" warnings.warn( 'The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.' ) result = self.predict(callbacks=callbacks, **kwargs) if self.prompt.output_parser is not None: return self.prompt.output_parser.parse_folder(resul...
def predict_and_parse(self, callbacks: Callbacks=None, **kwargs: Any) ->Union[ str, List[str], Dict[str, Any]]: """Call predict and then parse the results.""" warnings.warn( 'The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.' ) result = self....
Call predict and then parse the results.
__deepcopy__
return self
def __deepcopy__(self, memo: dict) ->'FakeAsyncCallbackHandler': return self
null
load
result = [] current_start_token = self.startToken start_time = time.time() while True: url = ( f'https://{self.blockchainType}.g.alchemy.com/nft/v2/{self.api_key}/getNFTsForCollection?withMetadata=True&contractAddress={self.contract_address}&startToken={current_start_token}' ) response = request...
def load(self) ->List[Document]: result = [] current_start_token = self.startToken start_time = time.time() while True: url = ( f'https://{self.blockchainType}.g.alchemy.com/nft/v2/{self.api_key}/getNFTsForCollection?withMetadata=True&contractAddress={self.contract_address}&startToke...
null
_get_resource
endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint)
def _get_resource(self) ->List[Document]:
    """Fetch documents for the configured ``self.resource``.

    Returns an empty list when the resource has no entry in
    ``MODERN_TREASURY_ENDPOINTS``; otherwise the lookup result is passed
    to ``self._make_request``.
    """
    endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource)
    return [] if endpoint is None else self._make_request(endpoint)
null
_on_llm_new_token
"""Process new LLM token."""
def _on_llm_new_token(self, run: Run, token: str, chunk: Optional[Union[
    GenerationChunk, ChatGenerationChunk]]) ->None:
    """Process new LLM token.

    No-op by default — presumably a hook for subclasses to react to each
    streamed ``token`` (optionally with its richer ``chunk`` form) for
    the given ``run``.
    """
Process new LLM token.
add_message
"""Add a message to the chat session in Elasticsearch""" try: from elasticsearch import ApiError self.client.index(index=self.index, document={'session_id': self. session_id, 'created_at': round(time() * 1000), 'history': json. dumps(message_to_dict(message), ensure_ascii=self.ensure_ascii)}, ...
def add_message(self, message: BaseMessage) ->None: """Add a message to the chat session in Elasticsearch""" try: from elasticsearch import ApiError self.client.index(index=self.index, document={'session_id': self. session_id, 'created_at': round(time() * 1000), 'history': json ...
Add a message to the chat session in Elasticsearch
test__filter_similar_embeddings_empty
assert len(_filter_similar_embeddings([], cosine_similarity, 0.0)) == 0
def test__filter_similar_embeddings_empty() ->None:
    """An empty embedding list yields nothing after similarity filtering."""
    filtered = _filter_similar_embeddings([], cosine_similarity, 0.0)
    assert len(filtered) == 0
null
_get_tool_return
"""Check if the tool is a returning tool.""" agent_action, observation = next_step_output name_to_tool_map = {tool.name: tool for tool in self.tools} return_value_key = 'output' if len(self.agent.return_values) > 0: return_value_key = self.agent.return_values[0] if agent_action.tool in name_to_tool_map: if name...
def _get_tool_return(self, next_step_output: Tuple[AgentAction, str] ) ->Optional[AgentFinish]: """Check if the tool is a returning tool.""" agent_action, observation = next_step_output name_to_tool_map = {tool.name: tool for tool in self.tools} return_value_key = 'output' if len(self.agent.retu...
Check if the tool is a returning tool.
on_chain_error_common
self.errors += 1
def on_chain_error_common(self) ->None:
    """Record one chain error by bumping the running error counter."""
    self.errors = self.errors + 1
null
_import_powerbi_tool_QueryPowerBITool
from langchain_community.tools.powerbi.tool import QueryPowerBITool return QueryPowerBITool
def _import_powerbi_tool_QueryPowerBITool() ->Any:
    """Return the ``QueryPowerBITool`` class, importing it lazily.

    The import happens inside the function so the dependency is only
    loaded when the tool is actually requested.
    """
    from langchain_community.tools.powerbi.tool import QueryPowerBITool

    return QueryPowerBITool
null
output_keys
"""The keys to extract from the run.""" return ['reference']
@property
def output_keys(self) ->List[str]:
    """Names of the fields extracted from the run.

    Always the single fixed key ``'reference'``.
    """
    return ['reference']
The keys to extract from the run.
test_faiss_similarity_search_with_relevance_scores_with_threshold
"""Test the similarity search with normalized similarities with score threshold.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2)) outputs = docsearch.similarity_search_with_relevance_scores('foo', k=2, score_thres...
@pytest.mark.requires('faiss') def test_faiss_similarity_search_with_relevance_scores_with_threshold() ->None: """Test the similarity search with normalized similarities with score threshold.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), relevance_score_fn=la...
Test the similarity search with normalized similarities with score threshold.
_construct_result
if self.return_intermediate_steps: extra_return_dict = {'intermediate_steps': refine_steps} else: extra_return_dict = {} return res, extra_return_dict
def _construct_result(self, refine_steps: List[str], res: str) ->Tuple[str, dict]: if self.return_intermediate_steps: extra_return_dict = {'intermediate_steps': refine_steps} else: extra_return_dict = {} return res, extra_return_dict
null
append_copy
"""Append a copy of another MutableExpander's children to this MutableExpander. """ other_records = other._child_records.copy() for record in other_records: self._create_child(record.type, record.kwargs)
def append_copy(self, other: MutableExpander) ->None:
    """Re-create every child of ``other`` on this MutableExpander.

    A snapshot of ``other``'s child records is taken first — presumably
    to guard against mutation of the record list while iterating.
    """
    snapshot = list(other._child_records)
    for child in snapshot:
        self._create_child(child.type, child.kwargs)
Append a copy of another MutableExpander's children to this MutableExpander.
test_singlestoredb_filter_metadata_7
"""Test filtering by float""" table_name = 'test_singlestoredb_filter_metadata_7' drop(table_name) docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget', 'score': i + 0.5}) for i, t in enumerate(texts)] docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(), distance_strategy=Dist...
@pytest.mark.skipif(not singlestoredb_installed, reason= 'singlestoredb not installed') def test_singlestoredb_filter_metadata_7(texts: List[str]) ->None: """Test filtering by float""" table_name = 'test_singlestoredb_filter_metadata_7' drop(table_name) docs = [Document(page_content=t, metadata={'in...
Test filtering by float
get_value_text
return self.Value.TextWithHighlightsValue.Text
def get_value_text(self) ->str:
    """Return the plain text behind this value's highlighted-text payload."""
    highlighted = self.Value.TextWithHighlightsValue
    return highlighted.Text
null
__init__
"""Initialize by creating all tables.""" self.engine = engine self.cache_schema = cache_schema self.cache_schema.metadata.create_all(self.engine)
def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache]=
    FullLLMCache):
    """Initialize by creating all tables.

    Stores the SQLAlchemy ``engine`` and the cache table schema, then
    eagerly issues the schema's ``create_all`` against that engine so
    the cache tables exist before the first read or write.
    """
    self.engine = engine
    self.cache_schema = cache_schema
    cache_schema.metadata.create_all(engine)
Initialize by creating all tables.
test_visit_operation
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.LT, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator= Comparator.LT, attribute='abc', value=['1', '2'])]) expected = {'$and': [{'foo': {'$lt': 2}}, {'bar': ...
def test_visit_operation() ->None: op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.LT, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator =Comparator.LT, attribute='abc', value=['1', '2'])])...
null
test_singlestoredb_add_texts_to_existing
"""Test adding a new document""" table_name = 'test_singlestoredb_add_texts_to_existing' drop(table_name) SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name= table_name, host=TEST_SINGLESTOREDB_URL) docsearch = SingleStoreDB(NormilizedFakeEmbeddings(), table_name=table_name, host=TEST_SINGLE...
@pytest.mark.skipif(not singlestoredb_installed, reason= 'singlestoredb not installed') def test_singlestoredb_add_texts_to_existing(texts: List[str]) ->None: """Test adding a new document""" table_name = 'test_singlestoredb_add_texts_to_existing' drop(table_name) SingleStoreDB.from_texts(texts, Nor...
Test adding a new document
_raise_functions_not_supported
raise ValueError( 'Function messages are not supported by the Javelin AI Gateway. Please create a feature request at https://docs.getjavelin.io' )
@staticmethod def _raise_functions_not_supported() ->None: raise ValueError( 'Function messages are not supported by the Javelin AI Gateway. Please create a feature request at https://docs.getjavelin.io' )
null
test_milvus
"""Test end to end construction and search.""" docsearch = _milvus_from_texts() output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_milvus() ->None:
    """Test end to end construction and search."""
    store = _milvus_from_texts()
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo')]
Test end to end construction and search.
add_vector_field
if self.vector is None: self.vector = [] if vector_field['algorithm'] == 'FLAT': self.vector.append(FlatVectorField(**vector_field)) elif vector_field['algorithm'] == 'HNSW': self.vector.append(HNSWVectorField(**vector_field)) else: raise ValueError( f"algorithm must be either FLAT or HNSW. Got ...
def add_vector_field(self, vector_field: Dict[str, Any]) ->None: if self.vector is None: self.vector = [] if vector_field['algorithm'] == 'FLAT': self.vector.append(FlatVectorField(**vector_field)) elif vector_field['algorithm'] == 'HNSW': self.vector.append(HNSWVectorField(**vector_...
null
_make_iterator
"""Create a function that optionally wraps an iterable in tqdm.""" if show_progress: try: from tqdm.auto import tqdm except ImportError: raise ImportError( 'You must install tqdm to use show_progress=True.You can install tqdm with `pip install tqdm`.' ) def _with_tqd...
def _make_iterator(length_func: Callable[[], int], show_progress: bool=False ) ->Callable[[Iterable[T]], Iterator[T]]: """Create a function that optionally wraps an iterable in tqdm.""" if show_progress: try: from tqdm.auto import tqdm except ImportError: raise Import...
Create a function that optionally wraps an iterable in tqdm.
save_context
"""Save context from this conversation to buffer. Pruned.""" super().save_context(inputs, outputs) buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_length > self.max_token_limit: pruned_memory = [] while curr_buffer_length > self.max_token_limi...
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None: """Save context from this conversation to buffer. Pruned.""" super().save_context(inputs, outputs) buffer = self.chat_memory.messages curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer) if curr_buffer_leng...
Save context from this conversation to buffer. Pruned.
__init__
self.query = query self.page_content_field = page_content_field self.secret = secret self.metadata_fields = metadata_fields
def __init__(self, query: str, page_content_field: str, secret: str, metadata_fields: Optional[Sequence[str]]=None): self.query = query self.page_content_field = page_content_field self.secret = secret self.metadata_fields = metadata_fields
null
convert_to_base64
""" Convert PIL images to Base64 encoded strings :param pil_image: PIL image :return: Re-sized Base64 string """ buffered = BytesIO() pil_image.save(buffered, format='JPEG') img_str = base64.b64encode(buffered.getvalue()).decode('utf-8') return img_str
def convert_to_base64(pil_image): """ Convert PIL images to Base64 encoded strings :param pil_image: PIL image :return: Re-sized Base64 string """ buffered = BytesIO() pil_image.save(buffered, format='JPEG') img_str = base64.b64encode(buffered.getvalue()).decode('utf-8') return img_...
Convert PIL images to Base64 encoded strings :param pil_image: PIL image :return: Base64-encoded JPEG string (no resizing is performed)
output_keys
"""The checker output keys. :meta private: """ return [self.output_key]
@property
def output_keys(self) ->List[str]:
    """Single-element list holding the configured output key.

    :meta private:
    """
    key = self.output_key
    return [key]
The checker output keys. :meta private:
test_math_question_1
"""Test simple question.""" question = """Olivia has $23. She bought five bagels for $3 each. How much money does she have left?""" prompt = MATH_PROMPT.format(question=question) queries = {prompt: _MATH_SOLUTION_1} fake_llm = FakeLLM(queries=queries) fake_pal_chain = PALChain.from_math_prompt(fake_llm...
def test_math_question_1() ->None: """Test simple question.""" question = """Olivia has $23. She bought five bagels for $3 each. How much money does she have left?""" prompt = MATH_PROMPT.format(question=question) queries = {prompt: _MATH_SOLUTION_1} fake_llm = FakeLLM(queries=queri...
Test simple question.
_split_text_with_regex
if separator: if keep_separator: _splits = re.split(f'({separator})', text) splits = [(_splits[i] + _splits[i + 1]) for i in range(1, len( _splits), 2)] if len(_splits) % 2 == 0: splits += _splits[-1:] splits = [_splits[0]] + splits else: splits = ...
def _split_text_with_regex(text: str, separator: str, keep_separator: bool ) ->List[str]: if separator: if keep_separator: _splits = re.split(f'({separator})', text) splits = [(_splits[i] + _splits[i + 1]) for i in range(1, len( _splits), 2)] if len(_s...
null
create_client
values['store'] = Zilliz(values['embedding_function'], values[ 'collection_name'], values['connection_args'], values['consistency_level']) values['retriever'] = values['store'].as_retriever(search_kwargs={'param': values['search_params']}) return values
@root_validator(pre=True)
def create_client(cls, values: dict) ->dict:
    """Build the Zilliz store and its retriever before validation.

    Populates ``values['store']`` from the configured embedding
    function, collection, connection args and consistency level, then
    derives ``values['retriever']`` using the configured search params.
    """
    store = Zilliz(
        values['embedding_function'],
        values['collection_name'],
        values['connection_args'],
        values['consistency_level'],
    )
    values['store'] = store
    values['retriever'] = store.as_retriever(
        search_kwargs={'param': values['search_params']}
    )
    return values
null
_post_process_elements
"""Applies post processing functions to extracted unstructured elements. Post processing functions are str -> str callables are passed in using the post_processors kwarg when the loader is instantiated.""" for element in elements: for post_processor in self.post_processors: element.apply(pos...
def _post_process_elements(self, elements: list) ->list: """Applies post processing functions to extracted unstructured elements. Post processing functions are str -> str callables are passed in using the post_processors kwarg when the loader is instantiated.""" for element in elements: ...
Applies post processing functions to extracted unstructured elements. Post processing functions are str -> str callables are passed in using the post_processors kwarg when the loader is instantiated.
_run
"""Use the Semantic Scholar tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the Semantic Scholar tool.

    Delegates the raw ``query`` to the wrapped API; ``run_manager`` is
    accepted for interface parity but not used here.
    """
    wrapper = self.api_wrapper
    return wrapper.run(query)
Use the Semantic Scholar tool.
_Constant
value = t.value if isinstance(value, tuple): self.write('(') if len(value) == 1: self._write_constant(value[0]) self.write(',') else: interleave(lambda : self.write(', '), self._write_constant, value) self.write(')') elif value is ...: self.write('...') else: if t.kind ==...
def _Constant(self, t): value = t.value if isinstance(value, tuple): self.write('(') if len(value) == 1: self._write_constant(value[0]) self.write(',') else: interleave(lambda : self.write(', '), self._write_constant, value) self.write(')') ...
null