method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
test_create_bash_persistent
"""Test the pexpect persistent bash terminal""" session = BashProcess(persistent=True) response = session.run('echo hello') response += session.run('echo world') assert 'hello' in response assert 'world' in response
@pytest.mark.skip(reason='flaky on GHA, TODO to fix') @pytest.mark.skipif(sys.platform.startswith('win'), reason= 'Test not supported on Windows') def test_create_bash_persistent() ->None: """Test the pexpect persistent bash terminal""" session = BashProcess(persistent=True) response = session.run('echo...
Test the pexpect persistent bash terminal
visit_structured_query
if structured_query.filter is None: kwargs = {} else: kwargs = {'filter': structured_query.filter.accept(self)} return structured_query.query, kwargs
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[ str, dict]: if structured_query.filter is None: kwargs = {} else: kwargs = {'filter': structured_query.filter.accept(self)} return structured_query.query, kwargs
null
validate_environment
"""Validate that api key and endpoint exists in environment.""" metaphor_api_key = get_from_dict_or_env(values, 'metaphor_api_key', 'METAPHOR_API_KEY') values['metaphor_api_key'] = metaphor_api_key return values
@root_validator(pre=True) def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and endpoint exists in environment.""" metaphor_api_key = get_from_dict_or_env(values, 'metaphor_api_key', 'METAPHOR_API_KEY') values['metaphor_api_key'] = metaphor_api_key return values
Validate that api key and endpoint exists in environment.
embed_query
"""Call out to DashScope's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = embed_with_retry(self, input=text, text_type='query', model= self.model)[0]['embedding'] return embedding
def embed_query(self, text: str) ->List[float]: """Call out to DashScope's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embedding = embed_with_retry(self, input=text, text_type='query', model ...
Call out to DashScope's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embedding for the text.
import_installed_app_flow
"""Import InstalledAppFlow class. Returns: InstalledAppFlow: InstalledAppFlow class. """ try: from google_auth_oauthlib.flow import InstalledAppFlow except ImportError: raise ImportError( 'You need to install google-auth-oauthlib to use this toolkit. Try running pip install --upgrade go...
def import_installed_app_flow() ->InstalledAppFlow: """Import InstalledAppFlow class. Returns: InstalledAppFlow: InstalledAppFlow class. """ try: from google_auth_oauthlib.flow import InstalledAppFlow except ImportError: raise ImportError( 'You need to install go...
Import InstalledAppFlow class. Returns: InstalledAppFlow: InstalledAppFlow class.
text
"""Print a text on ASCII canvas. Args: x (int): x coordinate where the text should start. y (int): y coordinate where the text should start. text (str): string that should be printed. """ for i, char in enumerate(text): self.point(x + i, y, char)
def text(self, x: int, y: int, text: str) ->None: """Print a text on ASCII canvas. Args: x (int): x coordinate where the text should start. y (int): y coordinate where the text should start. text (str): string that should be printed. """ for i, char in enumer...
Print a text on ASCII canvas. Args: x (int): x coordinate where the text should start. y (int): y coordinate where the text should start. text (str): string that should be printed.
_run
"""Use the WolframAlpha tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the WolframAlpha tool.""" return self.api_wrapper.run(query)
Use the WolframAlpha tool.
invoke
if self.func is not None: call_func_with_variable_args(self.func, input, ensure_config(config), **kwargs) return self._call_with_config(identity, input, config)
def invoke(self, input: Other, config: Optional[RunnableConfig]=None, ** kwargs: Any) ->Other: if self.func is not None: call_func_with_variable_args(self.func, input, ensure_config(config ), **kwargs) return self._call_with_config(identity, input, config)
null
gen
yield fake.invoke(input) yield fake.invoke(input * 2) yield fake.invoke(input * 3)
@chain def gen(input: str) ->Iterator[int]: yield fake.invoke(input) yield fake.invoke(input * 2) yield fake.invoke(input * 3)
null
on_chain_start
self.on_chain_start_common()
def on_chain_start(self, *args: Any, **kwargs: Any) ->Any: self.on_chain_start_common()
null
clear
"""Remove the session's messages from the cache. Raises: SdkException: Momento service or network error. Exception: Unexpected response. """ from momento.responses import CacheDelete delete_response = self.cache_client.delete(self.cache_name, self.key) if isinstance(delete_respo...
def clear(self) ->None: """Remove the session's messages from the cache. Raises: SdkException: Momento service or network error. Exception: Unexpected response. """ from momento.responses import CacheDelete delete_response = self.cache_client.delete(self.cache_name, ...
Remove the session's messages from the cache. Raises: SdkException: Momento service or network error. Exception: Unexpected response.
_prepare_eval_run
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name) dataset = client.read_dataset(dataset_name=dataset_name) examples = list(client.list_examples(dataset_id=dataset.id)) if not examples: raise ValueError(f'Dataset {dataset_name} has no example rows.') try: git_info = get_git_info() if...
def _prepare_eval_run(client: Client, dataset_name: str, llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, project_name: str, project_metadata: Optional[Dict[str, Any]]=None, tags: Optional[List[ str]]=None) ->Tuple[MCF, TracerSession, Dataset, List[Example]]: wrapped_model = _wrap_in_chain_factory(llm_or_c...
null
test_multiple_output_keys_error
"""Test run with multiple output keys errors as expected.""" chain = FakeChain(the_output_keys=['foo', 'bar']) with pytest.raises(ValueError): chain.run('bar')
def test_multiple_output_keys_error() ->None: """Test run with multiple output keys errors as expected.""" chain = FakeChain(the_output_keys=['foo', 'bar']) with pytest.raises(ValueError): chain.run('bar')
Test run with multiple output keys errors as expected.
_load_graph_cypher_chain
if 'graph' in kwargs: graph = kwargs.pop('graph') else: raise ValueError('`graph` must be present.') if 'cypher_generation_chain' in config: cypher_generation_chain_config = config.pop('cypher_generation_chain') cypher_generation_chain = load_chain_from_config( cypher_generation_chain_config) el...
def _load_graph_cypher_chain(config: dict, **kwargs: Any) ->GraphCypherQAChain: if 'graph' in kwargs: graph = kwargs.pop('graph') else: raise ValueError('`graph` must be present.') if 'cypher_generation_chain' in config: cypher_generation_chain_config = config.pop('cypher_generation_...
null
fetch_space_id
"""Fetch the space id.""" url = f'{DEFAULT_URL}/team/{team_id}/space' data = fetch_data(url, access_token, query={'archived': 'false'}) return fetch_first_id(data, 'spaces')
def fetch_space_id(team_id: int, access_token: str) ->Optional[int]: """Fetch the space id.""" url = f'{DEFAULT_URL}/team/{team_id}/space' data = fetch_data(url, access_token, query={'archived': 'false'}) return fetch_first_id(data, 'spaces')
Fetch the space id.
run
"""Run query through GoogleSearch and parse result.""" snippets = [] results = self._google_search_results(query, num=self.k) if len(results) == 0: return 'No good Google Search Result was found' for result in results: if 'snippet' in result: snippets.append(result['snippet']) return ' '.join(snippets)
def run(self, query: str) ->str: """Run query through GoogleSearch and parse result.""" snippets = [] results = self._google_search_results(query, num=self.k) if len(results) == 0: return 'No good Google Search Result was found' for result in results: if 'snippet' in result: ...
Run query through GoogleSearch and parse result.
test_few_shot_functionality
"""Test that few shot works with examples.""" prefix = 'This is a test about {content}.' suffix = 'Now you try to talk about {new_content}.' examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz', 'answer': 'foo'}] prompt = FewShotPromptTemplate(suffix=suffix, prefix=prefix, input_variables=['cont...
def test_few_shot_functionality() ->None: """Test that few shot works with examples.""" prefix = 'This is a test about {content}.' suffix = 'Now you try to talk about {new_content}.' examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz', 'answer': 'foo'}] prompt = FewShotPromp...
Test that few shot works with examples.
_get_prompt_and_tools
try: import pandas as pd pd.set_option('display.max_columns', None) except ImportError: raise ImportError( 'pandas package not found, please install with `pip install pandas`') if include_df_in_prompt is not None and suffix is not None: raise ValueError( 'If suffix is specified, include_...
def _get_prompt_and_tools(df: Any, prefix: Optional[str]=None, suffix: Optional[str]=None, input_variables: Optional[List[str]]=None, include_df_in_prompt: Optional[bool]=True, number_of_head_rows: int=5, extra_tools: Sequence[BaseTool]=()) ->Tuple[BasePromptTemplate, List[ BaseTool]]: try: ...
null
test_chat_ernie_bot
chat = ErnieBotChat() message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
def test_chat_ernie_bot() ->None: chat = ErnieBotChat() message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
null
test_configure_mappers
sqlalchemy.orm.configure_mappers()
def test_configure_mappers() ->None: sqlalchemy.orm.configure_mappers()
null
_check_all_feedback_passed
runs = list(client.list_runs(project_name=_project_name, execution_order=1)) if not runs: return feedback = list(client.list_feedback(run_ids=[run.id for run in runs])) if not feedback: return assert all([(f.score == 1) for f in feedback])
def _check_all_feedback_passed(_project_name: str, client: Client) ->None: runs = list(client.list_runs(project_name=_project_name, execution_order=1) ) if not runs: return feedback = list(client.list_feedback(run_ids=[run.id for run in runs])) if not feedback: return assert ...
null
test_sitemap_metadata_default
"""Test sitemap loader.""" loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml') documents = loader.load() assert len(documents) > 1 assert 'source' in documents[0].metadata assert 'loc' in documents[0].metadata
def test_sitemap_metadata_default() ->None: """Test sitemap loader.""" loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml') documents = loader.load() assert len(documents) > 1 assert 'source' in documents[0].metadata assert 'loc' in documents[0].metadata
Test sitemap loader.
test_from_texts
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.'] bm25_retriever = BM25Retriever.from_texts(texts=input_texts) assert len(bm25_retriever.docs) == 3 assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
@pytest.mark.requires('rank_bm25') def test_from_texts() ->None: input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.'] bm25_retriever = BM25Retriever.from_texts(texts=input_texts) assert len(bm25_retriever.docs) == 3 assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
null
_import_pinecone
from langchain_community.vectorstores.pinecone import Pinecone return Pinecone
def _import_pinecone() ->Any: from langchain_community.vectorstores.pinecone import Pinecone return Pinecone
null
_import_playwright_ExtractHyperlinksTool
from langchain_community.tools.playwright import ExtractHyperlinksTool return ExtractHyperlinksTool
def _import_playwright_ExtractHyperlinksTool() ->Any: from langchain_community.tools.playwright import ExtractHyperlinksTool return ExtractHyperlinksTool
null
test_parse_string_value
parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})')) actual = parsed.value assert actual == x[1:-1]
@pytest.mark.parametrize('x', ('""', '" "', '"foo"', "'foo'")) def test_parse_string_value(x: str) ->None: parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})')) actual = parsed.value assert actual == x[1:-1]
null
load_local
"""Load FAISS index, docstore, and index_to_docstore_id from disk. Args: folder_path: folder path to load index, docstore, and index_to_docstore_id from. embeddings: Embeddings to use when generating queries index_name: for saving with a specific index file n...
@classmethod def load_local(cls, folder_path: str, embeddings: Embeddings, index_name: str='index', **kwargs: Any) ->FAISS: """Load FAISS index, docstore, and index_to_docstore_id from disk. Args: folder_path: folder path to load index, docstore, and index_to_docstore_id fro...
Load FAISS index, docstore, and index_to_docstore_id from disk. Args: folder_path: folder path to load index, docstore, and index_to_docstore_id from. embeddings: Embeddings to use when generating queries index_name: for saving with a specific index file name asynchronous: whether to use async ...
test_sequential_bad_outputs
"""Test error is raised when bad outputs are specified.""" chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar']) chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz']) with pytest.raises(ValueError): SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'], output_...
def test_sequential_bad_outputs() ->None: """Test error is raised when bad outputs are specified.""" chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar']) chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz']) with pytest.raises(ValueError): SequentialChain(chain...
Test error is raised when bad outputs are specified.
test_single_input_correct
"""Test passing single input works.""" chain = FakeChain() output = chain('bar') assert output == {'foo': 'bar', 'bar': 'baz'}
def test_single_input_correct() ->None: """Test passing single input works.""" chain = FakeChain() output = chain('bar') assert output == {'foo': 'bar', 'bar': 'baz'}
Test passing single input works.
get_default_api_token
"""Gets the default Databricks personal access token. Raises an error if the token cannot be automatically determined. """ if (api_token := os.getenv('DATABRICKS_TOKEN')): return api_token try: api_token = get_repl_context().apiToken if not api_token: raise ValueError("context doesn't contai...
def get_default_api_token() ->str: """Gets the default Databricks personal access token. Raises an error if the token cannot be automatically determined. """ if (api_token := os.getenv('DATABRICKS_TOKEN')): return api_token try: api_token = get_repl_context().apiToken if not ...
Gets the default Databricks personal access token. Raises an error if the token cannot be automatically determined.
_similarity_search_with_relevance_scores
return self.similarity_search_with_score(query, k, **kwargs)
def _similarity_search_with_relevance_scores(self, query: str, k: int=4, ** kwargs: Any) ->List[Tuple[Document, float]]: return self.similarity_search_with_score(query, k, **kwargs)
null
test_generativeai_stream
llm = GoogleGenerativeAI(temperature=0, model='gemini-pro') outputs = list(llm.stream('Please say foo:')) assert isinstance(outputs[0], str)
def test_generativeai_stream() ->None: llm = GoogleGenerativeAI(temperature=0, model='gemini-pro') outputs = list(llm.stream('Please say foo:')) assert isinstance(outputs[0], str)
null
lazy_load
"""Load documents lazily with concurrent parsing.""" with concurrent.futures.ThreadPoolExecutor(max_workers=self.num_workers ) as executor: futures = {executor.submit(self.blob_parser.lazy_parse, blob) for blob in self.blob_loader.yield_blobs()} for future in concurrent.futures.as_completed(futures)...
def lazy_load(self) ->Iterator[Document]: """Load documents lazily with concurrent parsing.""" with concurrent.futures.ThreadPoolExecutor(max_workers=self.num_workers ) as executor: futures = {executor.submit(self.blob_parser.lazy_parse, blob) for blob in self.blob_loader.yield_blobs...
Load documents lazily with concurrent parsing.
add_embeddings
"""Add the given texts and embeddings to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Retu...
def add_embeddings(self, text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None, ** kwargs: Any) ->List[str]: """Add the given texts and embeddings to the vectorstore. Args: text_embeddings: Iterable pairs of string and em...
Add the given texts and embeddings to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the...
_on_retriever_start
"""Process the Retriever Run upon start."""
def _on_retriever_start(self, run: Run) ->None: """Process the Retriever Run upon start."""
Process the Retriever Run upon start.
pending
return [item for idx, item in enumerate(iterable) if idx not in results_map]
def pending(iterable: List[U]) ->List[U]: return [item for idx, item in enumerate(iterable) if idx not in results_map ]
null
_init_resp
return {k: None for k in self.callback_columns}
def _init_resp(self) ->Dict: return {k: None for k in self.callback_columns}
null
embed
payload = {'model': self.model, 'type': embed_type, 'texts': texts} headers = {'Authorization': f'Bearer {self.minimax_api_key.get_secret_value()}', 'Content-Type': 'application/json'} params = {'GroupId': self.minimax_group_id} response = requests.post(self.endpoint_url, params=params, headers=headers, jso...
def embed(self, texts: List[str], embed_type: str) ->List[List[float]]: payload = {'model': self.model, 'type': embed_type, 'texts': texts} headers = {'Authorization': f'Bearer {self.minimax_api_key.get_secret_value()}', 'Content-Type': 'application/json'} params = {'GroupId': self.minimax_g...
null
_import_metaphor_search
from langchain_community.utilities.metaphor_search import MetaphorSearchAPIWrapper return MetaphorSearchAPIWrapper
def _import_metaphor_search() ->Any: from langchain_community.utilities.metaphor_search import MetaphorSearchAPIWrapper return MetaphorSearchAPIWrapper
null
model
return Orca(llm=FakeLLM())
@pytest.fixture def model() ->Orca: return Orca(llm=FakeLLM())
null
_run
"""Use the tool.""" if action == 'push': self._check_params(path, text) if path: return self._pushFile(id, path) if text: return self._pushText(id, text) elif action == 'pull': return self._pull(id) return ''
def _run(self, action: str, id: str, path: Optional[str], text: Optional[ str], run_manager: Optional[CallbackManagerForToolRun]=None) ->str: """Use the tool.""" if action == 'push': self._check_params(path, text) if path: return self._pushFile(id, path) if text: ...
Use the tool.
test__convert_dict_to_message_system
message = {'role': 'system', 'content': 'foo'} result = convert_dict_to_message(message) expected_output = SystemMessage(content='foo') assert result == expected_output
def test__convert_dict_to_message_system() ->None: message = {'role': 'system', 'content': 'foo'} result = convert_dict_to_message(message) expected_output = SystemMessage(content='foo') assert result == expected_output
null
test_redis_from_texts_return_keys
"""Test from_texts_return_keys constructor.""" docsearch, keys = Redis.from_texts_return_keys(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL) output = docsearch.similarity_search('foo', k=1, return_metadata=False) assert output == TEST_SINGLE_RESULT assert len(keys) == len(texts) assert drop(docsearch.index_name...
def test_redis_from_texts_return_keys(texts: List[str]) ->None: """Test from_texts_return_keys constructor.""" docsearch, keys = Redis.from_texts_return_keys(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL) output = docsearch.similarity_search('foo', k=1, return_metadata=False) assert output =...
Test from_texts_return_keys constructor.
get_folders
""" Get all the folders for the team. """ url = f'{DEFAULT_URL}/team/' + str(self.team_id) + '/space' params = self.get_default_params() response = requests.get(url, headers=self.get_headers(), params=params) return {'response': response}
def get_folders(self) ->Dict: """ Get all the folders for the team. """ url = f'{DEFAULT_URL}/team/' + str(self.team_id) + '/space' params = self.get_default_params() response = requests.get(url, headers=self.get_headers(), params=params) return {'response': response}
Get all the folders for the team.
__init__
"""Initialize with file path.""" self.file = file super().__init__(mode=mode, **unstructured_kwargs)
def __init__(self, file: Union[IO, Sequence[IO]], mode: str='single', ** unstructured_kwargs: Any): """Initialize with file path.""" self.file = file super().__init__(mode=mode, **unstructured_kwargs)
Initialize with file path.
test__get_prompts_invalid
with pytest.raises(InputFormatError): _get_prompt(inputs)
@pytest.mark.parametrize('inputs', _INVALID_PROMPTS) def test__get_prompts_invalid(inputs: Dict[str, Any]) ->None: with pytest.raises(InputFormatError): _get_prompt(inputs)
null
test_add_texts
"""Test add_texts dataset.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] deeplake_datastore.add_texts(texts=texts, metadatas=metadatas) with pytest.raises(TypeError): deeplake_datastore.add_texts(texts=texts, metada=metadatas)
def test_add_texts(deeplake_datastore: DeepLake) ->None: """Test add_texts dataset.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] deeplake_datastore.add_texts(texts=texts, metadatas=metadatas) with pytest.raises(TypeError): deeplake_datastore.add_...
Test add_texts dataset.
test_json_distance_evaluator_parse_json
string = '{"a": 1}' result = json_distance_evaluator._parse_json(string) assert result == {'a': 1}
@pytest.mark.requires('rapidfuzz') def test_json_distance_evaluator_parse_json(json_distance_evaluator: JsonEditDistanceEvaluator) ->None: string = '{"a": 1}' result = json_distance_evaluator._parse_json(string) assert result == {'a': 1}
null
max_marginal_relevance_search
raise NotImplementedError
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int= 20, lambda_mult: float=0.5, **kwargs: Any) ->List[Document]: raise NotImplementedError
null
_get_docs
"""Get docs to run questioning over."""
@abstractmethod def _get_docs(self, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun) ->List[Document]: """Get docs to run questioning over."""
Get docs to run questioning over.
test_similarity_search_with_score
"""Test similarity search with score using Approximate Search.""" metadatas = [{'page': i} for i in range(len(texts))] docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), metadatas=metadatas, opensearch_url=DEFAULT_OPENSEARCH_URL) output = docsearch.similarity_search_with_score('foo', k=2) assert...
def test_similarity_search_with_score() ->None: """Test similarity search with score using Approximate Search.""" metadatas = [{'page': i} for i in range(len(texts))] docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), metadatas=metadatas, opensearch_url=DEFAULT_OPENSEARCH_URL) ...
Test similarity search with score using Approximate Search.
__init__
pass
def __init__(self, **kwargs: Any): pass
null
_run
return query
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: return query
null
on_tool_start
self._require_current_thought().on_tool_start(serialized, input_str, **kwargs) self._prune_old_thought_containers()
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, ** kwargs: Any) ->None: self._require_current_thought().on_tool_start(serialized, input_str, ** kwargs) self._prune_old_thought_containers()
null
on_llm_start
"""Save the prompts in memory when an LLM starts.""" if self.input_type != 'Text': raise ValueError( f""" Label Studio project "{self.project_name}" has an input type <{self.input_type}>. To make it work with the mode="chat", the input type should be <Text>. Read more here https://labelstud.io/tags/text""" ...
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], ** kwargs: Any) ->None: """Save the prompts in memory when an LLM starts.""" if self.input_type != 'Text': raise ValueError( f""" Label Studio project "{self.project_name}" has an input type <{self.input_type}>. To ma...
Save the prompts in memory when an LLM starts.
on_agent_finish_common
self.agent_ends += 1 self.ends += 1
def on_agent_finish_common(self) ->None: self.agent_ends += 1 self.ends += 1
null
sorted_values
"""Return a list of values in dict sorted by key.""" return [values[val] for val in sorted(values)]
def sorted_values(values: Dict[str, str]) ->List[Any]: """Return a list of values in dict sorted by key.""" return [values[val] for val in sorted(values)]
Return a list of values in dict sorted by key.
test_create_documents_with_metadata
"""Test create documents with metadata method.""" texts = ['foo bar', 'baz'] splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=0) docs = splitter.create_documents(texts, [{'source': '1'}, {'source': '2'}]) expected_docs = [Document(page_content='foo', metadata={'source': '1'}), Document(pa...
def test_create_documents_with_metadata() ->None: """Test create documents with metadata method.""" texts = ['foo bar', 'baz'] splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=0) docs = splitter.create_documents(texts, [{'source': '1'}, {'source': '2'}]) expected_d...
Test create documents with metadata method.
test_functionality
"""Test correct functionality.""" chain = PythonREPL() code = 'print(1 + 1)' output = chain.run(code) assert output == '2\n'
def test_functionality() ->None: """Test correct functionality.""" chain = PythonREPL() code = 'print(1 + 1)' output = chain.run(code) assert output == '2\n'
Test correct functionality.
get_images
""" Extract images. :param img_path: A string representing the path to the images. """ pil_images = [Image.open(os.path.join(img_path, image_name)) for image_name in os.listdir(img_path) if image_name.endswith('.jpg')] return pil_images
def get_images(img_path): """ Extract images. :param img_path: A string representing the path to the images. """ pil_images = [Image.open(os.path.join(img_path, image_name)) for image_name in os.listdir(img_path) if image_name.endswith('.jpg')] return pil_images
Extract images. :param img_path: A string representing the path to the images.
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Returns: ...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[str]]=None, text_key: str='text', batch_size: int=500, **kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to...
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. Returns: List of ids from adding the texts into the vect...
_get_next_response_in_sequence
queries = cast(Mapping, self.queries) response = queries[list(queries.keys())[self.response_index]] self.response_index = self.response_index + 1 return response
@property def _get_next_response_in_sequence(self) ->str: queries = cast(Mapping, self.queries) response = queries[list(queries.keys())[self.response_index]] self.response_index = self.response_index + 1 return response
null
_get
"""Method for getting from the AI Foundation Model Function API.""" last_inputs = {'url': invoke_url, 'headers': self.headers['call'], 'json': payload, 'stream': False} session = self.get_session_fn() last_response = session.get(**last_inputs) self._try_raise(last_response) return last_response, session
def _get(self, invoke_url: str, payload: dict={}) ->Tuple[Response, Any]: """Method for getting from the AI Foundation Model Function API.""" last_inputs = {'url': invoke_url, 'headers': self.headers['call'], 'json': payload, 'stream': False} session = self.get_session_fn() last_response = sessi...
Method for getting from the AI Foundation Model Function API.
_generate_documents_to_add
from zep_python.document import Document as ZepDocument embeddings = None if self._collection and self._collection.is_auto_embedded: if self._embedding is not None: warnings.warn( """The collection is set to auto-embed and an embedding function is present. Ignoring the embedding...
def _generate_documents_to_add(self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]]=None, document_ids: Optional[List[str]]=None ) ->List[ZepDocument]: from zep_python.document import Document as ZepDocument embeddings = None if self._collection and self._collection.is_auto_embedded...
null
test_prompt_empty_input_variable
"""Test error is raised when empty string input variable.""" with pytest.raises(ValueError): PromptTemplate(input_variables=[''], template='{}', validate_template=True)
def test_prompt_empty_input_variable() ->None: """Test error is raised when empty string input variable.""" with pytest.raises(ValueError): PromptTemplate(input_variables=[''], template='{}', validate_template=True)
Test error is raised when empty string input variable.
lower_case_name
v = v.lower() return v
@validator('name') def lower_case_name(cls, v: str) ->str: v = v.lower() return v
null
test_candidates
model = ChatVertexAI(model_name='chat-bison@001', temperature=0.3, n=2) message = HumanMessage(content='Hello') response = model.generate(messages=[[message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 1 assert len(response.generations[0]) == 2
@pytest.mark.xfail @pytest.mark.scheduled def test_candidates() ->None: model = ChatVertexAI(model_name='chat-bison@001', temperature=0.3, n=2) message = HumanMessage(content='Hello') response = model.generate(messages=[[message]]) assert isinstance(response, LLMResult) assert len(response.generatio...
null
_import_annoy
from langchain_community.vectorstores.annoy import Annoy return Annoy
def _import_annoy() ->Any: from langchain_community.vectorstores.annoy import Annoy return Annoy
null
_get_tools_requests_post
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_post() ->BaseTool: return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
null
_identifying_params
"""Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return {**{'gradient_api_url': self.gradient_api_url}, **{'model_kwargs': _model_kwargs}}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return {**{'gradient_api_url': self.gradient_api_url}, **{ 'model_kwargs': _model_kwargs}}
Get the identifying parameters.
test_scann_with_metadatas_and_list_filter
texts = ['foo', 'bar', 'baz', 'foo', 'qux'] metadatas = [({'page': i} if i <= 3 else {'page': 3}) for i in range(len( texts))] docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]: Document(page_content='foo', metadata...
def test_scann_with_metadatas_and_list_filter() ->None: texts = ['foo', 'bar', 'baz', 'foo', 'qux'] metadatas = [({'page': i} if i <= 3 else {'page': 3}) for i in range( len(texts))] docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore({...
null
_validate_tools
validate_tools_single_input(cls.__name__, tools) super()._validate_tools(tools) if len(tools) != 1: raise ValueError(f'Exactly one tool must be specified, but got {tools}') tool_names = {tool.name for tool in tools} if tool_names != {'Intermediate Answer'}: raise ValueError( f'Tool name should be Interm...
@classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) ->None: validate_tools_single_input(cls.__name__, tools) super()._validate_tools(tools) if len(tools) != 1: raise ValueError(f'Exactly one tool must be specified, but got {tools}' ) tool_names = {tool.name for tool in t...
null
_import_hologres
from langchain_community.vectorstores.hologres import Hologres return Hologres
def _import_hologres() ->Any: from langchain_community.vectorstores.hologres import Hologres return Hologres
null
on_chain_end_common
self.chain_ends += 1 self.ends += 1
def on_chain_end_common(self) ->None: self.chain_ends += 1 self.ends += 1
null
__del__
try: self.session_pool.close() except Exception as e: logger.warning(f'Could not close session pool. Error: {e}')
def __del__(self) ->None: try: self.session_pool.close() except Exception as e: logger.warning(f'Could not close session pool. Error: {e}')
null
test_l2
"""Test Flat L2 distance.""" texts = ['foo', 'bar', 'baz'] docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric='l2_sq') output = docsearch.similarity_search_with_score('far', k=2) _, score = output[1] assert score == 1.0
def test_l2() ->None: """Test Flat L2 distance.""" texts = ['foo', 'bar', 'baz'] docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric='l2_sq') output = docsearch.similarity_search_with_score('far', k=2) _, score = output[1] assert score == 1.0
Test Flat L2 distance.
preview_as_str
"""Same as preview, but returns a stringified version of the JSON for insertting back into an LLM.""" data = self.preview(*args, **kwargs) return json.dumps(data)
def preview_as_str(self, *args, **kwargs) ->str: """Same as preview, but returns a stringified version of the JSON for inserting back into an LLM.""" data = self.preview(*args, **kwargs) return json.dumps(data)
Same as preview, but returns a stringified version of the JSON for inserting back into an LLM.
_run
"""Get the schema for tables in a comma-separated list.""" return self.powerbi.get_table_info(tool_input.split(', '))
def _run(self, tool_input: str, run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: """Get the schema for tables in a comma-separated list.""" return self.powerbi.get_table_info(tool_input.split(', '))
Get the schema for tables in a comma-separated list.
_create_table
""" Create VectorStore Table Args: dim:dimension of vector fields_list: the field you want to store Return: code,0 for success,1 for failed """ type_dict = {'int': vearch.dataType.INT, 'str': vearch.dataType.STRING} engine_info = {'index_size': 10000, ...
def _create_table(self, dim: int=1024, field_list: List[dict]=[{'field': 'text', 'type': 'str'}, {'field': 'metadata', 'type': 'str'}]) ->int: """ Create VectorStore Table Args: dim:dimension of vector fields_list: the field you want to store Return: c...
Create VectorStore Table Args: dim:dimension of vector fields_list: the field you want to store Return: code,0 for success,1 for failed
_import_timescalevector
from langchain_community.vectorstores.timescalevector import TimescaleVector return TimescaleVector
def _import_timescalevector() ->Any: from langchain_community.vectorstores.timescalevector import TimescaleVector return TimescaleVector
null
on_retriever_start
"""Run when retriever starts running.""" if run_id is None: run_id = uuid.uuid4() handle_event(self.handlers, 'on_retriever_start', 'ignore_retriever', serialized, query, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs) return CallbackManagerForRetrieverRun(...
def on_retriever_start(self, serialized: Dict[str, Any], query: str, run_id: Optional[UUID]=None, parent_run_id: Optional[UUID]=None, **kwargs: Any ) ->CallbackManagerForRetrieverRun: """Run when retriever starts running.""" if run_id is None: run_id = uuid.uuid4() handle_event(self.handlers...
Run when retriever starts running.
test_add_messages
file_chat_message_history.add_user_message('Hello!') file_chat_message_history.add_ai_message('Hi there!') messages = file_chat_message_history.messages assert len(messages) == 2 assert isinstance(messages[0], HumanMessage) assert isinstance(messages[1], AIMessage) assert messages[0].content == 'Hello!' assert messages...
def test_add_messages(file_chat_message_history: FileChatMessageHistory ) ->None: file_chat_message_history.add_user_message('Hello!') file_chat_message_history.add_ai_message('Hi there!') messages = file_chat_message_history.messages assert len(messages) == 2 assert isinstance(messages[0], Huma...
null
_filter_to_metadata
if filter_dict is None: return {} else: return {f'metadata.{mdk}': mdv for mdk, mdv in filter_dict.items()}
@staticmethod def _filter_to_metadata(filter_dict: Optional[Dict[str, str]]) ->Dict[str, Any ]: if filter_dict is None: return {} else: return {f'metadata.{mdk}': mdv for mdk, mdv in filter_dict.items()}
null
test_awadb_with_metadatas_with_scores
"""Test end to end construction and scored search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas) output = docsearch.similarity_search_with_score('foo', k=1...
def test_awadb_with_metadatas_with_scores() ->None: """Test end to end construction and scored search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts, embedding=FakeEmbeddings(), metadat...
Test end to end construction and scored search.
next_thought
if thoughts_path not in self.tot_memory or not self.tot_memory[thoughts_path]: new_thoughts = self.predict_and_parse(problem_description= problem_description, thoughts=thoughts_path, n=self.c, **kwargs) if not new_thoughts: return '' if isinstance(new_thoughts, list): self.tot_memory...
def next_thought(self, problem_description: str, thoughts_path: Tuple[str, ...]=(), **kwargs: Any) ->str: if thoughts_path not in self.tot_memory or not self.tot_memory[ thoughts_path]: new_thoughts = self.predict_and_parse(problem_description= problem_description, thoughts=thoughts_...
null
parse
last_line = text.split('\n')[-1] if not any([(follow in last_line) for follow in self.followups]): if self.finish_string not in last_line: raise OutputParserException(f'Could not parse output: {text}') return AgentFinish({'output': last_line[len(self.finish_string):]}, text) after_colon = text.split(':'...
def parse(self, text: str) ->Union[AgentAction, AgentFinish]: last_line = text.split('\n')[-1] if not any([(follow in last_line) for follow in self.followups]): if self.finish_string not in last_line: raise OutputParserException(f'Could not parse output: {text}') return AgentFinish({...
null
from_documents
"""Construct BESVectorStore wrapper from documents. Args: documents: List of documents to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inferenc...
@classmethod def from_documents(cls, documents: List[Document], embedding: Optional[ Embeddings]=None, **kwargs: Any) ->'BESVectorStore': """Construct BESVectorStore wrapper from documents. Args: documents: List of documents to add to the Elasticsearch index. embedding: Embeddin...
Construct BESVectorStore wrapper from documents. Args: documents: List of documents to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inference. kwargs: create index key words argum...
_embed_documents
"""Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return client.encode(*args, **kwargs)
def _embed_documents(client: Any, *args: Any, **kwargs: Any) ->List[List[float] ]: """Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch. """ return client.encode(*args, **kwargs)
Inference function to send to the remote hardware. Accepts a sentence_transformer model_id and returns a list of embeddings for each document in the batch.
test_multi_vector_retriever_initialization
vectorstore = InMemoryVectorstoreWithSearch() retriever = MultiVectorRetriever(vectorstore=vectorstore, docstore= InMemoryStore(), doc_id='doc_id') documents = [Document(page_content='test document', metadata={'doc_id': '1'})] retriever.vectorstore.add_documents(documents, ids=['1']) retriever.docstore.mset(list(zi...
def test_multi_vector_retriever_initialization() ->None: vectorstore = InMemoryVectorstoreWithSearch() retriever = MultiVectorRetriever(vectorstore=vectorstore, docstore= InMemoryStore(), doc_id='doc_id') documents = [Document(page_content='test document', metadata={'doc_id': '1'})] retr...
null
test_person
p = Person(secret='hello') assert dumps(p, pretty=True) == snapshot sp = SpecialPerson(another_secret='Wooo', secret='Hmm') assert dumps(sp, pretty=True) == snapshot assert Person.lc_id() == ['tests', 'unit_tests', 'load', 'test_dump', 'Person']
def test_person(snapshot: Any) ->None: p = Person(secret='hello') assert dumps(p, pretty=True) == snapshot sp = SpecialPerson(another_secret='Wooo', secret='Hmm') assert dumps(sp, pretty=True) == snapshot assert Person.lc_id() == ['tests', 'unit_tests', 'load', 'test_dump', 'Person']
null
_message_from_dict
_type = message['type'] if _type == 'human': return HumanMessage(**message['data']) elif _type == 'ai': return AIMessage(**message['data']) elif _type == 'system': return SystemMessage(**message['data']) elif _type == 'chat': return ChatMessage(**message['data']) elif _type == 'function': return Fun...
def _message_from_dict(message: dict) ->BaseMessage: _type = message['type'] if _type == 'human': return HumanMessage(**message['data']) elif _type == 'ai': return AIMessage(**message['data']) elif _type == 'system': return SystemMessage(**message['data']) elif _type == 'chat...
null
lazy_load
"""Lazy load records from dataframe.""" crs_str = self.data_frame.crs.to_string() if self.data_frame.crs else None geometry_type = self.data_frame.geometry.geom_type.iloc[0] for _, row in self.data_frame.iterrows(): geom = row[self.page_content_column] xmin, ymin, xmax, ymax = geom.bounds metadata = row.to_...
def lazy_load(self) ->Iterator[Document]: """Lazy load records from dataframe.""" crs_str = self.data_frame.crs.to_string() if self.data_frame.crs else None geometry_type = self.data_frame.geometry.geom_type.iloc[0] for _, row in self.data_frame.iterrows(): geom = row[self.page_content_column] ...
Lazy load records from dataframe.
test_pai_eas_v2_streaming
llm = PaiEasEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'), eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), version='2.0') generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=['.']) stream_results_string = '' assert isinstance(generator, Generator) for chunk in generator: assert isi...
def test_pai_eas_v2_streaming() ->None: llm = PaiEasEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'), eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), version='2.0') generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop =['.']) stream_results_string = '' assert is...
null
default_loader_func
return UnstructuredFileLoader(file_path)
def default_loader_func(file_path: str) ->BaseLoader: return UnstructuredFileLoader(file_path)
null
test_get_layer_properties_with_description
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, lyr_desc= 'Custom Description') props = loader._get_layer_properties('Custom Description') assert props['layer_description'] == 'Custom Description'
def test_get_layer_properties_with_description(arcgis_mocks, mock_feature_layer, mock_gis): loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, lyr_desc= 'Custom Description') props = loader._get_layer_properties('Custom Description') assert props['layer_description'] == 'Custom Descri...
null
encode_strip_start_and_stop_token_ids
return self._encode(text)[1:-1]
def encode_strip_start_and_stop_token_ids(text: str) ->List[int]: return self._encode(text)[1:-1]
null
max_marginal_relevance_search_with_score_by_vector
"""Return docs and their similarity scores selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Numb...
def max_marginal_relevance_search_with_score_by_vector(self, embedding: List[float], *, k: int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[Dict[str, Any]]=None, **kwargs: Any) ->List[Tuple[ Document, float]]: """Return docs and their similarity scores selected using the maximal marginal...
Return docs and their similarity scores selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch...
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. b...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[str]]=None, batch_size: int=16, ttl_seconds: Optional[int]=None, **kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str...
Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. batch_size (int): Number of concurrent reque...
lc_secrets
"""A map of constructor argument names to secret ids. For example, {"openai_api_key": "OPENAI_API_KEY"} """ return dict()
@property def lc_secrets(self) ->Dict[str, str]: """A map of constructor argument names to secret ids. For example, {"openai_api_key": "OPENAI_API_KEY"} """ return dict()
A map of constructor argument names to secret ids. For example, {"openai_api_key": "OPENAI_API_KEY"}
json
return self.json_data
def json(self) ->Dict: return self.json_data
null