method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
visit_comparison
comparator = self._format_func(comparison.comparator) processed_value = process_value(comparison.value) attribute = comparison.attribute return '( ' + 'doc.' + attribute + ' ' + comparator + ' ' + processed_value + ' )'
def visit_comparison(self, comparison: Comparison) ->str: comparator = self._format_func(comparison.comparator) processed_value = process_value(comparison.value) attribute = comparison.attribute return ('( ' + 'doc.' + attribute + ' ' + comparator + ' ' + processed_value + ' )')
null
test_comet_tracer__trace_chain_with_single_span__happyflow
chain_module_mock = mock.Mock() chain_instance_mock = mock.Mock() chain_module_mock.Chain.return_value = chain_instance_mock span_module_mock = mock.Mock() span_instance_mock = mock.MagicMock() span_instance_mock.__api__start__ = mock.Mock() span_instance_mock.__api__end__ = mock.Mock() span_module_mock.Span.return_val...
def test_comet_tracer__trace_chain_with_single_span__happyflow() ->None: chain_module_mock = mock.Mock() chain_instance_mock = mock.Mock() chain_module_mock.Chain.return_value = chain_instance_mock span_module_mock = mock.Mock() span_instance_mock = mock.MagicMock() span_instance_mock.__api__sta...
null
batch
configs = get_config_list(config, len(inputs)) prepared = [self._prepare(c) for c in configs] if all(p is self.default for p, _ in prepared): return self.default.batch(inputs, [c for _, c in prepared], return_exceptions=return_exceptions, **kwargs) if not inputs: return [] def invoke(prepared: Tuple[Run...
def batch(self, inputs: List[Input], config: Optional[Union[RunnableConfig, List[RunnableConfig]]]=None, *, return_exceptions: bool=False, **kwargs: Optional[Any]) ->List[Output]: configs = get_config_list(config, len(inputs)) prepared = [self._prepare(c) for c in configs] if all(p is self.default f...
null
lc_secrets
return {'openai_api_key': 'OPENAI_API_KEY'}
@property def lc_secrets(self) ->Dict[str, str]: return {'openai_api_key': 'OPENAI_API_KEY'}
null
input_keys
"""Expect input key. :meta private: """ return [self.instructions_key]
@property def input_keys(self) ->List[str]: """Expect input key. :meta private: """ return [self.instructions_key]
Expect input key. :meta private:
embed_query
"""Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = [self.query_instruction, text] embedding = self.client(self.pipeline_ref, [instruction_pair])[0] return embedding.toli...
def embed_query(self, text: str) ->List[float]: """Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text. """ instruction_pair = [self.query_instruction, text] embedding = self.client(sel...
Compute query embeddings using a HuggingFace instruct model. Args: text: The text to embed. Returns: Embeddings for the text.
process_pages
"""Process a list of pages into a list of documents.""" docs = [] for page in pages: if not include_restricted_content and not self.is_public_page(page): continue doc = self.process_page(page, include_attachments, include_comments, content_format, ocr_languages=ocr_languages, keep_markdown_forma...
def process_pages(self, pages: List[dict], include_restricted_content: bool, include_attachments: bool, include_comments: bool, content_format: ContentFormat, ocr_languages: Optional[str]=None, keep_markdown_format: Optional[bool]=False, keep_newlines: bool=False) ->List[Document]: """Process a list of ...
Process a list of pages into a list of documents.
test_continue_on_failure_false
"""Test exception is raised when continue_on_failure=False.""" loader = RSSFeedLoader(['badurl.foobar'], continue_on_failure=False) with pytest.raises(Exception): loader.load()
@pytest.mark.requires('feedparser', 'newspaper') def test_continue_on_failure_false() ->None: """Test exception is raised when continue_on_failure=False.""" loader = RSSFeedLoader(['badurl.foobar'], continue_on_failure=False) with pytest.raises(Exception): loader.load()
Test exception is raised when continue_on_failure=False.
test_datetime_output_parser_parse
parser = DatetimeOutputParser() date = datetime.now() datestr = date.strftime(parser.format) result = parser.parse_folder(datestr) assert result == date parser.format = '%Y-%m-%dT%H:%M:%S' date = datetime.now() datestr = date.strftime(parser.format) result = parser.parse_folder(datestr) assert result.year == date.year ...
def test_datetime_output_parser_parse() ->None: parser = DatetimeOutputParser() date = datetime.now() datestr = date.strftime(parser.format) result = parser.parse_folder(datestr) assert result == date parser.format = '%Y-%m-%dT%H:%M:%S' date = datetime.now() datestr = date.strftime(parse...
null
texts
return ['foo', 'bar', 'baz']
@pytest.fixture def texts() ->List[str]: return ['foo', 'bar', 'baz']
null
_llm_type
"""Return type of chat model.""" return 'openai-chat'
@property def _llm_type(self) ->str: """Return type of chat model.""" return 'openai-chat'
Return type of chat model.
_generate
if self.streaming: stream_iter = self._stream(messages=messages, stop=stop, run_manager= run_manager, **kwargs) return generate_from_stream(stream_iter) res = self._chat(messages, **kwargs) response = res.json() if response.get('code') != 0: raise ValueError(f'Error from Baichuan api response: {resp...
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->ChatResult: if self.streaming: stream_iter = self._stream(messages=messages, stop=stop, run_manager=run_manager, **kwargs) ret...
null
test__convert_message_to_dict_ai
message = AIMessage(content='foo') result = _convert_message_to_dict(message) expected_output = {'role': 'assistant', 'content': 'foo'} assert result == expected_output
def test__convert_message_to_dict_ai() ->None: message = AIMessage(content='foo') result = _convert_message_to_dict(message) expected_output = {'role': 'assistant', 'content': 'foo'} assert result == expected_output
null
test_alibabacloud_opensearch_delete_doc
opensearch = create_alibabacloud_opensearch() delete_result = opensearch.delete_documents_with_texts(['bar']) assert delete_result time.sleep(1) search_result = opensearch.similarity_search(query='bar', search_filter={ 'int_field': 2}, k=1) assert len(search_result) == 0
def test_alibabacloud_opensearch_delete_doc() ->None: opensearch = create_alibabacloud_opensearch() delete_result = opensearch.delete_documents_with_texts(['bar']) assert delete_result time.sleep(1) search_result = opensearch.similarity_search(query='bar', search_filter ={'int_field': 2}, k=...
null
assert_docs
for doc in docs: assert doc.page_content assert doc.metadata main_meta = {'title', 'summary', 'source'} assert set(doc.metadata).issuperset(main_meta) if all_meta: assert len(set(doc.metadata)) > len(main_meta) else: assert len(set(doc.metadata)) == len(main_meta)
def assert_docs(docs: List[Document], all_meta: bool=False) ->None: for doc in docs: assert doc.page_content assert doc.metadata main_meta = {'title', 'summary', 'source'} assert set(doc.metadata).issuperset(main_meta) if all_meta: assert len(set(doc.metadata)) > ...
null
test_python_ast_repl_one_line_print
program = 'print("The square of {} is {:.2f}".format(3, 3**2))' tool = PythonAstREPLTool() assert tool.run(program) == 'The square of 3 is 9.00\n'
@pytest.mark.skipif(sys.version_info < (3, 9), reason= 'Requires python version >= 3.9 to run.') def test_python_ast_repl_one_line_print() ->None: program = 'print("The square of {} is {:.2f}".format(3, 3**2))' tool = PythonAstREPLTool() assert tool.run(program) == 'The square of 3 is 9.00\n'
null
_speech2text
try: import azure.cognitiveservices.speech as speechsdk except ImportError: pass audio_src_type = detect_file_src_type(audio_path) if audio_src_type == 'local': audio_config = speechsdk.AudioConfig(filename=audio_path) elif audio_src_type == 'remote': tmp_audio_path = download_audio_from_url(audio_path)...
def _speech2text(self, audio_path: str, speech_language: str) ->str: try: import azure.cognitiveservices.speech as speechsdk except ImportError: pass audio_src_type = detect_file_src_type(audio_path) if audio_src_type == 'local': audio_config = speechsdk.AudioConfig(filename=audi...
null
test_get_nfts_with_pagination
contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c' startToken = ( '0x0000000000000000000000000000000000000000000000000000000000000077') result = BlockchainDocumentLoader(contract_address, BlockchainType. ETH_MAINNET, api_key=apiKey, startToken=startToken).load() print('Tokens returned for contract ...
@pytest.mark.skipif(not alchemyKeySet, reason='Alchemy API key not provided.') def test_get_nfts_with_pagination() ->None: contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c' startToken = ( '0x0000000000000000000000000000000000000000000000000000000000000077') result = BlockchainDocumentL...
null
create_client
if values.get('client') is not None: return values try: import boto3 if values.get('credentials_profile_name'): session = boto3.Session(profile_name=values['credentials_profile_name'] ) else: session = boto3.Session() client_params = {} if values.get('region_name'): ...
@root_validator(pre=True) def create_client(cls, values: Dict[str, Any]) ->Dict[str, Any]: if values.get('client') is not None: return values try: import boto3 if values.get('credentials_profile_name'): session = boto3.Session(profile_name=values[ 'credentials...
null
test_run_arg_with_memory
"""Test run method works when arg is passed.""" chain = FakeChain(the_input_keys=['foo', 'baz'], memory=FakeMemory()) chain.run('bar')
def test_run_arg_with_memory() ->None: """Test run method works when arg is passed.""" chain = FakeChain(the_input_keys=['foo', 'baz'], memory=FakeMemory()) chain.run('bar')
Test run method works when arg is passed.
add_ai_message
"""Convenience method for adding an AI message string to the store. Args: message: The AI message to add. """ if isinstance(message, AIMessage): self.add_message(message) else: self.add_message(AIMessage(content=message))
def add_ai_message(self, message: Union[AIMessage, str]) ->None: """Convenience method for adding an AI message string to the store. Args: message: The AI message to add. """ if isinstance(message, AIMessage): self.add_message(message) else: self.add_message(AIMe...
Convenience method for adding an AI message string to the store. Args: message: The AI message to add.
test_add_texts
index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = DatabricksVectorSearch(index, embedding= DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN) ids = [idx for idx, i in enumerate(fake_texts)] vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts) added_ids = vectorsearch.add_texts(fake_texts, ids=i...
@pytest.mark.requires('databricks', 'databricks.vector_search') def test_add_texts() ->None: index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = DatabricksVectorSearch(index, embedding= DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN) ids = [idx for idx, i in enumerate(fake_texts)] v...
null
validate_environment
"""Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key', 'AZURE_COGS_KEY') azure_cogs_region = get_from_dict_or_env(values, 'azure_cogs_region', 'AZURE_COGS_REGION') try: import azure.cognitiveservices.speech as speechsdk values['spe...
@root_validator(pre=True) def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key', 'AZURE_COGS_KEY') azure_cogs_region = get_from_dict_or_env(values, 'azure_cogs_region', ...
Validate that api key and endpoint exists in environment.
test_pandas_output_parser_row_no_array
try: parser.parse_folder('row:5') assert False, 'Should have raised OutputParserException' except OutputParserException: assert True
def test_pandas_output_parser_row_no_array() ->None: try: parser.parse_folder('row:5') assert False, 'Should have raised OutputParserException' except OutputParserException: assert True
null
_convert_delta_to_message_chunk
role = _dict.get('role') content = _dict.get('content') or '' additional_kwargs: Dict = {} if _dict.get('function_call'): function_call = dict(_dict['function_call']) if 'name' in function_call and function_call['name'] is None: function_call['name'] = '' additional_kwargs['function_call'] = functio...
def _convert_delta_to_message_chunk(_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]) ->BaseMessageChunk: role = _dict.get('role') content = _dict.get('content') or '' additional_kwargs: Dict = {} if _dict.get('function_call'): function_call = dict(_dict['function_call']) ...
null
_import_reddit_search_RedditSearchRun
from langchain_community.tools.reddit_search.tool import RedditSearchRun return RedditSearchRun
def _import_reddit_search_RedditSearchRun() ->Any: from langchain_community.tools.reddit_search.tool import RedditSearchRun return RedditSearchRun
null
append_to_last_tokens
self.last_tokens.append(token) self.last_tokens_stripped.append(token.strip()) if len(self.last_tokens) > len(self.answer_prefix_tokens): self.last_tokens.pop(0) self.last_tokens_stripped.pop(0)
def append_to_last_tokens(self, token: str) ->None: self.last_tokens.append(token) self.last_tokens_stripped.append(token.strip()) if len(self.last_tokens) > len(self.answer_prefix_tokens): self.last_tokens.pop(0) self.last_tokens_stripped.pop(0)
null
clear
"""Nothing to clear, got a memory like a vault.""" pass
def clear(self) ->None: """Nothing to clear, got a memory like a vault.""" pass
Nothing to clear, got a memory like a vault.
_import_nlp_engine_provider
try: from presidio_analyzer.nlp_engine import NlpEngineProvider except ImportError as e: raise ImportError( 'Could not import presidio_analyzer, please install with `pip install presidio-analyzer`. You will also need to download a spaCy model to use the analyzer, e.g. `python -m spacy download en_core_w...
def _import_nlp_engine_provider() ->'NlpEngineProvider': try: from presidio_analyzer.nlp_engine import NlpEngineProvider except ImportError as e: raise ImportError( 'Could not import presidio_analyzer, please install with `pip install presidio-analyzer`. You will also need to downloa...
null
on_llm_new_token_common
self.llm_streams += 1
def on_llm_new_token_common(self) ->None: self.llm_streams += 1
null
_import_sql_database
from langchain_community.utilities.sql_database import SQLDatabase return SQLDatabase
def _import_sql_database() ->Any: from langchain_community.utilities.sql_database import SQLDatabase return SQLDatabase
null
on_llm_new_token
"""Run when LLM generates a new token.""" self.step += 1 self.llm_streams += 1
def on_llm_new_token(self, token: str, **kwargs: Any) ->None: """Run when LLM generates a new token.""" self.step += 1 self.llm_streams += 1
Run when LLM generates a new token.
test_add_documents_with_ids
"""Test end to end construction and search with scores and IDs.""" from momento.responses.vector_index import Search texts = ['apple', 'orange', 'hammer'] ids = [random_string() for _ in range(len(texts))] metadatas = [{'page': f'{i}'} for i in range(len(texts))] stored_ids = vector_store.add_texts(texts, metadatas, id...
def test_add_documents_with_ids(vector_store: MomentoVectorIndex) ->None: """Test end to end construction and search with scores and IDs.""" from momento.responses.vector_index import Search texts = ['apple', 'orange', 'hammer'] ids = [random_string() for _ in range(len(texts))] metadatas = [{'page'...
Test end to end construction and search with scores and IDs.
test_pdfplumber_parser
"""Test PDFPlumber parser.""" _assert_with_parser(PDFPlumberParser()) _assert_with_duplicate_parser(PDFPlumberParser()) _assert_with_duplicate_parser(PDFPlumberParser(dedupe=True), dedupe=True)
def test_pdfplumber_parser() ->None: """Test PDFPlumber parser.""" _assert_with_parser(PDFPlumberParser()) _assert_with_duplicate_parser(PDFPlumberParser()) _assert_with_duplicate_parser(PDFPlumberParser(dedupe=True), dedupe=True)
Test PDFPlumber parser.
create_schema
"""Create the database schema for the record manager."""
@abstractmethod def create_schema(self) ->None: """Create the database schema for the record manager."""
Create the database schema for the record manager.
__init__
super().__init__() self.dimension = dimension
def __init__(self, dimension: int=DEFAULT_VECTOR_DIMENSION): super().__init__() self.dimension = dimension
null
_type
return 'tot_llm_checker_output'
@property def _type(self) ->str: return 'tot_llm_checker_output'
null
test_visit_structured_query
query = 'What is the capital of France?' operation = Operation(operator=Operator.AND, arguments=[Comparison( comparator=Comparator.EQ, attribute='foo', value='20'), Operation( operator=Operator.OR, arguments=[Comparison(comparator=Comparator.LTE, attribute='bar', value=7), Comparison(comparator=Comparator.L...
def test_visit_structured_query() ->None: query = 'What is the capital of France?' operation = Operation(operator=Operator.AND, arguments=[Comparison( comparator=Comparator.EQ, attribute='foo', value='20'), Operation( operator=Operator.OR, arguments=[Comparison(comparator=Comparator. LTE...
null
_invocation_params
params = {**self._default_params, **kwargs} params['stop_sequences'] = params['stop_sequences'] + (runtime_stop or []) return params
def _invocation_params(self, runtime_stop: Optional[List[str]], **kwargs: Any ) ->Dict[str, Any]: params = {**self._default_params, **kwargs} params['stop_sequences'] = params['stop_sequences'] + (runtime_stop or []) return params
null
_get_default_embeddings
"""This function returns the default embedding. Returns: Default TensorflowHubEmbeddings to use. """ from langchain_community.embeddings import TensorflowHubEmbeddings return TensorflowHubEmbeddings()
@classmethod def _get_default_embeddings(cls) ->'TensorflowHubEmbeddings': """This function returns the default embedding. Returns: Default TensorflowHubEmbeddings to use. """ from langchain_community.embeddings import TensorflowHubEmbeddings return TensorflowHubEmbeddings()
This function returns the default embedding. Returns: Default TensorflowHubEmbeddings to use.
pipe
"""Compose this runnable with another object to create a RunnableSequence.""" return RunnableSequence(self, *others, name=name)
def pipe(self, *others: Union[Runnable[Any, Other], Callable[[Any], Other]], name: Optional[str]=None) ->RunnableSerializable[Input, Other]: """Compose this runnable with another object to create a RunnableSequence.""" return RunnableSequence(self, *others, name=name)
Compose this runnable with another object to create a RunnableSequence.
test_daxquery
from azure.identity import DefaultAzureCredential DATASET_ID = get_from_env('', 'POWERBI_DATASET_ID') TABLE_NAME = get_from_env('', 'POWERBI_TABLE_NAME') NUM_ROWS = get_from_env('', 'POWERBI_NUMROWS') fast_llm = ChatOpenAI(temperature=0.5, max_tokens=1000, model_name= 'gpt-3.5-turbo', verbose=True) smart_llm = Chat...
@pytest.mark.skipif(not azure_installed(), reason='requires azure package') def test_daxquery() ->None: from azure.identity import DefaultAzureCredential DATASET_ID = get_from_env('', 'POWERBI_DATASET_ID') TABLE_NAME = get_from_env('', 'POWERBI_TABLE_NAME') NUM_ROWS = get_from_env('', 'POWERBI_NUMROWS')...
null
_split_sources
"""Split sources from answer.""" if re.search('SOURCES?:', answer, re.IGNORECASE): answer, sources = re.split('SOURCES?:|QUESTION:\\s', answer, flags=re. IGNORECASE)[:2] sources = re.split('\\n', sources)[0].strip() else: sources = '' return answer, sources
def _split_sources(self, answer: str) ->Tuple[str, str]: """Split sources from answer.""" if re.search('SOURCES?:', answer, re.IGNORECASE): answer, sources = re.split('SOURCES?:|QUESTION:\\s', answer, flags= re.IGNORECASE)[:2] sources = re.split('\\n', sources)[0].strip() else: ...
Split sources from answer.
lazy_load
"""Lazy load Documents from table.""" result = [] if self.filter == 'normal_transaction': result = self.getNormTx() elif self.filter == 'internal_transaction': result = self.getInternalTx() elif self.filter == 'erc20_transaction': result = self.getERC20Tx() elif self.filter == 'eth_balance': result = se...
def lazy_load(self) ->Iterator[Document]: """Lazy load Documents from table.""" result = [] if self.filter == 'normal_transaction': result = self.getNormTx() elif self.filter == 'internal_transaction': result = self.getInternalTx() elif self.filter == 'erc20_transaction': res...
Lazy load Documents from table.
__eq__
"""Create a RedisTag equality filter expression. Args: other (Union[List[str], Set[str], Tuple[str], str]): The tag(s) to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisTag >>> filter = RedisTag("brand") == "nike" ...
@check_operator_misuse def __eq__(self, other: Union[List[str], Set[str], Tuple[str], str] ) ->'RedisFilterExpression': """Create a RedisTag equality filter expression. Args: other (Union[List[str], Set[str], Tuple[str], str]): The tag(s) to filter on. Example: ...
Create a RedisTag equality filter expression. Args: other (Union[List[str], Set[str], Tuple[str], str]): The tag(s) to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisTag >>> filter = RedisTag("brand") == "nike"
query
g = self.client.gremlin() res = g.exec(query) return res['data']
def query(self, query: str) ->List[Dict[str, Any]]: g = self.client.gremlin() res = g.exec(query) return res['data']
null
configurable_fields
return self.__class__(which=self.which, default=self.default. configurable_fields(**kwargs), alternatives=self.alternatives)
def configurable_fields(self, **kwargs: AnyConfigurableField ) ->RunnableSerializable[Input, Output]: return self.__class__(which=self.which, default=self.default. configurable_fields(**kwargs), alternatives=self.alternatives)
null
_validate_content_key
"""Check if a content key is valid""" sample = data.first() if not isinstance(sample, dict): raise ValueError( f'Expected the jq schema to result in a list of objects (dict), so sample must be a dict but got `{type(sample)}`' ) if sample.get(self._content_key) is None: raise ...
def _validate_content_key(self, data: Any) ->None: """Check if a content key is valid""" sample = data.first() if not isinstance(sample, dict): raise ValueError( f'Expected the jq schema to result in a list of objects (dict), so sample must be a dict but got `{type(sa...
Check if a content key is valid
learn
pass
def learn(self, event: TEvent) ->None: pass
null
__init__
super().__init__(**kwargs) from google.cloud.discoveryengine_v1beta import ConversationalSearchServiceClient self._client = ConversationalSearchServiceClient(credentials=self. credentials, client_options=self.client_options, client_info= get_client_info(module='vertex-ai-search')) self._serving_config = self._c...
def __init__(self, **kwargs: Any): super().__init__(**kwargs) from google.cloud.discoveryengine_v1beta import ConversationalSearchServiceClient self._client = ConversationalSearchServiceClient(credentials=self. credentials, client_options=self.client_options, client_info= get_client_info(mod...
null
test_openai_streaming
"""Test streaming tokens from AzureOpenAI.""" generator = llm.stream("I'm Pickle Rick") assert isinstance(generator, Generator) full_response = '' for token in generator: assert isinstance(token, str) full_response += token assert full_response
@pytest.mark.scheduled def test_openai_streaming(llm: AzureOpenAI) ->None: """Test streaming tokens from AzureOpenAI.""" generator = llm.stream("I'm Pickle Rick") assert isinstance(generator, Generator) full_response = '' for token in generator: assert isinstance(token, str) full_res...
Test streaming tokens from AzureOpenAI.
test_convert_to_message
"""Test convert to message.""" assert _convert_to_message(args) == expected
@pytest.mark.parametrize('args,expected', [(('human', '{question}'), HumanMessagePromptTemplate(prompt=PromptTemplate.from_template( '{question}'))), ('{question}', HumanMessagePromptTemplate(prompt= PromptTemplate.from_template('{question}'))), (HumanMessage(content= 'question'), HumanMessage(content='...
Test convert to message.
is_stringtype_instance
"""Helper function to check if an item is a string.""" return isinstance(item, str) or isinstance(item, _Embed) and isinstance(item .value, str)
def is_stringtype_instance(item: Any) ->bool: """Helper function to check if an item is a string.""" return isinstance(item, str) or isinstance(item, _Embed) and isinstance( item.value, str)
Helper function to check if an item is a string.
run_query
return db.run(query)
def run_query(query): return db.run(query)
null
_process_results
typed_results = cast(List[dict], results) sorted_res = sorted(zip(typed_results, docs), key=lambda x: -int(x[0][self. rank_key])) output, document = sorted_res[0] extra_info = {} if self.metadata_keys is not None: for key in self.metadata_keys: extra_info[key] = document.metadata[key] if self.return_int...
def _process_results(self, docs: List[Document], results: Sequence[Union[ str, List[str], Dict[str, str]]]) ->Tuple[str, dict]: typed_results = cast(List[dict], results) sorted_res = sorted(zip(typed_results, docs), key=lambda x: -int(x[0][ self.rank_key])) output, document = sorted_res[0] e...
null
test_check_instances
"""Test anonymizing multiple items in a sentence""" from langchain_experimental.data_anonymizer import PresidioAnonymizer text = ( 'This is John Smith. John Smith works in a bakery.John Smith is a good guy' ) anonymizer = PresidioAnonymizer(['PERSON'], faker_seed=42) anonymized_text = anonymizer.anonymize(text)...
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker') def test_check_instances() ->None: """Test anonymizing multiple items in a sentence""" from langchain_experimental.data_anonymizer import PresidioAnonymizer text = ( 'This is John Smith. John Smith works in a bakery.John Smit...
Test anonymizing multiple items in a sentence
client
import marqo client = marqo.Client(url=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY) try: client.index(INDEX_NAME).delete() except Exception: pass client.create_index(INDEX_NAME) return client
@pytest.fixture def client() ->Marqo: import marqo client = marqo.Client(url=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY) try: client.index(INDEX_NAME).delete() except Exception: pass client.create_index(INDEX_NAME) return client
null
test_sql_query
import rockset assert os.environ.get('ROCKSET_API_KEY') is not None assert os.environ.get('ROCKSET_REGION') is not None api_key = os.environ.get('ROCKSET_API_KEY') region = os.environ.get('ROCKSET_REGION') if region == 'use1a1': host = rockset.Regions.use1a1 elif region == 'usw2a1': host = rockset.Regions.usw2a...
def test_sql_query() ->None: import rockset assert os.environ.get('ROCKSET_API_KEY') is not None assert os.environ.get('ROCKSET_REGION') is not None api_key = os.environ.get('ROCKSET_API_KEY') region = os.environ.get('ROCKSET_REGION') if region == 'use1a1': host = rockset.Regions.use1a1 ...
null
_convert_prompt_msg_params
model_req = {'model': {'name': self.model}} if self.model_version is not None: model_req['model']['version'] = self.model_version return {**model_req, 'messages': [_convert_message_to_dict(message) for message in messages], 'parameters': {**self._default_params, **kwargs}}
def _convert_prompt_msg_params(self, messages: List[BaseMessage], **kwargs: Any ) ->Dict[str, Any]: model_req = {'model': {'name': self.model}} if self.model_version is not None: model_req['model']['version'] = self.model_version return {**model_req, 'messages': [_convert_message_to_dict(message...
null
extract_node_variable
""" Args: part: node in string format """ part = part.lstrip('(').rstrip(')') idx = part.find(':') if idx != -1: part = part[:idx] return None if part == '' else part
def extract_node_variable(self, part: str) ->Optional[str]: """ Args: part: node in string format """ part = part.lstrip('(').rstrip(')') idx = part.find(':') if idx != -1: part = part[:idx] return None if part == '' else part
Args: part: node in string format
mset
"""Set the given key-value pairs.""" for key, value in key_value_pairs: self.client.set(self._get_prefixed_key(key), value, ex=self.ttl)
def mset(self, key_value_pairs: Sequence[Tuple[str, str]]) ->None: """Set the given key-value pairs.""" for key, value in key_value_pairs: self.client.set(self._get_prefixed_key(key), value, ex=self.ttl)
Set the given key-value pairs.
on_text
pass
def on_text(self, text: str, **kwargs: Any) ->None: pass
null
test_multiple_namespaces
loader = MWDumpLoader(file_path=(PARENT_DIR / 'mwtest_current_pages.xml'). absolute(), namespaces=[0, 6], skip_redirects=True, stop_on_error=False) documents = loader.load() [print(doc) for doc in documents] assert len(documents) == 2
@pytest.mark.requires('mwparserfromhell', 'mwxml') def test_multiple_namespaces() ->None: loader = MWDumpLoader(file_path=(PARENT_DIR / 'mwtest_current_pages.xml').absolute(), namespaces=[0, 6], skip_redirects=True, stop_on_error=False) documents = loader.load() [print(doc) for doc in docume...
null
from_llm
"""Initialize from LLM.""" _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) llm_chain = LLMChain(llm=llm, prompt=_prompt, callbacks=callbacks, ** llm_chain_kwargs or {}) document_prompt = PromptTemplate(input_variables=['page_content'], template ="""Context: {page_content}""") combine_documents_chain = Stuff...
@classmethod def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate]= None, callbacks: Callbacks=None, llm_chain_kwargs: Optional[dict]=None, **kwargs: Any) ->BaseRetrievalQA: """Initialize from LLM.""" _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm) llm_chain = LLMChain(llm=l...
Initialize from LLM.
_chunk
for i in range(0, len(texts), size): yield texts[i:i + size]
def _chunk(texts: List[str], size: int) ->Iterator[List[str]]: for i in range(0, len(texts), size): yield texts[i:i + size]
null
_create_filter_clause
IN, NIN, BETWEEN, GT, LT, NE = 'in', 'nin', 'between', 'gt', 'lt', 'ne' EQ, LIKE, CONTAINS, OR, AND = 'eq', 'like', 'contains', 'or', 'and' value_case_insensitive = {k.lower(): v for k, v in value.items()} if IN in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.in_( va...
def _create_filter_clause(self, key, value): IN, NIN, BETWEEN, GT, LT, NE = 'in', 'nin', 'between', 'gt', 'lt', 'ne' EQ, LIKE, CONTAINS, OR, AND = 'eq', 'like', 'contains', 'or', 'and' value_case_insensitive = {k.lower(): v for k, v in value.items()} if IN in map(str.lower, value): filter_by_met...
null
_import_vertex_model_garden
from langchain_community.llms.vertexai import VertexAIModelGarden return VertexAIModelGarden
def _import_vertex_model_garden() ->Any: from langchain_community.llms.vertexai import VertexAIModelGarden return VertexAIModelGarden
null
__init__
self.id = id self.name = name self.cards = cards self.lists = lists
def __init__(self, id: str, name: str, cards: list, lists: list): self.id = id self.name = name self.cards = cards self.lists = lists
null
test_faiss_vector_sim
"""Test vector similarity.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content ='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]: Docume...
@pytest.mark.requires('faiss') def test_faiss_vector_sim() ->None: """Test vector similarity.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore({index_to_id[0]: Document( pa...
Test vector similarity.
test_dependency_string_both
_assert_dependency_equals(parse_dependency_string( 'git+https://github.com/efriis/myrepo.git@branch#subdirectory=src', None, None, None), git='https://github.com/efriis/myrepo.git', subdirectory='src', ref='branch')
def test_dependency_string_both() ->None: _assert_dependency_equals(parse_dependency_string( 'git+https://github.com/efriis/myrepo.git@branch#subdirectory=src', None, None, None), git='https://github.com/efriis/myrepo.git', subdirectory='src', ref='branch')
null
_import_vertex
from langchain_community.llms.vertexai import VertexAI return VertexAI
def _import_vertex() ->Any: from langchain_community.llms.vertexai import VertexAI return VertexAI
null
_llm_type
"""Return type of llm.""" return 'ctransformers'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'ctransformers'
Return type of llm.
def close(self) -> None:
    """Close every child held by this object, in order."""
    for kid in self._children:
        kid.close()
_comprehension
if t.is_async: self.write(' async for ') else: self.write(' for ') self.dispatch(t.target) self.write(' in ') self.dispatch(t.iter) for if_clause in t.ifs: self.write(' if ') self.dispatch(if_clause)
def _comprehension(self, t): if t.is_async: self.write(' async for ') else: self.write(' for ') self.dispatch(t.target) self.write(' in ') self.dispatch(t.iter) for if_clause in t.ifs: self.write(' if ') self.dispatch(if_clause)
null
test_selector_trims_one_example
"""Test LengthBasedExampleSelector can trim one example.""" long_question = """I am writing a really long question, this probably is going to affect the example right?""" output = selector.select_examples({'question': long_question}) assert output == EXAMPLES[:1]
def test_selector_trims_one_example(selector: LengthBasedExampleSelector ) ->None: """Test LengthBasedExampleSelector can trim one example.""" long_question = """I am writing a really long question, this probably is going to affect the example right?""" output = selector.select_examples({'question':...
Test LengthBasedExampleSelector can trim one example.
test_get_input_schema_input_dict
class RunnableWithChatHistoryInput(BaseModel): input: Union[str, BaseMessage, Sequence[BaseMessage]] runnable = RunnableLambda(lambda input: {'output': [AIMessage(content= 'you said: ' + '\n'.join([str(m.content) for m in input['history'] if isinstance(m, HumanMessage)] + [input['input']]))]}) get_session_h...
def test_get_input_schema_input_dict() ->None: class RunnableWithChatHistoryInput(BaseModel): input: Union[str, BaseMessage, Sequence[BaseMessage]] runnable = RunnableLambda(lambda input: {'output': [AIMessage(content= 'you said: ' + '\n'.join([str(m.content) for m in input['history'] if ...
null
def invoke(self, input: Input, config: Optional[RunnableConfig]=None, **kwargs: Any) -> Output:
    """Run this runnable on *input*.

    Delegates to ``_call_with_config`` with ``_invoke`` as the function to
    execute, forwarding the config and any extra keyword arguments.
    """
    return self._call_with_config(self._invoke, input, config, **kwargs)
_llm_type
"""Return type of llm.""" return 'huggingface_endpoint'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'huggingface_endpoint'
Return type of llm.
@property
def messages(self) -> List[BaseMessage]:
    """Retrieve the messages from Upstash Redis."""
    raw = self.redis_client.lrange(self.key, 0, -1)
    # Reverse before decoding — presumably the list is stored newest-first,
    # so this returns messages in chronological order. TODO confirm.
    decoded = [json.loads(item) for item in reversed(raw)]
    return messages_from_dict(decoded)
def sort_by_index_name(lst: List[Dict[str, Any]], index_name: str) -> List[Dict[str, Any]]:
    """Stably reorder *lst* so entries whose ``index_name`` equals the given name come first."""

    def misses_target(entry: Dict[str, Any]) -> bool:
        # False sorts before True, so matching entries float to the front;
        # sorted() is stable, so relative order is otherwise preserved.
        return entry.get('index_name') != index_name

    return sorted(lst, key=misses_target)
_import_powerbi_tool_InfoPowerBITool
from langchain_community.tools.powerbi.tool import InfoPowerBITool return InfoPowerBITool
def _import_powerbi_tool_InfoPowerBITool() ->Any: from langchain_community.tools.powerbi.tool import InfoPowerBITool return InfoPowerBITool
null
_import_scann
from langchain_community.vectorstores.scann import ScaNN return ScaNN
def _import_scann() ->Any: from langchain_community.vectorstores.scann import ScaNN return ScaNN
null
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
    """No-op callback: this handler deliberately ignores chain completion."""
_create_and_run_api_controller_agent
pattern = '\\b(GET|POST|PATCH|DELETE)\\s+(/\\S+)*' matches = re.findall(pattern, plan_str) endpoint_names = ['{method} {route}'.format(method=method, route=route. split('?')[0]) for method, route in matches] docs_str = '' for endpoint_name in endpoint_names: found_match = False for name, _, docs in api_spec...
def _create_and_run_api_controller_agent(plan_str: str) ->str: pattern = '\\b(GET|POST|PATCH|DELETE)\\s+(/\\S+)*' matches = re.findall(pattern, plan_str) endpoint_names = ['{method} {route}'.format(method=method, route=route. split('?')[0]) for method, route in matches] docs_str = '' for end...
null
def _on_tool_end(self, run: Run) -> None:
    """Submit an update for a finished tool run."""
    # Copy before submitting — presumably so later mutation of the live Run
    # doesn't race the queued update. TODO confirm.
    snapshot = _copy(run)
    self._submit(self._update_run_single, snapshot)
@pytest.fixture
def mock_documents() -> List[Document]:
    """Provide two identical documents with simple metadata for tests."""
    return [
        Document(page_content='Test Document', metadata={'key': 'value'})
        for _ in range(2)
    ]
@property
def requires_reference(self) -> bool:
    """Whether this chain needs a reference answer to evaluate against.

    Returns:
        bool: Always ``True`` for this chain.
    """
    return True
_Compare
self.write('(') self.dispatch(t.left) for o, e in zip(t.ops, t.comparators): self.write(' ' + self.cmpops[o.__class__.__name__] + ' ') self.dispatch(e) self.write(')')
def _Compare(self, t): self.write('(') self.dispatch(t.left) for o, e in zip(t.ops, t.comparators): self.write(' ' + self.cmpops[o.__class__.__name__] + ' ') self.dispatch(e) self.write(')')
null
_validate_example_inputs_for_chain
"""Validate that the example inputs match the chain input keys.""" if input_mapper: first_inputs = input_mapper(first_example.inputs) missing_keys = set(chain.input_keys).difference(first_inputs) if not isinstance(first_inputs, dict): raise InputFormatError( f"""When using an input_mappe...
def _validate_example_inputs_for_chain(first_example: Example, chain: Chain, input_mapper: Optional[Callable[[Dict], Any]]) ->None: """Validate that the example inputs match the chain input keys.""" if input_mapper: first_inputs = input_mapper(first_example.inputs) missing_keys = set(chain.i...
Validate that the example inputs match the chain input keys.
test_jinachat_streaming
"""Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = JinaChat(max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True) message = HumanMessage(content='Hello'...
def test_jinachat_streaming() ->None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = JinaChat(max_tokens=10, streaming=True, temperature=0, callback_manager=callback_man...
Test that streaming correctly invokes on_llm_new_token callback.
@pytest.mark.parametrize('x', (-1.001, 2e-08, 1234567.654321))
def test_parse_float_value(x: float) -> None:
    """Value round-trip works for negative, tiny, and large floats."""
    _test_parse_value(x)
validate_environment
volc_engine_maas_ak = convert_to_secret_str(get_from_dict_or_env(values, 'volc_engine_maas_ak', 'VOLC_ACCESSKEY')) volc_engine_maas_sk = convert_to_secret_str(get_from_dict_or_env(values, 'volc_engine_maas_sk', 'VOLC_SECRETKEY')) endpoint = values['endpoint'] if values['endpoint'] is not None and values['endpoi...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: volc_engine_maas_ak = convert_to_secret_str(get_from_dict_or_env(values, 'volc_engine_maas_ak', 'VOLC_ACCESSKEY')) volc_engine_maas_sk = convert_to_secret_str(get_from_dict_or_env(values, 'volc_engine_maas_sk', 'VOLC_SECRETKEY...
null
_import_python
from langchain_community.utilities.python import PythonREPL return PythonREPL
def _import_python() ->Any: from langchain_community.utilities.python import PythonREPL return PythonREPL
null
@property
def h(self) -> int:
    """The box's height."""
    return self._h
_transform_llama2_chat
return response['candidates'][0]['text']
def _transform_llama2_chat(response: Dict[str, Any]) ->str: return response['candidates'][0]['text']
null
_run
"""Use the tool.""" if self.sync_browser is None: raise ValueError(f'Synchronous browser not provided to {self.name}') page = get_current_page(self.sync_browser) selector_effective = self._selector_effective(selector=selector) from playwright.sync_api import TimeoutError as PlaywrightTimeoutError try: page.clic...
def _run(self, selector: str, run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f'Synchronous browser not provided to {self.name}') page = get_current_page(self.sync_browser) selector_effective = self._selector_e...
Use the tool.
test_chat_baichuan_with_kwargs
chat = ChatBaichuan() message = HumanMessage(content='Hello') response = chat([message], temperature=0.88, top_p=0.7) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
def test_chat_baichuan_with_kwargs() ->None: chat = ChatBaichuan() message = HumanMessage(content='Hello') response = chat([message], temperature=0.88, top_p=0.7) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
null
def set(self, key: str, value: Optional[str]) -> None:
    """Store *value* (which may be ``None``) under *key* in the backing mapping."""
    self.store[key] = value
_import_google_cloud_texttospeech
try: from google.cloud import texttospeech except ImportError as e: raise ImportError( 'Cannot import google.cloud.texttospeech, please install `pip install google-cloud-texttospeech`.' ) from e return texttospeech
def _import_google_cloud_texttospeech() ->Any: try: from google.cloud import texttospeech except ImportError as e: raise ImportError( 'Cannot import google.cloud.texttospeech, please install `pip install google-cloud-texttospeech`.' ) from e return texttospeech
null
indent_lines_after_first
"""Indent all lines of text after the first line. Args: text: The text to indent prefix: Used to determine the number of spaces to indent Returns: str: The indented text """ n_spaces = len(prefix) spaces = ' ' * n_spaces lines = text.splitlines() return '\n'.join([lines[0]] + [(sp...
def indent_lines_after_first(text: str, prefix: str) ->str: """Indent all lines of text after the first line. Args: text: The text to indent prefix: Used to determine the number of spaces to indent Returns: str: The indented text """ n_spaces = len(prefix) spaces = ' '...
Indent all lines of text after the first line. Args: text: The text to indent prefix: Used to determine the number of spaces to indent Returns: str: The indented text