method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
load
"""Load documents.""" with tempfile.TemporaryDirectory() as temp_dir: file_path = f'{temp_dir}/{self.bucket}/{self.key}' os.makedirs(os.path.dirname(file_path), exist_ok=True) self.client.downloadFile(bucketName=self.bucket, objectKey=self.key, downloadFile=file_path) loader = UnstructuredFileLoader(file_path) return loader.load()
def load(self) ->List[Document]:
    """Download the object from the bucket into a temp dir and parse it.

    Returns:
        The documents produced by ``UnstructuredFileLoader`` for the file.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # Mirror the bucket/key layout locally so the original filename
        # (and extension) is preserved for the unstructured partitioner.
        local_path = f'{temp_dir}/{self.bucket}/{self.key}'
        os.makedirs(os.path.dirname(local_path), exist_ok=True)
        self.client.downloadFile(bucketName=self.bucket, objectKey=self.
            key, downloadFile=local_path)
        return UnstructuredFileLoader(local_path).load()
Load documents.
test__human_assistant_format
for input_text, expected_output in TEST_CASES.items(): if expected_output == ALTERNATION_ERROR: with pytest.warns(UserWarning, match=ALTERNATION_ERROR): _human_assistant_format(input_text) else: output = _human_assistant_format(input_text) assert output == expected_output
def test__human_assistant_format() ->None:
    """Each case either formats to its expected text or warns on bad alternation."""
    for prompt, expected in TEST_CASES.items():
        if expected != ALTERNATION_ERROR:
            assert _human_assistant_format(prompt) == expected
        else:
            # Malformed Human/Assistant alternation must emit a warning.
            with pytest.warns(UserWarning, match=ALTERNATION_ERROR):
                _human_assistant_format(prompt)
null
_import_sagemaker_endpoint
from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint return SagemakerEndpoint
def _import_sagemaker_endpoint() ->Any:
    """Lazily import and return the ``SagemakerEndpoint`` LLM class."""
    from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint

    return SagemakerEndpoint
null
similarity_search
raise NotImplementedError
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Not implemented for this vector store; always raises NotImplementedError."""
    raise NotImplementedError
null
from_data
raise NotImplementedError()
@classmethod
def from_data(cls, data: Dict[str, Any]) ->'Component':
    """Build a Component from a raw data mapping.

    Not implemented here; raises NotImplementedError unconditionally.
    """
    raise NotImplementedError()
null
handle_id_and_kwargs
"""Recursively handles the id and kwargs fields of a dictionary. changes the id field to a string "_kind" field that tells WBTraceTree how to visualize the run. recursively moves the dictionaries under the kwargs key to the top level. :param obj: a run dictionary with id and kwargs fields. :param root: whether this is the root dictionary or the serialized dictionary. :return: The modified dictionary. """ if isinstance(obj, dict): if ('id' in obj or 'name' in obj) and not root: _kind = obj.get('id') if not _kind: _kind = [obj.get('name')] obj['_kind'] = _kind[-1] obj.pop('id', None) obj.pop('name', None) if 'kwargs' in obj: kwargs = obj.pop('kwargs') for k, v in kwargs.items(): obj[k] = v for k, v in obj.items(): obj[k] = handle_id_and_kwargs(v) elif isinstance(obj, list): obj = [handle_id_and_kwargs(x) for x in obj] return obj
def handle_id_and_kwargs(obj: Dict[str, Any], root: bool=False) ->Dict[str,
    Any]:
    """Recursively handles the id and kwargs fields of a dictionary.

    changes the id field to a string "_kind" field that tells WBTraceTree
    how to visualize the run. recursively moves the dictionaries under
    the kwargs key to the top level.

    :param obj: a run dictionary with id and kwargs fields.
    :param root: whether this is the root dictionary or the serialized
        dictionary.
    :return: The modified dictionary.
    """
    if isinstance(obj, list):
        return [handle_id_and_kwargs(item) for item in obj]
    if not isinstance(obj, dict):
        # Leaves (strings, numbers, ...) pass through untouched.
        return obj
    if not root and ('id' in obj or 'name' in obj):
        # The last path segment of `id` (falling back to `name`) becomes
        # the visualization kind.
        kind_path = obj.pop('id', None) or [obj.get('name')]
        obj['_kind'] = kind_path[-1]
        obj.pop('name', None)
    # Hoist everything under `kwargs` to the top level of this dict.
    for key, value in obj.pop('kwargs', {}).items():
        obj[key] = value
    for key in list(obj):
        obj[key] = handle_id_and_kwargs(obj[key])
    return obj
Recursively handles the id and kwargs fields of a dictionary. changes the id field to a string "_kind" field that tells WBTraceTree how to visualize the run. recursively moves the dictionaries under the kwargs key to the top level. :param obj: a run dictionary with id and kwargs fields. :param root: whether this is the root dictionary or the serialized dictionary. :return: The modified dictionary.
test_load_success
"""Test that returns the correct answer""" output = tfds_client.load() assert isinstance(output, list) assert len(output) == MAX_DOCS assert isinstance(output[0], Document) assert len(output[0].page_content) > 0 assert isinstance(output[0].page_content, str) assert isinstance(output[0].metadata, dict)
def test_load_success(tfds_client: TensorflowDatasetLoader) ->None:
    """Test that returns the correct answer"""
    docs = tfds_client.load()
    assert isinstance(docs, list)
    assert len(docs) == MAX_DOCS
    # Spot-check the first document's shape.
    first = docs[0]
    assert isinstance(first, Document)
    assert len(first.page_content) > 0
    assert isinstance(first.page_content, str)
    assert isinstance(first.metadata, dict)
Test that returns the correct answer
get_table_info_no_throw
"""Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper. """ try: return self.get_table_info(table_names) except ValueError as e: """Format the error message""" return f'Error: {e}'
def get_table_info_no_throw(self, table_names: Optional[List[str]]=None) ->str:
    """Get information about specified tables.

    Follows best practices as specified in: Rajkumar et al, 2022
    (https://arxiv.org/abs/2204.00498)

    If `sample_rows_in_table_info`, the specified number of sample rows will be
    appended to each table description. This can increase performance as
    demonstrated in the paper.

    Returns:
        The table info string, or an ``'Error: ...'`` message instead of
        raising when the requested table names are invalid.
    """
    try:
        return self.get_table_info(table_names)
    except ValueError as e:
        # Format the error message (was a stray string literal, which is a
        # no-op expression statement, not a comment).
        return f'Error: {e}'
Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper.
__add__
chunk = AddableDict(self) for key in other: if key not in chunk or chunk[key] is None: chunk[key] = other[key] elif other[key] is not None: try: added = chunk[key] + other[key] except TypeError: added = other[key] chunk[key] = added return chunk
def __add__(self, other: AddableDict) ->AddableDict:
    """Merge two chunks; overlapping values are added when addition is defined."""
    merged = AddableDict(self)
    for key, value in other.items():
        existing = merged.get(key)
        if existing is None:
            # Missing or None on our side: take the other side's value.
            merged[key] = value
        elif value is not None:
            try:
                merged[key] = existing + value
            except TypeError:
                # Values that cannot be added are replaced by the newer one.
                merged[key] = value
    return merged
null
get_format_instructions
return self.guard.raw_prompt.format_instructions
def get_format_instructions(self) ->str:
    """Return the format instructions embedded in the guard's raw prompt."""
    instructions = self.guard.raw_prompt.format_instructions
    return instructions
null
on_llm_end
self._require_current_thought().on_llm_end(response, **kwargs) self._prune_old_thought_containers()
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
    """Forward the LLM result to the current thought, then prune stale ones."""
    thought = self._require_current_thought()
    thought.on_llm_end(response, **kwargs)
    self._prune_old_thought_containers()
null
get_agent_executor
_agent_executor = initialize_agent(tools, llm, agent=agent_type, verbose= True, handle_parsing_errors=True, prompt=prompt) return _agent_executor | (lambda output: output['output'])
def get_agent_executor(llm: BaseLLM, agent_type: AgentType=AgentType.
    OPENAI_FUNCTIONS) ->Runnable:
    """Build an agent executor runnable that returns only its 'output' field.

    Uses the module-level ``tools`` and ``prompt``.
    """
    executor = initialize_agent(tools, llm, agent=agent_type, verbose=True,
        handle_parsing_errors=True, prompt=prompt)
    return executor | (lambda result: result['output'])
null
collection
return get_collection()
@pytest.fixture()
def collection() ->Any:
    """Pytest fixture providing the shared test collection."""
    coll = get_collection()
    return coll
null
_import_google_search
from langchain_community.utilities.google_search import GoogleSearchAPIWrapper return GoogleSearchAPIWrapper
def _import_google_search() ->Any:
    """Lazily import and return the ``GoogleSearchAPIWrapper`` class."""
    from langchain_community.utilities.google_search import GoogleSearchAPIWrapper

    return GoogleSearchAPIWrapper
null
on_tool_error_common
self.errors += 1
def on_tool_error_common(self) ->None:
    """Record one more tool error in the running counter."""
    self.errors = self.errors + 1
null
_embed_texts
"""Embed search texts. Used to provide backward compatibility with `embedding_function` argument. Args: texts: Iterable of texts to embed. Returns: List of floats representing the texts embedding. """ if self.embeddings is not None: embeddings = self.embeddings.embed_documents(list(texts)) if hasattr(embeddings, 'tolist'): embeddings = embeddings.tolist() elif self._embeddings_function is not None: embeddings = [] for text in texts: embedding = self._embeddings_function(text) if hasattr(embeddings, 'tolist'): embedding = embedding.tolist() embeddings.append(embedding) else: raise ValueError('Neither of embeddings or embedding_function is set') return embeddings
def _embed_texts(self, texts: Iterable[str]) ->List[List[float]]: """Embed search texts. Used to provide backward compatibility with `embedding_function` argument. Args: texts: Iterable of texts to embed. Returns: List of floats representing the texts embedding. """ if self.embeddings is not None: embeddings = self.embeddings.embed_documents(list(texts)) if hasattr(embeddings, 'tolist'): embeddings = embeddings.tolist() elif self._embeddings_function is not None: embeddings = [] for text in texts: embedding = self._embeddings_function(text) if hasattr(embeddings, 'tolist'): embedding = embedding.tolist() embeddings.append(embedding) else: raise ValueError('Neither of embeddings or embedding_function is set') return embeddings
Embed search texts. Used to provide backward compatibility with `embedding_function` argument. Args: texts: Iterable of texts to embed. Returns: List of floats representing the texts embedding.
test_html_header_text_splitter
splitter = HTMLHeaderTextSplitter(headers_to_split_on=[('h1', 'Header 1'), ('h2', 'Header 2')]) content = """ <h1>Sample Document</h1> <h2>Section</h2> <p id="1234">Reference content.</p> <h2>Lists</h2> <ul> <li>Item 1</li> <li>Item 2</li> <li>Item 3</li> </ul> <h3>A block</h3> <div class="amazing"> <p>Some text</p> <p>Some more text</p> </div> """ docs = splitter.split_text(content) expected = [Document(page_content='Reference content.', metadata={ 'Header 1': 'Sample Document', 'Header 2': 'Section'}), Document( page_content="""Item 1 Item 2 Item 3 Some text Some more text""", metadata={'Header 1': 'Sample Document', 'Header 2': 'Lists'})] assert docs == expected with open(tmp_path / 'doc.html', 'w') as tmp: tmp.write(content) docs_from_file = splitter.split_text_from_file(tmp_path / 'doc.html') assert docs_from_file == expected
@pytest.mark.requires('lxml')
def test_html_header_text_splitter(tmp_path: Path) ->None:
    # Splits HTML on h1/h2 headers and checks that both the in-memory and
    # file-based entry points produce identical documents.
    splitter = HTMLHeaderTextSplitter(headers_to_split_on=[('h1',
        'Header 1'), ('h2', 'Header 2')])
    content = """ <h1>Sample Document</h1> <h2>Section</h2> <p id="1234">Reference content.</p> <h2>Lists</h2> <ul> <li>Item 1</li> <li>Item 2</li> <li>Item 3</li> </ul> <h3>A block</h3> <div class="amazing"> <p>Some text</p> <p>Some more text</p> </div> """
    docs = splitter.split_text(content)
    # h3/div content is expected to fold into the preceding h2 ("Lists")
    # section because h3 is not in headers_to_split_on.
    expected = [Document(page_content='Reference content.', metadata={
        'Header 1': 'Sample Document', 'Header 2': 'Section'}), Document(
        page_content=
        """Item 1 Item 2 Item 3 Some text Some more text""", metadata={
        'Header 1': 'Sample Document', 'Header 2': 'Lists'})]
    assert docs == expected
    with open(tmp_path / 'doc.html', 'w') as tmp:
        tmp.write(content)
    docs_from_file = splitter.split_text_from_file(tmp_path / 'doc.html')
    assert docs_from_file == expected
null
prep_msg
"""Helper Method: Ensures a message is a dictionary with a role and content.""" if isinstance(msg, str): return dict(role='user', content=msg) if isinstance(msg, dict): if msg.get('content', None) is None: raise ValueError(f'Message {msg} has no content') return msg raise ValueError(f'Unknown message received: {msg} of type {type(msg)}')
def prep_msg(self, msg: Union[str, dict, BaseMessage]) ->dict:
    """Helper Method: Ensures a message is a dictionary with a role and content."""
    if isinstance(msg, str):
        # Bare strings default to the user role.
        return {'role': 'user', 'content': msg}
    if isinstance(msg, dict):
        if msg.get('content') is None:
            raise ValueError(f'Message {msg} has no content')
        return msg
    raise ValueError(f'Unknown message received: {msg} of type {type(msg)}')
Helper Method: Ensures a message is a dictionary with a role and content.
_get_relevant_documents
query_embeds = np.array(self.embeddings.embed_query(query)) index_embeds = self.index / np.sqrt((self.index ** 2).sum(1, keepdims=True)) query_embeds = query_embeds / np.sqrt((query_embeds ** 2).sum()) similarities = index_embeds.dot(query_embeds) sorted_ix = np.argsort(-similarities) denominator = np.max(similarities) - np.min(similarities) + 1e-06 normalized_similarities = (similarities - np.min(similarities)) / denominator top_k_results = [Document(page_content=self.texts[row]) for row in sorted_ix[0:self.k] if self.relevancy_threshold is None or normalized_similarities[row] >= self.relevancy_threshold] return top_k_results
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Rank stored texts by cosine similarity to the query embedding."""
    q = np.array(self.embeddings.embed_query(query))
    q = q / np.sqrt((q ** 2).sum())
    # L2-normalize index rows so the dot product is cosine similarity.
    normed_index = self.index / np.sqrt((self.index ** 2).sum(1, keepdims=True)
        )
    sims = normed_index.dot(q)
    order = np.argsort(-sims)
    # Min-max normalize so relevancy_threshold is comparable across queries;
    # the epsilon guards against division by zero for constant scores.
    spread = np.max(sims) - np.min(sims) + 1e-06
    scores = (sims - np.min(sims)) / spread
    return [Document(page_content=self.texts[idx]) for idx in order[:self.k
        ] if self.relevancy_threshold is None or scores[idx] >= self.
        relevancy_threshold]
null
_get_default_output_parser
return SelfAskOutputParser()
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) ->AgentOutputParser:
    """Return the default parser; extra kwargs are accepted and ignored."""
    parser = SelfAskOutputParser()
    return parser
null
_get_elements
"""Get elements."""
@abstractmethod
def _get_elements(self) ->List:
    """Get elements.

    Abstract: concrete subclasses must return the list of partitioned
    elements for their source.
    """
Get elements.
_url
"""Create URL for getting page ids from the OneNoteApi API.""" query_params_list = [] filter_list = [] expand_list = [] query_params_list.append('$select=id') if self.notebook_name is not None: filter_list.append('parentNotebook/displayName%20eq%20' + f"'{self.notebook_name.replace(' ', '%20')}'") expand_list.append('parentNotebook') if self.section_name is not None: filter_list.append('parentSection/displayName%20eq%20' + f"'{self.section_name.replace(' ', '%20')}'") expand_list.append('parentSection') if self.page_title is not None: filter_list.append('title%20eq%20' + f"'{self.page_title.replace(' ', '%20')}'") if len(expand_list) > 0: query_params_list.append('$expand=' + ','.join(expand_list)) if len(filter_list) > 0: query_params_list.append('$filter=' + '%20and%20'.join(filter_list)) query_params = '&'.join(query_params_list) if query_params != '': query_params = '?' + query_params return f'{self.onenote_api_base_url}/pages{query_params}'
@property def _url(self) ->str: """Create URL for getting page ids from the OneNoteApi API.""" query_params_list = [] filter_list = [] expand_list = [] query_params_list.append('$select=id') if self.notebook_name is not None: filter_list.append('parentNotebook/displayName%20eq%20' + f"'{self.notebook_name.replace(' ', '%20')}'") expand_list.append('parentNotebook') if self.section_name is not None: filter_list.append('parentSection/displayName%20eq%20' + f"'{self.section_name.replace(' ', '%20')}'") expand_list.append('parentSection') if self.page_title is not None: filter_list.append('title%20eq%20' + f"'{self.page_title.replace(' ', '%20')}'") if len(expand_list) > 0: query_params_list.append('$expand=' + ','.join(expand_list)) if len(filter_list) > 0: query_params_list.append('$filter=' + '%20and%20'.join(filter_list)) query_params = '&'.join(query_params_list) if query_params != '': query_params = '?' + query_params return f'{self.onenote_api_base_url}/pages{query_params}'
Create URL for getting page ids from the OneNoteApi API.
tearDown
builtins.__import__ = self.builtins_import
def tearDown(self) ->None:
    """Restore the original ``__import__`` hook saved before the test ran."""
    builtins.__import__ = self.builtins_import
null
is_lc_serializable
"""Return whether this class is serializable.""" return True
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return whether this class is serializable."""
    serializable = True
    return serializable
Return whether this class is serializable.
get_schema
return db.get_table_info()
def get_schema(_):
    """Return the database schema description; the argument is ignored."""
    schema = db.get_table_info()
    return schema
null
__init__
super().__init__(criteria=criteria, **kwargs)
def __init__(self, criteria: Optional[CRITERIA_TYPE]=None, **kwargs: Any
    ) ->None:
    """Initialize the evaluator, forwarding criteria and kwargs to the parent."""
    super().__init__(criteria=criteria, **kwargs)
null
test_api_key_masked_when_passed_via_constructor
llm = GooseAI(gooseai_api_key='secret-api-key') assert str(llm.gooseai_api_key) == '**********' assert 'secret-api-key' not in repr(llm.gooseai_api_key) assert 'secret-api-key' not in repr(llm)
@pytest.mark.skipif(_openai_v1_installed(), reason=
    'GooseAI currently only works with openai<1')
@pytest.mark.requires('openai')
def test_api_key_masked_when_passed_via_constructor() ->None:
    """A constructor-supplied API key must never leak through str/repr."""
    llm = GooseAI(gooseai_api_key='secret-api-key')
    assert str(llm.gooseai_api_key) == '**********'
    for rendered in (repr(llm.gooseai_api_key), repr(llm)):
        assert 'secret-api-key' not in rendered
null
test_parse_json
parsed = parse_json_markdown(json_string) assert parsed == {'foo': 'bar'}
@pytest.mark.parametrize('json_string', TEST_CASES)
def test_parse_json(json_string: str) ->None:
    """Every test case decodes to the same canonical object."""
    result = parse_json_markdown(json_string)
    assert result == {'foo': 'bar'}
null
_has_env_vars
return all([ASTRA_DB_APPLICATION_TOKEN, ASTRA_DB_API_ENDPOINT])
def _has_env_vars() ->bool:
    """True when both Astra DB credentials are configured."""
    required = (ASTRA_DB_APPLICATION_TOKEN, ASTRA_DB_API_ENDPOINT)
    return all(required)
null
test_huggingface_text2text_generation
"""Test valid call to HuggingFace text2text model.""" llm = HuggingFaceHub(repo_id='google/flan-t5-xl') output = llm('The capital of New York is') assert output == 'Albany'
def test_huggingface_text2text_generation() ->None:
    """Test valid call to HuggingFace text2text model."""
    llm = HuggingFaceHub(repo_id='google/flan-t5-xl')
    result = llm('The capital of New York is')
    assert result == 'Albany'
Test valid call to HuggingFace text2text model.
retriever
return search.run(query)
def retriever(query):
    """Run the search tool for the given query and return its raw result."""
    result = search.run(query)
    return result
null
validate_environment
"""Validate that api key and python package exists in environment.""" values['nlpcloud_api_key'] = convert_to_secret_str(get_from_dict_or_env( values, 'nlpcloud_api_key', 'NLPCLOUD_API_KEY')) try: import nlpcloud values['client'] = nlpcloud.Client(values['model_name'], values[ 'nlpcloud_api_key'].get_secret_value(), gpu=values['gpu'], lang= values['lang']) except ImportError: raise ImportError( 'Could not import nlpcloud python package. Please install it with `pip install nlpcloud`.' ) return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    api_key = convert_to_secret_str(get_from_dict_or_env(values,
        'nlpcloud_api_key', 'NLPCLOUD_API_KEY'))
    values['nlpcloud_api_key'] = api_key
    try:
        import nlpcloud
        values['client'] = nlpcloud.Client(values['model_name'], api_key.
            get_secret_value(), gpu=values['gpu'], lang=values['lang'])
    except ImportError:
        raise ImportError(
            'Could not import nlpcloud python package. Please install it with `pip install nlpcloud`.'
            )
    return values
Validate that api key and python package exists in environment.
get_steam_id
user = self.steam.users.search_user(name) steam_id = user['player']['steamid'] return steam_id
def get_steam_id(self, name: str) ->str:
    """Look up a user by display name and return their Steam ID string."""
    result = self.steam.users.search_user(name)
    return result['player']['steamid']
null
delete
"""Delete by ID. These are the IDs in the vectorstore. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if ids is None: raise ValueError('No ids provided to delete.') missing_ids = set(ids).difference(self.index_to_docstore_id.values()) if missing_ids: raise ValueError( f'Some specified ids do not exist in the current store. Ids not found: {missing_ids}' ) reversed_index = {id_: idx for idx, id_ in self.index_to_docstore_id.items()} index_to_delete = [reversed_index[id_] for id_ in ids] self.index.remove_ids(np.array(index_to_delete, dtype=np.int64)) self.docstore.delete(ids) remaining_ids = [id_ for i, id_ in sorted(self.index_to_docstore_id.items() ) if i not in index_to_delete] self.index_to_docstore_id = {i: id_ for i, id_ in enumerate(remaining_ids)} return True
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool
    ]:
    """Delete by ID. These are the IDs in the vectorstore.

    Args:
        ids: List of ids to delete.

    Returns:
        Optional[bool]: True if deletion is successful,
        False otherwise, None if not implemented.
    """
    if ids is None:
        raise ValueError('No ids provided to delete.')
    missing_ids = set(ids).difference(self.index_to_docstore_id.values())
    if missing_ids:
        raise ValueError(
            f'Some specified ids do not exist in the current store. Ids not found: {missing_ids}'
            )
    # Map each document id back to its position in the underlying index.
    id_to_position = {doc_id: pos for pos, doc_id in self.
        index_to_docstore_id.items()}
    positions = [id_to_position[doc_id] for doc_id in ids]
    self.index.remove_ids(np.array(positions, dtype=np.int64))
    self.docstore.delete(ids)
    # Compact the position -> id mapping after the removals.
    kept = [doc_id for pos, doc_id in sorted(self.index_to_docstore_id.
        items()) if pos not in positions]
    self.index_to_docstore_id = dict(enumerate(kept))
    return True
Delete by ID. These are the IDs in the vectorstore. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented.
load_local
"""Load ScaNN index, docstore, and index_to_docstore_id from disk. Args: folder_path: folder path to load index, docstore, and index_to_docstore_id from. embeddings: Embeddings to use when generating queries index_name: for saving with a specific index file name """ path = Path(folder_path) scann_path = path / '{index_name}.scann'.format(index_name=index_name) scann_path.mkdir(exist_ok=True, parents=True) scann = dependable_scann_import() index = scann.scann_ops_pybind.load_searcher(str(scann_path)) with open(path / '{index_name}.pkl'.format(index_name=index_name), 'rb') as f: docstore, index_to_docstore_id = pickle.load(f) return cls(embedding, index, docstore, index_to_docstore_id, **kwargs)
@classmethod
def load_local(cls, folder_path: str, embedding: Embeddings, index_name:
    str='index', **kwargs: Any) ->ScaNN:
    """Load ScaNN index, docstore, and index_to_docstore_id from disk.

    Args:
        folder_path: folder path to load index, docstore,
            and index_to_docstore_id from.
        embeddings: Embeddings to use when generating queries
        index_name: for saving with a specific index file name
    """
    base = Path(folder_path)
    scann_dir = base / f'{index_name}.scann'
    scann_dir.mkdir(exist_ok=True, parents=True)
    scann = dependable_scann_import()
    index = scann.scann_ops_pybind.load_searcher(str(scann_dir))
    # NOTE(review): pickle.load on stored data — only load indexes you trust.
    with open(base / f'{index_name}.pkl', 'rb') as f:
        docstore, index_to_docstore_id = pickle.load(f)
    return cls(embedding, index, docstore, index_to_docstore_id, **kwargs)
Load ScaNN index, docstore, and index_to_docstore_id from disk. Args: folder_path: folder path to load index, docstore, and index_to_docstore_id from. embeddings: Embeddings to use when generating queries index_name: for saving with a specific index file name
test_load_success_init_args
retriever = WikipediaRetriever(lang='en', top_k_results=1, load_all_available_meta=True) docs = retriever.get_relevant_documents('HUNTER X HUNTER') assert len(docs) == 1 assert_docs(docs, all_meta=True)
def test_load_success_init_args() ->None:
    """A retriever built with explicit init args returns one full-meta doc."""
    retriever = WikipediaRetriever(lang='en', top_k_results=1,
        load_all_available_meta=True)
    docs = retriever.get_relevant_documents('HUNTER X HUNTER')
    assert len(docs) == 1
    assert_docs(docs, all_meta=True)
null
on_llm_error
self.on_llm_error_common(*args, **kwargs)
def on_llm_error(self, *args: Any, **kwargs: Any) ->Any:
    """Delegate LLM-error handling to the shared common handler."""
    handler = self.on_llm_error_common
    handler(*args, **kwargs)
null
add_images
"""Run more images through the embeddings and add to the vectorstore. Args: uris List[str]: File path to the image. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added images. """ b64_texts = [self.encode_image(uri=uri) for uri in uris] if ids is None: ids = [str(uuid.uuid1()) for _ in uris] embeddings = None if self._embedding_function is not None and hasattr(self. _embedding_function, 'embed_image'): embeddings = self._embedding_function.embed_image(uris=uris) if metadatas: length_diff = len(uris) - len(metadatas) if length_diff: metadatas = metadatas + [{}] * length_diff empty_ids = [] non_empty_ids = [] for idx, m in enumerate(metadatas): if m: non_empty_ids.append(idx) else: empty_ids.append(idx) if non_empty_ids: metadatas = [metadatas[idx] for idx in non_empty_ids] images_with_metadatas = [uris[idx] for idx in non_empty_ids] embeddings_with_metadatas = [embeddings[idx] for idx in non_empty_ids ] if embeddings else None ids_with_metadata = [ids[idx] for idx in non_empty_ids] try: self._collection.upsert(metadatas=metadatas, embeddings= embeddings_with_metadatas, documents=images_with_metadatas, ids=ids_with_metadata) except ValueError as e: if 'Expected metadata value to be' in str(e): msg = ( 'Try filtering complex metadata using langchain_community.vectorstores.utils.filter_complex_metadata.' ) raise ValueError(e.args[0] + '\n\n' + msg) else: raise e if empty_ids: images_without_metadatas = [uris[j] for j in empty_ids] embeddings_without_metadatas = [embeddings[j] for j in empty_ids ] if embeddings else None ids_without_metadatas = [ids[j] for j in empty_ids] self._collection.upsert(embeddings=embeddings_without_metadatas, documents=images_without_metadatas, ids=ids_without_metadatas) else: self._collection.upsert(embeddings=embeddings, documents=b64_texts, ids=ids ) return ids
def add_images(self, uris: List[str], metadatas: Optional[List[dict]]=None,
    ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
    """Run more images through the embeddings and add to the vectorstore.

    Args:
        uris List[str]: File path to the image.
        metadatas (Optional[List[dict]], optional): Optional list of metadatas.
        ids (Optional[List[str]], optional): Optional list of IDs.

    Returns:
        List[str]: List of IDs of the added images.
    """
    # Base64-encode every image up front; used as the stored document text
    # in the no-metadata path below.
    b64_texts = [self.encode_image(uri=uri) for uri in uris]
    if ids is None:
        ids = [str(uuid.uuid1()) for _ in uris]
    embeddings = None
    if self._embedding_function is not None and hasattr(self.
        _embedding_function, 'embed_image'):
        embeddings = self._embedding_function.embed_image(uris=uris)
    if metadatas:
        # Pad metadatas so it lines up with uris, then split entries into
        # those with and without metadata; they are upserted separately.
        length_diff = len(uris) - len(metadatas)
        if length_diff:
            metadatas = metadatas + [{}] * length_diff
        empty_ids = []
        non_empty_ids = []
        for idx, m in enumerate(metadatas):
            if m:
                non_empty_ids.append(idx)
            else:
                empty_ids.append(idx)
        if non_empty_ids:
            metadatas = [metadatas[idx] for idx in non_empty_ids]
            # NOTE(review): entries WITH metadata store the raw uri as the
            # document while the no-metadata path stores base64 text —
            # confirm this asymmetry is intentional.
            images_with_metadatas = [uris[idx] for idx in non_empty_ids]
            embeddings_with_metadatas = [embeddings[idx] for idx in
                non_empty_ids] if embeddings else None
            ids_with_metadata = [ids[idx] for idx in non_empty_ids]
            try:
                self._collection.upsert(metadatas=metadatas, embeddings=
                    embeddings_with_metadatas, documents=
                    images_with_metadatas, ids=ids_with_metadata)
            except ValueError as e:
                # Complex (non-scalar) metadata values are a common cause;
                # point the caller at the filtering helper.
                if 'Expected metadata value to be' in str(e):
                    msg = (
                        'Try filtering complex metadata using langchain_community.vectorstores.utils.filter_complex_metadata.'
                        )
                    raise ValueError(e.args[0] + '\n\n' + msg)
                else:
                    raise e
        if empty_ids:
            images_without_metadatas = [uris[j] for j in empty_ids]
            embeddings_without_metadatas = [embeddings[j] for j in empty_ids
                ] if embeddings else None
            ids_without_metadatas = [ids[j] for j in empty_ids]
            self._collection.upsert(embeddings=embeddings_without_metadatas,
                documents=images_without_metadatas, ids=ids_without_metadatas)
    else:
        self._collection.upsert(embeddings=embeddings, documents=b64_texts,
            ids=ids)
    return ids
Run more images through the embeddings and add to the vectorstore. Args: uris List[str]: File path to the image. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added images.
clear
"""Clear session memory from MongoDB""" from pymongo import errors try: self.collection.delete_many({'SessionId': self.session_id}) except errors.WriteError as err: logger.error(err)
def clear(self) ->None:
    """Clear session memory from MongoDB"""
    from pymongo import errors
    session_filter = {'SessionId': self.session_id}
    try:
        self.collection.delete_many(session_filter)
    except errors.WriteError as err:
        # Log rather than propagate write failures during cleanup.
        logger.error(err)
Clear session memory from MongoDB
_get_wikipedia
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) ->BaseTool:
    """Build a Wikipedia query tool, forwarding kwargs to the API wrapper."""
    wrapper = WikipediaAPIWrapper(**kwargs)
    return WikipediaQueryRun(api_wrapper=wrapper)
null
test_summary_buffer_memory_no_buffer_yet
"""Test ConversationSummaryBufferMemory when no inputs put in buffer yet.""" memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key='baz') output = memory.load_memory_variables({}) assert output == {'baz': ''}
def test_summary_buffer_memory_no_buffer_yet() ->None:
    """Test ConversationSummaryBufferMemory when no inputs put in buffer yet."""
    memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key='baz')
    variables = memory.load_memory_variables({})
    assert variables == {'baz': ''}
Test ConversationSummaryBufferMemory when no inputs put in buffer yet.
sql_histories
message_history = SQLChatMessageHistory(session_id='123', connection_string =con_str, table_name='test_table') other_history = SQLChatMessageHistory(session_id='456', connection_string= con_str, table_name='test_table') yield message_history, other_history message_history.clear() other_history.clear()
@pytest.fixture()
def sql_histories(con_str: str) ->Generator[Tuple[SQLChatMessageHistory,
    SQLChatMessageHistory], None, None]:
    """Yield two chat histories sharing one table; clear both on teardown."""
    first = SQLChatMessageHistory(session_id='123', connection_string=
        con_str, table_name='test_table')
    second = SQLChatMessageHistory(session_id='456', connection_string=
        con_str, table_name='test_table')
    yield first, second
    # Teardown: remove everything the test inserted.
    first.clear()
    second.clear()
null
load
"""Load file.""" from unstructured.partition.auto import partition from unstructured.partition.html import partition_html docs: List[Document] = list() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. Please install with 'pip install tqdm' or set show_progress_bar=False." ) from e urls = tqdm(self.urls) else: urls = self.urls for url in urls: try: if self.__is_non_html_available(): if self.__is_headers_available_for_non_html(): elements = partition(url=url, headers=self.headers, **self. unstructured_kwargs) else: elements = partition(url=url, **self.unstructured_kwargs) elif self.__is_headers_available_for_html(): elements = partition_html(url=url, headers=self.headers, **self .unstructured_kwargs) else: elements = partition_html(url=url, **self.unstructured_kwargs) except Exception as e: if self.continue_on_failure: logger.error(f'Error fetching or processing {url}, exception: {e}') continue else: raise e if self.mode == 'single': text = '\n\n'.join([str(el) for el in elements]) metadata = {'source': url} docs.append(Document(page_content=text, metadata=metadata)) elif self.mode == 'elements': for element in elements: metadata = element.metadata.to_dict() metadata['category'] = element.category docs.append(Document(page_content=str(element), metadata=metadata)) return docs
def load(self) ->List[Document]: """Load file.""" from unstructured.partition.auto import partition from unstructured.partition.html import partition_html docs: List[Document] = list() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. Please install with 'pip install tqdm' or set show_progress_bar=False." ) from e urls = tqdm(self.urls) else: urls = self.urls for url in urls: try: if self.__is_non_html_available(): if self.__is_headers_available_for_non_html(): elements = partition(url=url, headers=self.headers, ** self.unstructured_kwargs) else: elements = partition(url=url, **self.unstructured_kwargs) elif self.__is_headers_available_for_html(): elements = partition_html(url=url, headers=self.headers, ** self.unstructured_kwargs) else: elements = partition_html(url=url, **self.unstructured_kwargs) except Exception as e: if self.continue_on_failure: logger.error( f'Error fetching or processing {url}, exception: {e}') continue else: raise e if self.mode == 'single': text = '\n\n'.join([str(el) for el in elements]) metadata = {'source': url} docs.append(Document(page_content=text, metadata=metadata)) elif self.mode == 'elements': for element in elements: metadata = element.metadata.to_dict() metadata['category'] = element.category docs.append(Document(page_content=str(element), metadata= metadata)) return docs
Load file.
test_cobol_code_splitter
splitter = RecursiveCharacterTextSplitter.from_language(Language.COBOL, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ IDENTIFICATION DIVISION. PROGRAM-ID. HelloWorld. DATA DIVISION. WORKING-STORAGE SECTION. 01 GREETING PIC X(12) VALUE 'Hello, World!'. PROCEDURE DIVISION. DISPLAY GREETING. STOP RUN. """ chunks = splitter.split_text(code) assert chunks == ['IDENTIFICATION', 'DIVISION.', 'PROGRAM-ID.', 'HelloWorld.', 'DATA DIVISION.', 'WORKING-STORAGE', 'SECTION.', '01 GREETING', 'PIC X(12)', "VALUE 'Hello,", "World!'.", 'PROCEDURE', 'DIVISION.', 'DISPLAY', 'GREETING.', 'STOP RUN.']
def test_cobol_code_splitter() ->None: splitter = RecursiveCharacterTextSplitter.from_language(Language.COBOL, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ IDENTIFICATION DIVISION. PROGRAM-ID. HelloWorld. DATA DIVISION. WORKING-STORAGE SECTION. 01 GREETING PIC X(12) VALUE 'Hello, World!'. PROCEDURE DIVISION. DISPLAY GREETING. STOP RUN. """ chunks = splitter.split_text(code) assert chunks == ['IDENTIFICATION', 'DIVISION.', 'PROGRAM-ID.', 'HelloWorld.', 'DATA DIVISION.', 'WORKING-STORAGE', 'SECTION.', '01 GREETING', 'PIC X(12)', "VALUE 'Hello,", "World!'.", 'PROCEDURE', 'DIVISION.', 'DISPLAY', 'GREETING.', 'STOP RUN.']
null
lazy_load
try: import feedparser except ImportError: raise ImportError( 'feedparser package not found, please install it with `pip install feedparser`' ) for url in self._get_urls: try: feed = feedparser.parse(url) if getattr(feed, 'bozo', False): raise ValueError( f'Error fetching {url}, exception: {feed.bozo_exception}') except Exception as e: if self.continue_on_failure: logger.error(f'Error fetching {url}, exception: {e}') continue else: raise e try: for entry in feed.entries: loader = NewsURLLoader(urls=[entry.link], **self.newsloader_kwargs) article = loader.load()[0] article.metadata['feed'] = url yield article except Exception as e: if self.continue_on_failure: logger.error(f'Error processing entry {entry.link}, exception: {e}' ) continue else: raise e
def lazy_load(self) ->Iterator[Document]: try: import feedparser except ImportError: raise ImportError( 'feedparser package not found, please install it with `pip install feedparser`' ) for url in self._get_urls: try: feed = feedparser.parse(url) if getattr(feed, 'bozo', False): raise ValueError( f'Error fetching {url}, exception: {feed.bozo_exception}') except Exception as e: if self.continue_on_failure: logger.error(f'Error fetching {url}, exception: {e}') continue else: raise e try: for entry in feed.entries: loader = NewsURLLoader(urls=[entry.link], **self. newsloader_kwargs) article = loader.load()[0] article.metadata['feed'] = url yield article except Exception as e: if self.continue_on_failure: logger.error( f'Error processing entry {entry.link}, exception: {e}') continue else: raise e
null
assign
"""Merge the Dict input with the output produced by the mapping argument. Args: mapping: A mapping from keys to runnables or callables. Returns: A runnable that merges the Dict input with the output produced by the mapping argument. """ return RunnableAssign(RunnableParallel(kwargs))
@classmethod def assign(cls, **kwargs: Union[Runnable[Dict[str, Any], Any], Callable[[ Dict[str, Any]], Any], Mapping[str, Union[Runnable[Dict[str, Any], Any], Callable[[Dict[str, Any]], Any]]]]) ->'RunnableAssign': """Merge the Dict input with the output produced by the mapping argument. Args: mapping: A mapping from keys to runnables or callables. Returns: A runnable that merges the Dict input with the output produced by the mapping argument. """ return RunnableAssign(RunnableParallel(kwargs))
Merge the Dict input with the output produced by the mapping argument. Args: mapping: A mapping from keys to runnables or callables. Returns: A runnable that merges the Dict input with the output produced by the mapping argument.
buffer
"""String buffer of memory.""" return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property def buffer(self) ->Any: """String buffer of memory.""" return (self.buffer_as_messages if self.return_messages else self. buffer_as_str)
String buffer of memory.
load
"""Load using pysrt file.""" import pysrt parsed_info = pysrt.open(self.file_path) text = ' '.join([t.text for t in parsed_info]) metadata = {'source': self.file_path} return [Document(page_content=text, metadata=metadata)]
def load(self) ->List[Document]: """Load using pysrt file.""" import pysrt parsed_info = pysrt.open(self.file_path) text = ' '.join([t.text for t in parsed_info]) metadata = {'source': self.file_path} return [Document(page_content=text, metadata=metadata)]
Load using pysrt file.
_call
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() all_true = False count = 0 output = None original_input = inputs[self.input_key] chain_input = original_input while not all_true and count < self.max_checks: output = self.sequential_chain({'summary': chain_input}, callbacks= _run_manager.get_child()) count += 1 if output['all_true'].strip() == 'True': break if self.verbose: print(output['revised_summary']) chain_input = output['revised_summary'] if not output: raise ValueError('No output from chain') return {self.output_key: output['revised_summary'].strip()}
def _call(self, inputs: Dict[str, Any], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() all_true = False count = 0 output = None original_input = inputs[self.input_key] chain_input = original_input while not all_true and count < self.max_checks: output = self.sequential_chain({'summary': chain_input}, callbacks= _run_manager.get_child()) count += 1 if output['all_true'].strip() == 'True': break if self.verbose: print(output['revised_summary']) chain_input = output['revised_summary'] if not output: raise ValueError('No output from chain') return {self.output_key: output['revised_summary'].strip()}
null
search
"""Search Zep memory for messages matching the query""" from zep_python import MemorySearchPayload payload: MemorySearchPayload = MemorySearchPayload(text=query, metadata= metadata) return self.zep_client.memory.search_memory(self.session_id, payload, limit =limit)
def search(self, query: str, metadata: Optional[Dict]=None, limit: Optional [int]=None) ->List[MemorySearchResult]: """Search Zep memory for messages matching the query""" from zep_python import MemorySearchPayload payload: MemorySearchPayload = MemorySearchPayload(text=query, metadata =metadata) return self.zep_client.memory.search_memory(self.session_id, payload, limit=limit)
Search Zep memory for messages matching the query
test_opensearch
"""Test end to end indexing and search using Approximate Search.""" docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_opensearch() ->None: """Test end to end indexing and search using Approximate Search.""" docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
Test end to end indexing and search using Approximate Search.
_stream
raise NotImplementedError()
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: raise NotImplementedError()
null
__init__
"""initialize with path, and optionally, file encoding to use, and any kwargs to pass to the BeautifulSoup object. Args: file_path: The path to the file to load. open_encoding: The encoding to use when opening the file. bs_kwargs: Any kwargs to pass to the BeautifulSoup object. get_text_separator: The separator to use when calling get_text on the soup. """ try: import bs4 except ImportError: raise ImportError( 'beautifulsoup4 package not found, please install it with `pip install beautifulsoup4`' ) self.file_path = file_path self.open_encoding = open_encoding if bs_kwargs is None: bs_kwargs = {'features': 'lxml'} self.bs_kwargs = bs_kwargs self.get_text_separator = get_text_separator
def __init__(self, file_path: str, open_encoding: Union[str, None]=None, bs_kwargs: Union[dict, None]=None, get_text_separator: str='') ->None: """initialize with path, and optionally, file encoding to use, and any kwargs to pass to the BeautifulSoup object. Args: file_path: The path to the file to load. open_encoding: The encoding to use when opening the file. bs_kwargs: Any kwargs to pass to the BeautifulSoup object. get_text_separator: The separator to use when calling get_text on the soup. """ try: import bs4 except ImportError: raise ImportError( 'beautifulsoup4 package not found, please install it with `pip install beautifulsoup4`' ) self.file_path = file_path self.open_encoding = open_encoding if bs_kwargs is None: bs_kwargs = {'features': 'lxml'} self.bs_kwargs = bs_kwargs self.get_text_separator = get_text_separator
initialize with path, and optionally, file encoding to use, and any kwargs to pass to the BeautifulSoup object. Args: file_path: The path to the file to load. open_encoding: The encoding to use when opening the file. bs_kwargs: Any kwargs to pass to the BeautifulSoup object. get_text_separator: The separator to use when calling get_text on the soup.
lazy_load
""" Search PubMed for documents matching the query. Return an iterator of dictionaries containing the document metadata. """ url = self.base_url_esearch + 'db=pubmed&term=' + str({urllib.parse.quote( query)}) + f'&retmode=json&retmax={self.top_k_results}&usehistory=y' result = urllib.request.urlopen(url) text = result.read().decode('utf-8') json_text = json.loads(text) webenv = json_text['esearchresult']['webenv'] for uid in json_text['esearchresult']['idlist']: yield self.retrieve_article(uid, webenv)
def lazy_load(self, query: str) ->Iterator[dict]: """ Search PubMed for documents matching the query. Return an iterator of dictionaries containing the document metadata. """ url = self.base_url_esearch + 'db=pubmed&term=' + str({urllib.parse. quote(query)} ) + f'&retmode=json&retmax={self.top_k_results}&usehistory=y' result = urllib.request.urlopen(url) text = result.read().decode('utf-8') json_text = json.loads(text) webenv = json_text['esearchresult']['webenv'] for uid in json_text['esearchresult']['idlist']: yield self.retrieve_article(uid, webenv)
Search PubMed for documents matching the query. Return an iterator of dictionaries containing the document metadata.
test_empty_input
self.assertEqual(get_buffer_string([]), '')
def test_empty_input(self) ->None: self.assertEqual(get_buffer_string([]), '')
null
__init__
"""Instantiate the generator class.""" super().__init__() self.client = client self.request_id = request_id self._batch = force_batch self._stop_words = stop_words
def __init__(self, client: grpcclient.InferenceServerClient, request_id: str, force_batch: bool, stop_words: Sequence[str]) ->None: """Instantiate the generator class.""" super().__init__() self.client = client self.request_id = request_id self._batch = force_batch self._stop_words = stop_words
Instantiate the generator class.
on_retriever_start
self.on_retriever_start_common()
def on_retriever_start(self, *args: Any, **kwargs: Any) ->Any: self.on_retriever_start_common()
null
add_embeddings
"""Run more texts through the embeddings and add to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore. """ if not isinstance(self.docstore, AddableMixin): raise ValueError( f'If trying to add texts, the underlying docstore should support adding items, which {self.docstore} does not' ) texts, embeddings = zip(*text_embeddings) return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs)
def add_embeddings(self, text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None, ** kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore. """ if not isinstance(self.docstore, AddableMixin): raise ValueError( f'If trying to add texts, the underlying docstore should support adding items, which {self.docstore} does not' ) texts, embeddings = zip(*text_embeddings) return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs )
Run more texts through the embeddings and add to the vectorstore. Args: text_embeddings: Iterable pairs of string and embedding to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. Returns: List of ids from adding the texts into the vectorstore.
load_llm_from_config
"""Load LLM from Config Dict.""" if '_type' not in config: raise ValueError('Must specify an LLM Type in config') config_type = config.pop('_type') type_to_cls_dict = get_type_to_cls_dict() if config_type not in type_to_cls_dict: raise ValueError(f'Loading {config_type} LLM not supported') llm_cls = type_to_cls_dict[config_type]() return llm_cls(**config)
def load_llm_from_config(config: dict) ->BaseLLM: """Load LLM from Config Dict.""" if '_type' not in config: raise ValueError('Must specify an LLM Type in config') config_type = config.pop('_type') type_to_cls_dict = get_type_to_cls_dict() if config_type not in type_to_cls_dict: raise ValueError(f'Loading {config_type} LLM not supported') llm_cls = type_to_cls_dict[config_type]() return llm_cls(**config)
Load LLM from Config Dict.
test_embeddings_filter
texts = ['What happened to all of my cookies?', 'I wish there were better Italian restaurants in my neighborhood.', 'My favorite color is green'] docs = [Document(page_content=t) for t in texts] embeddings = OpenAIEmbeddings() relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75) actual = relevant_filter.compress_documents(docs, 'What did I say about food?') assert len(actual) == 2 assert len(set(texts[:2]).intersection([d.page_content for d in actual])) == 2
def test_embeddings_filter() ->None: texts = ['What happened to all of my cookies?', 'I wish there were better Italian restaurants in my neighborhood.', 'My favorite color is green'] docs = [Document(page_content=t) for t in texts] embeddings = OpenAIEmbeddings() relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.75) actual = relevant_filter.compress_documents(docs, 'What did I say about food?') assert len(actual) == 2 assert len(set(texts[:2]).intersection([d.page_content for d in actual]) ) == 2
null
similarity_search_by_vector
documents = self.similarity_search_with_score_by_vector(embedding=embedding, k=k) return [doc for doc, _ in documents]
def similarity_search_by_vector(self, embedding: List[float], k: int=4, ** kwargs: Any) ->List[Document]: documents = self.similarity_search_with_score_by_vector(embedding= embedding, k=k) return [doc for doc, _ in documents]
null
populate
hits = vector_store.similarity_search_by_vector(embedding=[0.001] * 1536, k=1) if len(hits) == 0: src_file_name = os.path.join(BASE_DIR, '..', 'sources.txt') lines = [line.strip() for line in open(src_file_name).readlines() if line.strip() if line[0] != '#'] ids = ['_'.join(line.split(' ')[:2]).lower().replace(':', '') for line in lines] vector_store.add_texts(texts=lines, ids=ids) return len(lines) else: return 0
def populate(vector_store): hits = vector_store.similarity_search_by_vector(embedding=[0.001] * 1536, k=1) if len(hits) == 0: src_file_name = os.path.join(BASE_DIR, '..', 'sources.txt') lines = [line.strip() for line in open(src_file_name).readlines() if line.strip() if line[0] != '#'] ids = ['_'.join(line.split(' ')[:2]).lower().replace(':', '') for line in lines] vector_store.add_texts(texts=lines, ids=ids) return len(lines) else: return 0
null
_load_collection
""" Load the collection from the Zep backend. """ from zep_python import NotFoundError try: collection = self._client.document.get_collection(self.collection_name) except NotFoundError: logger.info( f'Collection {self.collection_name} not found. Creating new collection.' ) collection = self._create_collection() return collection
def _load_collection(self) ->DocumentCollection: """ Load the collection from the Zep backend. """ from zep_python import NotFoundError try: collection = self._client.document.get_collection(self.collection_name) except NotFoundError: logger.info( f'Collection {self.collection_name} not found. Creating new collection.' ) collection = self._create_collection() return collection
Load the collection from the Zep backend.
__post_init__
""" Initialize the store. """ self.sync_client.create_tables() if self.pre_delete_collection: self.sync_client.delete_all()
def __post_init__(self) ->None: """ Initialize the store. """ self.sync_client.create_tables() if self.pre_delete_collection: self.sync_client.delete_all()
Initialize the store.
get_tools
"""Return a list of tools.""" return [RequestsGetTool(requests_wrapper=self.requests_wrapper), RequestsPostTool(requests_wrapper=self.requests_wrapper), RequestsPatchTool(requests_wrapper=self.requests_wrapper), RequestsPutTool(requests_wrapper=self.requests_wrapper), RequestsDeleteTool(requests_wrapper=self.requests_wrapper)]
def get_tools(self) ->List[BaseTool]: """Return a list of tools.""" return [RequestsGetTool(requests_wrapper=self.requests_wrapper), RequestsPostTool(requests_wrapper=self.requests_wrapper), RequestsPatchTool(requests_wrapper=self.requests_wrapper), RequestsPutTool(requests_wrapper=self.requests_wrapper), RequestsDeleteTool(requests_wrapper=self.requests_wrapper)]
Return a list of tools.
similarity_search_by_vector_with_relevance_scores
"""Return Elasticsearch documents most similar to query, along with scores. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the embedding and score for each """ return self._search(query_vector=embedding, k=k, filter=filter, **kwargs)
def similarity_search_by_vector_with_relevance_scores(self, embedding: List [float], k: int=4, filter: Optional[List[Dict]]=None, **kwargs: Any ) ->List[Tuple[Document, float]]: """Return Elasticsearch documents most similar to query, along with scores. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the embedding and score for each """ return self._search(query_vector=embedding, k=k, filter=filter, **kwargs)
Return Elasticsearch documents most similar to query, along with scores. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the embedding and score for each
_load_pdf
cli, field_content = self._create_rspace_client() file_info = cli.get_file_info(self.global_id) _, ext = os.path.splitext(file_info['name']) if ext.lower() == '.pdf': outfile = f'{self.global_id}.pdf' cli.download_file(self.global_id, outfile) pdf_loader = PyPDFLoader(outfile) for pdf in pdf_loader.lazy_load(): pdf.metadata['rspace_src'] = self.global_id yield pdf
def _load_pdf(self) ->Iterator[Document]: cli, field_content = self._create_rspace_client() file_info = cli.get_file_info(self.global_id) _, ext = os.path.splitext(file_info['name']) if ext.lower() == '.pdf': outfile = f'{self.global_id}.pdf' cli.download_file(self.global_id, outfile) pdf_loader = PyPDFLoader(outfile) for pdf in pdf_loader.lazy_load(): pdf.metadata['rspace_src'] = self.global_id yield pdf
null
__init__
""" Args: file_path: The path to the CSV file. source_column: The name of the column in the CSV file to use as the source. Optional. Defaults to None. metadata_columns: A sequence of column names to use as metadata. Optional. csv_args: A dictionary of arguments to pass to the csv.DictReader. Optional. Defaults to None. encoding: The encoding of the CSV file. Optional. Defaults to None. autodetect_encoding: Whether to try to autodetect the file encoding. """ self.file_path = file_path self.source_column = source_column self.metadata_columns = metadata_columns self.encoding = encoding self.csv_args = csv_args or {} self.autodetect_encoding = autodetect_encoding
def __init__(self, file_path: str, source_column: Optional[str]=None, metadata_columns: Sequence[str]=(), csv_args: Optional[Dict]=None, encoding: Optional[str]=None, autodetect_encoding: bool=False): """ Args: file_path: The path to the CSV file. source_column: The name of the column in the CSV file to use as the source. Optional. Defaults to None. metadata_columns: A sequence of column names to use as metadata. Optional. csv_args: A dictionary of arguments to pass to the csv.DictReader. Optional. Defaults to None. encoding: The encoding of the CSV file. Optional. Defaults to None. autodetect_encoding: Whether to try to autodetect the file encoding. """ self.file_path = file_path self.source_column = source_column self.metadata_columns = metadata_columns self.encoding = encoding self.csv_args = csv_args or {} self.autodetect_encoding = autodetect_encoding
Args: file_path: The path to the CSV file. source_column: The name of the column in the CSV file to use as the source. Optional. Defaults to None. metadata_columns: A sequence of column names to use as metadata. Optional. csv_args: A dictionary of arguments to pass to the csv.DictReader. Optional. Defaults to None. encoding: The encoding of the CSV file. Optional. Defaults to None. autodetect_encoding: Whether to try to autodetect the file encoding.
_identifying_params
"""Get the identifying parameters.""" return {**{'model_name': self.model_name}, **self._default_params}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{'model_name': self.model_name}, **self._default_params}
Get the identifying parameters.
_import_qdrant
from langchain_community.vectorstores.qdrant import Qdrant return Qdrant
def _import_qdrant() ->Any: from langchain_community.vectorstores.qdrant import Qdrant return Qdrant
null
test_retrying
def _lambda(x: int) ->Union[int, Runnable]: if x == 1: raise ValueError('x is 1') elif x == 2: raise RuntimeError('x is 2') else: return x _lambda_mock = mocker.Mock(side_effect=_lambda) runnable = RunnableLambda(_lambda_mock) with pytest.raises(ValueError): runnable.invoke(1) assert _lambda_mock.call_count == 1 _lambda_mock.reset_mock() with pytest.raises(ValueError): runnable.with_retry(stop_after_attempt=2, retry_if_exception_type=( ValueError,)).invoke(1) assert _lambda_mock.call_count == 2 _lambda_mock.reset_mock() with pytest.raises(RuntimeError): runnable.with_retry(stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,)).invoke(2) assert _lambda_mock.call_count == 1 _lambda_mock.reset_mock() with pytest.raises(ValueError): runnable.with_retry(stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,)).batch([1, 2, 0]) assert _lambda_mock.call_count == 3 + 2 _lambda_mock.reset_mock() output = runnable.with_retry(stop_after_attempt=2, wait_exponential_jitter= False, retry_if_exception_type=(ValueError,)).batch([1, 2, 0], return_exceptions=True) assert _lambda_mock.call_count == 3 + 2 assert len(output) == 3 assert isinstance(output[0], ValueError) assert isinstance(output[1], RuntimeError) assert output[2] == 0 _lambda_mock.reset_mock()
def test_retrying(mocker: MockerFixture) ->None: def _lambda(x: int) ->Union[int, Runnable]: if x == 1: raise ValueError('x is 1') elif x == 2: raise RuntimeError('x is 2') else: return x _lambda_mock = mocker.Mock(side_effect=_lambda) runnable = RunnableLambda(_lambda_mock) with pytest.raises(ValueError): runnable.invoke(1) assert _lambda_mock.call_count == 1 _lambda_mock.reset_mock() with pytest.raises(ValueError): runnable.with_retry(stop_after_attempt=2, retry_if_exception_type=( ValueError,)).invoke(1) assert _lambda_mock.call_count == 2 _lambda_mock.reset_mock() with pytest.raises(RuntimeError): runnable.with_retry(stop_after_attempt=2, wait_exponential_jitter= False, retry_if_exception_type=(ValueError,)).invoke(2) assert _lambda_mock.call_count == 1 _lambda_mock.reset_mock() with pytest.raises(ValueError): runnable.with_retry(stop_after_attempt=2, wait_exponential_jitter= False, retry_if_exception_type=(ValueError,)).batch([1, 2, 0]) assert _lambda_mock.call_count == 3 + 2 _lambda_mock.reset_mock() output = runnable.with_retry(stop_after_attempt=2, wait_exponential_jitter=False, retry_if_exception_type=(ValueError,) ).batch([1, 2, 0], return_exceptions=True) assert _lambda_mock.call_count == 3 + 2 assert len(output) == 3 assert isinstance(output[0], ValueError) assert isinstance(output[1], RuntimeError) assert output[2] == 0 _lambda_mock.reset_mock()
null
test_from_filesystem_classmethod_with_path
loader = GenericLoader.from_filesystem(os.path.join(toy_dir, 'test.txt')) docs = loader.load() assert len(docs) == 1 assert docs[0].page_content == 'This is a test.txt file.'
def test_from_filesystem_classmethod_with_path(toy_dir: str) ->None: loader = GenericLoader.from_filesystem(os.path.join(toy_dir, 'test.txt')) docs = loader.load() assert len(docs) == 1 assert docs[0].page_content == 'This is a test.txt file.'
null
__init__
"""Initialize callback manager.""" self.handlers: List[BaseCallbackHandler] = handlers self.inheritable_handlers: List[BaseCallbackHandler ] = inheritable_handlers or [] self.parent_run_id: Optional[UUID] = parent_run_id self.tags = tags or [] self.inheritable_tags = inheritable_tags or [] self.metadata = metadata or {} self.inheritable_metadata = inheritable_metadata or {}
def __init__(self, handlers: List[BaseCallbackHandler], inheritable_handlers: Optional[List[BaseCallbackHandler]]=None, parent_run_id: Optional[UUID]=None, *, tags: Optional[List[str]]=None, inheritable_tags: Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None, inheritable_metadata: Optional[Dict[str, Any]]=None) ->None: """Initialize callback manager.""" self.handlers: List[BaseCallbackHandler] = handlers self.inheritable_handlers: List[BaseCallbackHandler ] = inheritable_handlers or [] self.parent_run_id: Optional[UUID] = parent_run_id self.tags = tags or [] self.inheritable_tags = inheritable_tags or [] self.metadata = metadata or {} self.inheritable_metadata = inheritable_metadata or {}
Initialize callback manager.
mock_connector_id
with patch('psychicapi.ConnectorId') as mock_connector_id: yield mock_connector_id
@pytest.fixture def mock_connector_id(): with patch('psychicapi.ConnectorId') as mock_connector_id: yield mock_connector_id
null
test_sql_chain_without_memory
queries = {'foo': 'SELECT baaz from foo', 'foo2': 'SELECT baaz from foo'} llm = FakeLLM(queries=queries, sequential_responses=True) db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True) assert db_chain.run('hello') == 'SELECT baaz from foo'
def test_sql_chain_without_memory() ->None: queries = {'foo': 'SELECT baaz from foo', 'foo2': 'SELECT baaz from foo'} llm = FakeLLM(queries=queries, sequential_responses=True) db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True) assert db_chain.run('hello') == 'SELECT baaz from foo'
null
test_numeric_field_schema_creation
"""Test creating a numeric field with default parameters.""" field = NumericFieldSchema(name='numeric') assert field.name == 'numeric' assert field.no_index is False
def test_numeric_field_schema_creation() ->None: """Test creating a numeric field with default parameters.""" field = NumericFieldSchema(name='numeric') assert field.name == 'numeric' assert field.no_index is False
Test creating a numeric field with default parameters.
_parse_resource
rsc_dict: Dict[str, Any] = {} for elem in resource: if elem.tag == 'data': rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b'' rsc_dict['hash'] = hashlib.md5(rsc_dict[elem.tag]).hexdigest() else: rsc_dict[elem.tag] = elem.text return rsc_dict
@staticmethod def _parse_resource(resource: list) ->dict: rsc_dict: Dict[str, Any] = {} for elem in resource: if elem.tag == 'data': rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b'' rsc_dict['hash'] = hashlib.md5(rsc_dict[elem.tag]).hexdigest() else: rsc_dict[elem.tag] = elem.text return rsc_dict
null
convert_messages_to_prompt_llama
"""Convert a list of messages to a prompt for llama.""" return '\n'.join([_convert_one_message_to_text_llama(message) for message in messages])
def convert_messages_to_prompt_llama(messages: List[BaseMessage]) ->str: """Convert a list of messages to a prompt for llama.""" return '\n'.join([_convert_one_message_to_text_llama(message) for message in messages])
Convert a list of messages to a prompt for llama.
_scrape
from bs4 import BeautifulSoup if parser is None: if url.endswith('.xml'): parser = 'xml' else: parser = self.default_parser self._check_parser(parser) html_doc = self.session.get(url, **self.requests_kwargs) if self.raise_for_status: html_doc.raise_for_status() if self.encoding is not None: html_doc.encoding = self.encoding elif self.autoset_encoding: html_doc.encoding = html_doc.apparent_encoding return BeautifulSoup(html_doc.text, parser, **bs_kwargs or {})
def _scrape(self, url: str, parser: Union[str, None]=None, bs_kwargs: Optional[dict]=None) ->Any: from bs4 import BeautifulSoup if parser is None: if url.endswith('.xml'): parser = 'xml' else: parser = self.default_parser self._check_parser(parser) html_doc = self.session.get(url, **self.requests_kwargs) if self.raise_for_status: html_doc.raise_for_status() if self.encoding is not None: html_doc.encoding = self.encoding elif self.autoset_encoding: html_doc.encoding = html_doc.apparent_encoding return BeautifulSoup(html_doc.text, parser, **bs_kwargs or {})
null
validate_environment
"""Validate that the python package exists in the environment.""" try: from gpt4all import GPT4All as GPT4AllModel except ImportError: raise ImportError( 'Could not import gpt4all python package. Please install it with `pip install gpt4all`.' ) full_path = values['model'] model_path, delimiter, model_name = full_path.rpartition('/') model_path += delimiter values['client'] = GPT4AllModel(model_name, model_path=model_path or None, model_type=values['backend'], allow_download=values['allow_download'], device=values['device']) if values['n_threads'] is not None: values['client'].model.set_thread_count(values['n_threads']) try: values['backend'] = values['client'].model_type except AttributeError: values['backend'] = values['client'].model.model_type return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
    """Validate that the python package exists in the environment."""
    try:
        from gpt4all import GPT4All as GPT4AllModel
    except ImportError:
        raise ImportError(
            'Could not import gpt4all python package. Please install it with `pip install gpt4all`.'
        )

    # Split "some/dir/model-file" into its directory part and file name.
    directory, sep, file_name = values['model'].rpartition('/')
    directory += sep

    client = GPT4AllModel(
        file_name,
        model_path=directory or None,
        model_type=values['backend'],
        allow_download=values['allow_download'],
        device=values['device'],
    )
    values['client'] = client
    if values['n_threads'] is not None:
        client.model.set_thread_count(values['n_threads'])

    # Depending on the gpt4all version, model_type lives on the client itself
    # or on its inner model object.
    try:
        values['backend'] = client.model_type
    except AttributeError:
        values['backend'] = client.model.model_type
    return values
Validate that the python package exists in the environment.
test_prompt_from_file_with_partial_variables
"""Test prompt can be successfully constructed from a file with partial variables.""" template = 'This is a {foo} test {bar}.' partial_variables = {'bar': 'baz'} with mock.patch('builtins.open', mock.mock_open(read_data=template)): prompt = PromptTemplate.from_file('mock_file_name', partial_variables= partial_variables) expected_prompt = PromptTemplate(template=template, input_variables=['foo'], partial_variables=partial_variables) assert prompt == expected_prompt
def test_prompt_from_file_with_partial_variables() -> None:
    """Test prompt can be successfully constructed from a file with partial variables."""
    template = 'This is a {foo} test {bar}.'
    partials = {'bar': 'baz'}
    # Patch open() so from_file reads our template instead of a real file.
    with mock.patch('builtins.open', mock.mock_open(read_data=template)):
        loaded = PromptTemplate.from_file('mock_file_name', partial_variables=partials)
    expected = PromptTemplate(
        template=template,
        input_variables=['foo'],
        partial_variables=partials,
    )
    assert loaded == expected
Test prompt can be successfully constructed from a file with partial variables.
test_create_tool_keyword_args
"""Test that keyword arguments are allowed.""" test_tool = Tool(name='test_name', func=lambda x: x, description= 'test_description') assert test_tool.is_single_input assert test_tool('foo') == 'foo' assert test_tool.name == 'test_name' assert test_tool.description == 'test_description'
def test_create_tool_keyword_args() -> None:
    """Test that keyword arguments are allowed."""
    echo_tool = Tool(
        name='test_name',
        func=lambda x: x,
        description='test_description',
    )
    assert echo_tool.is_single_input
    assert echo_tool('foo') == 'foo'
    assert echo_tool.name == 'test_name'
    assert echo_tool.description == 'test_description'
Test that keyword arguments are allowed.
_run
"""Use the tool.""" try: text = self._speech2text(query, self.speech_language) return text except Exception as e: raise RuntimeError(f'Error while running AzureCogsSpeech2TextTool: {e}')
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]=None) -> str:
    """Use the tool.

    Args:
        query: Input forwarded verbatim to ``self._speech2text`` — presumably an
            audio source reference; confirm against the tool's schema.
        run_manager: Optional callback manager (not used in this method).

    Returns:
        The recognized text.

    Raises:
        RuntimeError: If the underlying speech-to-text call fails.
    """
    try:
        text = self._speech2text(query, self.speech_language)
        return text
    except Exception as e:
        # Chain the original exception so the root cause stays visible in tracebacks.
        raise RuntimeError(f'Error while running AzureCogsSpeech2TextTool: {e}') from e
Use the tool.
test_mosaicml_llm_call
"""Test valid call to MosaicML.""" llm = MosaicML(model_kwargs={}) output = llm('Say foo:') assert isinstance(output, str)
def test_mosaicml_llm_call() -> None:
    """Smoke-test a plain completion call against MosaicML."""
    llm = MosaicML(model_kwargs={})
    completion = llm('Say foo:')
    assert isinstance(completion, str)
Test valid call to MosaicML.
_run
"""Use the tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]=None) -> str:
    """Use the tool."""
    # Delegate straight to the underlying API wrapper.
    result = self.api_wrapper.run(query)
    return result
Use the tool.
vector_search
""" Returns the most similar indexed documents to the query text. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. Returns: List[Document]: A list of documents that are most similar to the query text. """ docs_and_scores = self.vector_search_with_score(query, k=k, filters=kwargs. get('filters', None)) return [doc for doc, _ in docs_and_scores]
def vector_search(self, query: str, k: int=4, **kwargs: Any) -> List[Document]:
    """
    Returns the most similar indexed documents to the query text.

    Args:
        query (str): The query text for which to find similar documents.
        k (int): The number of documents to return. Default is 4.

    Returns:
        List[Document]: A list of documents that are most similar to the query text.
    """
    filters = kwargs.get('filters', None)
    scored_docs = self.vector_search_with_score(query, k=k, filters=filters)
    # Drop the scores; callers of this method only want the documents.
    return [document for document, _score in scored_docs]
Returns the most similar indexed documents to the query text. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. Returns: List[Document]: A list of documents that are most similar to the query text.
_on_tool_start
"""Process the Tool Run upon start.""" self._process_start_trace(run)
def _on_tool_start(self, run: 'Run') -> None:
    """Process the Tool Run upon start."""
    # Delegates to the shared start-trace handler used for all run kinds.
    self._process_start_trace(run)
Process the Tool Run upon start.
_create_table_if_not_exists
create_table_query = f"""CREATE TABLE IF NOT EXISTS {self.table_name} ( id SERIAL PRIMARY KEY, session_id TEXT NOT NULL, message JSONB NOT NULL );""" self.cursor.execute(create_table_query) self.connection.commit()
def _create_table_if_not_exists(self) -> None:
    """Create the message table (id, session_id, message) if it does not exist yet."""
    # NOTE(review): table_name is interpolated as a SQL identifier; it must come
    # from trusted configuration, never from user input.
    ddl = f"""CREATE TABLE IF NOT EXISTS {self.table_name} (
            id SERIAL PRIMARY KEY,
            session_id TEXT NOT NULL,
            message JSONB NOT NULL
        );"""
    self.cursor.execute(ddl)
    self.connection.commit()
null
_import_google_lens
from langchain_community.utilities.google_lens import GoogleLensAPIWrapper return GoogleLensAPIWrapper
def _import_google_lens() -> Any:
    """Lazily import and return the GoogleLensAPIWrapper class."""
    import langchain_community.utilities.google_lens as google_lens_module
    return google_lens_module.GoogleLensAPIWrapper
null
test_json_distance_evaluator_requires_reference
assert json_distance_evaluator.requires_reference is True
@pytest.mark.requires('rapidfuzz')
def test_json_distance_evaluator_requires_reference(json_distance_evaluator:
    JsonEditDistanceEvaluator) -> None:
    """The JSON edit-distance evaluator must declare that it needs a reference."""
    needs_reference = json_distance_evaluator.requires_reference
    assert needs_reference is True
null
_import_redis
from langchain_community.vectorstores.redis import Redis return Redis
def _import_redis() -> Any:
    """Lazily import and return the Redis vector store class."""
    import langchain_community.vectorstores.redis as redis_module
    return redis_module.Redis
null
test_list_directory_errs_outside_root_dir
"""Test the DirectoryListing tool when a root dir is specified.""" with TemporaryDirectory() as temp_dir: tool = ListDirectoryTool(root_dir=temp_dir) result = tool.run({'dir_path': '..'}) assert result == INVALID_PATH_TEMPLATE.format(arg_name='dir_path', value='..')
def test_list_directory_errs_outside_root_dir() -> None:
    """Test the DirectoryListing tool when a root dir is specified."""
    with TemporaryDirectory() as temp_dir:
        tool = ListDirectoryTool(root_dir=temp_dir)
        # '..' escapes the sandbox root, so the tool must reject it.
        outcome = tool.run({'dir_path': '..'})
        expected = INVALID_PATH_TEMPLATE.format(arg_name='dir_path', value='..')
        assert outcome == expected
Test the DirectoryListing tool when a root dir is specified.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
@classmethod
def get_lc_namespace(cls) -> List[str]:
    """Get the namespace of the langchain object."""
    return 'langchain schema runnable'.split()
Get the namespace of the langchain object.
_encode
return self._tokenizer.encode(_text, allowed_special=self._allowed_special, disallowed_special=self._disallowed_special)
def _encode(_text: str) -> List[int]:
    # `self` is a free variable captured from the enclosing method's scope.
    tokenizer = self._tokenizer
    return tokenizer.encode(
        _text,
        allowed_special=self._allowed_special,
        disallowed_special=self._disallowed_special,
    )
null
test_default_exact_matching
prediction = 'Mindy is the CTO' reference = 'Mindy is the CTO' result = exact_match_string_evaluator.evaluate_strings(prediction= prediction, reference=reference) assert result['score'] == 1.0 reference = 'Mindy is the CEO' result = exact_match_string_evaluator.evaluate_strings(prediction= prediction, reference=reference) assert result['score'] == 0.0
def test_default_exact_matching(exact_match_string_evaluator:
    ExactMatchStringEvaluator) -> None:
    """Exact matching scores 1.0 for identical strings and 0.0 otherwise."""
    prediction = 'Mindy is the CTO'
    cases = (('Mindy is the CTO', 1.0), ('Mindy is the CEO', 0.0))
    for reference, expected_score in cases:
        result = exact_match_string_evaluator.evaluate_strings(
            prediction=prediction, reference=reference
        )
        assert result['score'] == expected_score
null
run_command
"""Run shell command in the sandbox.""" proc = self.session.process.start(cmd) output = proc.wait() return {'stdout': output.stdout, 'stderr': output.stderr, 'exit_code': output.exit_code}
def run_command(self, cmd: str) -> dict:
    """Run shell command in the sandbox."""
    process = self.session.process.start(cmd)
    # wait() blocks until the process finishes and exposes its captured output.
    result = process.wait()
    return {
        'stdout': result.stdout,
        'stderr': result.stderr,
        'exit_code': result.exit_code,
    }
Run shell command in the sandbox.
__init__
"""Initialize the parser. Args: text_kwargs: Keyword arguments to pass to ``fitz.Page.get_text()``. """ self.text_kwargs = text_kwargs or {} self.extract_images = extract_images
def __init__(self, text_kwargs: Optional[Mapping[str, Any]]=None, extract_images: bool=False) -> None:
    """Initialize the parser.

    Args:
        text_kwargs: Keyword arguments to pass to ``fitz.Page.get_text()``.
        extract_images: Flag stored for later use by the parsing code.
    """
    self.extract_images = extract_images
    # Fall back to an empty kwargs dict when none (or an empty mapping) is given.
    self.text_kwargs = text_kwargs or {}
Initialize the parser. Args: text_kwargs: Keyword arguments to pass to ``fitz.Page.get_text()``.
fix_table_names
"""Fix the table names.""" return [fix_table_name(table) for table in table_names]
@validator('table_names', allow_reuse=True)
def fix_table_names(cls, table_names: List[str]) -> List[str]:
    """Fix the table names."""
    # Normalize every configured table name through the shared helper.
    return list(map(fix_table_name, table_names))
Fix the table names.
_parse_video_id
"""Parse a youtube url and return the video id if valid, otherwise None.""" parsed_url = urlparse(url) if parsed_url.scheme not in ALLOWED_SCHEMAS: return None if parsed_url.netloc not in ALLOWED_NETLOCK: return None path = parsed_url.path if path.endswith('/watch'): query = parsed_url.query parsed_query = parse_qs(query) if 'v' in parsed_query: ids = parsed_query['v'] video_id = ids if isinstance(ids, str) else ids[0] else: return None else: path = parsed_url.path.lstrip('/') video_id = path.split('/')[-1] if len(video_id) != 11: return None return video_id
def _parse_video_id(url: str) -> Optional[str]:
    """Parse a youtube url and return the video id if valid, otherwise None."""
    parts = urlparse(url)
    if parts.scheme not in ALLOWED_SCHEMAS or parts.netloc not in ALLOWED_NETLOCK:
        return None
    if parts.path.endswith('/watch'):
        # Standard watch URL: the id lives in the ``v`` query parameter.
        params = parse_qs(parts.query)
        if 'v' not in params:
            return None
        candidate = params['v']
        video_id = candidate if isinstance(candidate, str) else candidate[0]
    else:
        # Short/embed style URL: the id is the last path segment.
        video_id = parts.path.lstrip('/').split('/')[-1]
    # YouTube video ids are always exactly 11 characters long.
    return video_id if len(video_id) == 11 else None
Parse a youtube url and return the video id if valid, otherwise None.
test_neo4jvector_embeddings
"""Test end to end construction with embeddings and search.""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url= url, username=username, password=password, pre_delete_collection=True) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] drop_vector_indexes(docsearch)
def test_neo4jvector_embeddings() -> None:
    """Test end to end construction with embeddings and search."""
    # Two separate fake-embedder instances, mirroring the usage pattern in the
    # sibling tests (the fake may be stateful across calls).
    vectors = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    pairs = list(zip(texts, vectors))
    store = Neo4jVector.from_embeddings(
        text_embeddings=pairs,
        embedding=FakeEmbeddingsWithOsDimension(),
        url=url,
        username=username,
        password=password,
        pre_delete_collection=True,
    )
    assert store.similarity_search('foo', k=1) == [Document(page_content='foo')]
    drop_vector_indexes(store)
Test end to end construction with embeddings and search.