method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
split_text_from_url
"""Split HTML from web URL Args: url: web URL """ r = requests.get(url) return self.split_text_from_file(BytesIO(r.content))
def split_text_from_url(self, url: str) ->List[Document]: """Split HTML from web URL Args: url: web URL """ r = requests.get(url) return self.split_text_from_file(BytesIO(r.content))
Split HTML from web URL Args: url: web URL
validate_environment
"""Validate that api key and python package exists in environment.""" values['serp_api_key'] = convert_to_secret_str(get_from_dict_or_env(values, 'serp_api_key', 'SERPAPI_API_KEY')) try: from serpapi import SerpApiClient except ImportError: raise ImportError( 'google-search-results is not installed. Please install it with `pip install google-search-results>=2.4.2`' ) serp_search_engine = SerpApiClient values['serp_search_engine'] = serp_search_engine return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" values['serp_api_key'] = convert_to_secret_str(get_from_dict_or_env( values, 'serp_api_key', 'SERPAPI_API_KEY')) try: from serpapi import SerpApiClient except ImportError: raise ImportError( 'google-search-results is not installed. Please install it with `pip install google-search-results>=2.4.2`' ) serp_search_engine = SerpApiClient values['serp_search_engine'] = serp_search_engine return values
Validate that api key and python package exists in environment.
on_llm_new_token
"""Run when LLM generates a new token.""" self.step += 1 self.llm_streams += 1 resp = self._init_resp() resp.update({'action': 'on_llm_new_token', 'token': token}) resp.update(self.get_custom_callback_meta()) self.action_records.append(resp)
def on_llm_new_token(self, token: str, **kwargs: Any) ->None: """Run when LLM generates a new token.""" self.step += 1 self.llm_streams += 1 resp = self._init_resp() resp.update({'action': 'on_llm_new_token', 'token': token}) resp.update(self.get_custom_callback_meta()) self.action_records.append(resp)
Run when LLM generates a new token.
mock_unstructured_local
with patch( 'langchain_community.document_loaders.lakefs.UnstructuredLakeFSLoader' ) as mock_unstructured_lakefs: mock_unstructured_lakefs.return_value.load.return_value = [( 'text content', 'pdf content')] yield mock_unstructured_lakefs.return_value
@pytest.fixture def mock_unstructured_local() ->Any: with patch( 'langchain_community.document_loaders.lakefs.UnstructuredLakeFSLoader' ) as mock_unstructured_lakefs: mock_unstructured_lakefs.return_value.load.return_value = [( 'text content', 'pdf content')] yield mock_unstructured_lakefs.return_value
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
test_sim_search_with_score
"""Test end to end construction and similarity search with score.""" texts = ['foo', 'bar', 'baz'] hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(), work_dir=str(tmp_path), n_dim=10, dist_metric=metric) output = hnsw_vec_store.similarity_search_with_score('foo', k=1) assert len(output) == 1 out_doc, out_score = output[0] assert out_doc == Document(page_content='foo') assert np.isclose(out_score, 0.0, atol=1e-06)
@pytest.mark.parametrize('metric', ['cosine', 'l2']) def test_sim_search_with_score(metric: str, tmp_path: Path) ->None: """Test end to end construction and similarity search with score.""" texts = ['foo', 'bar', 'baz'] hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(), work_dir=str(tmp_path), n_dim=10, dist_metric=metric) output = hnsw_vec_store.similarity_search_with_score('foo', k=1) assert len(output) == 1 out_doc, out_score = output[0] assert out_doc == Document(page_content='foo') assert np.isclose(out_score, 0.0, atol=1e-06)
Test end to end construction and similarity search with score.
test_add_texts
"""Test end to end construction and simple similarity search.""" docsearch = DocArrayHnswSearch.from_params(work_dir=str(tmp_path), n_dim=10, embedding=FakeEmbeddings()) docsearch.add_texts(texts=texts) assert docsearch.doc_index.num_docs() == 3
def test_add_texts(texts: List[str], tmp_path: Path) ->None: """Test end to end construction and simple similarity search.""" docsearch = DocArrayHnswSearch.from_params(work_dir=str(tmp_path), n_dim=10, embedding=FakeEmbeddings()) docsearch.add_texts(texts=texts) assert docsearch.doc_index.num_docs() == 3
Test end to end construction and simple similarity search.
test_default_auto_embedder_is_off
llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, feature_embedder=feature_embedder) str1 = '0' str2 = '1' str3 = '2' ctx_str_1 = 'context1' expected = f"""shared |User {ctx_str_1} |action {str1} |action {str2} |action {str3} """ actions = [str1, str2, str3] response = chain.run(User=pick_best_chain.base.BasedOn(ctx_str_1), action= pick_best_chain.base.ToSelectFrom(actions)) selection_metadata = response['selection_metadata'] vw_str = feature_embedder.format(selection_metadata) assert vw_str == expected
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers') def test_default_auto_embedder_is_off() ->None: llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, feature_embedder=feature_embedder) str1 = '0' str2 = '1' str3 = '2' ctx_str_1 = 'context1' expected = ( f'shared |User {ctx_str_1} \n|action {str1} \n|action {str2} \n|action {str3} ' ) actions = [str1, str2, str3] response = chain.run(User=pick_best_chain.base.BasedOn(ctx_str_1), action=pick_best_chain.base.ToSelectFrom(actions)) selection_metadata = response['selection_metadata'] vw_str = feature_embedder.format(selection_metadata) assert vw_str == expected
null
validate_environment
"""Validate that api key exists in environment.""" merriam_webster_api_key = get_from_dict_or_env(values, 'merriam_webster_api_key', 'MERRIAM_WEBSTER_API_KEY') values['merriam_webster_api_key'] = merriam_webster_api_key return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key exists in environment.""" merriam_webster_api_key = get_from_dict_or_env(values, 'merriam_webster_api_key', 'MERRIAM_WEBSTER_API_KEY') values['merriam_webster_api_key'] = merriam_webster_api_key return values
Validate that api key exists in environment.
_call
"""Increment counter, and then return response in that index.""" self.i += 1 print(f'=== Mock Response #{self.i} ===') print(self.responses[self.i]) return self.responses[self.i]
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Increment counter, and then return response in that index.""" self.i += 1 print(f'=== Mock Response #{self.i} ===') print(self.responses[self.i]) return self.responses[self.i]
Increment counter, and then return response in that index.
_load_reduce_documents_chain
combine_documents_chain = None collapse_documents_chain = None if 'combine_documents_chain' in config: combine_document_chain_config = config.pop('combine_documents_chain') combine_documents_chain = load_chain_from_config( combine_document_chain_config) elif 'combine_document_chain' in config: combine_document_chain_config = config.pop('combine_document_chain') combine_documents_chain = load_chain_from_config( combine_document_chain_config) elif 'combine_documents_chain_path' in config: combine_documents_chain = load_chain(config.pop( 'combine_documents_chain_path')) elif 'combine_document_chain_path' in config: combine_documents_chain = load_chain(config.pop( 'combine_document_chain_path')) else: raise ValueError( 'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.' ) if 'collapse_documents_chain' in config: collapse_document_chain_config = config.pop('collapse_documents_chain') if collapse_document_chain_config is None: collapse_documents_chain = None else: collapse_documents_chain = load_chain_from_config( collapse_document_chain_config) elif 'collapse_documents_chain_path' in config: collapse_documents_chain = load_chain(config.pop( 'collapse_documents_chain_path')) elif 'collapse_document_chain' in config: collapse_document_chain_config = config.pop('collapse_document_chain') if collapse_document_chain_config is None: collapse_documents_chain = None else: collapse_documents_chain = load_chain_from_config( collapse_document_chain_config) elif 'collapse_document_chain_path' in config: collapse_documents_chain = load_chain(config.pop( 'collapse_document_chain_path')) return ReduceDocumentsChain(combine_documents_chain=combine_documents_chain, collapse_documents_chain=collapse_documents_chain, **config)
def _load_reduce_documents_chain(config: dict, **kwargs: Any ) ->ReduceDocumentsChain: combine_documents_chain = None collapse_documents_chain = None if 'combine_documents_chain' in config: combine_document_chain_config = config.pop('combine_documents_chain') combine_documents_chain = load_chain_from_config( combine_document_chain_config) elif 'combine_document_chain' in config: combine_document_chain_config = config.pop('combine_document_chain') combine_documents_chain = load_chain_from_config( combine_document_chain_config) elif 'combine_documents_chain_path' in config: combine_documents_chain = load_chain(config.pop( 'combine_documents_chain_path')) elif 'combine_document_chain_path' in config: combine_documents_chain = load_chain(config.pop( 'combine_document_chain_path')) else: raise ValueError( 'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.' ) if 'collapse_documents_chain' in config: collapse_document_chain_config = config.pop('collapse_documents_chain') if collapse_document_chain_config is None: collapse_documents_chain = None else: collapse_documents_chain = load_chain_from_config( collapse_document_chain_config) elif 'collapse_documents_chain_path' in config: collapse_documents_chain = load_chain(config.pop( 'collapse_documents_chain_path')) elif 'collapse_document_chain' in config: collapse_document_chain_config = config.pop('collapse_document_chain') if collapse_document_chain_config is None: collapse_documents_chain = None else: collapse_documents_chain = load_chain_from_config( collapse_document_chain_config) elif 'collapse_document_chain_path' in config: collapse_documents_chain = load_chain(config.pop( 'collapse_document_chain_path')) return ReduceDocumentsChain(combine_documents_chain= combine_documents_chain, collapse_documents_chain= collapse_documents_chain, **config)
null
plan
"""Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ inputs = {**kwargs, **{'intermediate_steps': intermediate_steps}} output = self.runnable.invoke(inputs, config={'callbacks': callbacks}) return output
def plan(self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks=None, **kwargs: Any) ->Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use. """ inputs = {**kwargs, **{'intermediate_steps': intermediate_steps}} output = self.runnable.invoke(inputs, config={'callbacks': callbacks}) return output
Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Action specifying what tool to use.
test_recursive_lambda
def _simple_recursion(x: int) ->Union[int, Runnable]: if x < 10: return RunnableLambda(lambda *args: _simple_recursion(x + 1)) else: return x runnable = RunnableLambda(_simple_recursion) assert runnable.invoke(5) == 10 with pytest.raises(RecursionError): runnable.invoke(0, {'recursion_limit': 9})
def test_recursive_lambda() ->None: def _simple_recursion(x: int) ->Union[int, Runnable]: if x < 10: return RunnableLambda(lambda *args: _simple_recursion(x + 1)) else: return x runnable = RunnableLambda(_simple_recursion) assert runnable.invoke(5) == 10 with pytest.raises(RecursionError): runnable.invoke(0, {'recursion_limit': 9})
null
_max_inner_product_relevance_score_fn
"""Normalize the distance to a score on a scale [0, 1].""" if distance > 0: return 1.0 - distance return -1.0 * distance
@staticmethod def _max_inner_product_relevance_score_fn(distance: float) ->float: """Normalize the distance to a score on a scale [0, 1].""" if distance > 0: return 1.0 - distance return -1.0 * distance
Normalize the distance to a score on a scale [0, 1].
get_session_history
if session_id not in chat_history_store: chat_history_store[session_id] = ChatMessageHistory() return chat_history_store[session_id]
def get_session_history(session_id: str, **kwargs: Any) ->ChatMessageHistory: if session_id not in chat_history_store: chat_history_store[session_id] = ChatMessageHistory() return chat_history_store[session_id]
null
test_titan_takeoff_pro_call
"""Test valid call to Titan Takeoff.""" url = 'http://localhost:3000/generate' responses.add(responses.POST, url, json={'message': '2 + 2 is 4'}, status=200) llm = TitanTakeoffPro() output = llm('What is 2 + 2?') assert isinstance(output, str)
@responses.activate def test_titan_takeoff_pro_call() ->None: """Test valid call to Titan Takeoff.""" url = 'http://localhost:3000/generate' responses.add(responses.POST, url, json={'message': '2 + 2 is 4'}, status=200) llm = TitanTakeoffPro() output = llm('What is 2 + 2?') assert isinstance(output, str)
Test valid call to Titan Takeoff.
_get_description
summary = getattr(o, 'summary', None) description = getattr(o, 'description', None) if prefer_short: return summary or description return description or summary
def _get_description(o: Any, prefer_short: bool) ->Optional[str]: summary = getattr(o, 'summary', None) description = getattr(o, 'description', None) if prefer_short: return summary or description return description or summary
null
create_results
"""Assemble documents.""" items = json_result['result'] query_result_list: List[Document] = [] for item in items: if 'fields' not in item or self.config.field_name_mapping['document' ] not in item['fields']: query_result_list.append(Document()) else: fields = item['fields'] query_result_list.append(Document(page_content=fields[self.config. field_name_mapping['document']], metadata=self. create_inverse_metadata(fields))) return query_result_list
def create_results(self, json_result: Dict[str, Any]) ->List[Document]: """Assemble documents.""" items = json_result['result'] query_result_list: List[Document] = [] for item in items: if 'fields' not in item or self.config.field_name_mapping['document' ] not in item['fields']: query_result_list.append(Document()) else: fields = item['fields'] query_result_list.append(Document(page_content=fields[self. config.field_name_mapping['document']], metadata=self. create_inverse_metadata(fields))) return query_result_list
Assemble documents.
_import_writer
from langchain_community.llms.writer import Writer return Writer
def _import_writer() ->Any: from langchain_community.llms.writer import Writer return Writer
null
load
""" Args: :param folder_ids: List of specific folder IDs to load, defaults to None :param thread_ids: List of specific thread IDs to load, defaults to None :param max_docs: Maximum number of docs to retrieve in total, defaults 1000 :param include_all_folders: Include all folders that your access_token can access, but doesn't include your private folder :param include_comments: Include comments, defaults to False :param include_images: Include images, defaults to False """ if not folder_ids and not thread_ids and not include_all_folders: raise ValueError( 'Must specify at least one among `folder_ids`, `thread_ids` or set `include_all`_folders as True' ) thread_ids = thread_ids or [] if folder_ids: for folder_id in folder_ids: self.get_thread_ids_by_folder_id(folder_id, 0, thread_ids) if include_all_folders: user = self.quip_client.get_authenticated_user() if 'group_folder_ids' in user: self.get_thread_ids_by_folder_id(user['group_folder_ids'], 0, thread_ids) if 'shared_folder_ids' in user: self.get_thread_ids_by_folder_id(user['shared_folder_ids'], 0, thread_ids) thread_ids = list(set(thread_ids[:max_docs])) return self.process_threads(thread_ids, include_images, include_comments)
def load(self, folder_ids: Optional[List[str]]=None, thread_ids: Optional[ List[str]]=None, max_docs: Optional[int]=1000, include_all_folders: bool=False, include_comments: bool=False, include_images: bool=False ) ->List[Document]: """ Args: :param folder_ids: List of specific folder IDs to load, defaults to None :param thread_ids: List of specific thread IDs to load, defaults to None :param max_docs: Maximum number of docs to retrieve in total, defaults 1000 :param include_all_folders: Include all folders that your access_token can access, but doesn't include your private folder :param include_comments: Include comments, defaults to False :param include_images: Include images, defaults to False """ if not folder_ids and not thread_ids and not include_all_folders: raise ValueError( 'Must specify at least one among `folder_ids`, `thread_ids` or set `include_all`_folders as True' ) thread_ids = thread_ids or [] if folder_ids: for folder_id in folder_ids: self.get_thread_ids_by_folder_id(folder_id, 0, thread_ids) if include_all_folders: user = self.quip_client.get_authenticated_user() if 'group_folder_ids' in user: self.get_thread_ids_by_folder_id(user['group_folder_ids'], 0, thread_ids) if 'shared_folder_ids' in user: self.get_thread_ids_by_folder_id(user['shared_folder_ids'], 0, thread_ids) thread_ids = list(set(thread_ids[:max_docs])) return self.process_threads(thread_ids, include_images, include_comments)
Args: :param folder_ids: List of specific folder IDs to load, defaults to None :param thread_ids: List of specific thread IDs to load, defaults to None :param max_docs: Maximum number of docs to retrieve in total, defaults 1000 :param include_all_folders: Include all folders that your access_token can access, but doesn't include your private folder :param include_comments: Include comments, defaults to False :param include_images: Include images, defaults to False
lazy_load
"""A lazy loader for Documents.""" raise NotImplementedError( f'{self.__class__.__name__} does not implement lazy_load()')
def lazy_load(self) ->Iterator[Document]: """A lazy loader for Documents.""" raise NotImplementedError( f'{self.__class__.__name__} does not implement lazy_load()')
A lazy loader for Documents.
load
""" Get pages from OneNote notebooks. Returns: A list of Documents with attributes: - page_content - metadata - title """ return list(self.lazy_load())
def load(self) ->List[Document]: """ Get pages from OneNote notebooks. Returns: A list of Documents with attributes: - page_content - metadata - title """ return list(self.lazy_load())
Get pages from OneNote notebooks. Returns: A list of Documents with attributes: - page_content - metadata - title
__init__
try: import neo4j except ImportError: raise ImportError( 'Could not import neo4j python package. Please install it with `pip install neo4j`.' ) if distance_strategy not in [DistanceStrategy.EUCLIDEAN_DISTANCE, DistanceStrategy.COSINE]: raise ValueError( "distance_strategy must be either 'EUCLIDEAN_DISTANCE' or 'COSINE'") url = os.environ.get('NEO4J_URL', url) url = get_from_env('url', 'NEO4J_URI', url) username = get_from_env('username', 'NEO4J_USERNAME', username) password = get_from_env('password', 'NEO4J_PASSWORD', password) database = get_from_env('database', 'NEO4J_DATABASE', database) self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password)) self._database = database self.schema = '' try: self._driver.verify_connectivity() except neo4j.exceptions.ServiceUnavailable: raise ValueError( 'Could not connect to Neo4j database. Please ensure that the url is correct' ) except neo4j.exceptions.AuthError: raise ValueError( 'Could not connect to Neo4j database. Please ensure that the username and password are correct' ) self.verify_version() check_if_not_null(['index_name', 'node_label', 'embedding_node_property', 'text_node_property'], [index_name, node_label, embedding_node_property, text_node_property]) self.embedding = embedding self._distance_strategy = distance_strategy self.index_name = index_name self.keyword_index_name = keyword_index_name self.node_label = node_label self.embedding_node_property = embedding_node_property self.text_node_property = text_node_property self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.retrieval_query = retrieval_query self.search_type = search_type self.embedding_dimension = len(embedding.embed_query('foo')) if pre_delete_collection: from neo4j.exceptions import DatabaseError self.query( f'MATCH (n:`{self.node_label}`) CALL {{ WITH n DETACH DELETE n }} IN TRANSACTIONS OF 10000 ROWS;' ) try: self.query(f'DROP INDEX {self.index_name}') except DatabaseError: 
pass
def __init__(self, embedding: Embeddings, *, search_type: SearchType= SearchType.VECTOR, username: Optional[str]=None, password: Optional[str ]=None, url: Optional[str]=None, keyword_index_name: Optional[str]= 'keyword', database: str='neo4j', index_name: str='vector', node_label: str='Chunk', embedding_node_property: str='embedding', text_node_property: str='text', distance_strategy: DistanceStrategy= DEFAULT_DISTANCE_STRATEGY, logger: Optional[logging.Logger]=None, pre_delete_collection: bool=False, retrieval_query: str='', relevance_score_fn: Optional[Callable[[float], float]]=None) ->None: try: import neo4j except ImportError: raise ImportError( 'Could not import neo4j python package. Please install it with `pip install neo4j`.' ) if distance_strategy not in [DistanceStrategy.EUCLIDEAN_DISTANCE, DistanceStrategy.COSINE]: raise ValueError( "distance_strategy must be either 'EUCLIDEAN_DISTANCE' or 'COSINE'" ) url = os.environ.get('NEO4J_URL', url) url = get_from_env('url', 'NEO4J_URI', url) username = get_from_env('username', 'NEO4J_USERNAME', username) password = get_from_env('password', 'NEO4J_PASSWORD', password) database = get_from_env('database', 'NEO4J_DATABASE', database) self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password)) self._database = database self.schema = '' try: self._driver.verify_connectivity() except neo4j.exceptions.ServiceUnavailable: raise ValueError( 'Could not connect to Neo4j database. Please ensure that the url is correct' ) except neo4j.exceptions.AuthError: raise ValueError( 'Could not connect to Neo4j database. 
Please ensure that the username and password are correct' ) self.verify_version() check_if_not_null(['index_name', 'node_label', 'embedding_node_property', 'text_node_property'], [index_name, node_label, embedding_node_property, text_node_property]) self.embedding = embedding self._distance_strategy = distance_strategy self.index_name = index_name self.keyword_index_name = keyword_index_name self.node_label = node_label self.embedding_node_property = embedding_node_property self.text_node_property = text_node_property self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.retrieval_query = retrieval_query self.search_type = search_type self.embedding_dimension = len(embedding.embed_query('foo')) if pre_delete_collection: from neo4j.exceptions import DatabaseError self.query( f'MATCH (n:`{self.node_label}`) CALL {{ WITH n DETACH DELETE n }} IN TRANSACTIONS OF 10000 ROWS;' ) try: self.query(f'DROP INDEX {self.index_name}') except DatabaseError: pass
null
_call_after_llm_before_scoring
...
@abstractmethod def _call_after_llm_before_scoring(self, llm_response: str, event: TEvent ) ->Tuple[Dict[str, Any], TEvent]: ...
null
_cosine_relevance_score_fn
"""Pinecone returns cosine similarity scores between [-1,1]""" return (score + 1) / 2
@staticmethod def _cosine_relevance_score_fn(score: float) ->float: """Pinecone returns cosine similarity scores between [-1,1]""" return (score + 1) / 2
Pinecone returns cosine similarity scores between [-1,1]
parse
action_prefix = 'Action: ' if not text.strip().split('\n')[-1].startswith(action_prefix): raise OutputParserException(f'Could not parse LLM Output: {text}') action_block = text.strip().split('\n')[-1] action_str = action_block[len(action_prefix):] re_matches = re.search('(.*?)\\[(.*?)\\]', action_str) if re_matches is None: raise OutputParserException( f'Could not parse action directive: {action_str}') action, action_input = re_matches.group(1), re_matches.group(2) if action == 'Finish': return AgentFinish({'output': action_input}, text) else: return AgentAction(action, action_input, text)
def parse(self, text: str) ->Union[AgentAction, AgentFinish]: action_prefix = 'Action: ' if not text.strip().split('\n')[-1].startswith(action_prefix): raise OutputParserException(f'Could not parse LLM Output: {text}') action_block = text.strip().split('\n')[-1] action_str = action_block[len(action_prefix):] re_matches = re.search('(.*?)\\[(.*?)\\]', action_str) if re_matches is None: raise OutputParserException( f'Could not parse action directive: {action_str}') action, action_input = re_matches.group(1), re_matches.group(2) if action == 'Finish': return AgentFinish({'output': action_input}, text) else: return AgentAction(action, action_input, text)
null
__len__
"""Get the length of the chat template.""" return len(self.messages)
def __len__(self) ->int: """Get the length of the chat template.""" return len(self.messages)
Get the length of the chat template.
test_tencent_vector_db
"""Test end to end construction and search.""" docsearch = _tencent_vector_db_from_texts() output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_tencent_vector_db() ->None: """Test end to end construction and search.""" docsearch = _tencent_vector_db_from_texts() output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
Test end to end construction and search.
_generate
should_stream = stream if stream is not None else self.streaming params = self._prepare_params(stop=stop, stream=should_stream, **kwargs) generations: List[List[Generation]] = [] for prompt in prompts: if should_stream: generation = GenerationChunk(text='') for chunk in self._stream(prompt, stop=stop, run_manager= run_manager, **kwargs): generation += chunk generations.append([generation]) else: res = completion_with_retry(self, [prompt], stream=should_stream, is_gemini=self._is_gemini_model, run_manager=run_manager, **params) generations.append([self._response_to_generation(r) for r in res. candidates]) return LLMResult(generations=generations)
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream: Optional[ bool]=None, **kwargs: Any) ->LLMResult: should_stream = stream if stream is not None else self.streaming params = self._prepare_params(stop=stop, stream=should_stream, **kwargs) generations: List[List[Generation]] = [] for prompt in prompts: if should_stream: generation = GenerationChunk(text='') for chunk in self._stream(prompt, stop=stop, run_manager= run_manager, **kwargs): generation += chunk generations.append([generation]) else: res = completion_with_retry(self, [prompt], stream= should_stream, is_gemini=self._is_gemini_model, run_manager =run_manager, **params) generations.append([self._response_to_generation(r) for r in res.candidates]) return LLMResult(generations=generations)
null
test_invalid_suffix
"""Test that a hub path with an invalid suffix raises an error.""" path = 'lc://chains/path.invalid' loader = Mock() valid_suffixes = {'json'} with pytest.raises(ValueError, match= f'Unsupported file type, must be one of {valid_suffixes}.'): try_load_from_hub(path, loader, 'chains', valid_suffixes) loader.assert_not_called()
def test_invalid_suffix() ->None: """Test that a hub path with an invalid suffix raises an error.""" path = 'lc://chains/path.invalid' loader = Mock() valid_suffixes = {'json'} with pytest.raises(ValueError, match= f'Unsupported file type, must be one of {valid_suffixes}.'): try_load_from_hub(path, loader, 'chains', valid_suffixes) loader.assert_not_called()
Test that a hub path with an invalid suffix raises an error.
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (RedisFilterExpression, optional): Optional metadata filter. Defaults to None. return_metadata (bool, optional): Whether to return metadata. Defaults to True. distance_threshold (Optional[float], optional): Maximum vector distance between selected documents and the query vector. Defaults to None. Returns: List[Document]: A list of Documents selected by maximal marginal relevance. """ query_embedding = self._embeddings.embed_query(query) prefetch_docs = self.similarity_search_by_vector(query_embedding, k=fetch_k, filter=filter, return_metadata=return_metadata, distance_threshold= distance_threshold, **kwargs) prefetch_ids = [doc.metadata['id'] for doc in prefetch_docs] prefetch_embeddings = [_buffer_to_array(cast(bytes, self.client.hget( prefetch_id, self._schema.content_vector_key)), dtype=self._schema. vector_dtype) for prefetch_id in prefetch_ids] selected_indices = maximal_marginal_relevance(np.array(query_embedding), prefetch_embeddings, lambda_mult=lambda_mult, k=k) selected_docs = [prefetch_docs[i] for i in selected_indices] return selected_docs
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, filter: Optional[RedisFilterExpression]=
    None, return_metadata: bool=True, distance_threshold: Optional[float]=
    None, **kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    MMR balances similarity to the query against diversity among the
    selected documents.

    Args:
        query (str): Text to look up documents similar to.
        k (int): Number of Documents to return. Defaults to 4.
        fetch_k (int): Number of candidates fetched for the MMR re-rank.
        lambda_mult (float): 0 = maximum diversity, 1 = minimum diversity.
            Defaults to 0.5.
        filter (RedisFilterExpression, optional): Optional metadata filter.
        return_metadata (bool, optional): Whether to return metadata.
            Defaults to True.
        distance_threshold (Optional[float], optional): Maximum vector
            distance between selected documents and the query vector.

    Returns:
        List[Document]: Documents selected by maximal marginal relevance.
    """
    # Embed the query once, over-fetch candidates, then re-rank with MMR.
    embedded_query = self._embeddings.embed_query(query)
    candidates = self.similarity_search_by_vector(embedded_query, k=fetch_k,
        filter=filter, return_metadata=return_metadata,
        distance_threshold=distance_threshold, **kwargs)
    # Pull each candidate's stored vector back out of Redis for re-ranking.
    candidate_vectors = []
    for doc in candidates:
        raw_vector = cast(bytes, self.client.hget(doc.metadata['id'],
            self._schema.content_vector_key))
        candidate_vectors.append(_buffer_to_array(raw_vector, dtype=self.
            _schema.vector_dtype))
    chosen = maximal_marginal_relevance(np.array(embedded_query),
        candidate_vectors, lambda_mult=lambda_mult, k=k)
    return [candidates[idx] for idx in chosen]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (RedisFilterExpression, optional): Optional metadata filter. Defaults to None. return_metadata (bool, optional): Whether to return metadata. Defaults to True. distance_threshold (Optional[float], optional): Maximum vector distance between selected documents and the query vector. Defaults to None. Returns: List[Document]: A list of Documents selected by maximal marginal relevance.
test_docusarus
"""Test sitemap loader.""" loader = DocusaurusLoader(DOCS_URL, is_local=True) documents = loader.load() assert len(documents) > 1 assert '🦜️🔗 Langchain' in documents[0].page_content
def test_docusarus() ->None:
    """Smoke-test the Docusaurus loader against a local sitemap."""
    documents = DocusaurusLoader(DOCS_URL, is_local=True).load()
    # More than one page should be discovered via the sitemap.
    assert len(documents) > 1
    assert '🦜️🔗 Langchain' in documents[0].page_content
Test sitemap loader.
random_string
return str(uuid.uuid4())
def random_string() ->str:
    """Return a fresh random UUID4 rendered as a string."""
    return f'{uuid.uuid4()}'
null
metrics
"""To log all metrics in the input dict.""" with self.mlflow.start_run(run_id=self.run.info.run_id, experiment_id=self. mlf_expid): self.mlflow.log_metrics(data)
def metrics(self, data: Union[Dict[str, float], Dict[str, int]], step:
    Optional[int]=0) ->None:
    """Log every metric in ``data`` against the tracked MLflow run."""
    # Re-enter the existing run so the metrics attach to it.
    active_run = self.mlflow.start_run(run_id=self.run.info.run_id,
        experiment_id=self.mlf_expid)
    with active_run:
        self.mlflow.log_metrics(data)
To log all metrics in the input dict.
max_marginal_relevance_search_by_vector
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch before filtering to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter) return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search_by_vector(self, embedding: List[float],
    k: int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[
    Dict[str, Any]]=None, **kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    MMR balances similarity to the query embedding against diversity
    among the selected documents.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents fetched before the MMR re-rank.
        lambda_mult: 0 = maximum diversity, 1 = minimum diversity.
            Defaults to 0.5.

    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    # Delegate to the scored variant and drop the scores.
    scored = self.max_marginal_relevance_search_with_score_by_vector(embedding,
        k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter)
    return [document for document, _score in scored]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch before filtering to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance.
_parse_intermediate_steps
last_action, last_output = intermediate_steps[-1] run = self._wait_for_run(last_action.run_id, last_action.thread_id) required_tool_call_ids = {tc.id for tc in run.required_action. submit_tool_outputs.tool_calls} tool_outputs = [{'output': str(output), 'tool_call_id': action.tool_call_id } for action, output in intermediate_steps if action.tool_call_id in required_tool_call_ids] submit_tool_outputs = {'tool_outputs': tool_outputs, 'run_id': last_action. run_id, 'thread_id': last_action.thread_id} return submit_tool_outputs
def _parse_intermediate_steps(self, intermediate_steps: List[Tuple[
    OpenAIAssistantAction, str]]) ->dict:
    """Build the ``submit_tool_outputs`` payload for the latest pending run.

    Only outputs matching tool-call ids that the run is actually waiting
    for are included.
    """
    last_action, _last_output = intermediate_steps[-1]
    run = self._wait_for_run(last_action.run_id, last_action.thread_id)
    # Ids the assistant run is blocked on.
    wanted_ids = set()
    for tool_call in run.required_action.submit_tool_outputs.tool_calls:
        wanted_ids.add(tool_call.id)
    outputs = []
    for action, output in intermediate_steps:
        if action.tool_call_id in wanted_ids:
            outputs.append({'output': str(output), 'tool_call_id': action.
                tool_call_id})
    return {'tool_outputs': outputs, 'run_id': last_action.run_id,
        'thread_id': last_action.thread_id}
null
from_env
aviary_url = os.getenv('AVIARY_URL') assert aviary_url, 'AVIARY_URL must be set' aviary_token = os.getenv('AVIARY_TOKEN', '') bearer = f'Bearer {aviary_token}' if aviary_token else '' aviary_url += '/' if not aviary_url.endswith('/') else '' return cls(aviary_url, bearer)
@classmethod
def from_env(cls) ->'AviaryBackend':
    """Build a backend from the AVIARY_URL / AVIARY_TOKEN env vars."""
    base_url = os.getenv('AVIARY_URL')
    assert base_url, 'AVIARY_URL must be set'
    token = os.getenv('AVIARY_TOKEN', '')
    bearer = f'Bearer {token}' if token else ''
    # Normalize to a trailing slash so path joins are safe.
    if not base_url.endswith('/'):
        base_url += '/'
    return cls(base_url, bearer)
null
test_konko_system_msg_test
"""Evaluate ChatKonko's handling of system messages.""" chat_instance = ChatKonko(max_tokens=10) sys_msg = SystemMessage(content='Initiate user chat.') user_msg = HumanMessage(content='Hi there') chat_response = chat_instance([sys_msg, user_msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str)
def test_konko_system_msg_test() ->None:
    """Evaluate ChatKonko's handling of system messages."""
    chat = ChatKonko(max_tokens=10)
    messages = [SystemMessage(content='Initiate user chat.'),
        HumanMessage(content='Hi there')]
    response = chat(messages)
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str)
Evaluate ChatKonko's handling of system messages.
perform_request
self.requests.append(kwargs) return super().perform_request(*args, **kwargs)
def perform_request(self, *args, **kwargs):
    """Record the request kwargs, then delegate to the parent transport."""
    self.requests.append(kwargs)
    response = super().perform_request(*args, **kwargs)
    return response
null
on_tool_error
"""Do nothing when tool outputs an error.""" pass
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None:
    """Ignore tool errors; this handler is intentionally a no-op."""
    return None
Do nothing when tool outputs an error.
__init__
auto_embed = kwargs.get('auto_embed', False) feature_embedder = kwargs.get('feature_embedder', None) if feature_embedder: if 'auto_embed' in kwargs: logger.warning( 'auto_embed will take no effect when explicit feature_embedder is provided' ) auto_embed = False else: feature_embedder = PickBestFeatureEmbedder(auto_embed=auto_embed) kwargs['feature_embedder'] = feature_embedder vw_cmd = kwargs.get('vw_cmd', []) if vw_cmd: if '--cb_explore_adf' not in vw_cmd: raise ValueError( 'If vw_cmd is specified, it must include --cb_explore_adf') else: interactions = ['--interactions=::'] if auto_embed: interactions = ['--interactions=@#', '--ignore_linear=@', '--ignore_linear=#'] vw_cmd = interactions + ['--cb_explore_adf', '--coin', '--squarecb', '--quiet'] kwargs['vw_cmd'] = vw_cmd super().__init__(*args, **kwargs)
def __init__(self, *args: Any, **kwargs: Any):
    """Configure the feature embedder and VW command line, then delegate."""
    wants_auto_embed = kwargs.get('auto_embed', False)
    embedder = kwargs.get('feature_embedder', None)
    if embedder:
        # An explicit embedder wins; warn if auto_embed was also requested.
        if 'auto_embed' in kwargs:
            logger.warning(
                'auto_embed will take no effect when explicit feature_embedder is provided'
                )
        wants_auto_embed = False
    else:
        embedder = PickBestFeatureEmbedder(auto_embed=wants_auto_embed)
    kwargs['feature_embedder'] = embedder
    vw_cmd = kwargs.get('vw_cmd', [])
    if vw_cmd:
        # User-supplied commands must still run contextual-bandit explore.
        if '--cb_explore_adf' not in vw_cmd:
            raise ValueError(
                'If vw_cmd is specified, it must include --cb_explore_adf')
    else:
        if wants_auto_embed:
            interactions = ['--interactions=@#', '--ignore_linear=@',
                '--ignore_linear=#']
        else:
            interactions = ['--interactions=::']
        vw_cmd = interactions + ['--cb_explore_adf', '--coin',
            '--squarecb', '--quiet']
    kwargs['vw_cmd'] = vw_cmd
    super().__init__(*args, **kwargs)
null
_default_params
"""Get the default parameters for calling OpenAI API.""" normal_params: Dict[str, Any] = {'temperature': self.temperature, 'top_p': self.top_p, 'frequency_penalty': self.frequency_penalty, 'presence_penalty': self.presence_penalty, 'n': self.n, 'logit_bias': self.logit_bias} if self.max_tokens is not None: normal_params['max_tokens'] = self.max_tokens if self.request_timeout is not None and not is_openai_v1(): normal_params['request_timeout'] = self.request_timeout if self.best_of > 1: normal_params['best_of'] = self.best_of return {**normal_params, **self.model_kwargs}
@property
def _default_params(self) ->Dict[str, Any]:
    """Assemble the default parameters for calling the OpenAI API."""
    params: Dict[str, Any] = {
        'temperature': self.temperature,
        'top_p': self.top_p,
        'frequency_penalty': self.frequency_penalty,
        'presence_penalty': self.presence_penalty,
        'n': self.n,
        'logit_bias': self.logit_bias,
    }
    if self.max_tokens is not None:
        params['max_tokens'] = self.max_tokens
    # request_timeout moved onto the client object in openai>=1.0.
    if self.request_timeout is not None and not is_openai_v1():
        params['request_timeout'] = self.request_timeout
    if self.best_of > 1:
        params['best_of'] = self.best_of
    # Explicit model_kwargs override the defaults.
    return {**params, **self.model_kwargs}
Get the default parameters for calling OpenAI API.
test_deprecated_classmethod
"""Test deprecated classmethod.""" with warnings.catch_warnings(record=True) as warning_list: warnings.simplefilter('always') ClassWithDeprecatedMethods.deprecated_classmethod() assert len(warning_list) == 1 warning = warning_list[0].message assert str(warning ) == 'The function `deprecated_classmethod` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0' doc = ClassWithDeprecatedMethods.deprecated_classmethod.__doc__ assert isinstance(doc, str) assert doc.startswith('[*Deprecated*] original doc')
def test_deprecated_classmethod() ->None:
    """A deprecated classmethod warns once and rewrites its docstring."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        ClassWithDeprecatedMethods.deprecated_classmethod()
        assert len(caught) == 1
        message = str(caught[0].message)
        assert message == 'The function `deprecated_classmethod` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
    docstring = ClassWithDeprecatedMethods.deprecated_classmethod.__doc__
    assert isinstance(docstring, str)
    assert docstring.startswith('[*Deprecated*] original doc')
Test deprecated classmethod.
_default_text_mapping
"""For Approximate k-NN Search, this is the default mapping to create index.""" return {'settings': {'index': {'knn': True, 'knn.algo_param.ef_search': ef_search}}, 'mappings': {'properties': {vector_field: {'type': 'knn_vector', 'dimension': dim, 'method': {'name': 'hnsw', 'space_type': space_type, 'engine': engine, 'parameters': {'ef_construction': ef_construction, 'm': m}}}}}}
def _default_text_mapping(dim: int, engine: str='nmslib', space_type: str=
    'l2', ef_search: int=512, ef_construction: int=512, m: int=16,
    vector_field: str='vector_field') ->Dict:
    """Build the default index mapping for Approximate k-NN Search."""
    hnsw_method = {
        'name': 'hnsw',
        'space_type': space_type,
        'engine': engine,
        'parameters': {'ef_construction': ef_construction, 'm': m},
    }
    vector_mapping = {'type': 'knn_vector', 'dimension': dim,
        'method': hnsw_method}
    return {
        'settings': {'index': {'knn': True,
            'knn.algo_param.ef_search': ef_search}},
        'mappings': {'properties': {vector_field: vector_mapping}},
    }
For Approximate k-NN Search, this is the default mapping to create index.
__init__
self.known_texts: List[str] = [] self.dimensionality = dimensionality
def __init__(self, dimensionality: int=10) ->None:
    """Start with an empty corpus and the given embedding dimensionality."""
    self.dimensionality = dimensionality
    self.known_texts: List[str] = []
null
_import_weaviate
from langchain_community.vectorstores.weaviate import Weaviate return Weaviate
def _import_weaviate() ->Any:
    """Lazily import and return the Weaviate vector store class."""
    from langchain_community.vectorstores.weaviate import Weaviate as _Weaviate
    return _Weaviate
null
invoke_wrapper
return runnable.invoke(kwargs, {'callbacks': callbacks})
def invoke_wrapper(callbacks: Optional[Callbacks]=None, **kwargs: Any) ->Any:
    """Invoke the wrapped runnable with kwargs as input plus the callbacks."""
    run_config = {'callbacks': callbacks}
    return runnable.invoke(kwargs, run_config)
null
clear
"""Clear memory contents.""" super().clear() self.kg.clear()
def clear(self) ->None:
    """Wipe both the buffered messages and the knowledge graph."""
    super().clear()
    self.kg.clear()
Clear memory contents.
test_momento_cache_miss
llm = FakeLLM() stub_llm_output = LLMResult(generations=[[Generation(text='foo')]]) assert llm.generate([random_string()]) == stub_llm_output
def test_momento_cache_miss(momento_cache: MomentoCache) ->None:
    """A prompt not present in the cache falls through to the fake LLM."""
    fake_llm = FakeLLM()
    expected = LLMResult(generations=[[Generation(text='foo')]])
    # A random prompt cannot be cached, so the LLM itself must answer.
    assert fake_llm.generate([random_string()]) == expected
null
metadata_column
return self.config.column_map['metadata']
@property
def metadata_column(self) ->str:
    """Name of the column that stores document metadata."""
    column_map = self.config.column_map
    return column_map['metadata']
null
test_visit_operation
operator, expected = triplet op = Operation(operator=operator, arguments=[Comparison(comparator= Comparator.LT, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz')]) actual = DEFAULT_TRANSLATOR.visit_operation(op) assert expected == actual
@pytest.mark.parametrize('triplet', [(Operator.AND,
    "foo < 2 AND bar = 'baz'"), (Operator.OR, "foo < 2 OR bar = 'baz'")])
def test_visit_operation(triplet: Tuple[Operator, str]) ->None:
    """AND/OR operations render to the expected filter strings."""
    operator, expected = triplet
    comparisons = [
        Comparison(comparator=Comparator.LT, attribute='foo', value=2),
        Comparison(comparator=Comparator.EQ, attribute='bar', value='baz'),
    ]
    operation = Operation(operator=operator, arguments=comparisons)
    assert DEFAULT_TRANSLATOR.visit_operation(operation) == expected
null
__init__
self.index: str = index self.session_id: str = session_id self.ensure_ascii: bool = esnsure_ascii if es_connection is not None: self.client = es_connection.options(headers={'user-agent': self. get_user_agent()}) elif es_url is not None or es_cloud_id is not None: self.client = ElasticsearchChatMessageHistory.connect_to_elasticsearch( es_url=es_url, username=es_user, password=es_password, cloud_id= es_cloud_id, api_key=es_api_key) else: raise ValueError( 'Either provide a pre-existing Elasticsearch connection, or valid credentials for creating a new connection.' ) if self.client.indices.exists(index=index): logger.debug( f'Chat history index {index} already exists, skipping creation.') else: logger.debug(f'Creating index {index} for storing chat history.') self.client.indices.create(index=index, mappings={'properties': { 'session_id': {'type': 'keyword'}, 'created_at': {'type': 'date'}, 'history': {'type': 'text'}}})
def __init__(self, index: str, session_id: str, *, es_connection: Optional[
    'Elasticsearch']=None, es_url: Optional[str]=None, es_cloud_id:
    Optional[str]=None, es_user: Optional[str]=None, es_api_key: Optional[
    str]=None, es_password: Optional[str]=None, esnsure_ascii: Optional[
    bool]=True):
    """Initialize chat-message history backed by an Elasticsearch index.

    Either ``es_connection`` or URL/cloud-id credentials must be given;
    the target index is created if it does not already exist.

    Args:
        index: Name of the Elasticsearch index used to store history.
        session_id: Identifier grouping messages into one conversation.
        es_connection: Pre-built Elasticsearch client to reuse.
        es_url: Elasticsearch endpoint URL (alternative to es_connection).
        es_cloud_id: Elastic Cloud deployment id (alternative to es_url).
        es_user: Username for basic auth.
        es_api_key: API key for auth.
        es_password: Password for basic auth.
        esnsure_ascii: Stored as ``self.ensure_ascii``.
            NOTE(review): parameter name looks like a typo for
            ``ensure_ascii`` — kept as-is since it is part of the public
            keyword interface; confirm before renaming.

    Raises:
        ValueError: If neither a connection nor credentials are provided.
    """
    self.index: str = index
    self.session_id: str = session_id
    self.ensure_ascii: bool = esnsure_ascii
    if es_connection is not None:
        # Reuse the caller's client, tagging requests with our user-agent.
        self.client = es_connection.options(headers={'user-agent': self.
            get_user_agent()})
    elif es_url is not None or es_cloud_id is not None:
        self.client = ElasticsearchChatMessageHistory.connect_to_elasticsearch(
            es_url=es_url, username=es_user, password=es_password, cloud_id
            =es_cloud_id, api_key=es_api_key)
    else:
        raise ValueError(
            'Either provide a pre-existing Elasticsearch connection, or valid credentials for creating a new connection.'
            )
    if self.client.indices.exists(index=index):
        logger.debug(
            f'Chat history index {index} already exists, skipping creation.')
    else:
        logger.debug(f'Creating index {index} for storing chat history.')
        # Minimal schema: session key, timestamp, and the serialized message.
        self.client.indices.create(index=index, mappings={'properties': {
            'session_id': {'type': 'keyword'}, 'created_at': {'type':
            'date'}, 'history': {'type': 'text'}}})
null
test_zero_distance_pairwise
eval_chain = PairwiseStringDistanceEvalChain(distance=distance, normalize_score=normalize_score) string = '三人行则必有我师' result = eval_chain.evaluate_string_pairs(prediction=string, prediction_b= string) assert 'score' in result assert result['score'] == 0
@pytest.mark.requires('rapidfuzz')
@pytest.mark.parametrize('distance', list(StringDistance))
@pytest.mark.parametrize('normalize_score', [True, False])
def test_zero_distance_pairwise(distance: StringDistance, normalize_score: bool
    ) ->None:
    """Identical strings must score zero for every distance metric."""
    chain = PairwiseStringDistanceEvalChain(distance=distance,
        normalize_score=normalize_score)
    text = '三人行则必有我师'
    outcome = chain.evaluate_string_pairs(prediction=text, prediction_b=text)
    assert 'score' in outcome
    assert outcome['score'] == 0
null
completion_with_retry
"""Use tenacity to retry the completion call.""" retry_decorator = create_retry_decorator(llm, run_manager=run_manager) @retry_decorator def _completion_with_retry(prompt: List[Union[str, 'Image']], is_gemini: bool=False, **kwargs: Any) ->Any: if is_gemini: return llm.client.generate_content(prompt, stream=stream, generation_config=kwargs) else: if stream: return llm.client.predict_streaming(prompt[0], **kwargs) return llm.client.predict(prompt[0], **kwargs) return _completion_with_retry(prompt, is_gemini, **kwargs)
def completion_with_retry(llm: VertexAI, prompt: List[Union[str, 'Image']],
    stream: bool=False, is_gemini: bool=False, run_manager: Optional[
    CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
    """Use tenacity to retry the completion call.

    Args:
        llm: The VertexAI model whose client performs the call.
        prompt: Prompt parts; Gemini models receive the whole list,
            legacy models only ``prompt[0]``.
        stream: Whether to request a streaming response.
        is_gemini: Selects the Gemini ``generate_content`` code path.
        run_manager: Callback manager forwarded to the retry decorator.
        **kwargs: Passed through as generation parameters.

    Returns:
        The raw client response (type depends on model and streaming mode).
    """
    retry_decorator = create_retry_decorator(llm, run_manager=run_manager)

    @retry_decorator
    def _completion_with_retry(prompt: List[Union[str, 'Image']], is_gemini:
        bool=False, **kwargs: Any) ->Any:
        if is_gemini:
            # Gemini accepts multimodal prompt lists and a generation config.
            return llm.client.generate_content(prompt, stream=stream,
                generation_config=kwargs)
        else:
            # Legacy (PaLM-style) models take a single text prompt.
            if stream:
                return llm.client.predict_streaming(prompt[0], **kwargs)
            return llm.client.predict(prompt[0], **kwargs)
    return _completion_with_retry(prompt, is_gemini, **kwargs)
Use tenacity to retry the completion call.
_bes_vector_db_from_texts
return BESVectorStore.from_texts(fake_texts, FakeEmbeddings(), metadatas= metadatas, bes_url='http://10.0.X.X')
def _bes_vector_db_from_texts(metadatas: Optional[List[dict]]=None, drop:
    bool=True) ->BESVectorStore:
    """Build a BES vector store fixture from the shared fake texts.

    ``drop`` is accepted for interface compatibility but unused here.
    """
    store = BESVectorStore.from_texts(fake_texts, FakeEmbeddings(),
        metadatas=metadatas, bes_url='http://10.0.X.X')
    return store
null
test_json_schema_evaluator_valid_prediction
prediction = '{"name": "John", "age": 30}' reference = {'type': 'object', 'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}}} result = json_schema_evaluator._evaluate_strings(prediction=prediction, reference=reference) assert result['score'] is True
@pytest.mark.requires('jsonschema')
def test_json_schema_evaluator_valid_prediction(json_schema_evaluator:
    JsonSchemaEvaluator) ->None:
    """A prediction matching the reference schema scores True."""
    schema = {'type': 'object', 'properties': {'name': {'type': 'string'},
        'age': {'type': 'integer'}}}
    outcome = json_schema_evaluator._evaluate_strings(prediction=
        '{"name": "John", "age": 30}', reference=schema)
    assert outcome['score'] is True
null
test_scala_code_splitter
splitter = RecursiveCharacterTextSplitter.from_language(Language.SCALA, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ object HelloWorld { def main(args: Array[String]): Unit = { println("Hello, World!") } } """ chunks = splitter.split_text(code) assert chunks == ['object', 'HelloWorld {', 'def', 'main(args:', 'Array[String]):', 'Unit = {', 'println("Hello,', 'World!")', '}\n}']
def test_scala_code_splitter() ->None:
    """Scala code splits on language-aware boundaries at this chunk size."""
    splitter = RecursiveCharacterTextSplitter.from_language(Language.SCALA,
        chunk_size=CHUNK_SIZE, chunk_overlap=0)
    code = """
object HelloWorld {
  def main(args: Array[String]): Unit = {
    println("Hello, World!")
  }
}
            """
    expected = ['object', 'HelloWorld {', 'def', 'main(args:',
        'Array[String]):', 'Unit = {', 'println("Hello,', 'World!")', '}\n}']
    assert splitter.split_text(code) == expected
null
test_importable_all
for path in glob.glob('../community/langchain_community/*'): relative_path = Path(path).parts[-1] if relative_path.endswith('.typed'): continue module_name = relative_path.split('.')[0] module = importlib.import_module('langchain_community.' + module_name) all_ = getattr(module, '__all__', []) for cls_ in all_: getattr(module, cls_)
def test_importable_all() ->None:
    """Every name in each community module's __all__ must resolve."""
    for path in glob.glob('../community/langchain_community/*'):
        leaf = Path(path).parts[-1]
        # py.typed markers are not modules.
        if leaf.endswith('.typed'):
            continue
        module = importlib.import_module('langchain_community.' + leaf.
            split('.')[0])
        for exported in getattr(module, '__all__', []):
            getattr(module, exported)
null
on_chain_start
"""Start a trace for a chain run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.now(timezone.utc) if metadata: kwargs.update({'metadata': metadata}) chain_run = Run(id=run_id, parent_run_id=parent_run_id, serialized= serialized, inputs=inputs if isinstance(inputs, dict) else {'input': inputs}, extra=kwargs, events=[{'name': 'start', 'time': start_time}], start_time=start_time, execution_order=execution_order, child_execution_order=execution_order, child_runs=[], run_type=run_type or 'chain', name=name, tags=tags or []) self._start_trace(chain_run) self._on_chain_start(chain_run) return chain_run
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
    *, run_id: UUID, tags: Optional[List[str]]=None, parent_run_id:
    Optional[UUID]=None, metadata: Optional[Dict[str, Any]]=None, run_type:
    Optional[str]=None, name: Optional[str]=None, **kwargs: Any) ->Run:
    """Start a trace for a chain run.

    Builds a ``Run`` record with a 'start' event, registers it in the
    trace tree, and fires the chain-start hook.

    Args:
        serialized: Serialized representation of the chain.
        inputs: Chain inputs; non-dict inputs are wrapped as {'input': ...}.
        run_id: Unique id for this run.
        tags: Optional tags attached to the run.
        parent_run_id: Id of the enclosing run, if any.
        metadata: Optional metadata merged into the run's extra kwargs.
        run_type: Run type label; defaults to 'chain'.
        name: Optional human-readable run name.

    Returns:
        The newly created ``Run``.
    """
    parent_run_id_ = str(parent_run_id) if parent_run_id else None
    # Execution order is derived from the parent's position in the tree.
    execution_order = self._get_execution_order(parent_run_id_)
    start_time = datetime.now(timezone.utc)
    if metadata:
        kwargs.update({'metadata': metadata})
    chain_run = Run(id=run_id, parent_run_id=parent_run_id, serialized=
        serialized, inputs=inputs if isinstance(inputs, dict) else {'input':
        inputs}, extra=kwargs, events=[{'name': 'start', 'time': start_time
        }], start_time=start_time, execution_order=execution_order,
        child_execution_order=execution_order, child_runs=[], run_type=
        run_type or 'chain', name=name, tags=tags or [])
    self._start_trace(chain_run)
    self._on_chain_start(chain_run)
    return chain_run
Start a trace for a chain run.
_llm_type
"""Return type of llm.""" return 'fake-list'
@property
def _llm_type(self) ->str:
    """Identifier string for this fake list LLM."""
    return 'fake-list'
Return type of llm.
test_redis_semantic_cache
set_llm_cache(RedisSemanticCache(embedding=FakeEmbeddings(), redis_url= REDIS_TEST_URL, score_threshold=0.1)) llm = FakeLLM() params = llm.dict() params['stop'] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update('foo', llm_string, [Generation(text='fizz')]) output = llm.generate(['bar']) expected_output = LLMResult(generations=[[Generation(text='fizz')]], llm_output={}) assert output == expected_output get_llm_cache().clear(llm_string=llm_string) output = llm.generate(['bar']) assert output != expected_output get_llm_cache().clear(llm_string=llm_string)
def test_redis_semantic_cache() ->None:
    """Semantically-close prompts hit the cache until it is cleared."""
    set_llm_cache(RedisSemanticCache(embedding=FakeEmbeddings(), redis_url=
        REDIS_TEST_URL, score_threshold=0.1))
    fake_llm = FakeLLM()
    params = fake_llm.dict()
    params['stop'] = None
    llm_string = str(sorted([(key, value) for key, value in params.items()]))
    # Seed the cache under 'foo'; 'bar' is close enough to hit it.
    get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
    cached = LLMResult(generations=[[Generation(text='fizz')]], llm_output={})
    assert fake_llm.generate(['bar']) == cached
    # After clearing, the cached answer must no longer be returned.
    get_llm_cache().clear(llm_string=llm_string)
    assert fake_llm.generate(['bar']) != cached
    get_llm_cache().clear(llm_string=llm_string)
null
__init__
self._h = self.HEIGHT self._w = len(name) + 2
def __init__(self, name: str) ->None:
    """Size the box: fixed height, width = label length plus padding."""
    self._w = len(name) + 2
    self._h = self.HEIGHT
null
correct_query
""" Args: query: cypher query """ node_variable_dict = self.detect_node_variables(query) paths = self.extract_paths(query) for path in paths: original_path = path start_idx = 0 while start_idx < len(path): match_res = re.match(self.node_relation_node_pattern, path[start_idx:]) if match_res is None: break start_idx += match_res.start() match_dict = match_res.groupdict() left_node_labels = self.detect_labels(match_dict['left_node'], node_variable_dict) right_node_labels = self.detect_labels(match_dict['right_node'], node_variable_dict) end_idx = start_idx + 4 + len(match_dict['left_node']) + len(match_dict ['relation']) + len(match_dict['right_node']) original_partial_path = original_path[start_idx:end_idx + 1] relation_direction, relation_types = self.detect_relation_types( match_dict['relation']) if relation_types != [] and ''.join(relation_types).find('*') != -1: start_idx += len(match_dict['left_node']) + len(match_dict[ 'relation']) + 2 continue if relation_direction == 'OUTGOING': is_legal = self.verify_schema(left_node_labels, relation_types, right_node_labels) if not is_legal: is_legal = self.verify_schema(right_node_labels, relation_types, left_node_labels) if is_legal: corrected_relation = '<' + match_dict['relation'][:-1] corrected_partial_path = original_partial_path.replace( match_dict['relation'], corrected_relation) query = query.replace(original_partial_path, corrected_partial_path) else: return '' elif relation_direction == 'INCOMING': is_legal = self.verify_schema(right_node_labels, relation_types, left_node_labels) if not is_legal: is_legal = self.verify_schema(left_node_labels, relation_types, right_node_labels) if is_legal: corrected_relation = match_dict['relation'][1:] + '>' corrected_partial_path = original_partial_path.replace( match_dict['relation'], corrected_relation) query = query.replace(original_partial_path, corrected_partial_path) else: return '' else: is_legal = self.verify_schema(left_node_labels, relation_types, right_node_labels) 
is_legal |= self.verify_schema(right_node_labels, relation_types, left_node_labels) if not is_legal: return '' start_idx += len(match_dict['left_node']) + len(match_dict['relation'] ) + 2 return query
def correct_query(self, query: str) ->str:
    """Validate and auto-correct relationship directions in a Cypher query.

    Each (node)-[rel]-(node) segment of every path is checked against the
    graph schema; a relation pointing the wrong way is flipped in place.
    If a segment is invalid in both directions the whole query is rejected.

    Args:
        query: cypher query

    Returns:
        The (possibly corrected) query, or '' when it cannot be made legal.
    """
    node_variable_dict = self.detect_node_variables(query)
    paths = self.extract_paths(query)
    for path in paths:
        original_path = path
        start_idx = 0
        # Scan the path left-to-right, one node-relation-node triple at a time.
        while start_idx < len(path):
            match_res = re.match(self.node_relation_node_pattern, path[
                start_idx:])
            if match_res is None:
                break
            start_idx += match_res.start()
            match_dict = match_res.groupdict()
            left_node_labels = self.detect_labels(match_dict['left_node'],
                node_variable_dict)
            right_node_labels = self.detect_labels(match_dict['right_node'],
                node_variable_dict)
            # +4 accounts for the bracket/paren characters around the triple.
            end_idx = start_idx + 4 + len(match_dict['left_node']) + len(
                match_dict['relation']) + len(match_dict['right_node'])
            original_partial_path = original_path[start_idx:end_idx + 1]
            relation_direction, relation_types = self.detect_relation_types(
                match_dict['relation'])
            # Variable-length relations ('*') are not schema-checked; skip.
            if relation_types != [] and ''.join(relation_types).find('*'
                ) != -1:
                start_idx += len(match_dict['left_node']) + len(match_dict[
                    'relation']) + 2
                continue
            if relation_direction == 'OUTGOING':
                is_legal = self.verify_schema(left_node_labels,
                    relation_types, right_node_labels)
                if not is_legal:
                    # Try the reversed direction; flip the arrow if it fits.
                    is_legal = self.verify_schema(right_node_labels,
                        relation_types, left_node_labels)
                    if is_legal:
                        corrected_relation = '<' + match_dict['relation'][:-1]
                        corrected_partial_path = original_partial_path.replace(
                            match_dict['relation'], corrected_relation)
                        query = query.replace(original_partial_path,
                            corrected_partial_path)
                    else:
                        return ''
            elif relation_direction == 'INCOMING':
                is_legal = self.verify_schema(right_node_labels,
                    relation_types, left_node_labels)
                if not is_legal:
                    # Try the reversed direction; flip the arrow if it fits.
                    is_legal = self.verify_schema(left_node_labels,
                        relation_types, right_node_labels)
                    if is_legal:
                        corrected_relation = match_dict['relation'][1:] + '>'
                        corrected_partial_path = original_partial_path.replace(
                            match_dict['relation'], corrected_relation)
                        query = query.replace(original_partial_path,
                            corrected_partial_path)
                    else:
                        return ''
            else:
                # Undirected: legal if the schema allows either direction.
                is_legal = self.verify_schema(left_node_labels,
                    relation_types, right_node_labels)
                is_legal |= self.verify_schema(right_node_labels,
                    relation_types, left_node_labels)
                if not is_legal:
                    return ''
            # Advance past this triple (left node + relation + 2 brackets).
            start_idx += len(match_dict['left_node']) + len(match_dict[
                'relation']) + 2
    return query
Args: query: cypher query
_construct_refine_inputs
return {self.document_variable_name: format_document(doc, self. document_prompt), self.initial_response_name: res}
def _construct_refine_inputs(self, doc: Document, res: str) ->Dict[str, Any]:
    """Map a formatted document and the prior response to refine inputs."""
    formatted = format_document(doc, self.document_prompt)
    return {
        self.document_variable_name: formatted,
        self.initial_response_name: res,
    }
null
pytest_addoption
"""Add custom command line options to pytest.""" parser.addoption('--only-extended', action='store_true', help= 'Only run extended tests. Does not allow skipping any extended tests.') parser.addoption('--only-core', action='store_true', help= 'Only run core tests. Never runs any extended tests.')
def pytest_addoption(parser: Parser) ->None:
    """Add custom command line options to pytest."""
    parser.addoption(
        '--only-extended',
        action='store_true',
        help='Only run extended tests. Does not allow skipping any extended tests.',
    )
    parser.addoption(
        '--only-core',
        action='store_true',
        help='Only run core tests. Never runs any extended tests.',
    )
Add custom command line options to pytest.
create_xorbits_agent
"""Construct a xorbits agent from an LLM and dataframe.""" try: from xorbits import numpy as np from xorbits import pandas as pd except ImportError: raise ImportError( 'Xorbits package not installed, please install with `pip install xorbits`' ) if not isinstance(data, (pd.DataFrame, np.ndarray)): raise ValueError( f'Expected Xorbits DataFrame or ndarray object, got {type(data)}') if input_variables is None: input_variables = ['data', 'input', 'agent_scratchpad'] tools = [PythonAstREPLTool(locals={'data': data})] prompt, partial_input = None, None if isinstance(data, pd.DataFrame): prompt = ZeroShotAgent.create_prompt(tools, prefix=PD_PREFIX if prefix == '' else prefix, suffix=PD_SUFFIX if suffix == '' else suffix, input_variables=input_variables) partial_input = str(data.head()) else: prompt = ZeroShotAgent.create_prompt(tools, prefix=NP_PREFIX if prefix == '' else prefix, suffix=NP_SUFFIX if suffix == '' else suffix, input_variables=input_variables) partial_input = str(data[:len(data) // 2]) partial_prompt = prompt.partial(data=partial_input) llm_chain = LLMChain(llm=llm, prompt=partial_prompt, callback_manager= callback_manager) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, callback_manager=callback_manager, **kwargs) return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, return_intermediate_steps=return_intermediate_steps, max_iterations= max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **agent_executor_kwargs or {})
def create_xorbits_agent(llm: BaseLLM, data: Any, callback_manager: Optional[
    BaseCallbackManager]=None, prefix: str='', suffix: str='',
    input_variables: Optional[List[str]]=None, verbose: bool=False,
    return_intermediate_steps: bool=False, max_iterations: Optional[int]=15,
    max_execution_time: Optional[float]=None, early_stopping_method: str=
    'force', agent_executor_kwargs: Optional[Dict[str, Any]]=None, **kwargs:
    Any) ->AgentExecutor:
    """Construct a xorbits agent from an LLM and dataframe.

    Args:
        llm: Language model driving the agent.
        data: A xorbits ``pandas.DataFrame`` or ``numpy.ndarray``; it is
            exposed to the agent's Python REPL tool as the variable ``data``.
        callback_manager: Optional callback manager shared by chain, agent
            and executor.
        prefix: Prompt prefix; '' selects the DataFrame/ndarray default.
        suffix: Prompt suffix; '' selects the DataFrame/ndarray default.
        input_variables: Prompt input variable names; defaults to
            ``['data', 'input', 'agent_scratchpad']``.
        verbose: Forwarded to the ``AgentExecutor``.
        return_intermediate_steps: Forwarded to the ``AgentExecutor``.
        max_iterations: Forwarded to the ``AgentExecutor``.
        max_execution_time: Forwarded to the ``AgentExecutor``.
        early_stopping_method: Forwarded to the ``AgentExecutor``.
        agent_executor_kwargs: Extra keyword arguments for the executor.
        **kwargs: Extra keyword arguments for the ``ZeroShotAgent``.

    Returns:
        An ``AgentExecutor`` wired with a single ``PythonAstREPLTool``.

    Raises:
        ImportError: If the ``xorbits`` package is not installed.
        ValueError: If ``data`` is not a xorbits DataFrame or ndarray.
    """
    try:
        from xorbits import numpy as np
        from xorbits import pandas as pd
    except ImportError:
        raise ImportError(
            'Xorbits package not installed, please install with `pip install xorbits`'
            )
    if not isinstance(data, (pd.DataFrame, np.ndarray)):
        raise ValueError(
            f'Expected Xorbits DataFrame or ndarray object, got {type(data)}')
    if input_variables is None:
        input_variables = ['data', 'input', 'agent_scratchpad']
    # Single REPL tool with `data` pre-bound in its locals.
    tools = [PythonAstREPLTool(locals={'data': data})]
    prompt, partial_input = None, None
    if isinstance(data, pd.DataFrame):
        # DataFrame flavour: show the head of the frame in the prompt.
        prompt = ZeroShotAgent.create_prompt(tools, prefix=PD_PREFIX if 
            prefix == '' else prefix, suffix=PD_SUFFIX if suffix == '' else
            suffix, input_variables=input_variables)
        partial_input = str(data.head())
    else:
        # ndarray flavour: show the first half of the array in the prompt.
        prompt = ZeroShotAgent.create_prompt(tools, prefix=NP_PREFIX if 
            prefix == '' else prefix, suffix=NP_SUFFIX if suffix == '' else
            suffix, input_variables=input_variables)
        partial_input = str(data[:len(data) // 2])
    partial_prompt = prompt.partial(data=partial_input)
    llm_chain = LLMChain(llm=llm, prompt=partial_prompt, callback_manager=
        callback_manager)
    tool_names = [tool.name for tool in tools]
    agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names,
        callback_manager=callback_manager, **kwargs)
    return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
        callback_manager=callback_manager, verbose=verbose,
        return_intermediate_steps=return_intermediate_steps, max_iterations
        =max_iterations, max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method, **
        agent_executor_kwargs or {})
Construct a xorbits agent from an LLM and dataframe.
from_llm_and_ai_plugin_url
"""Instantiate the toolkit from an OpenAPI Spec URL""" plugin = AIPlugin.from_url(ai_plugin_url) return cls.from_llm_and_ai_plugin(llm=llm, ai_plugin=plugin, requests= requests, verbose=verbose, **kwargs)
@classmethod
def from_llm_and_ai_plugin_url(cls, llm: BaseLanguageModel, ai_plugin_url:
    str, requests: Optional[Requests]=None, verbose: bool=False, **kwargs: Any
    ) ->NLAToolkit:
    """Build an NLAToolkit by fetching the AI plugin manifest from a URL.

    Delegates to ``from_llm_and_ai_plugin`` once the manifest is loaded.
    """
    fetched_plugin = AIPlugin.from_url(ai_plugin_url)
    return cls.from_llm_and_ai_plugin(
        llm=llm,
        ai_plugin=fetched_plugin,
        requests=requests,
        verbose=verbose,
        **kwargs,
    )
Instantiate the toolkit from an OpenAPI Spec URL
fake_func
return 'foo'
def fake_func(inp: str) ->str:
    """Stub tool function: ignore the input and always return ``'foo'``."""
    del inp  # intentionally unused
    return 'foo'
null
create_pull_request
""" Makes a pull request from the bot's branch to the base branch Parameters: pr_query(str): a string which contains the PR title and the PR body. The title is the first line in the string, and the body are the rest of the string. For example, "Updated README made changes to add info" Returns: str: A success or failure message """ if self.gitlab_base_branch == self.gitlab_branch: return """Cannot make a pull request because commits are already in the master branch""" else: try: title = pr_query.split('\n')[0] body = pr_query[len(title) + 2:] pr = self.gitlab_repo_instance.mergerequests.create({ 'source_branch': self.gitlab_branch, 'target_branch': self. gitlab_base_branch, 'title': title, 'description': body, 'labels': ['created-by-agent']}) return f'Successfully created PR number {pr.iid}' except Exception as e: return 'Unable to make pull request due to error:\n' + str(e)
def create_pull_request(self, pr_query: str) ->str:
    """
    Makes a pull request from the bot's branch to the base branch

    Parameters:
        pr_query(str): a string which contains the PR title and the PR body.
            The title is the first line in the string, and the body are the
            rest of the string. For example, "Updated README\nmade changes
            to add info"

    Returns:
        str: A success or failure message
    """
    if self.gitlab_base_branch == self.gitlab_branch:
        return """Cannot make a pull request because commits are already in the master branch"""
    else:
        try:
            # Split on the FIRST newline only: the first line is the title,
            # everything after it (verbatim) is the body.  The previous
            # implementation used pr_query[len(title) + 2:], which skipped
            # one character past the separating "\n" and silently dropped
            # the first character of the body.
            title, _, body = pr_query.partition('\n')
            pr = self.gitlab_repo_instance.mergerequests.create({
                'source_branch': self.gitlab_branch,
                'target_branch': self.gitlab_base_branch,
                'title': title,
                'description': body,
                'labels': ['created-by-agent'],
            })
            return f'Successfully created PR number {pr.iid}'
        except Exception as e:
            return 'Unable to make pull request due to error:\n' + str(e)
Makes a pull request from the bot's branch to the base branch Parameters: pr_query(str): a string which contains the PR title and the PR body. The title is the first line in the string, and the body are the rest of the string. For example, "Updated README made changes to add info" Returns: str: A success or failure message
_completion_with_retry
generation_config = kwargs.get('generation_config', {}) if is_gemini: return llm.client.generate_content(contents=prompt, stream=stream, generation_config=generation_config) return llm.client.generate_text(prompt=prompt, **kwargs)
@retry_decorator
def _completion_with_retry(prompt: LanguageModelInput, is_gemini: bool,
    stream: bool, **kwargs: Any) ->Any:
    """Dispatch one completion call to the Gemini or PaLM client (retried)."""
    if is_gemini:
        # Gemini path: only the generation_config from kwargs is forwarded.
        return llm.client.generate_content(
            contents=prompt,
            stream=stream,
            generation_config=kwargs.get('generation_config', {}),
        )
    # PaLM path: all kwargs are forwarded as-is.
    return llm.client.generate_text(prompt=prompt, **kwargs)
null
test_konko_multiple_outputs_test
"""Test multiple completions with ChatKonko.""" chat_instance = ChatKonko(max_tokens=10, n=5) msg = HumanMessage(content='Hi') gen_response = chat_instance._generate([msg]) assert isinstance(gen_response, ChatResult) assert len(gen_response.generations) == 5 for gen in gen_response.generations: assert isinstance(gen.message, BaseMessage) assert isinstance(gen.message.content, str)
def test_konko_multiple_outputs_test() ->None:
    """Test multiple completions with ChatKonko."""
    chat = ChatKonko(max_tokens=10, n=5)
    result = chat._generate([HumanMessage(content='Hi')])
    assert isinstance(result, ChatResult)
    generations = result.generations
    assert len(generations) == 5
    for generation in generations:
        message = generation.message
        assert isinstance(message, BaseMessage)
        assert isinstance(message.content, str)
Test multiple completions with ChatKonko.
scrape_all
"""Fetch all urls, then return soups for all results.""" from bs4 import BeautifulSoup results = asyncio.run(self.fetch_all(urls)) final_results = [] for i, result in enumerate(results): url = urls[i] if parser is None: if url.endswith('.xml'): parser = 'xml' else: parser = self.default_parser self._check_parser(parser) final_results.append(BeautifulSoup(result, parser, **self.bs_kwargs)) return final_results
def scrape_all(self, urls: List[str], parser: Union[str, None]=None) ->List[Any
    ]:
    """Fetch all urls concurrently, then return soups for all results.

    Args:
        urls: Web page URLs to fetch.
        parser: BeautifulSoup parser name to use for every page.  When None,
            each page picks its own parser: 'xml' for URLs ending in '.xml',
            otherwise ``self.default_parser``.

    Returns:
        One BeautifulSoup object per input URL, in order.
    """
    from bs4 import BeautifulSoup
    results = asyncio.run(self.fetch_all(urls))
    final_results = []
    for i, result in enumerate(results):
        url = urls[i]
        # Choose the parser per URL without overwriting the `parser`
        # argument: the original code assigned to `parser` inside the loop,
        # so the choice made for the first URL leaked into every later one
        # (e.g. a leading '.xml' URL forced the 'xml' parser everywhere).
        if parser is None:
            page_parser = 'xml' if url.endswith('.xml') else self.default_parser
        else:
            page_parser = parser
        self._check_parser(page_parser)
        final_results.append(BeautifulSoup(result, page_parser, **self.
            bs_kwargs))
    return final_results
Fetch all urls, then return soups for all results.
check_database_utf8
""" Helper function: Test the database is UTF-8 encoded """ cursor = self._connection.cursor() query = ( 'SELECT pg_encoding_to_char(encoding) FROM pg_database WHERE datname = current_database();' ) cursor.execute(query) encoding = cursor.fetchone()[0] cursor.close() if encoding.lower() == 'utf8' or encoding.lower() == 'utf-8': return True else: raise Exception( f"Database '{self.connection_string.split('/')[-1]}' encoding is not UTF-8" )
def check_database_utf8(self) ->bool:
    """
    Helper function: Test the database is UTF-8 encoded
    """
    sql = (
        'SELECT pg_encoding_to_char(encoding) FROM pg_database WHERE datname = current_database();'
        )
    cursor = self._connection.cursor()
    cursor.execute(sql)
    encoding = cursor.fetchone()[0]
    cursor.close()
    if encoding.lower() in ('utf8', 'utf-8'):
        return True
    raise Exception(
        f"Database '{self.connection_string.split('/')[-1]}' encoding is not UTF-8"
        )
Helper function: Test the database is UTF-8 encoded
_moderation_class
return moderation_class(client=self.client, callback=self. moderation_callback, unique_id=self.unique_id, chain_id=self.chain_id ).validate
def _moderation_class(self, moderation_class: Any) ->Callable:
    """Instantiate ``moderation_class`` with this chain's context and return
    its bound ``validate`` method."""
    instance = moderation_class(
        client=self.client,
        callback=self.moderation_callback,
        unique_id=self.unique_id,
        chain_id=self.chain_id,
    )
    return instance.validate
null
test_get_action_and_input_newline_after_keyword
"""Test getting an action and action input from the text when there is a new line before the action (after the keywords "Action:" and "Action Input:") """ llm_output = """ I can use the `ls` command to list the contents of the directory and `grep` to search for the specific file. Action: Terminal Action Input: ls -l ~/.bashrc.d/ """ action, action_input = get_action_and_input(llm_output) assert action == 'Terminal' assert action_input == 'ls -l ~/.bashrc.d/\n'
def test_get_action_and_input_newline_after_keyword() ->None:
    """Test getting an action and action input from the text
    when there is a new line before the action
    (after the keywords "Action:" and "Action Input:")
    """
    text = """
I can use the `ls` command to list the contents of the directory and `grep` to search for the specific file.

Action:
Terminal

Action Input:
ls -l ~/.bashrc.d/
"""
    parsed_action, parsed_input = get_action_and_input(text)
    assert parsed_action == 'Terminal'
    assert parsed_input == 'ls -l ~/.bashrc.d/\n'
Test getting an action and action input from the text when there is a new line before the action (after the keywords "Action:" and "Action Input:")
bind_functions
"""Bind functions (and other objects) to this chat model. Args: functions: A list of function definitions to bind to this chat model. Can be a dictionary, pydantic model, or callable. Pydantic models and callables will be automatically converted to their schema dictionary representation. function_call: Which function to require the model to call. Must be the name of the single provided function or "auto" to automatically determine which function to call (if any). kwargs: Any additional parameters to pass to the :class:`~langchain.runnable.Runnable` constructor. """ from langchain.chains.openai_functions.base import convert_to_openai_function formatted_functions = [convert_to_openai_function(fn) for fn in functions] if function_call is not None: if len(formatted_functions) != 1: raise ValueError( 'When specifying `function_call`, you must provide exactly one function.' ) if formatted_functions[0]['name'] != function_call: raise ValueError( f"Function call {function_call} was specified, but the only provided function was {formatted_functions[0]['name']}." ) function_call_ = {'name': function_call} kwargs = {**kwargs, 'function_call': function_call_} return super().bind(functions=formatted_functions, **kwargs)
def bind_functions(self, functions: Sequence[Union[Dict[str, Any], Type[
    BaseModel], Callable]], function_call: Optional[str]=None, **kwargs: Any
    ) ->Runnable[LanguageModelInput, BaseMessage]:
    """Bind functions (and other objects) to this chat model.

    Args:
        functions: Function definitions to bind — dicts, pydantic models, or
            callables; the latter two are converted to their schema dicts.
        function_call: Name of the single provided function the model must
            call, or None to let the model decide.
        kwargs: Extra parameters passed through to ``Runnable.bind``.
    """
    from langchain.chains.openai_functions.base import convert_to_openai_function
    formatted_functions = [convert_to_openai_function(fn) for fn in functions]
    if function_call is not None:
        # Forcing a call only makes sense with exactly one matching function.
        if len(formatted_functions) != 1:
            raise ValueError(
                'When specifying `function_call`, you must provide exactly one function.'
                )
        only_name = formatted_functions[0]['name']
        if only_name != function_call:
            raise ValueError(
                f'Function call {function_call} was specified, but the only provided function was {only_name}.'
                )
        kwargs = {**kwargs, 'function_call': {'name': function_call}}
    return super().bind(functions=formatted_functions, **kwargs)
Bind functions (and other objects) to this chat model. Args: functions: A list of function definitions to bind to this chat model. Can be a dictionary, pydantic model, or callable. Pydantic models and callables will be automatically converted to their schema dictionary representation. function_call: Which function to require the model to call. Must be the name of the single provided function or "auto" to automatically determine which function to call (if any). kwargs: Any additional parameters to pass to the :class:`~langchain.runnable.Runnable` constructor.
max_marginal_relevance_search_by_vector
raise NotImplementedError
def max_marginal_relevance_search_by_vector(self, embedding: List[float],
    k: int=4, fetch_k: int=20, lambda_mult: float=0.5, **kwargs: Any) ->List[
    Document]:
    """MMR search by raw embedding vector is not supported by this store."""
    raise NotImplementedError
null
_get_relevant_documents
"""Get documents relevant to a query. Args: query: String to find relevant documents for run_manager: The callbacks handler to use Returns: List of relevant documents """ if self.search_type == SearchType.mmr: sub_docs = self.vectorstore.max_marginal_relevance_search(query, **self .search_kwargs) else: sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs) ids = [] for d in sub_docs: if self.id_key in d.metadata and d.metadata[self.id_key] not in ids: ids.append(d.metadata[self.id_key]) docs = self.docstore.mget(ids) return [d for d in docs if d is not None]
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Get documents relevant to a query.

    Args:
        query: String to find relevant documents for
        run_manager: The callbacks handler to use

    Returns:
        List of relevant documents
    """
    # First retrieve the small sub-documents from the vector store.
    if self.search_type == SearchType.mmr:
        sub_docs = self.vectorstore.max_marginal_relevance_search(query,
            **self.search_kwargs)
    else:
        sub_docs = self.vectorstore.similarity_search(query, **self.
            search_kwargs)
    # Collect parent-document ids in first-seen order, without duplicates.
    parent_ids = []
    for sub_doc in sub_docs:
        if self.id_key in sub_doc.metadata:
            parent_id = sub_doc.metadata[self.id_key]
            if parent_id not in parent_ids:
                parent_ids.append(parent_id)
    fetched = self.docstore.mget(parent_ids)
    return [doc for doc in fetched if doc is not None]
Get documents relevant to a query. Args: query: String to find relevant documents for run_manager: The callbacks handler to use Returns: List of relevant documents
test_langchain_together_embedding_documents
"""Test cohere embeddings.""" documents = ['foo bar'] embedding = TogetherEmbeddings(model= 'togethercomputer/m2-bert-80M-8k-retrieval') output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) > 0
def test_langchain_together_embedding_documents() ->None:
    """Test Together document embeddings."""
    embedder = TogetherEmbeddings(model=
        'togethercomputer/m2-bert-80M-8k-retrieval')
    vectors = embedder.embed_documents(['foo bar'])
    assert len(vectors) == 1
    assert len(vectors[0]) > 0
Test Together embeddings.
get_tools
"""Get the tools in the toolkit.""" return [AINAppOps(), AINOwnerOps(), AINRuleOps(), AINTransfer(), AINValueOps()]
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit."""
    tool_classes = (AINAppOps, AINOwnerOps, AINRuleOps, AINTransfer,
        AINValueOps)
    return [tool_cls() for tool_cls in tool_classes]
Get the tools in the toolkit.
__init__
"""Create a new Memgraph graph wrapper instance.""" super().__init__(url, username, password, database=database)
def __init__(self, url: str, username: str, password: str, *, database: str
    ='memgraph') ->None:
    """Create a new Memgraph graph wrapper instance.

    Args:
        url: Bolt URL of the Memgraph server.
        username: Username forwarded to the base graph class.
        password: Password forwarded to the base graph class.
        database: Database name to use; defaults to 'memgraph'.
    """
    super().__init__(url, username, password, database=database)
Create a new Memgraph graph wrapper instance.
random_string
return str(uuid.uuid4())
def random_string() ->str:
    """Return a fresh random identifier (a UUID4 rendered as a string)."""
    return f'{uuid.uuid4()}'
null
load_schema
""" Load the graph schema information. """ def _rdf_s_schema(classes: List[rdflib.query.ResultRow], relationships: List[rdflib.query.ResultRow]) ->str: return f"""In the following, each IRI is followed by the local name and optionally its description in parentheses. The RDF graph supports the following node types: {', '.join([self._res_to_str(r, 'cls') for r in classes])} The RDF graph supports the following relationships: {', '.join([self._res_to_str(r, 'rel') for r in relationships])} """ if self.standard == 'rdf': clss = self.query(cls_query_rdf) rels = self.query(rel_query_rdf) self.schema = _rdf_s_schema(clss, rels) elif self.standard == 'rdfs': clss = self.query(cls_query_rdfs) rels = self.query(rel_query_rdfs) self.schema = _rdf_s_schema(clss, rels) elif self.standard == 'owl': clss = self.query(cls_query_owl) ops = self.query(op_query_owl) dps = self.query(dp_query_owl) self.schema = f"""In the following, each IRI is followed by the local name and optionally its description in parentheses. The OWL graph supports the following node types: {', '.join([self._res_to_str(r, 'cls') for r in clss])} The OWL graph supports the following object properties, i.e., relationships between objects: {', '.join([self._res_to_str(r, 'op') for r in ops])} The OWL graph supports the following data properties, i.e., relationships between objects and literals: {', '.join([self._res_to_str(r, 'dp') for r in dps])} """ else: raise ValueError(f"Mode '{self.standard}' is currently not supported.")
def load_schema(self) ->None:
    """
    Load the graph schema information.

    Queries the RDF store for classes and properties according to
    ``self.standard`` ('rdf', 'rdfs' or 'owl') and stores a natural-language
    description of them in ``self.schema``.

    Raises:
        ValueError: If ``self.standard`` is not one of the supported modes.
    """

    def _rdf_s_schema(classes: List[rdflib.query.ResultRow], relationships:
        List[rdflib.query.ResultRow]) ->str:
        # Shared renderer for the rdf/rdfs cases: one block of classes, one
        # block of relationships, each row formatted by _res_to_str.
        return f"""In the following, each IRI is followed by the local name and optionally its description in parentheses. 
The RDF graph supports the following node types:
{', '.join([self._res_to_str(r, 'cls') for r in classes])}
The RDF graph supports the following relationships:
{', '.join([self._res_to_str(r, 'rel') for r in relationships])}
"""
    if self.standard == 'rdf':
        clss = self.query(cls_query_rdf)
        rels = self.query(rel_query_rdf)
        self.schema = _rdf_s_schema(clss, rels)
    elif self.standard == 'rdfs':
        clss = self.query(cls_query_rdfs)
        rels = self.query(rel_query_rdfs)
        self.schema = _rdf_s_schema(clss, rels)
    elif self.standard == 'owl':
        # OWL distinguishes object properties (object-to-object links) from
        # data properties (object-to-literal links).
        clss = self.query(cls_query_owl)
        ops = self.query(op_query_owl)
        dps = self.query(dp_query_owl)
        self.schema = f"""In the following, each IRI is followed by the local name and optionally its description in parentheses. 
The OWL graph supports the following node types:
{', '.join([self._res_to_str(r, 'cls') for r in clss])}
The OWL graph supports the following object properties, i.e., relationships between objects:
{', '.join([self._res_to_str(r, 'op') for r in ops])}
The OWL graph supports the following data properties, i.e., relationships between objects and literals:
{', '.join([self._res_to_str(r, 'dp') for r in dps])}
"""
    else:
        raise ValueError(f"Mode '{self.standard}' is currently not supported.")
Load the graph schema information.
_get_default_output_parser
return ConvoOutputParser(ai_prefix=ai_prefix)
@classmethod
def _get_default_output_parser(cls, ai_prefix: str='AI', **kwargs: Any
    ) ->AgentOutputParser:
    """Return the conversational output parser configured with ``ai_prefix``;
    any extra kwargs are ignored."""
    parser = ConvoOutputParser(ai_prefix=ai_prefix)
    return parser
null
__init__
"""Initialize ArceeWrapper. Arguments: arcee_api_key: API key for Arcee API. arcee_api_url: URL for Arcee API. arcee_api_version: Version of Arcee API. model_kwargs: Keyword arguments for Arcee API. model_name: Name of an Arcee model. """ if isinstance(arcee_api_key, str): arcee_api_key_ = SecretStr(arcee_api_key) else: arcee_api_key_ = arcee_api_key self.arcee_api_key: SecretStr = arcee_api_key_ self.model_kwargs = model_kwargs self.arcee_api_url = arcee_api_url self.arcee_api_version = arcee_api_version try: route = ArceeRoute.model_training_status.value.format(id_or_name=model_name ) response = self._make_request('get', route) self.model_id = response.get('model_id') self.model_training_status = response.get('status') except Exception as e: raise ValueError( f"Error while validating model training status for '{model_name}': {e}" ) from e
def __init__(self, arcee_api_key: Union[str, SecretStr], arcee_api_url:
    str, arcee_api_version: str, model_kwargs: Optional[Dict[str, Any]],
    model_name: str):
    """Initialize ArceeWrapper.

    Arguments:
        arcee_api_key: API key for Arcee API (wrapped in SecretStr if given
            as a plain string).
        arcee_api_url: URL for Arcee API.
        arcee_api_version: Version of Arcee API.
        model_kwargs: Keyword arguments for Arcee API.
        model_name: Name of an Arcee model.
    """
    if isinstance(arcee_api_key, str):
        key = SecretStr(arcee_api_key)
    else:
        key = arcee_api_key
    self.arcee_api_key: SecretStr = key
    self.model_kwargs = model_kwargs
    self.arcee_api_url = arcee_api_url
    self.arcee_api_version = arcee_api_version
    # Resolve the model id and its training status up front so that a bad
    # model name fails fast with a descriptive error.
    try:
        status_route = ArceeRoute.model_training_status.value.format(
            id_or_name=model_name)
        status_response = self._make_request('get', status_route)
        self.model_id = status_response.get('model_id')
        self.model_training_status = status_response.get('status')
    except Exception as e:
        raise ValueError(
            f"Error while validating model training status for '{model_name}': {e}"
            ) from e
Initialize ArceeWrapper. Arguments: arcee_api_key: API key for Arcee API. arcee_api_url: URL for Arcee API. arcee_api_version: Version of Arcee API. model_kwargs: Keyword arguments for Arcee API. model_name: Name of an Arcee model.
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. bulk_size: Bulk API request count; Default: 500 Returns: List of ids from adding the texts into the vectorstore. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". """ embeddings = self.embedding_function.embed_documents(list(texts)) return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, bulk_size=bulk_size, **kwargs)
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, bulk_size: int=500, **kwargs: Any
    ) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of ids to associate with the texts.
        bulk_size: Bulk API request count; Default: 500

    Returns:
        List of ids from adding the texts into the vectorstore.

    Optional Args:
        vector_field: Document field embeddings are stored in. Defaults
        to "vector_field".

        text_field: Document field the text of the document is stored in.
        Defaults to "text".
    """
    embeddings = self.embedding_function.embed_documents(list(texts))
    return self.__add(
        texts,
        embeddings,
        metadatas=metadatas,
        ids=ids,
        bulk_size=bulk_size,
        **kwargs,
    )
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. bulk_size: Bulk API request count; Default: 500 Returns: List of ids from adding the texts into the vectorstore. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text".
_model_default_parameters
return {'tokens': self.tokens, 'top_k': self.top_k, 'top_p': self.top_p, 'temperature': self.temperature, 'repetition_penalty': self. repetition_penalty, 'length_penalty': self.length_penalty, 'beam_width': self.beam_width}
@property
def _model_default_parameters(self) ->Dict[str, Any]:
    """Default sampling/decoding parameters sent with each request."""
    return dict(
        tokens=self.tokens,
        top_k=self.top_k,
        top_p=self.top_p,
        temperature=self.temperature,
        repetition_penalty=self.repetition_penalty,
        length_penalty=self.length_penalty,
        beam_width=self.beam_width,
    )
null
_get_insights_on_topic
"""Generate 'insights' on a topic of reflection, based on pertinent memories.""" prompt = PromptTemplate.from_template( """Statements relevant to: '{topic}' --- {related_statements} --- What 5 high-level novel insights can you infer from the above statements that are relevant for answering the following question? Do not include any insights that are not relevant to the question. Do not repeat any insights that have already been made. Question: {topic} (example format: insight (because of 1, 5, 3)) """ ) related_memories = self.fetch_memories(topic, now=now) related_statements = '\n'.join([self._format_memory_detail(memory, prefix= f'{i + 1}. ') for i, memory in enumerate(related_memories)]) result = self.chain(prompt).run(topic=topic, related_statements= related_statements) return self._parse_list(result)
def _get_insights_on_topic(self, topic: str, now: Optional[datetime]=None
    ) ->List[str]:
    """Generate 'insights' on a topic of reflection, based on pertinent memories.

    Args:
        topic: The reflection question to draw insights about.
        now: Optional reference time used when fetching relevant memories.

    Returns:
        The model's insights parsed into a list of strings.
    """
    prompt = PromptTemplate.from_template(
        """Statements relevant to: '{topic}'
---
{related_statements}
---
What 5 high-level novel insights can you infer from the above statements that are relevant for answering the following question?
Do not include any insights that are not relevant to the question.
Do not repeat any insights that have already been made.

Question: {topic}

(example format: insight (because of 1, 5, 3))
"""
        )
    related_memories = self.fetch_memories(topic, now=now)
    # Number the memories so the model can cite them in its justifications.
    related_statements = '\n'.join([self._format_memory_detail(memory,
        prefix=f'{i + 1}. ') for i, memory in enumerate(related_memories)])
    result = self.chain(prompt).run(topic=topic, related_statements=
        related_statements)
    # The LLM returns a numbered list; parse it into individual items.
    return self._parse_list(result)
Generate 'insights' on a topic of reflection, based on pertinent memories.
_call
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(inputs[self.input_key]) llm_output = self.llm_chain.predict(question=inputs[self.input_key], stop=[ '```output'], callbacks=_run_manager.get_child()) return self._process_llm_result(llm_output, _run_manager)
def _call(self, inputs: Dict[str, str], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, str]:
    """Run the chain on ``inputs`` and post-process the LLM output."""
    manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    question = inputs[self.input_key]
    manager.on_text(question)
    llm_output = self.llm_chain.predict(question=question, stop=[
        '```output'], callbacks=manager.get_child())
    return self._process_llm_result(llm_output, manager)
null
test_sql_database_run
"""Test that commands can be run successfully and returned in correct format.""" engine = create_engine('sqlite:///:memory:') metadata_obj.create_all(engine) stmt = insert(user).values(user_id=13, user_name='Harrison', user_company='Foo' ) with engine.connect() as conn: conn.execute(stmt) db = SQLDatabase(engine) db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db) output = db_chain.run('What company does Harrison work at?') expected_output = ' Harrison works at Foo.' assert output == expected_output
def test_sql_database_run() ->None:
    """Test that commands can be run successfully and returned in correct format."""
    engine = create_engine('sqlite:///:memory:')
    metadata_obj.create_all(engine)
    insert_stmt = insert(user).values(user_id=13, user_name='Harrison',
        user_company='Foo')
    with engine.connect() as conn:
        conn.execute(insert_stmt)
    chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), SQLDatabase(
        engine))
    result = chain.run('What company does Harrison work at?')
    assert result == ' Harrison works at Foo.'
Test that commands can be run successfully and returned in correct format.
available_models
"""Map the available models that can be invoked.""" return self.client.available_models
@property
def available_models(self) ->dict:
    """Mapping of the models that can be invoked via the underlying client."""
    client_models = self.client.available_models
    return client_models
Map the available models that can be invoked.
get_format_instructions
schema = self.pydantic_object.schema() reduced_schema = schema if 'title' in reduced_schema: del reduced_schema['title'] if 'type' in reduced_schema: del reduced_schema['type'] schema_str = json.dumps(reduced_schema) return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
def get_format_instructions(self) ->str:
    """Return format instructions embedding the JSON schema of the target
    pydantic model.

    The ``title`` and ``type`` keys are stripped from a *copy* of the schema
    before rendering.  The previous implementation deleted them from the
    dict returned by ``pydantic_object.schema()`` itself; pydantic caches
    that dict, so the model's schema was permanently mutated on first call.
    """
    schema = self.pydantic_object.schema()
    # Work on a shallow copy so the (cached) schema dict is left untouched.
    reduced_schema = dict(schema)
    reduced_schema.pop('title', None)
    reduced_schema.pop('type', None)
    schema_str = json.dumps(reduced_schema)
    return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
null
on_text
""" Run when agent is ending. """ self.metrics['step'] += 1 self.metrics['text_ctr'] += 1 text_ctr = self.metrics['text_ctr'] resp: Dict[str, Any] = {} resp.update({'action': 'on_text', 'text': text}) resp.update(self.metrics) self.jsonf(resp, self.temp_dir, f'on_text_{text_ctr}')
def on_text(self, text: str, **kwargs: Any) ->None:
    """
    Run when agent is ending.
    """
    self.metrics['step'] += 1
    self.metrics['text_ctr'] += 1
    counter = self.metrics['text_ctr']
    payload: Dict[str, Any] = {'action': 'on_text', 'text': text}
    payload.update(self.metrics)
    self.jsonf(payload, self.temp_dir, f'on_text_{counter}')
Run when agent is ending.
_prune_old_thought_containers
"""If we have too many thoughts onscreen, move older thoughts to the 'history container.' """ while self._num_thought_containers > self._max_thought_containers and len(self ._completed_thoughts) > 0: if self._history_container is None and self._max_thought_containers > 1: self._history_container = MutableExpander(self._history_parent, label=self._thought_labeler.get_history_label(), expanded=False) oldest_thought = self._completed_thoughts.pop(0) if self._history_container is not None: self._history_container.markdown(oldest_thought.container.label) self._history_container.append_copy(oldest_thought.container) oldest_thought.clear()
def _prune_old_thought_containers(self) ->None: """If we have too many thoughts onscreen, move older thoughts to the 'history container.' """ while self._num_thought_containers > self._max_thought_containers and len( self._completed_thoughts) > 0: if (self._history_container is None and self. _max_thought_containers > 1): self._history_container = MutableExpander(self._history_parent, label=self._thought_labeler.get_history_label(), expanded=False ) oldest_thought = self._completed_thoughts.pop(0) if self._history_container is not None: self._history_container.markdown(oldest_thought.container.label) self._history_container.append_copy(oldest_thought.container) oldest_thought.clear()
If we have too many thoughts onscreen, move older thoughts to the 'history container.'
__init__
"""Initialize the SlackDirectoryLoader. Args: zip_path (str): The path to the Slack directory dump zip file. workspace_url (Optional[str]): The Slack workspace URL. Including the URL will turn sources into links. Defaults to None. """ self.zip_path = Path(zip_path) self.workspace_url = workspace_url self.channel_id_map = self._get_channel_id_map(self.zip_path)
def __init__(self, zip_path: str, workspace_url: Optional[str]=None): """Initialize the SlackDirectoryLoader. Args: zip_path (str): The path to the Slack directory dump zip file. workspace_url (Optional[str]): The Slack workspace URL. Including the URL will turn sources into links. Defaults to None. """ self.zip_path = Path(zip_path) self.workspace_url = workspace_url self.channel_id_map = self._get_channel_id_map(self.zip_path)
Initialize the SlackDirectoryLoader. Args: zip_path (str): The path to the Slack directory dump zip file. workspace_url (Optional[str]): The Slack workspace URL. Including the URL will turn sources into links. Defaults to None.
on_retriever_start
self.on_retriever_start_common()
def on_retriever_start(self, *args: Any, **kwargs: Any) ->Any: self.on_retriever_start_common()
null
add_graph_documents
""" Take GraphDocument as input as uses it to construct a graph. """ for document in graph_documents: for node in document.nodes: self.query( f"MERGE (n:{node.type} {{id:'{node.id}'}}) SET n += $properties RETURN distinct 'done' AS result" , {'properties': node.properties}) for rel in document.relationships: self.query( f"MATCH (a:{rel.source.type} {{id:'{rel.source.id}'}}), (b:{rel.target.type} {{id:'{rel.target.id}'}}) MERGE (a)-[r:{rel.type.replace(' ', '_').upper()}]->(b) SET r += $properties RETURN distinct 'done' AS result" , {'properties': rel.properties})
def add_graph_documents(self, graph_documents: List[GraphDocument], include_source: bool=False) ->None: """ Take GraphDocument as input as uses it to construct a graph. """ for document in graph_documents: for node in document.nodes: self.query( f"MERGE (n:{node.type} {{id:'{node.id}'}}) SET n += $properties RETURN distinct 'done' AS result" , {'properties': node.properties}) for rel in document.relationships: self.query( f"MATCH (a:{rel.source.type} {{id:'{rel.source.id}'}}), (b:{rel.target.type} {{id:'{rel.target.id}'}}) MERGE (a)-[r:{rel.type.replace(' ', '_').upper()}]->(b) SET r += $properties RETURN distinct 'done' AS result" , {'properties': rel.properties})
Take GraphDocument as input as uses it to construct a graph.
test_cosine_similarity_empty
empty_list: List[List[float]] = [] assert len(cosine_similarity(empty_list, empty_list)) == 0 assert len(cosine_similarity(empty_list, np.random.random((3, 3)))) == 0
def test_cosine_similarity_empty() ->None: empty_list: List[List[float]] = [] assert len(cosine_similarity(empty_list, empty_list)) == 0 assert len(cosine_similarity(empty_list, np.random.random((3, 3)))) == 0
null
create_task
""" Creates a new task. """ query_dict, error = load_query(query, fault_tolerant=True) if query_dict is None: return {'Error': error} list_id = self.list_id url = f'{DEFAULT_URL}/list/{list_id}/task' params = {'custom_task_ids': 'true', 'team_id': self.team_id} payload = extract_dict_elements_from_component_fields(query_dict, Task) headers = self.get_headers() response = requests.post(url, json=payload, headers=headers, params=params) data: Dict = response.json() return parse_dict_through_component(data, Task, fault_tolerant=True)
def create_task(self, query: str) ->Dict: """ Creates a new task. """ query_dict, error = load_query(query, fault_tolerant=True) if query_dict is None: return {'Error': error} list_id = self.list_id url = f'{DEFAULT_URL}/list/{list_id}/task' params = {'custom_task_ids': 'true', 'team_id': self.team_id} payload = extract_dict_elements_from_component_fields(query_dict, Task) headers = self.get_headers() response = requests.post(url, json=payload, headers=headers, params=params) data: Dict = response.json() return parse_dict_through_component(data, Task, fault_tolerant=True)
Creates a new task.
get_default_document_variable_name
"""Get default document variable name, if not provided.""" if 'document_variable_name' not in values: llm_chain_variables = values['llm_chain'].prompt.input_variables if len(llm_chain_variables) == 1: values['document_variable_name'] = llm_chain_variables[0] else: raise ValueError( 'document_variable_name must be provided if there are multiple llm_chain input_variables' ) else: llm_chain_variables = values['llm_chain'].prompt.input_variables if values['document_variable_name'] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was not found in llm_chain input_variables: {llm_chain_variables}" ) return values
@root_validator(pre=True) def get_default_document_variable_name(cls, values: Dict) ->Dict: """Get default document variable name, if not provided.""" if 'document_variable_name' not in values: llm_chain_variables = values['llm_chain'].prompt.input_variables if len(llm_chain_variables) == 1: values['document_variable_name'] = llm_chain_variables[0] else: raise ValueError( 'document_variable_name must be provided if there are multiple llm_chain input_variables' ) else: llm_chain_variables = values['llm_chain'].prompt.input_variables if values['document_variable_name'] not in llm_chain_variables: raise ValueError( f"document_variable_name {values['document_variable_name']} was not found in llm_chain input_variables: {llm_chain_variables}" ) return values
Get default document variable name, if not provided.