method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
generate_filter_query
if search_filter is None: return '' filter_clause = ' AND '.join([create_filter(md_key, md_value) for md_key, md_value in search_filter.items()]) return filter_clause
def generate_filter_query() ->str: if search_filter is None: return '' filter_clause = ' AND '.join([create_filter(md_key, md_value) for md_key, md_value in search_filter.items()]) return filter_clause
null
__init__
super().__init__(*args, **kwargs) self.allowed_comparators = allowed_comparators self.allowed_operators = allowed_operators self.allowed_attributes = allowed_attributes
def __init__(self, *args: Any, allowed_comparators: Optional[Sequence[ Comparator]]=None, allowed_operators: Optional[Sequence[Operator]]=None, allowed_attributes: Optional[Sequence[str]]=None, **kwargs: Any): super().__init__(*args, **kwargs) self.allowed_comparators = allowed_comparators self.allowed_operators = allowed_operators self.allowed_attributes = allowed_attributes
null
_is_interactive_env
"""Determine if running within IPython or Jupyter.""" import sys return hasattr(sys, 'ps2')
def _is_interactive_env() ->bool: """Determine if running within IPython or Jupyter.""" import sys return hasattr(sys, 'ps2')
Determine if running within IPython or Jupyter.
raise_deprecation
warnings.warn( '`ChatVectorDBChain` is deprecated - please use `from langchain.chains import ConversationalRetrievalChain`' ) return values
@root_validator() def raise_deprecation(cls, values: Dict) ->Dict: warnings.warn( '`ChatVectorDBChain` is deprecated - please use `from langchain.chains import ConversationalRetrievalChain`' ) return values
null
load
if self.page_type == 'Device': return self.load_device() elif self.page_type == 'Guide' or self.page_type == 'Teardown': return self.load_guide() elif self.page_type == 'Answers': return self.load_questions_and_answers() else: raise ValueError('Unknown page type: ' + self.page_type)
def load(self) ->List[Document]: if self.page_type == 'Device': return self.load_device() elif self.page_type == 'Guide' or self.page_type == 'Teardown': return self.load_guide() elif self.page_type == 'Answers': return self.load_questions_and_answers() else: raise ValueError('Unknown page type: ' + self.page_type)
null
test_load_twice
""" Test that loading documents twice from the same repository does not raise an error. """ clone_url = init_repo(tmpdir, 'remote_repo') repo_path = tmpdir.mkdir('local_repo').strpath loader = GitLoader(repo_path=repo_path, clone_url=clone_url) documents = loader.load() assert len(documents) == 1 documents = loader.load() assert len(documents) == 1
@pytest.mark.requires('git') def test_load_twice(tmpdir: py.path.local) ->None: """ Test that loading documents twice from the same repository does not raise an error. """ clone_url = init_repo(tmpdir, 'remote_repo') repo_path = tmpdir.mkdir('local_repo').strpath loader = GitLoader(repo_path=repo_path, clone_url=clone_url) documents = loader.load() assert len(documents) == 1 documents = loader.load() assert len(documents) == 1
Test that loading documents twice from the same repository does not raise an error.
convert_messages_to_prompt_anthropic
"""Format a list of messages into a full prompt for the Anthropic model Args: messages (List[BaseMessage]): List of BaseMessage to combine. human_prompt (str, optional): Human prompt tag. Defaults to " Human:". ai_prompt (str, optional): AI prompt tag. Defaults to " Assistant:". Returns: str: Combined string with necessary human_prompt and ai_prompt tags. """ messages = messages.copy() if not isinstance(messages[-1], AIMessage): messages.append(AIMessage(content='')) text = ''.join(_convert_one_message_to_text(message, human_prompt, ai_prompt) for message in messages) return text.rstrip()
def convert_messages_to_prompt_anthropic(messages: List[BaseMessage], *, human_prompt: str='\n\nHuman:', ai_prompt: str='\n\nAssistant:') ->str: """Format a list of messages into a full prompt for the Anthropic model Args: messages (List[BaseMessage]): List of BaseMessage to combine. human_prompt (str, optional): Human prompt tag. Defaults to " Human:". ai_prompt (str, optional): AI prompt tag. Defaults to " Assistant:". Returns: str: Combined string with necessary human_prompt and ai_prompt tags. """ messages = messages.copy() if not isinstance(messages[-1], AIMessage): messages.append(AIMessage(content='')) text = ''.join(_convert_one_message_to_text(message, human_prompt, ai_prompt) for message in messages) return text.rstrip()
Format a list of messages into a full prompt for the Anthropic model Args: messages (List[BaseMessage]): List of BaseMessage to combine. human_prompt (str, optional): Human prompt tag. Defaults to " Human:". ai_prompt (str, optional): AI prompt tag. Defaults to " Assistant:". Returns: str: Combined string with necessary human_prompt and ai_prompt tags.
embeddings
return self._embedding_function
@property def embeddings(self) ->Embeddings: return self._embedding_function
null
llm_with_tools
return RunnableLambda(lambda x: x['input']) | ChatOpenAI(temperature=0).bind( functions=input['functions'])
def llm_with_tools(input: Dict) ->Runnable: return RunnableLambda(lambda x: x['input']) | ChatOpenAI(temperature=0 ).bind(functions=input['functions'])
null
_get_relevant_documents
ret: List[Dict[str, Any]] = self.sql_db_chain(query, callbacks=run_manager. get_child(), **kwargs)['result'] return [Document(page_content=r[self.page_content_key], metadata=r) for r in ret]
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]: ret: List[Dict[str, Any]] = self.sql_db_chain(query, callbacks= run_manager.get_child(), **kwargs)['result'] return [Document(page_content=r[self.page_content_key], metadata=r) for r in ret]
null
test_init_fail_columns_not_in_schema
index = mock_index(index_details) with pytest.raises(ValueError) as ex: DatabricksVectorSearch(index, embedding=DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN, columns=['some_random_column']) assert "column 'some_random_column' is not in the index's schema." in str(ex .value)
@pytest.mark.requires('databricks', 'databricks.vector_search') @pytest.mark.parametrize('index_details', [DIRECT_ACCESS_INDEX]) def test_init_fail_columns_not_in_schema(index_details: dict) ->None: index = mock_index(index_details) with pytest.raises(ValueError) as ex: DatabricksVectorSearch(index, embedding=DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN, columns=['some_random_column']) assert "column 'some_random_column' is not in the index's schema." in str( ex.value)
null
test_slack_chat_loader
chat_path = pathlib.Path(__file__).parents[2] / 'examples' / 'slack_export.zip' loader = slack.SlackChatLoader(str(chat_path)) chat_sessions = list(utils.map_ai_messages(loader.lazy_load(), sender= 'U0500003428')) assert chat_sessions, 'Chat sessions should not be empty' assert chat_sessions[1]['messages'], 'Chat messages should not be empty' assert 'Example message' in chat_sessions[1]['messages'][0 ].content, 'Chat content mismatch'
def test_slack_chat_loader() ->None: chat_path = pathlib.Path(__file__).parents[2 ] / 'examples' / 'slack_export.zip' loader = slack.SlackChatLoader(str(chat_path)) chat_sessions = list(utils.map_ai_messages(loader.lazy_load(), sender= 'U0500003428')) assert chat_sessions, 'Chat sessions should not be empty' assert chat_sessions[1]['messages'], 'Chat messages should not be empty' assert 'Example message' in chat_sessions[1]['messages'][0 ].content, 'Chat content mismatch'
null
test_sequential_internal_chain_use_memory
"""Test sequential usage with memory for one of the internal chains.""" memory = ConversationBufferMemory(memory_key='bla') memory.save_context({'input': 'yo'}, {'output': 'ya'}) chain_1 = FakeChain(input_variables=['foo', 'bla'], output_variables=['bar' ], memory=memory) chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz']) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo']) output = chain({'foo': '123'}) print('HEYYY OUTPUT', output) expected_output = {'foo': '123', 'baz': """123 Human: yo AI: yafoofoo"""} assert output == expected_output
def test_sequential_internal_chain_use_memory() ->None: """Test sequential usage with memory for one of the internal chains.""" memory = ConversationBufferMemory(memory_key='bla') memory.save_context({'input': 'yo'}, {'output': 'ya'}) chain_1 = FakeChain(input_variables=['foo', 'bla'], output_variables=[ 'bar'], memory=memory) chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz']) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo']) output = chain({'foo': '123'}) print('HEYYY OUTPUT', output) expected_output = {'foo': '123', 'baz': '123 Human: yo\nAI: yafoofoo'} assert output == expected_output
Test sequential usage with memory for one of the internal chains.
test_konko_available_model_test
"""Check how ChatKonko manages model_name.""" chat_instance = ChatKonko(max_tokens=10, n=2) res = chat_instance.get_available_models() assert isinstance(res, set)
def test_konko_available_model_test() ->None: """Check how ChatKonko manages model_name.""" chat_instance = ChatKonko(max_tokens=10, n=2) res = chat_instance.get_available_models() assert isinstance(res, set)
Check how ChatKonko manages model_name.
get_batch_prompts
"""Get the sub prompts for llm call.""" sub_prompts = [prompts[i:i + self.batch_size] for i in range(0, len(prompts ), self.batch_size)] return sub_prompts
def get_batch_prompts(self, prompts: List[str]) ->List[List[str]]: """Get the sub prompts for llm call.""" sub_prompts = [prompts[i:i + self.batch_size] for i in range(0, len( prompts), self.batch_size)] return sub_prompts
Get the sub prompts for llm call.
get_agent_trajectory
"""Get the agent trajectory as a formatted string. Args: steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory. Returns: str: The formatted agent trajectory. """ if isinstance(steps, str): return steps return '\n\n'.join([ f"""Step {i}: Tool used: {action.tool} Tool input: {action.tool_input} Tool output: {output}""" for i, (action, output) in enumerate(steps, 1)])
@staticmethod def get_agent_trajectory(steps: Union[str, Sequence[Tuple[AgentAction, str]]] ) ->str: """Get the agent trajectory as a formatted string. Args: steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory. Returns: str: The formatted agent trajectory. """ if isinstance(steps, str): return steps return '\n\n'.join([ f"""Step {i}: Tool used: {action.tool} Tool input: {action.tool_input} Tool output: {output}""" for i, (action, output) in enumerate(steps, 1)])
Get the agent trajectory as a formatted string. Args: steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory. Returns: str: The formatted agent trajectory.
_import_octoai_endpoint
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint return OctoAIEndpoint
def _import_octoai_endpoint() ->Any: from langchain_community.llms.octoai_endpoint import OctoAIEndpoint return OctoAIEndpoint
null
_list_handling
for list_item in subsection_list: if isinstance(list_item, dict): self._parse_json_multilevel(list_item, formatted_list, level) elif isinstance(list_item, list): self._list_handling(list_item, formatted_list, level + 1) else: formatted_list.append(f"{' ' * level}{list_item}")
def _list_handling(self, subsection_list: list, formatted_list: list, level: int) ->None: for list_item in subsection_list: if isinstance(list_item, dict): self._parse_json_multilevel(list_item, formatted_list, level) elif isinstance(list_item, list): self._list_handling(list_item, formatted_list, level + 1) else: formatted_list.append(f"{' ' * level}{list_item}")
null
metric
""" Get the distance metric function. Returns: Callable: The distance metric function. """ return _RapidFuzzChainMixin._get_metric(self.distance, normalize_score=self .normalize_score)
@property def metric(self) ->Callable: """ Get the distance metric function. Returns: Callable: The distance metric function. """ return _RapidFuzzChainMixin._get_metric(self.distance, normalize_score= self.normalize_score)
Get the distance metric function. Returns: Callable: The distance metric function.
test_tongyi_generate_stream
"""Test valid call to tongyi.""" llm = Tongyi(streaming=True) output = llm.generate(['who are you']) print(output) assert isinstance(output, LLMResult) assert isinstance(output.generations, list)
def test_tongyi_generate_stream() ->None: """Test valid call to tongyi.""" llm = Tongyi(streaming=True) output = llm.generate(['who are you']) print(output) assert isinstance(output, LLMResult) assert isinstance(output.generations, list)
Test valid call to tongyi.
test_metadata_and_source
"""Test metadata and source""" blob = Blob(path='some_file', data='b') assert blob.source == 'some_file' assert blob.metadata == {} blob = Blob(data=b'', metadata={'source': 'hello'}) assert blob.source == 'hello' assert blob.metadata == {'source': 'hello'} blob = Blob.from_data('data', metadata={'source': 'somewhere'}) assert blob.source == 'somewhere' with get_temp_file(b'hello') as path: blob = Blob.from_path(path, metadata={'source': 'somewhere'}) assert blob.source == 'somewhere'
def test_metadata_and_source() ->None: """Test metadata and source""" blob = Blob(path='some_file', data='b') assert blob.source == 'some_file' assert blob.metadata == {} blob = Blob(data=b'', metadata={'source': 'hello'}) assert blob.source == 'hello' assert blob.metadata == {'source': 'hello'} blob = Blob.from_data('data', metadata={'source': 'somewhere'}) assert blob.source == 'somewhere' with get_temp_file(b'hello') as path: blob = Blob.from_path(path, metadata={'source': 'somewhere'}) assert blob.source == 'somewhere'
Test metadata and source
test_check_instances
"""Test anonymizing multiple items in a sentence""" from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer text = ( 'This is John Smith. John Smith works in a bakery.John Smith is a good guy' ) anonymizer = PresidioReversibleAnonymizer(['PERSON'], faker_seed=42) anonymized_text = anonymizer.anonymize(text) persons = list(anonymizer.deanonymizer_mapping['PERSON'].keys()) assert len(persons) == 1 anonymized_name = persons[0] assert anonymized_text.count(anonymized_name) == 3 anonymized_text = anonymizer.anonymize(text) assert anonymized_text.count(anonymized_name) == 3 assert anonymizer.deanonymizer_mapping['PERSON'][anonymized_name ] == 'John Smith' text = 'This is Jane Smith' anonymized_text = anonymizer.anonymize(text) persons = list(anonymizer.deanonymizer_mapping['PERSON'].keys()) assert len(persons) == 2
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker') def test_check_instances() ->None: """Test anonymizing multiple items in a sentence""" from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer text = ( 'This is John Smith. John Smith works in a bakery.John Smith is a good guy' ) anonymizer = PresidioReversibleAnonymizer(['PERSON'], faker_seed=42) anonymized_text = anonymizer.anonymize(text) persons = list(anonymizer.deanonymizer_mapping['PERSON'].keys()) assert len(persons) == 1 anonymized_name = persons[0] assert anonymized_text.count(anonymized_name) == 3 anonymized_text = anonymizer.anonymize(text) assert anonymized_text.count(anonymized_name) == 3 assert anonymizer.deanonymizer_mapping['PERSON'][anonymized_name ] == 'John Smith' text = 'This is Jane Smith' anonymized_text = anonymizer.anonymize(text) persons = list(anonymizer.deanonymizer_mapping['PERSON'].keys()) assert len(persons) == 2
Test anonymizing multiple items in a sentence
_combine_llm_outputs
overall_token_usage: dict = {} system_fingerprint = None for output in llm_outputs: if output is None: continue token_usage = output['token_usage'] if token_usage is not None: for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v if system_fingerprint is None: system_fingerprint = output.get('system_fingerprint') combined = {'token_usage': overall_token_usage, 'model_name': self.model_name} if system_fingerprint: combined['system_fingerprint'] = system_fingerprint return combined
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) ->dict: overall_token_usage: dict = {} system_fingerprint = None for output in llm_outputs: if output is None: continue token_usage = output['token_usage'] if token_usage is not None: for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v if system_fingerprint is None: system_fingerprint = output.get('system_fingerprint') combined = {'token_usage': overall_token_usage, 'model_name': self. model_name} if system_fingerprint: combined['system_fingerprint'] = system_fingerprint return combined
null
test_criteria_eval_chain_missing_reference
chain = LabeledCriteriaEvalChain.from_llm(llm=FakeLLM(queries={'text': """The meaning of life Y"""}, sequential_responses=True), criteria={ 'my criterion': 'my criterion description'}) with pytest.raises(ValueError): chain.evaluate_strings(prediction='my prediction', input='my input')
def test_criteria_eval_chain_missing_reference() ->None: chain = LabeledCriteriaEvalChain.from_llm(llm=FakeLLM(queries={'text': 'The meaning of life\nY'}, sequential_responses=True), criteria={ 'my criterion': 'my criterion description'}) with pytest.raises(ValueError): chain.evaluate_strings(prediction='my prediction', input='my input')
null
test_faiss_mmr
texts = ['foo', 'foo', 'fou', 'foy'] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec , k=10, lambda_mult=0.1) assert len(output) == len(texts) assert output[0][0] == Document(page_content='foo') assert output[0][1] == 0.0 assert output[1][0] != Document(page_content='foo')
@pytest.mark.requires('faiss') def test_faiss_mmr() ->None: texts = ['foo', 'foo', 'fou', 'foy'] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) query_vec = FakeEmbeddings().embed_query(text='foo') output = docsearch.max_marginal_relevance_search_with_score_by_vector( query_vec, k=10, lambda_mult=0.1) assert len(output) == len(texts) assert output[0][0] == Document(page_content='foo') assert output[0][1] == 0.0 assert output[1][0] != Document(page_content='foo')
null
results
"""Run query through Tavily Search and return metadata. Args: query: The query to search for. max_results: The maximum number of results to return. search_depth: The depth of the search. Can be "basic" or "advanced". include_domains: A list of domains to include in the search. exclude_domains: A list of domains to exclude from the search. include_answer: Whether to include the answer in the results. include_raw_content: Whether to include the raw content in the results. include_images: Whether to include images in the results. Returns: query: The query that was searched for. follow_up_questions: A list of follow up questions. response_time: The response time of the query. answer: The answer to the query. images: A list of images. results: A list of dictionaries containing the results: title: The title of the result. url: The url of the result. content: The content of the result. score: The score of the result. raw_content: The raw content of the result. """ raw_search_results = self.raw_results(query, max_results=max_results, search_depth=search_depth, include_domains=include_domains, exclude_domains=exclude_domains, include_answer=include_answer, include_raw_content=include_raw_content, include_images=include_images) return self.clean_results(raw_search_results['results'])
def results(self, query: str, max_results: Optional[int]=5, search_depth: Optional[str]='advanced', include_domains: Optional[List[str]]=[], exclude_domains: Optional[List[str]]=[], include_answer: Optional[bool] =False, include_raw_content: Optional[bool]=False, include_images: Optional[bool]=False) ->List[Dict]: """Run query through Tavily Search and return metadata. Args: query: The query to search for. max_results: The maximum number of results to return. search_depth: The depth of the search. Can be "basic" or "advanced". include_domains: A list of domains to include in the search. exclude_domains: A list of domains to exclude from the search. include_answer: Whether to include the answer in the results. include_raw_content: Whether to include the raw content in the results. include_images: Whether to include images in the results. Returns: query: The query that was searched for. follow_up_questions: A list of follow up questions. response_time: The response time of the query. answer: The answer to the query. images: A list of images. results: A list of dictionaries containing the results: title: The title of the result. url: The url of the result. content: The content of the result. score: The score of the result. raw_content: The raw content of the result. """ raw_search_results = self.raw_results(query, max_results=max_results, search_depth=search_depth, include_domains=include_domains, exclude_domains=exclude_domains, include_answer=include_answer, include_raw_content=include_raw_content, include_images=include_images) return self.clean_results(raw_search_results['results'])
Run query through Tavily Search and return metadata. Args: query: The query to search for. max_results: The maximum number of results to return. search_depth: The depth of the search. Can be "basic" or "advanced". include_domains: A list of domains to include in the search. exclude_domains: A list of domains to exclude from the search. include_answer: Whether to include the answer in the results. include_raw_content: Whether to include the raw content in the results. include_images: Whether to include images in the results. Returns: query: The query that was searched for. follow_up_questions: A list of follow up questions. response_time: The response time of the query. answer: The answer to the query. images: A list of images. results: A list of dictionaries containing the results: title: The title of the result. url: The url of the result. content: The content of the result. score: The score of the result. raw_content: The raw content of the result.
from_llm
"""Initialize the PairwiseStringEvalChain from an LLM. Args: llm (BaseChatModel): The LLM to use (GPT-4 recommended). prompt (PromptTemplate, optional): The prompt to use. **kwargs (Any): Additional keyword arguments. Returns: PairwiseStringEvalChain: The initialized PairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ if not (isinstance(llm, (ChatOpenAI, AzureChatOpenAI)) and llm.model_name. startswith('gpt-4')): logger.warning( 'This chain was only tested with GPT-4. Performance may be significantly worse with other models.' ) expected_input_vars = {'prediction', 'prediction_b', 'input', 'criteria'} prompt_ = prompt or COMPARISON_TEMPLATE.partial(reference='') if expected_input_vars != set(prompt_.input_variables): raise ValueError( f'Input variables should be {expected_input_vars}, but got {prompt_.input_variables}' ) criteria_ = resolve_pairwise_criteria(criteria) criteria_str = '\n'.join(f'{k}: {v}' if v else k for k, v in criteria_.items()) criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else '' return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, *, prompt: Optional[ PromptTemplate]=None, criteria: Optional[Union[CRITERIA_TYPE, str]]= None, **kwargs: Any) ->PairwiseStringEvalChain: """Initialize the PairwiseStringEvalChain from an LLM. Args: llm (BaseChatModel): The LLM to use (GPT-4 recommended). prompt (PromptTemplate, optional): The prompt to use. **kwargs (Any): Additional keyword arguments. Returns: PairwiseStringEvalChain: The initialized PairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected. """ if not (isinstance(llm, (ChatOpenAI, AzureChatOpenAI)) and llm. model_name.startswith('gpt-4')): logger.warning( 'This chain was only tested with GPT-4. Performance may be significantly worse with other models.' ) expected_input_vars = {'prediction', 'prediction_b', 'input', 'criteria'} prompt_ = prompt or COMPARISON_TEMPLATE.partial(reference='') if expected_input_vars != set(prompt_.input_variables): raise ValueError( f'Input variables should be {expected_input_vars}, but got {prompt_.input_variables}' ) criteria_ = resolve_pairwise_criteria(criteria) criteria_str = '\n'.join(f'{k}: {v}' if v else k for k, v in criteria_. items()) criteria_str = CRITERIA_INSTRUCTIONS + criteria_str if criteria_str else '' return cls(llm=llm, prompt=prompt_.partial(criteria=criteria_str), **kwargs )
Initialize the PairwiseStringEvalChain from an LLM. Args: llm (BaseChatModel): The LLM to use (GPT-4 recommended). prompt (PromptTemplate, optional): The prompt to use. **kwargs (Any): Additional keyword arguments. Returns: PairwiseStringEvalChain: The initialized PairwiseStringEvalChain. Raises: ValueError: If the input variables are not as expected.
generate_prompt_string
"""Generate a prompt string. Returns: str: The generated prompt string. """ formatted_response_format = json.dumps(self.response_format, indent=4) prompt_string = f"""Constraints: {self._generate_numbered_list(self.constraints)} Commands: {self._generate_numbered_list(self.commands, item_type='command')} Resources: {self._generate_numbered_list(self.resources)} Performance Evaluation: {self._generate_numbered_list(self.performance_evaluation)} You should only respond in JSON format as described below Response Format: {formatted_response_format} Ensure the response can be parsed by Python json.loads""" return prompt_string
def generate_prompt_string(self) ->str: """Generate a prompt string. Returns: str: The generated prompt string. """ formatted_response_format = json.dumps(self.response_format, indent=4) prompt_string = f"""Constraints: {self._generate_numbered_list(self.constraints)} Commands: {self._generate_numbered_list(self.commands, item_type='command')} Resources: {self._generate_numbered_list(self.resources)} Performance Evaluation: {self._generate_numbered_list(self.performance_evaluation)} You should only respond in JSON format as described below Response Format: {formatted_response_format} Ensure the response can be parsed by Python json.loads""" return prompt_string
Generate a prompt string. Returns: str: The generated prompt string.
_get_elements
from unstructured.file_utils.filetype import FileType, detect_filetype filetype = detect_filetype(self.file_path) if filetype == FileType.EML: from unstructured.partition.email import partition_email return partition_email(filename=self.file_path, **self.unstructured_kwargs) elif satisfies_min_unstructured_version('0.5.8') and filetype == FileType.MSG: from unstructured.partition.msg import partition_msg return partition_msg(filename=self.file_path, **self.unstructured_kwargs) else: raise ValueError( f'Filetype {filetype} is not supported in UnstructuredEmailLoader.')
def _get_elements(self) ->List: from unstructured.file_utils.filetype import FileType, detect_filetype filetype = detect_filetype(self.file_path) if filetype == FileType.EML: from unstructured.partition.email import partition_email return partition_email(filename=self.file_path, **self. unstructured_kwargs) elif satisfies_min_unstructured_version('0.5.8' ) and filetype == FileType.MSG: from unstructured.partition.msg import partition_msg return partition_msg(filename=self.file_path, **self. unstructured_kwargs) else: raise ValueError( f'Filetype {filetype} is not supported in UnstructuredEmailLoader.' )
null
full_table_name
return f'{self.table_name}_{self.session_id}'
@property def full_table_name(self) ->str: return f'{self.table_name}_{self.session_id}'
null
similarity_search_by_vector
"""Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k, search_k) return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(self, embedding: List[float], k: int=4, search_k: int=-1, **kwargs: Any) ->List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k, search_k) return [doc for doc, _ in docs_and_scores]
Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding.
__init__
"""Create a new Neo4j graph wrapper instance.""" try: import neo4j except ImportError: raise ValueError( 'Could not import neo4j python package. Please install it with `pip install neo4j`.' ) url = get_from_env('url', 'NEO4J_URI', url) username = get_from_env('username', 'NEO4J_USERNAME', username) password = get_from_env('password', 'NEO4J_PASSWORD', password) database = get_from_env('database', 'NEO4J_DATABASE', database) self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password)) self._database = database self.schema: str = '' self.structured_schema: Dict[str, Any] = {} try: self._driver.verify_connectivity() except neo4j.exceptions.ServiceUnavailable: raise ValueError( 'Could not connect to Neo4j database. Please ensure that the url is correct' ) except neo4j.exceptions.AuthError: raise ValueError( 'Could not connect to Neo4j database. Please ensure that the username and password are correct' ) try: self.refresh_schema() except neo4j.exceptions.ClientError: raise ValueError( "Could not use APOC procedures. Please ensure the APOC plugin is installed in Neo4j and that 'apoc.meta.data()' is allowed in Neo4j configuration " )
def __init__(self, url: Optional[str]=None, username: Optional[str]=None, password: Optional[str]=None, database: str='neo4j') ->None: """Create a new Neo4j graph wrapper instance.""" try: import neo4j except ImportError: raise ValueError( 'Could not import neo4j python package. Please install it with `pip install neo4j`.' ) url = get_from_env('url', 'NEO4J_URI', url) username = get_from_env('username', 'NEO4J_USERNAME', username) password = get_from_env('password', 'NEO4J_PASSWORD', password) database = get_from_env('database', 'NEO4J_DATABASE', database) self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password)) self._database = database self.schema: str = '' self.structured_schema: Dict[str, Any] = {} try: self._driver.verify_connectivity() except neo4j.exceptions.ServiceUnavailable: raise ValueError( 'Could not connect to Neo4j database. Please ensure that the url is correct' ) except neo4j.exceptions.AuthError: raise ValueError( 'Could not connect to Neo4j database. Please ensure that the username and password are correct' ) try: self.refresh_schema() except neo4j.exceptions.ClientError: raise ValueError( "Could not use APOC procedures. Please ensure the APOC plugin is installed in Neo4j and that 'apoc.meta.data()' is allowed in Neo4j configuration " )
Create a new Neo4j graph wrapper instance.
tearDown
shutil.rmtree(self.tmpdir, ignore_errors=True)
def tearDown(self) ->None:
    """Best-effort removal of the test's temporary directory."""
    scratch_dir = self.tmpdir
    shutil.rmtree(scratch_dir, ignore_errors=True)
null
_construct_documents_from_results_without_score
"""Helper to convert Marqo results into documents. Args: results (List[dict]): A marqo results object with the 'hits'. include_scores (bool, optional): Include scores alongside documents. Defaults to False. Returns: Union[List[Document], List[Tuple[Document, float]]]: The documents or document score pairs if `include_scores` is true. """ documents: List[Document] = [] for res in results['hits']: if self.page_content_builder is None: text = res['text'] else: text = self.page_content_builder(res) metadata = json.loads(res.get('metadata', '{}')) documents.append(Document(page_content=text, metadata=metadata)) return documents
def _construct_documents_from_results_without_score(self, results: Dict[str,
    List[Dict[str, str]]]) ->List[Document]:
    """Helper to convert Marqo results into documents.

    Args:
        results: A Marqo results object containing a 'hits' key.

    Returns:
        List[Document]: One document per hit. Page content comes from the
        hit's 'text' field unless ``self.page_content_builder`` is set, in
        which case the builder derives it from the whole hit; metadata is
        decoded from the hit's JSON 'metadata' field (empty dict if absent).
    """
    documents: List[Document] = []
    for res in results['hits']:
        # Default to the raw 'text' field; a custom builder may compute
        # the page content from the entire hit instead.
        if self.page_content_builder is None:
            text = res['text']
        else:
            text = self.page_content_builder(res)
        # Metadata is stored as a JSON string on each hit.
        metadata = json.loads(res.get('metadata', '{}'))
        documents.append(Document(page_content=text, metadata=metadata))
    return documents
Helper to convert Marqo results into documents. Args: results (List[dict]): A marqo results object with the 'hits'. include_scores (bool, optional): Include scores alongside documents. Defaults to False. Returns: Union[List[Document], List[Tuple[Document, float]]]: The documents or document score pairs if `include_scores` is true.
test__split_list_single_doc
"""Test splitting works with just a single doc.""" docs = [Document(page_content='foo')] doc_list = split_list_of_docs(docs, _fake_docs_len_func, 100) assert doc_list == [docs]
def test__split_list_single_doc() ->None:
    """A single document under the length limit stays in one group."""
    single = [Document(page_content='foo')]
    grouped = split_list_of_docs(single, _fake_docs_len_func, 100)
    assert grouped == [single]
Test splitting works with just a single doc.
_llm_type
"""Return type of llm.""" return 'textgen'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'textgen'
Return type of llm.
add_example
"""Add new example to list.""" self.examples.append(example) string_example = self.example_prompt.format(**example) self.example_text_lengths.append(self.get_text_length(string_example))
def add_example(self, example: Dict[str, str]) ->None:
    """Register a new example and cache the length of its rendered prompt."""
    self.examples.append(example)
    rendered = self.example_prompt.format(**example)
    self.example_text_lengths.append(self.get_text_length(rendered))
Add new example to list.
test_language_loader_for_python_with_parser_threshold
"""Test Python loader with parser enabled and below threshold.""" file_path = Path(__file__).parent.parent.parent / 'examples' loader = GenericLoader.from_filesystem(file_path, glob='hello_world.py', parser=LanguageParser(language='python', parser_threshold=1000)) docs = loader.load() assert len(docs) == 1
def test_language_loader_for_python_with_parser_threshold() ->None:
    """Test Python loader with parser enabled and below threshold."""
    examples_dir = Path(__file__).parent.parent.parent / 'examples'
    generic_loader = GenericLoader.from_filesystem(examples_dir, glob=
        'hello_world.py', parser=LanguageParser(language='python',
        parser_threshold=1000))
    loaded_docs = generic_loader.load()
    assert len(loaded_docs) == 1
Test Python loader with parser enabled and below threshold.
get_documents_array_uri_from_group
"""Get the URI of the documents array from group. Args: group: TileDB group object. Returns: URI of the documents array. """ return group[DOCUMENTS_ARRAY_NAME].uri
def get_documents_array_uri_from_group(group: Any) ->str:
    """Get the URI of the documents array from group.

    Args:
        group: TileDB group object.

    Returns:
        URI of the documents array.
    """
    documents_member = group[DOCUMENTS_ARRAY_NAME]
    return documents_member.uri
Get the URI of the documents array from group. Args: group: TileDB group object. Returns: URI of the documents array.
_page_to_document
main_meta = {'title': page_title, 'summary': wiki_page.summary, 'source': wiki_page.url} add_meta = {'categories': wiki_page.categories, 'page_url': wiki_page.url, 'image_urls': wiki_page.images, 'related_titles': wiki_page.links, 'parent_id': wiki_page.parent_id, 'references': wiki_page.references, 'revision_id': wiki_page.revision_id, 'sections': wiki_page.sections } if self.load_all_available_meta else {} doc = Document(page_content=wiki_page.content[:self.doc_content_chars_max], metadata={**main_meta, **add_meta}) return doc
def _page_to_document(self, page_title: str, wiki_page: Any) ->Document:
    """Build a Document from a wiki page, truncating content to
    ``self.doc_content_chars_max``.

    Title/summary/source are always recorded; the richer page metadata is
    attached only when ``self.load_all_available_meta`` is enabled.
    """
    base_metadata = {'title': page_title, 'summary': wiki_page.summary,
        'source': wiki_page.url}
    if self.load_all_available_meta:
        extra_metadata = {'categories': wiki_page.categories, 'page_url':
            wiki_page.url, 'image_urls': wiki_page.images,
            'related_titles': wiki_page.links, 'parent_id': wiki_page.
            parent_id, 'references': wiki_page.references, 'revision_id':
            wiki_page.revision_id, 'sections': wiki_page.sections}
    else:
        extra_metadata = {}
    return Document(page_content=wiki_page.content[:self.
        doc_content_chars_max], metadata={**base_metadata, **extra_metadata})
null
from_texts
"""Create alibaba cloud opensearch vector store instance. Args: texts: The text segments to be inserted into the vector storage, should not be empty. embedding: Embedding function, Embedding function. config: Alibaba OpenSearch instance configuration. metadatas: Metadata information. Returns: AlibabaCloudOpenSearch: Alibaba cloud opensearch vector store instance. """ if texts is None or len(texts) == 0: raise Exception('the inserted text segments, should not be empty.') if embedding is None: raise Exception('the embeddings should not be empty.') if config is None: raise Exception('config should not be none.') ctx = cls(embedding, config, **kwargs) ctx.add_texts(texts=texts, metadatas=metadatas) return ctx
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, config: Optional[
    AlibabaCloudOpenSearchSettings]=None, **kwargs: Any
    ) ->'AlibabaCloudOpenSearch':
    """Create alibaba cloud opensearch vector store instance.

    Args:
        texts: The text segments to be inserted into the vector storage,
            should not be empty.
        embedding: Embedding function.
        metadatas: Metadata information.
        config: Alibaba OpenSearch instance configuration.

    Returns:
        AlibabaCloudOpenSearch: Alibaba cloud opensearch vector store instance.

    Raises:
        ValueError: If ``texts`` is empty, or ``embedding``/``config`` is
            None. (Previously a bare ``Exception``; ValueError is a subclass
            of Exception, so existing callers catching Exception still work.)
    """
    # Idiomatic emptiness check covers both None and [].
    if not texts:
        raise ValueError('the inserted text segments, should not be empty.')
    if embedding is None:
        raise ValueError('the embeddings should not be empty.')
    if config is None:
        raise ValueError('config should not be none.')
    ctx = cls(embedding, config, **kwargs)
    ctx.add_texts(texts=texts, metadatas=metadatas)
    return ctx
Create alibaba cloud opensearch vector store instance. Args: texts: The text segments to be inserted into the vector storage, should not be empty. embedding: Embedding function, Embedding function. config: Alibaba OpenSearch instance configuration. metadatas: Metadata information. Returns: AlibabaCloudOpenSearch: Alibaba cloud opensearch vector store instance.
test_prompttemplate_prefix_suffix
"""Test that few shot works when prefix and suffix are PromptTemplates.""" prefix = PromptTemplate(input_variables=['content'], template= 'This is a test about {content}.') suffix = PromptTemplate(input_variables=['new_content'], template= 'Now you try to talk about {new_content}.') examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz', 'answer': 'foo'}] prompt = FewShotPromptWithTemplates(suffix=suffix, prefix=prefix, input_variables=['content', 'new_content'], examples=examples, example_prompt=EXAMPLE_PROMPT, example_separator='\n') output = prompt.format(content='animals', new_content='party') expected_output = """This is a test about animals. foo: bar baz: foo Now you try to talk about party.""" assert output == expected_output
def test_prompttemplate_prefix_suffix() ->None:
    """Test that few shot works when prefix and suffix are PromptTemplates."""
    prefix_tmpl = PromptTemplate(input_variables=['content'], template=
        'This is a test about {content}.')
    suffix_tmpl = PromptTemplate(input_variables=['new_content'], template=
        'Now you try to talk about {new_content}.')
    few_shot_examples = [{'question': 'foo', 'answer': 'bar'}, {'question':
        'baz', 'answer': 'foo'}]
    few_shot_prompt = FewShotPromptWithTemplates(suffix=suffix_tmpl,
        prefix=prefix_tmpl, input_variables=['content', 'new_content'],
        examples=few_shot_examples, example_prompt=EXAMPLE_PROMPT,
        example_separator='\n')
    rendered = few_shot_prompt.format(content='animals', new_content='party')
    expected = ('This is a test about animals.\n' + 'foo: bar\n' +
        'baz: foo\n' + 'Now you try to talk about party.')
    assert rendered == expected
Test that few shot works when prefix and suffix are PromptTemplates.
_generate_payload
"""Generates payload for the API request.""" payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model) if self.instruction: payload['instruction'] = self.instruction return payload
def _generate_payload(self, texts: List[str]) ->EmbaasEmbeddingsPayload:
    """Build the request payload for the Embaas embeddings endpoint."""
    request_body = EmbaasEmbeddingsPayload(texts=texts, model=self.model)
    # An instruction is attached only when one is configured (truthy).
    if self.instruction:
        request_body['instruction'] = self.instruction
    return request_body
Generates payload for the API request.
_generate
generations = [] if self.streaming: generation: Optional[ChatGenerationChunk] = None for chunk in self._stream(messages, stop=stop, run_manager=run_manager, **kwargs): if generation is None: generation = chunk else: generation += chunk assert generation is not None generations.append(self._chunk_to_generation(generation)) else: params: Dict[str, Any] = self._invocation_params(messages=messages, stop=stop, **kwargs) resp = self.completion_with_retry(**params) generations.append(ChatGeneration(**self. _chat_generation_from_qwen_resp(resp))) return ChatResult(generations=generations, llm_output={'model_name': self. model_name})
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Generate a chat completion for ``messages``.

    In streaming mode the chunks produced by ``_stream`` are accumulated
    into a single generation; otherwise one blocking API call is made.
    """
    generations = []
    if self.streaming:
        # Fold streamed chunks into one generation via ChatGenerationChunk
        # addition; None seeds the first chunk.
        generation: Optional[ChatGenerationChunk] = None
        for chunk in self._stream(messages, stop=stop, run_manager=
            run_manager, **kwargs):
            if generation is None:
                generation = chunk
            else:
                generation += chunk
        # _stream must yield at least one chunk for a valid response.
        assert generation is not None
        generations.append(self._chunk_to_generation(generation))
    else:
        params: Dict[str, Any] = self._invocation_params(messages=messages,
            stop=stop, **kwargs)
        resp = self.completion_with_retry(**params)
        generations.append(ChatGeneration(**self.
            _chat_generation_from_qwen_resp(resp)))
    return ChatResult(generations=generations, llm_output={'model_name':
        self.model_name})
null
_import_clarifai
from langchain_community.llms.clarifai import Clarifai return Clarifai
def _import_clarifai() ->Any:
    """Lazily import and return the Clarifai LLM class.

    The import is deferred to call time so the dependency is only paid
    when the Clarifai integration is actually requested.
    """
    from langchain_community.llms.clarifai import Clarifai
    return Clarifai
null
unique_union
"""Get unique Documents. Args: documents: List of retrieved Documents Returns: List of unique retrieved Documents """ return _unique_documents(documents)
def unique_union(self, documents: List[Document]) ->List[Document]:
    """Deduplicate retrieved Documents.

    Args:
        documents: List of retrieved Documents

    Returns:
        List of unique retrieved Documents
    """
    deduplicated = _unique_documents(documents)
    return deduplicated
Get unique Documents. Args: documents: List of retrieved Documents Returns: List of unique retrieved Documents
_select_relevance_score_fn
return self._max_inner_product_relevance_score_fn
def _select_relevance_score_fn(self) ->Callable[[float], float]:
    """Return the relevance normalization for max-inner-product scores."""
    return self._max_inner_product_relevance_score_fn
null
map_ai_messages_in_session
"""Convert messages from the specified 'sender' to AI messages. This is useful for fine-tuning the AI to adapt to your voice. """ messages = [] num_converted = 0 for message in chat_sessions['messages']: if message.additional_kwargs.get('sender') == sender: message = AIMessage(content=message.content, additional_kwargs= message.additional_kwargs.copy(), example=getattr(message, 'example', None)) num_converted += 1 messages.append(message) return ChatSession(messages=messages)
def map_ai_messages_in_session(chat_sessions: ChatSession, sender: str
    ) ->ChatSession:
    """Convert messages from the specified 'sender' to AI messages.

    This is useful for fine-tuning the AI to adapt to your voice.

    Args:
        chat_sessions: Chat session whose messages are rewritten.
        sender: Value of ``additional_kwargs['sender']`` marking a message
            as authored by the AI persona.

    Returns:
        A new ChatSession in which matching messages are AIMessage
        instances; all other messages pass through unchanged.
    """
    messages = []
    for message in chat_sessions['messages']:
        if message.additional_kwargs.get('sender') == sender:
            # Re-wrap as an AIMessage, preserving content and a copy of the
            # metadata. (Removed the unused `num_converted` counter the
            # original incremented but never read.)
            message = AIMessage(content=message.content, additional_kwargs
                =message.additional_kwargs.copy(), example=getattr(message,
                'example', None))
        messages.append(message)
    return ChatSession(messages=messages)
Convert messages from the specified 'sender' to AI messages. This is useful for fine-tuning the AI to adapt to your voice.
from_llm_and_tools
"""Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt(tools, prefix=prefix, suffix=suffix, human_message_template=human_message_template, format_instructions= format_instructions, input_variables=input_variables, memory_prompts= memory_prompts) llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser(llm=llm) return cls(llm_chain=llm_chain, allowed_tools=tool_names, output_parser= _output_parser, **kwargs)
@classmethod
def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[
    BaseTool], callback_manager: Optional[BaseCallbackManager]=None,
    output_parser: Optional[AgentOutputParser]=None, prefix: str=PREFIX,
    suffix: str=SUFFIX, human_message_template: str=HUMAN_MESSAGE_TEMPLATE,
    format_instructions: str=FORMAT_INSTRUCTIONS, input_variables: Optional
    [List[str]]=None, memory_prompts: Optional[List[BasePromptTemplate]]=
    None, **kwargs: Any) ->Agent:
    """Construct an agent from an LLM and tools."""
    cls._validate_tools(tools)
    agent_prompt = cls.create_prompt(tools, prefix=prefix, suffix=suffix,
        human_message_template=human_message_template,
        format_instructions=format_instructions, input_variables=
        input_variables, memory_prompts=memory_prompts)
    chain = LLMChain(llm=llm, prompt=agent_prompt, callback_manager=
        callback_manager)
    parser = output_parser or cls._get_default_output_parser(llm=llm)
    allowed = [tool.name for tool in tools]
    return cls(llm_chain=chain, allowed_tools=allowed, output_parser=
        parser, **kwargs)
Construct an agent from an LLM and tools.
test_edenai_embedding_documents
"""Test edenai embeddings with openai.""" documents = ['foo bar', 'test text'] embedding = EdenAiEmbeddings(provider='openai') output = embedding.embed_documents(documents) assert len(output) == 2 assert len(output[0]) == 1536 assert len(output[1]) == 1536
def test_edenai_embedding_documents() ->None:
    """Test edenai embeddings with openai."""
    texts_to_embed = ['foo bar', 'test text']
    embedder = EdenAiEmbeddings(provider='openai')
    vectors = embedder.embed_documents(texts_to_embed)
    assert len(vectors) == 2
    for vector in vectors:
        assert len(vector) == 1536
Test edenai embeddings with openai.
lookup
"""Look up based on prompt and llm_string.""" return self._cache.get((prompt, llm_string), None)
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
    """Return the cached value for (prompt, llm_string), or None if absent."""
    cache_key = (prompt, llm_string)
    return self._cache.get(cache_key, None)
Look up based on prompt and llm_string.
dict
"""Return a dictionary of the LLM.""" starter_dict = dict(self._identifying_params) starter_dict['_type'] = self._llm_type return starter_dict
def dict(self, **kwargs: Any) ->Dict:
    """Return a dictionary of the LLM."""
    serialized = {**self._identifying_params}
    serialized['_type'] = self._llm_type
    return serialized
Return a dictionary of the LLM.
on_text_common
self.text += 1
def on_text_common(self) ->None:
    """Bump the running count of text events."""
    self.text = self.text + 1
null
_default_params
"""Get the default parameters for calling Anthropic API.""" d = {'max_tokens_to_sample': self.max_tokens_to_sample, 'model': self.model} if self.temperature is not None: d['temperature'] = self.temperature if self.top_k is not None: d['top_k'] = self.top_k if self.top_p is not None: d['top_p'] = self.top_p return {**d, **self.model_kwargs}
@property def _default_params(self) ->Mapping[str, Any]: """Get the default parameters for calling Anthropic API.""" d = {'max_tokens_to_sample': self.max_tokens_to_sample, 'model': self.model } if self.temperature is not None: d['temperature'] = self.temperature if self.top_k is not None: d['top_k'] = self.top_k if self.top_p is not None: d['top_p'] = self.top_p return {**d, **self.model_kwargs}
Get the default parameters for calling Anthropic API.
test_agent_stream
"""Test react chain with callbacks by setting verbose globally.""" tool = 'Search' responses = [f"""FooBarBaz Action: {tool} Action Input: misalignment""", f"""FooBarBaz Action: {tool} Action Input: something else""", """Oh well Final Answer: curses foiled again"""] fake_llm = FakeListLLM(responses=responses) tools = [Tool(name='Search', func=lambda x: f'Results for: {x}', description='Useful for searching')] agent = initialize_agent(tools, fake_llm, agent=AgentType. ZERO_SHOT_REACT_DESCRIPTION) output = [a for a in agent.stream('when was langchain made')] assert output == [{'actions': [AgentAction(tool='Search', tool_input= 'misalignment', log= """FooBarBaz Action: Search Action Input: misalignment""")], 'messages': [AIMessage(content= """FooBarBaz Action: Search Action Input: misalignment""")]}, {'steps': [AgentStep(action=AgentAction(tool='Search', tool_input='misalignment', log="""FooBarBaz Action: Search Action Input: misalignment"""), observation='Results for: misalignment')], 'messages': [HumanMessage( content='Results for: misalignment')]}, {'actions': [AgentAction(tool= 'Search', tool_input='something else', log= """FooBarBaz Action: Search Action Input: something else""")], 'messages': [AIMessage(content= """FooBarBaz Action: Search Action Input: something else""")]}, { 'steps': [AgentStep(action=AgentAction(tool='Search', tool_input= 'something else', log= """FooBarBaz Action: Search Action Input: something else"""), observation='Results for: something else')], 'messages': [HumanMessage( content='Results for: something else')]}, {'output': 'curses foiled again', 'messages': [AIMessage(content= """Oh well Final Answer: curses foiled again""")]}] assert add(output) == {'actions': [AgentAction(tool='Search', tool_input= 'misalignment', log= """FooBarBaz Action: Search Action Input: misalignment"""), AgentAction (tool='Search', tool_input='something else', log= """FooBarBaz Action: Search Action Input: something else""")], 'steps': 
[AgentStep(action=AgentAction(tool='Search', tool_input='misalignment', log="""FooBarBaz Action: Search Action Input: misalignment"""), observation='Results for: misalignment'), AgentStep(action=AgentAction( tool='Search', tool_input='something else', log= """FooBarBaz Action: Search Action Input: something else"""), observation='Results for: something else')], 'messages': [AIMessage( content="""FooBarBaz Action: Search Action Input: misalignment"""), HumanMessage(content='Results for: misalignment'), AIMessage(content= """FooBarBaz Action: Search Action Input: something else"""), HumanMessage(content='Results for: something else'), AIMessage(content= """Oh well Final Answer: curses foiled again""")], 'output': 'curses foiled again'}
def test_agent_stream() ->None:
    """Test react chain with callbacks by setting verbose globally."""
    tool = 'Search'
    # Three scripted LLM turns: two tool invocations, then a final answer.
    responses = [f'FooBarBaz\nAction: {tool}\nAction Input: misalignment',
        f"""FooBarBaz
Action: {tool}
Action Input: something else""",
        """Oh well
Final Answer: curses foiled again"""]
    fake_llm = FakeListLLM(responses=responses)
    tools = [Tool(name='Search', func=lambda x: f'Results for: {x}',
        description='Useful for searching')]
    agent = initialize_agent(tools, fake_llm, agent=AgentType.
        ZERO_SHOT_REACT_DESCRIPTION)
    output = [a for a in agent.stream('when was langchain made')]
    # The stream yields action/step dicts in order: action, observation step,
    # action, observation step, then the final output.
    assert output == [{'actions': [AgentAction(tool='Search', tool_input=
        'misalignment', log=
        """FooBarBaz
Action: Search
Action Input: misalignment""")], 'messages': [AIMessage(content=
        """FooBarBaz
Action: Search
Action Input: misalignment""")]}, {
        'steps': [AgentStep(action=AgentAction(tool='Search', tool_input=
        'misalignment', log=
        """FooBarBaz
Action: Search
Action Input: misalignment"""),
        observation='Results for: misalignment')], 'messages': [
        HumanMessage(content='Results for: misalignment')]}, {'actions': [
        AgentAction(tool='Search', tool_input='something else', log=
        """FooBarBaz
Action: Search
Action Input: something else""")],
        'messages': [AIMessage(content=
        """FooBarBaz
Action: Search
Action Input: something else""")]}, {
        'steps': [AgentStep(action=AgentAction(tool='Search', tool_input=
        'something else', log=
        """FooBarBaz
Action: Search
Action Input: something else"""),
        observation='Results for: something else')], 'messages': [
        HumanMessage(content='Results for: something else')]}, {'output':
        'curses foiled again', 'messages': [AIMessage(content=
        """Oh well
Final Answer: curses foiled again""")]}]
    # add() folds the streamed chunks into one aggregate dict.
    assert add(output) == {'actions': [AgentAction(tool='Search',
        tool_input='misalignment', log=
        """FooBarBaz
Action: Search
Action Input: misalignment"""),
        AgentAction(tool='Search', tool_input='something else', log=
        """FooBarBaz
Action: Search
Action Input: something else""")],
        'steps': [AgentStep(action=AgentAction(tool='Search', tool_input=
        'misalignment', log=
        """FooBarBaz
Action: Search
Action Input: misalignment"""),
        observation='Results for: misalignment'), AgentStep(action=
        AgentAction(tool='Search', tool_input='something else', log=
        """FooBarBaz
Action: Search
Action Input: something else"""),
        observation='Results for: something else')], 'messages': [AIMessage
        (content="""FooBarBaz
Action: Search
Action Input: misalignment"""),
        HumanMessage(content='Results for: misalignment'), AIMessage(
        content="""FooBarBaz
Action: Search
Action Input: something else"""
        ), HumanMessage(content='Results for: something else'), AIMessage(
        content="""Oh well
Final Answer: curses foiled again""")], 'output':
        'curses foiled again'}
Test react chain with callbacks by setting verbose globally.
_client
"""Returns GigaChat API client""" import gigachat return gigachat.GigaChat(base_url=self.base_url, auth_url=self.auth_url, credentials=self.credentials, scope=self.scope, access_token=self. access_token, model=self.model, user=self.user, password=self.password, timeout=self.timeout, verify_ssl_certs=self.verify_ssl_certs, ca_bundle_file=self.ca_bundle_file, cert_file=self.cert_file, key_file= self.key_file, key_file_password=self.key_file_password)
@cached_property
def _client(self) ->Any:
    """Returns GigaChat API client"""
    import gigachat
    client_kwargs = dict(base_url=self.base_url, auth_url=self.auth_url,
        credentials=self.credentials, scope=self.scope, access_token=self.
        access_token, model=self.model, user=self.user, password=self.
        password, timeout=self.timeout, verify_ssl_certs=self.
        verify_ssl_certs, ca_bundle_file=self.ca_bundle_file, cert_file=
        self.cert_file, key_file=self.key_file, key_file_password=self.
        key_file_password)
    return gigachat.GigaChat(**client_kwargs)
Returns GigaChat API client
get_temp_file
"""Yield a temporary field with some content.""" with NamedTemporaryFile(suffix=suffix, delete=False) as temp_file: temp_file.write(content) path = Path(temp_file.name) try: yield path finally: os.remove(str(path))
@contextmanager
def get_temp_file(content: bytes, suffix: Optional[str]=None) ->Generator[
    Path, None, None]:
    """Yield the path of a temporary file pre-populated with *content*;
    the file is deleted once the context exits."""
    with NamedTemporaryFile(suffix=suffix, delete=False) as temp_file:
        temp_file.write(content)
    temp_path = Path(temp_file.name)
    try:
        yield temp_path
    finally:
        os.remove(str(temp_path))
Yield a temporary field with some content.
test_init_direct_access_index
index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = DatabricksVectorSearch(index, embedding= DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN) assert vectorsearch.index == index
@pytest.mark.requires('databricks', 'databricks.vector_search')
def test_init_direct_access_index() ->None:
    """A direct-access index initializes and is stored on the vector store."""
    direct_index = mock_index(DIRECT_ACCESS_INDEX)
    store = DatabricksVectorSearch(direct_index, embedding=
        DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN)
    assert store.index == direct_index
null
test_add_text
"""Test adding additional text elements to existing index.""" text_input = ['test', 'add', 'text', 'method'] metadatas = [{'page': i} for i in range(len(text_input))] docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL) docids = OpenSearchVectorSearch.add_texts(docsearch, text_input, metadatas) assert len(docids) == len(text_input)
def test_add_text() ->None:
    """Test adding additional text elements to existing index."""
    new_texts = ['test', 'add', 'text', 'method']
    new_metadatas = [{'page': page} for page in range(len(new_texts))]
    store = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL)
    returned_ids = OpenSearchVectorSearch.add_texts(store, new_texts,
        new_metadatas)
    assert len(returned_ids) == len(new_texts)
Test adding additional text elements to existing index.
test_no_imports_disallowed_code_validation
"""Test the validator.""" with pytest.raises(ValueError): PALChain.validate_code(_SAMPLE_CODE_4, _NO_IMPORTS_VALIDATIONS)
def test_no_imports_disallowed_code_validation() ->None:
    """Test the validator."""
    # The sample code violates the no-imports validation policy, so
    # validate_code must raise ValueError.
    with pytest.raises(ValueError):
        PALChain.validate_code(_SAMPLE_CODE_4, _NO_IMPORTS_VALIDATIONS)
Test the validator.
_identifying_params
"""Get the identifying parameters.""" return {**{'model': self.model}, **self._default_params}
@property def _identifying_params(self) ->Dict[str, Any]: """Get the identifying parameters.""" return {**{'model': self.model}, **self._default_params}
Get the identifying parameters.
is_lc_serializable
return False
@classmethod
def is_lc_serializable(cls) ->bool:
    """This class does not participate in LangChain serialization."""
    return False
null
from_string
"""Create LLMChain from LLM and template.""" prompt_template = PromptTemplate.from_template(template) return cls(llm=llm, prompt=prompt_template)
@classmethod
def from_string(cls, llm: BaseLanguageModel, template: str) ->LLMChain:
    """Create LLMChain from LLM and template."""
    return cls(llm=llm, prompt=PromptTemplate.from_template(template))
Create LLMChain from LLM and template.
_detect_value_type
if isinstance(tokenId, int): return 'int' elif tokenId.startswith('0x'): return 'hex_0x' elif tokenId.startswith('0xbf'): return 'hex_0xbf' else: return 'hex_0xbf'
@staticmethod def _detect_value_type(tokenId: str) ->str: if isinstance(tokenId, int): return 'int' elif tokenId.startswith('0x'): return 'hex_0x' elif tokenId.startswith('0xbf'): return 'hex_0xbf' else: return 'hex_0xbf'
null
add_message
self.messages.append(message) self.upsert_messages()
def add_message(self, message: BaseMessage) ->None:
    """Append ``message`` to the history, then sync it to the backing store."""
    self.messages.append(message)
    self.upsert_messages()
null
get_summaries_as_docs
""" Performs an arxiv search and returns list of documents, with summaries as the content. If an error occurs or no documents found, error text is returned instead. Wrapper for https://lukasschwab.me/arxiv.py/index.html#Search Args: query: a plaintext search query """ try: if self.is_arxiv_identifier(query): results = self.arxiv_search(id_list=query.split(), max_results=self .top_k_results).results() else: results = self.arxiv_search(query[:self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results).results() except self.arxiv_exceptions as ex: return [Document(page_content=f'Arxiv exception: {ex}')] docs = [Document(page_content=result.summary, metadata={'Entry ID': result. entry_id, 'Published': result.updated.date(), 'Title': result.title, 'Authors': ', '.join(a.name for a in result.authors)}) for result in results] return docs
def get_summaries_as_docs(self, query: str) ->List[Document]:
    """
    Performs an arxiv search and returns list of documents,
    with summaries as the content.

    If an error occurs or no documents found, error text
    is returned instead.

    Wrapper for https://lukasschwab.me/arxiv.py/index.html#Search

    Args:
        query: a plaintext search query
    """
    try:
        # Identifier-style queries are fetched by id; free-text queries are
        # truncated to the API's maximum query length.
        if self.is_arxiv_identifier(query):
            results = self.arxiv_search(id_list=query.split(), max_results=
                self.top_k_results).results()
        else:
            results = self.arxiv_search(query[:self.ARXIV_MAX_QUERY_LENGTH],
                max_results=self.top_k_results).results()
    except self.arxiv_exceptions as ex:
        # Surface API failures as a single error document rather than raising.
        return [Document(page_content=f'Arxiv exception: {ex}')]
    docs = [Document(page_content=result.summary, metadata={'Entry ID':
        result.entry_id, 'Published': result.updated.date(), 'Title':
        result.title, 'Authors': ', '.join(a.name for a in result.authors)}
        ) for result in results]
    return docs
Performs an arxiv search and returns list of documents, with summaries as the content. If an error occurs or no documents found, error text is returned instead. Wrapper for https://lukasschwab.me/arxiv.py/index.html#Search Args: query: a plaintext search query
clear
"""Clear the cache. Raises: SdkException: Momento service or network error """ from momento.responses import CacheFlush flush_response = self.cache_client.flush_cache(self.cache_name) if isinstance(flush_response, CacheFlush.Success): pass elif isinstance(flush_response, CacheFlush.Error): raise flush_response.inner_exception
def clear(self, **kwargs: Any) ->None:
    """Clear the cache.

    Raises:
        SdkException: Momento service or network error
    """
    from momento.responses import CacheFlush
    outcome = self.cache_client.flush_cache(self.cache_name)
    # Success (and any unrecognized response type) is a no-op; only an
    # explicit Error response propagates its underlying exception.
    if isinstance(outcome, CacheFlush.Error):
        raise outcome.inner_exception
Clear the cache. Raises: SdkException: Momento service or network error
parse
match = re.search('```(xml)?(.*)```', text, re.DOTALL) if match is not None: text = match.group(2) encoding_match = self.encoding_matcher.search(text) if encoding_match: text = encoding_match.group(2) text = text.strip() if (text.startswith('<') or text.startswith('\n<')) and (text.endswith('>') or text.endswith('>\n')): root = ET.fromstring(text) return self._root_to_dict(root) else: raise ValueError(f'Could not parse output: {text}')
def parse(self, text: str) ->Dict[str, List[Any]]:
    """Parse an XML payload, optionally wrapped in a ``` fence, into a dict.

    Raises:
        ValueError: If no well-formed XML document can be located in ``text``.
    """
    # Prefer the contents of a fenced ``` / ```xml block when present.
    match = re.search('```(xml)?(.*)```', text, re.DOTALL)
    if match is not None:
        text = match.group(2)
    # self.encoding_matcher extracts the document body (group 2) past an
    # encoding header, when one matches.
    encoding_match = self.encoding_matcher.search(text)
    if encoding_match:
        text = encoding_match.group(2)
    text = text.strip()
    # Only attempt ET parsing when the stripped text plausibly starts and
    # ends like an XML document; otherwise report the raw text.
    if (text.startswith('<') or text.startswith('\n<')) and (text.endswith(
        '>') or text.endswith('>\n')):
        root = ET.fromstring(text)
        return self._root_to_dict(root)
    else:
        raise ValueError(f'Could not parse output: {text}')
null
test_gradient_wrong_setup
with pytest.raises(Exception): GradientEmbeddings(gradient_api_url=_GRADIENT_BASE_URL, gradient_access_token='', gradient_workspace_id= _GRADIENT_WORKSPACE_ID, model=_MODEL_ID)
def test_gradient_wrong_setup() ->None:
    """An empty access token must make GradientEmbeddings construction fail."""
    with pytest.raises(Exception):
        GradientEmbeddings(gradient_api_url=_GRADIENT_BASE_URL,
            gradient_access_token='', gradient_workspace_id=
            _GRADIENT_WORKSPACE_ID, model=_MODEL_ID)
null
_evaluate_strings
"""Evaluate a prediction against the criteria. Parameters ---------- prediction : str The predicted text to evaluate. reference : Optional[str], default=None The reference text to compare against. This is required if `requires_reference` is `True`. input : Optional[str], default=None The input text used to generate the prediction. **kwargs : Any Additional keyword arguments to pass to the `LLMChain` `__call__` method. Returns ------- dict The evaluation results. Examples -------- >>> from langchain_community.llms import OpenAI >>> from langchain.evaluation.criteria import CriteriaEvalChain >>> llm = OpenAI() >>> criteria = "conciseness" >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) >>> chain.evaluate_strings( prediction="The answer is 42.", reference="42", input="What is the answer to life, the universe, and everything?", ) """ input_ = self._get_eval_input(prediction, reference, input) result = self(input_, callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=include_run_info) return self._prepare_output(result)
def _evaluate_strings(self, *, prediction: str, reference: Optional[str]=
    None, input: Optional[str]=None, callbacks: Callbacks=None, tags:
    Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None,
    include_run_info: bool=False, **kwargs: Any) ->dict:
    """Evaluate a prediction against the configured criteria.

    Parameters
    ----------
    prediction : str
        The predicted text to evaluate.
    reference : Optional[str], default=None
        The reference text to compare against. Required when
        `requires_reference` is `True`.
    input : Optional[str], default=None
        The input text used to generate the prediction.
    **kwargs : Any
        Additional keyword arguments passed through to the `LLMChain`
        `__call__` method.

    Returns
    -------
    dict
        The evaluation results.

    Examples
    --------
    >>> from langchain_community.llms import OpenAI
    >>> from langchain.evaluation.criteria import CriteriaEvalChain
    >>> llm = OpenAI()
    >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria="conciseness")
    >>> chain.evaluate_strings(
            prediction="The answer is 42.",
            reference="42",
            input="What is the answer to life, the universe, and everything?",
        )
    """
    eval_input = self._get_eval_input(prediction, reference, input)
    chain_result = self(eval_input, callbacks=callbacks, tags=tags,
        metadata=metadata, include_run_info=include_run_info)
    return self._prepare_output(chain_result)
Evaluate a prediction against the criteria. Parameters ---------- prediction : str The predicted text to evaluate. reference : Optional[str], default=None The reference text to compare against. This is required if `requires_reference` is `True`. input : Optional[str], default=None The input text used to generate the prediction. **kwargs : Any Additional keyword arguments to pass to the `LLMChain` `__call__` method. Returns ------- dict The evaluation results. Examples -------- >>> from langchain_community.llms import OpenAI >>> from langchain.evaluation.criteria import CriteriaEvalChain >>> llm = OpenAI() >>> criteria = "conciseness" >>> chain = CriteriaEvalChain.from_llm(llm=llm, criteria=criteria) >>> chain.evaluate_strings( prediction="The answer is 42.", reference="42", input="What is the answer to life, the universe, and everything?", )
transform_documents
""" Transform a list of Document objects by cleaning their HTML content. Args: documents: A sequence of Document objects containing HTML content. unwanted_tags: A list of tags to be removed from the HTML. tags_to_extract: A list of tags whose content will be extracted. remove_lines: If set to True, unnecessary lines will be removed from the HTML content. Returns: A sequence of Document objects with transformed content. """ for doc in documents: cleaned_content = doc.page_content cleaned_content = self.remove_unwanted_tags(cleaned_content, unwanted_tags) cleaned_content = self.extract_tags(cleaned_content, tags_to_extract) if remove_lines: cleaned_content = self.remove_unnecessary_lines(cleaned_content) doc.page_content = cleaned_content return documents
def transform_documents(self, documents: Sequence[Document], unwanted_tags:
    List[str]=['script', 'style'], tags_to_extract: List[str]=['p', 'li',
    'div', 'a'], remove_lines: bool=True, **kwargs: Any) ->Sequence[Document]:
    """Clean the HTML content of each Document, mutating it in place.

    Args:
        documents: Documents whose ``page_content`` holds HTML.
        unwanted_tags: Tags removed from the HTML entirely.
        tags_to_extract: Tags whose content is extracted and kept.
        remove_lines: When True, unnecessary lines are dropped from the
            cleaned content.

    Returns:
        The same sequence of documents, with transformed content.
    """
    # NOTE(review): the mutable list defaults are tolerable only because
    # they are read, never mutated, by the helpers below.
    for document in documents:
        html = self.remove_unwanted_tags(document.page_content, unwanted_tags)
        html = self.extract_tags(html, tags_to_extract)
        if remove_lines:
            html = self.remove_unnecessary_lines(html)
        document.page_content = html
    return documents
Transform a list of Document objects by cleaning their HTML content. Args: documents: A sequence of Document objects containing HTML content. unwanted_tags: A list of tags to be removed from the HTML. tags_to_extract: A list of tags whose content will be extracted. remove_lines: If set to True, unnecessary lines will be removed from the HTML content. Returns: A sequence of Document objects with transformed content.
_import_xinference
from langchain_community.llms.xinference import Xinference return Xinference
def _import_xinference() ->Any:
    """Lazily import and return the ``Xinference`` LLM class.

    Deferring the import keeps the optional dependency out of module
    import time.
    """
    from langchain_community.llms.xinference import Xinference
    return Xinference
null
update
"""Update cache based on prompt and llm_string.""" embedding_vector = self._get_embedding(text=prompt) llm_string_hash = _hash(llm_string) body = _dumps_generations(return_val) metadata = {'_prompt': prompt, '_llm_string_hash': llm_string_hash} row_id = f'{_hash(prompt)}-{llm_string_hash}' self.table.put(body_blob=body, vector=embedding_vector, row_id=row_id, metadata=metadata)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) ->None:
    """Update cache based on prompt and llm_string."""
    vector = self._get_embedding(text=prompt)
    llm_hash = _hash(llm_string)
    # Store the serialized generations keyed by (prompt hash, llm hash).
    self.table.put(
        body_blob=_dumps_generations(return_val),
        vector=vector,
        row_id=f'{_hash(prompt)}-{llm_hash}',
        metadata={'_prompt': prompt, '_llm_string_hash': llm_hash},
    )
Update cache based on prompt and llm_string.
_import_edenai_EdenaiTool
from langchain_community.tools.edenai import EdenaiTool return EdenaiTool
def _import_edenai_EdenaiTool() ->Any:
    """Lazily import and return the ``EdenaiTool`` class.

    Deferring the import keeps the optional dependency out of module
    import time.
    """
    from langchain_community.tools.edenai import EdenaiTool
    return EdenaiTool
null
visit_structured_query
if structured_query.filter is None: kwargs = {} else: kwargs = {'pre_filter': structured_query.filter.accept(self)} return structured_query.query, kwargs
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
    str, dict]:
    """Translate a StructuredQuery into (query text, search kwargs)."""
    query_filter = structured_query.filter
    # Only attach a pre_filter when the structured query defines one.
    kwargs = {} if query_filter is None else {'pre_filter': query_filter.
        accept(self)}
    return structured_query.query, kwargs
null
evaluation_name
""" Get the evaluation name. Returns: str: The evaluation name. """ return f'{self.distance.value}_distance'
@property
def evaluation_name(self) ->str:
    """
    Get the evaluation name.

    Returns:
        str: The evaluation name — the configured distance metric's
        value with a ``_distance`` suffix.
    """
    return f'{self.distance.value}_distance'
Get the evaluation name. Returns: str: The evaluation name.
get_tools
"""Get the tools in the toolkit.""" description = VectorStoreQATool.get_description(self.vectorstore_info.name, self.vectorstore_info.description) qa_tool = VectorStoreQATool(name=self.vectorstore_info.name, description= description, vectorstore=self.vectorstore_info.vectorstore, llm=self.llm) description = VectorStoreQAWithSourcesTool.get_description(self. vectorstore_info.name, self.vectorstore_info.description) qa_with_sources_tool = VectorStoreQAWithSourcesTool(name= f'{self.vectorstore_info.name}_with_sources', description=description, vectorstore=self.vectorstore_info.vectorstore, llm=self.llm) return [qa_tool, qa_with_sources_tool]
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit."""
    info = self.vectorstore_info
    qa_tool = VectorStoreQATool(
        name=info.name,
        description=VectorStoreQATool.get_description(info.name, info.
            description),
        vectorstore=info.vectorstore,
        llm=self.llm,
    )
    qa_with_sources_tool = VectorStoreQAWithSourcesTool(
        name=f'{info.name}_with_sources',
        description=VectorStoreQAWithSourcesTool.get_description(info.name,
            info.description),
        vectorstore=info.vectorstore,
        llm=self.llm,
    )
    return [qa_tool, qa_with_sources_tool]
Get the tools in the toolkit.
test_get_final_answer_multiline
"""Test getting final answer that is multiline.""" llm_output = """Thought: I can now answer the question Final Answer: 1994 1993""" action, action_input = get_action_and_input(llm_output) assert action == 'Final Answer' assert action_input == '1994\n1993'
def test_get_final_answer_multiline() ->None:
    """Test getting final answer that is multiline."""
    llm_output = (
        'Thought: I can now answer the question\n'
        'Final Answer: 1994\n'
        '1993'
    )
    action, action_input = get_action_and_input(llm_output)
    assert action == 'Final Answer'
    assert action_input == '1994\n1993'
Test getting final answer that is multiline.
test_transform_empty_html
html2text_transformer = Html2TextTransformer() empty_html = '<html></html>' documents = [Document(page_content=empty_html)] docs_transformed = html2text_transformer.transform_documents(documents) assert docs_transformed[0].page_content == '\n\n'
@pytest.mark.requires('html2text')
def test_transform_empty_html() ->None:
    """An empty HTML document transforms to just blank lines."""
    transformer = Html2TextTransformer()
    documents = [Document(page_content='<html></html>')]
    transformed = transformer.transform_documents(documents)
    assert transformed[0].page_content == '\n\n'
null
test_load_more_docs_success
top_k_results = 20 api_client = WikipediaAPIWrapper(top_k_results=top_k_results) docs = api_client.load('HUNTER X HUNTER') assert len(docs) > 10 assert len(docs) <= top_k_results assert_docs(docs, all_meta=False)
def test_load_more_docs_success(api_client: WikipediaAPIWrapper) ->None:
    """A larger top_k_results returns more documents, up to the limit."""
    top_k_results = 20
    # Build a fresh wrapper rather than reusing the fixture's default top_k.
    client = WikipediaAPIWrapper(top_k_results=top_k_results)
    docs = client.load('HUNTER X HUNTER')
    assert 10 < len(docs) <= top_k_results
    assert_docs(docs, all_meta=False)
null
_Subscript
self.dispatch(t.value) self.write('[') if isinstance(t.slice, ast.Index) and isinstance(t.slice.value, ast.Tuple ) and t.slice.value.elts: if len(t.slice.value.elts) == 1: elt = t.slice.value.elts[0] self.dispatch(elt) self.write(',') else: interleave(lambda : self.write(', '), self.dispatch, t.slice.value.elts ) else: self.dispatch(t.slice) self.write(']')
def _Subscript(self, t):
    """Emit source text for a subscript node: ``value[slice]``.

    NOTE(review): the ``ast.Index``/``ast.Tuple`` branch targets the
    pre-3.9 AST shape, where tuple slices are wrapped in ``Index`` —
    confirm the supported Python versions before simplifying.
    """
    self.dispatch(t.value)
    self.write('[')
    if isinstance(t.slice, ast.Index) and isinstance(t.slice.value, ast.Tuple
        ) and t.slice.value.elts:
        if len(t.slice.value.elts) == 1:
            # A one-element tuple subscript needs a trailing comma: x[i,].
            elt = t.slice.value.elts[0]
            self.dispatch(elt)
            self.write(',')
        else:
            interleave(lambda : self.write(', '), self.dispatch, t.slice.
                value.elts)
    else:
        self.dispatch(t.slice)
    self.write(']')
null
test_requests_patch_tool
tool = RequestsPatchTool(requests_wrapper=mock_requests_wrapper) input_text = '{"url": "https://example.com", "data": {"key": "value"}}' assert tool.run(input_text) == "patch {'key': 'value'}" assert asyncio.run(tool.arun(input_text)) == "apatch {'key': 'value'}"
def test_requests_patch_tool(mock_requests_wrapper: TextRequestsWrapper
    ) ->None:
    """PATCH tool forwards url/data to the wrapper, sync and async."""
    tool = RequestsPatchTool(requests_wrapper=mock_requests_wrapper)
    payload = '{"url": "https://example.com", "data": {"key": "value"}}'
    assert tool.run(payload) == "patch {'key': 'value'}"
    assert asyncio.run(tool.arun(payload)) == "apatch {'key': 'value'}"
null
on_llm_error_common
self.errors += 1
def on_llm_error_common(self) ->None:
    """Record one LLM error in the shared error counter."""
    self.errors += 1
null
mset
"""Set the given key-value pairs.""" pipe = self.client.pipeline() for key, value in key_value_pairs: pipe.set(self._get_prefixed_key(key), value, ex=self.ttl) pipe.execute()
def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) ->None:
    """Set the given key-value pairs."""
    # Batch every write into one pipeline so the whole set is a single
    # round trip; each key gets the configured prefix and TTL.
    pipe = self.client.pipeline()
    for key, value in key_value_pairs:
        pipe.set(self._get_prefixed_key(key), value, ex=self.ttl)
    pipe.execute()
Set the given key-value pairs.
test_create_internal_handler
"""If we're using a Streamlit that does not expose its own StreamlitCallbackHandler, use our own implementation. """ def external_import_error(name: str, globals: Any, locals: Any, fromlist: Any, level: int) ->Any: if name == 'streamlit.external.langchain': raise ImportError return self.builtins_import(name, globals, locals, fromlist, level) builtins.__import__ = external_import_error parent_container = MagicMock() thought_labeler = MagicMock() StreamlitCallbackHandler(parent_container, max_thought_containers=1, expand_new_thoughts=True, collapse_completed_thoughts=False, thought_labeler=thought_labeler) mock_internal_handler.assert_called_once_with(parent_container, max_thought_containers=1, expand_new_thoughts=True, collapse_completed_thoughts=False, thought_labeler=thought_labeler)
@mock.patch(
    'langchain_community.callbacks.streamlit._InternalStreamlitCallbackHandler'
    )
def test_create_internal_handler(self, mock_internal_handler: Any) ->None:
    """If we're using a Streamlit that does not expose its own
    StreamlitCallbackHandler, use our own implementation.
    """

    # Simulate an older Streamlit: raise ImportError only for the module
    # that would provide the external handler; delegate every other
    # import to the saved, real __import__.
    def external_import_error(name: str, globals: Any, locals: Any,
        fromlist: Any, level: int) ->Any:
        if name == 'streamlit.external.langchain':
            raise ImportError
        return self.builtins_import(name, globals, locals, fromlist, level)
    builtins.__import__ = external_import_error
    parent_container = MagicMock()
    thought_labeler = MagicMock()
    StreamlitCallbackHandler(parent_container, max_thought_containers=1,
        expand_new_thoughts=True, collapse_completed_thoughts=False,
        thought_labeler=thought_labeler)
    # The factory must have fallen back to the internal implementation,
    # forwarding all construction arguments unchanged.
    mock_internal_handler.assert_called_once_with(parent_container,
        max_thought_containers=1, expand_new_thoughts=True,
        collapse_completed_thoughts=False, thought_labeler=thought_labeler)
If we're using a Streamlit that does not expose its own StreamlitCallbackHandler, use our own implementation.
test_similarity_search_without_metadata
"""Test end to end constructions and search without metadata.""" texts = ['foo', 'bar', 'baz'] docsearch = XataVectorStore.from_texts(api_key=os.getenv('XATA_API_KEY'), db_url=os.getenv('XATA_DB_URL'), texts=texts, embedding=embedding_openai) docsearch.wait_for_indexing(ndocs=3) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')] docsearch.delete(delete_all=True)
def test_similarity_search_without_metadata(self, embedding_openai:
    OpenAIEmbeddings) ->None:
    """Test end to end constructions and search without metadata."""
    docsearch = XataVectorStore.from_texts(api_key=os.getenv(
        'XATA_API_KEY'), db_url=os.getenv('XATA_DB_URL'), texts=['foo',
        'bar', 'baz'], embedding=embedding_openai)
    docsearch.wait_for_indexing(ndocs=3)
    assert docsearch.similarity_search('foo', k=1) == [Document(
        page_content='foo')]
    # Clean up so the shared database does not accumulate test rows.
    docsearch.delete(delete_all=True)
Test end to end constructions and search without metadata.
__aiter__
return self
def __aiter__(self) ->AsyncIterator[Any]:
    """Return self so the object serves as its own async iterator."""
    return self
null
from_collection_name
"""Create new empty vectorstore with collection_name. Or connect to an existing vectorstore in database if exists. Arguments should be the same as when the vectorstore was created.""" sample_embedding = embedding.embed_query('Hello pgvecto_rs!') return cls(embedding=embedding, dimension=len(sample_embedding), db_url= db_url, collection_name=collection_name)
@classmethod
def from_collection_name(cls, embedding: Embeddings, db_url: str,
    collection_name: str) ->PGVecto_rs:
    """Create new empty vectorstore with collection_name.
    Or connect to an existing vectorstore in database if exists.
    Arguments should be the same as when the vectorstore was created."""
    # Probe the embedding dimension with a throwaway query; the store
    # needs it at construction time and Embeddings does not expose it.
    sample_embedding = embedding.embed_query('Hello pgvecto_rs!')
    return cls(embedding=embedding, dimension=len(sample_embedding),
        db_url=db_url, collection_name=collection_name)
Create new empty vectorstore with collection_name. Or connect to an existing vectorstore in database if exists. Arguments should be the same as when the vectorstore was created.
_get_text
"""Convert sample to string format""" if self._content_key is not None: content = sample.get(self._content_key) else: content = sample if self._text_content and not isinstance(content, str): raise ValueError( f'Expected page_content is string, got {type(content)} instead. Set `text_content=False` if the desired input for `page_content` is not a string' ) elif isinstance(content, str): return content elif isinstance(content, dict): return json.dumps(content) if content else '' else: return str(content) if content is not None else ''
def _get_text(self, sample: Any) ->str:
    """Convert a sample to its page-content string.

    Looks up ``self._content_key`` when configured, otherwise treats the
    whole sample as the content. Raises ValueError when strict text
    content is required but the value is not a string.
    """
    content = sample if self._content_key is None else sample.get(self.
        _content_key)
    if self._text_content and not isinstance(content, str):
        raise ValueError(
            f'Expected page_content is string, got {type(content)} instead. Set `text_content=False` if the desired input for `page_content` is not a string'
            )
    if isinstance(content, str):
        return content
    if isinstance(content, dict):
        # An empty dict maps to '' rather than '{}'.
        return json.dumps(content) if content else ''
    return str(content) if content is not None else ''
Convert sample to string format
on_tool_error
"""Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error. """ handle_event(self.handlers, 'on_tool_error', 'ignore_agent', error, run_id= self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs)
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when tool errors.

    Args:
        error (Exception or KeyboardInterrupt): The error.
        **kwargs: Forwarded to each handler's ``on_tool_error``.
    """
    # Fan the event out to all handlers; ones with ignore_agent set skip it.
    handle_event(self.handlers, 'on_tool_error', 'ignore_agent', error,
        run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.
        tags, **kwargs)
Run when tool errors. Args: error (Exception or KeyboardInterrupt): The error.
test_redis_cache
from upstash_redis import Redis langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1) llm = FakeLLM() params = llm.dict() params['stop'] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) langchain.llm_cache.update('foo', llm_string, [Generation(text='fizz')]) output = llm.generate(['foo']) expected_output = LLMResult(generations=[[Generation(text='fizz')]], llm_output={}) assert output == expected_output lookup_output = langchain.llm_cache.lookup('foo', llm_string) if lookup_output and len(lookup_output) > 0: assert lookup_output == expected_output.generations[0] langchain.llm_cache.clear() output = llm.generate(['foo']) assert output != expected_output langchain.llm_cache.redis.flushall()
@pytest.mark.requires('upstash_redis')
def test_redis_cache() ->None:
    """Exercise the Upstash Redis LLM cache end to end."""
    from upstash_redis import Redis
    # ttl=1 second so cached entries expire quickly after the test.
    langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=
        TOKEN), ttl=1)
    llm = FakeLLM()
    params = llm.dict()
    params['stop'] = None
    llm_string = str(sorted([(k, v) for k, v in params.items()]))
    langchain.llm_cache.update('foo', llm_string, [Generation(text='fizz')])
    output = llm.generate(['foo'])
    expected_output = LLMResult(generations=[[Generation(text='fizz')]],
        llm_output={})
    assert output == expected_output
    # The entry may already have expired (1s TTL), so only assert when
    # the lookup actually returned something.
    lookup_output = langchain.llm_cache.lookup('foo', llm_string)
    if lookup_output and len(lookup_output) > 0:
        assert lookup_output == expected_output.generations[0]
    # After clearing, generation must no longer hit the cached value.
    langchain.llm_cache.clear()
    output = llm.generate(['foo'])
    assert output != expected_output
    # Clean the shared database for subsequent tests.
    langchain.llm_cache.redis.flushall()
null
load
"""Load Documents""" with tempfile.TemporaryDirectory() as temp_dir: file_path = f'{temp_dir}/{self.file.name}' self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE) loader = UnstructuredFileLoader(file_path) return loader.load()
def load(self) ->List[Document]:
    """Load Documents"""
    # Download into a temp dir that is removed once loading completes.
    with tempfile.TemporaryDirectory() as temp_dir:
        self.file.download(to_path=temp_dir, chunk_size=CHUNK_SIZE)
        downloaded_path = f'{temp_dir}/{self.file.name}'
        return UnstructuredFileLoader(downloaded_path).load()
Load Documents
on_agent_action_common
self.agent_actions += 1 self.starts += 1
def on_agent_action_common(self) ->None:
    """Record one agent action; an action also counts as a start."""
    self.agent_actions += 1
    self.starts += 1
null
_import_clickhouse
from langchain_community.vectorstores.clickhouse import Clickhouse return Clickhouse
def _import_clickhouse() ->Any:
    """Lazily import and return the ``Clickhouse`` vectorstore class.

    Deferring the import keeps the optional dependency out of module
    import time.
    """
    from langchain_community.vectorstores.clickhouse import Clickhouse
    return Clickhouse
null
custom_postprocess
if 'content' in msg: return msg['content'] logger.warning( f'Got ambiguous message in postprocessing; returning as-is: msg = {msg}') return str(msg)
def custom_postprocess(self, msg: dict) ->str:
    """Extract the 'content' field from a message dict.

    Falls back to stringifying the whole message (with a warning) when
    no 'content' key is present.
    """
    try:
        return msg['content']
    except KeyError:
        logger.warning(
            f'Got ambiguous message in postprocessing; returning as-is: msg = {msg}'
            )
        return str(msg)
null
_insert
_i_str = self._build_istr(transac, column_names) self.client.command(_i_str)
def _insert(self, transac: Iterable, column_names: Iterable[str]) ->None:
    """Build an INSERT statement for the batch and execute it on the client."""
    _i_str = self._build_istr(transac, column_names)
    self.client.command(_i_str)
null
test_vertexai_single_call_with_examples
model = ChatVertexAI() raw_context = 'My name is Ned. You are my personal assistant.' question = '2+2' text_question, text_answer = '4+4', '8' inp = HumanMessage(content=text_question) output = AIMessage(content=text_answer) context = SystemMessage(content=raw_context) message = HumanMessage(content=question) response = model([context, message], examples=[inp, output]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_vertexai_single_call_with_examples() ->None:
    """A chat call with a few-shot example pair returns a string AIMessage."""
    model = ChatVertexAI()
    context = SystemMessage(content=
        'My name is Ned. You are my personal assistant.')
    message = HumanMessage(content='2+2')
    # One worked example: human asks 4+4, assistant answers 8.
    example_input = HumanMessage(content='4+4')
    example_output = AIMessage(content='8')
    response = model([context, message], examples=[example_input,
        example_output])
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)
null
check_libcublas
if not is_libcublas_available(): pytest.skip(reason='libcublas.so is not available') yield
@pytest.fixture(scope='module', autouse=True)
def check_libcublas() ->Iterator[None]:
    """Skip every test in the module when libcublas.so is unavailable."""
    if not is_libcublas_available():
        pytest.skip(reason='libcublas.so is not available')
    yield
null
on_llm_end
"""Collect token usage.""" if response.llm_output is None: return None if 'token_usage' not in response.llm_output: with self._lock: self.successful_requests += 1 return None token_usage = response.llm_output['token_usage'] completion_tokens = token_usage.get('completion_tokens', 0) prompt_tokens = token_usage.get('prompt_tokens', 0) model_name = standardize_model_name(response.llm_output.get('model_name', '')) if model_name in MODEL_COST_PER_1K_TOKENS: completion_cost = get_openai_token_cost_for_model(model_name, completion_tokens, is_completion=True) prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens) else: completion_cost = 0 prompt_cost = 0 with self._lock: self.total_cost += prompt_cost + completion_cost self.total_tokens += token_usage.get('total_tokens', 0) self.prompt_tokens += prompt_tokens self.completion_tokens += completion_tokens self.successful_requests += 1
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
    """Collect token usage and accumulate cost/counters under the lock."""
    llm_output = response.llm_output
    if llm_output is None:
        return None
    if 'token_usage' not in llm_output:
        # No usage data: still count the request as successful.
        with self._lock:
            self.successful_requests += 1
        return None
    usage = llm_output['token_usage']
    completion = usage.get('completion_tokens', 0)
    prompt = usage.get('prompt_tokens', 0)
    model = standardize_model_name(llm_output.get('model_name', ''))
    if model in MODEL_COST_PER_1K_TOKENS:
        completion_cost = get_openai_token_cost_for_model(model,
            completion, is_completion=True)
        prompt_cost = get_openai_token_cost_for_model(model, prompt)
    else:
        # Unknown model: no pricing available, count tokens only.
        completion_cost = 0
        prompt_cost = 0
    with self._lock:
        self.total_cost += prompt_cost + completion_cost
        self.total_tokens += usage.get('total_tokens', 0)
        self.prompt_tokens += prompt
        self.completion_tokens += completion
        self.successful_requests += 1
Collect token usage.
update_file
""" Updates a file with new content. Parameters: file_query(str): Contains the file path and the file contents. The old file contents is wrapped in OLD <<<< and >>>> OLD The new file contents is wrapped in NEW <<<< and >>>> NEW For example: test/hello.txt OLD <<<< Hello Earth! >>>> OLD NEW <<<< Hello Mars! >>>> NEW Returns: A success or failure message """ try: file_path = file_query.split('\n')[0] old_file_contents = file_query.split('OLD <<<<')[1].split('>>>> OLD')[0 ].strip() new_file_contents = file_query.split('NEW <<<<')[1].split('>>>> NEW')[0 ].strip() file_content = self.read_file(file_path) updated_file_content = file_content.replace(old_file_contents, new_file_contents) if file_content == updated_file_content: return ( 'File content was not updated because old content was not found.It may be helpful to use the read_file action to get the current file contents.' ) commit = {'branch': self.gitlab_branch, 'commit_message': 'Create ' + file_path, 'actions': [{'action': 'update', 'file_path': file_path, 'content': updated_file_content}]} self.gitlab_repo_instance.commits.create(commit) return 'Updated file ' + file_path except Exception as e: return 'Unable to update file due to error:\n' + str(e)
def update_file(self, file_query: str) ->str:
    """
    Updates a file with new content.
    Parameters:
        file_query(str): Contains the file path and the file contents.
            The old file contents is wrapped in OLD <<<< and >>>> OLD
            The new file contents is wrapped in NEW <<<< and >>>> NEW
            For example:
            test/hello.txt
            OLD <<<<
            Hello Earth!
            >>>> OLD
            NEW <<<<
            Hello Mars!
            >>>> NEW
    Returns:
        A success or failure message
    """
    try:
        # First line is the path; the OLD/NEW markers delimit the
        # content to replace and its replacement.
        file_path = file_query.split('\n')[0]
        old_file_contents = file_query.split('OLD <<<<')[1].split('>>>> OLD')[
            0].strip()
        new_file_contents = file_query.split('NEW <<<<')[1].split('>>>> NEW')[
            0].strip()
        file_content = self.read_file(file_path)
        updated_file_content = file_content.replace(old_file_contents,
            new_file_contents)
        if file_content == updated_file_content:
            return (
                'File content was not updated because old content was not found.It may be helpful to use the read_file action to get the current file contents.'
                )
        # Fix: the commit message previously said 'Create ' even though
        # this commit performs an 'update' action.
        commit = {'branch': self.gitlab_branch, 'commit_message': 'Update ' +
            file_path, 'actions': [{'action': 'update', 'file_path':
            file_path, 'content': updated_file_content}]}
        self.gitlab_repo_instance.commits.create(commit)
        return 'Updated file ' + file_path
    except Exception as e:
        return 'Unable to update file due to error:\n' + str(e)
Updates a file with new content. Parameters: file_query(str): Contains the file path and the file contents. The old file contents is wrapped in OLD <<<< and >>>> OLD The new file contents is wrapped in NEW <<<< and >>>> NEW For example: test/hello.txt OLD <<<< Hello Earth! >>>> OLD NEW <<<< Hello Mars! >>>> NEW Returns: A success or failure message