Dataset schema (four string columns, with observed value-length ranges):

    method_name: string, 1 to 78 characters
    method_body: string, 3 to 9.66k characters
    full_code: string, 31 to 10.7k characters
    docstring: string, 4 to 4.74k characters
_get_elements
def _get_elements(self) -> List:
    from unstructured.__version__ import __version__ as __unstructured_version__
    from unstructured.partition.md import partition_md
    _unstructured_version = __unstructured_version__.split('-')[0]
    unstructured_version = tuple([int(x) for x in _unstructured_version.split('.')])
    if unstructured_version < (0, 4, 16):
        raise ValueError(
            f'You are on unstructured version {__unstructured_version__}. '
            'Partitioning markdown files is only supported in unstructured>=0.4.16.'
        )
    return partition_md(filename=self.file_path, **self.unstructured_kwargs)
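The method above gates on the installed unstructured version by parsing __version__ into an integer tuple. A self-contained sketch of that comparison pattern (the version strings below are illustrative, not taken from the source):

# Parse 'MAJOR.MINOR.PATCH' (ignoring any '-dev' suffix) into a tuple.
def parse_version(version: str) -> tuple:
    return tuple(int(x) for x in version.split('-')[0].split('.'))

assert parse_version('0.4.16-dev2') >= (0, 4, 16)
assert parse_version('0.4.9') < (0, 4, 16)

Tuple comparison is lexicographic, so (0, 4, 9) < (0, 4, 16) holds even though '0.4.9' > '0.4.16' as strings.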
post
def post(self, request: Any) -> Any:
    headers = {'Authorization': f'Bearer {self.api_key.get_secret_value()}'}
    response = requests.post(self.api_url, headers=headers, json=request)
    if not response.ok:
        raise ValueError(f'HTTP {response.status_code} error: {response.text}')
    if response.json()['base_resp']['status_code'] > 0:
        raise ValueError(
            f"API {response.json()['base_resp']['status_code']} error: "
            f"{response.json()['base_resp']['status_msg']}"
        )
    return response.json()['reply']
update_cache
"""Update the cache and get the LLM output.""" llm_cache = get_llm_cache() for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if llm_cache is not None: llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output
def update_cache(existing_prompts: Dict[int, List], llm_string: str, missing_prompt_idxs: List[int], new_results: LLMResult, prompts: List[str] ) ->Optional[dict]: """Update the cache and get the LLM output.""" llm_cache = get_llm_cache() for i, result in enumerate(new_results.generations): existing_prompts[missing_prompt_idxs[i]] = result prompt = prompts[missing_prompt_idxs[i]] if llm_cache is not None: llm_cache.update(prompt, llm_string, result) llm_output = new_results.llm_output return llm_output
Update the cache and get the LLM output.
_llm_type
"""Return type of llm.""" return 'pai_eas_endpoint'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'pai_eas_endpoint'
Return type of llm.
test_get_combined_score
def test_get_combined_score(
        time_weighted_retriever: TimeWeightedVectorStoreRetriever) -> None:
    document = Document(
        page_content='Test document',
        metadata={'last_accessed_at': datetime(2023, 4, 14, 12, 0)})
    vector_salience = 0.7
    expected_hours_passed = 2.5
    current_time = datetime(2023, 4, 14, 14, 30)
    combined_score = time_weighted_retriever._get_combined_score(
        document, vector_salience, current_time)
    expected_score = (1.0 - time_weighted_retriever.decay_rate
                      ) ** expected_hours_passed + vector_salience
    assert combined_score == pytest.approx(expected_score)
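The test above encodes the retriever's time-weighted scoring rule, score = (1 - decay_rate) ** hours_passed + vector_salience. A worked example with assumed numbers (the fixture's actual decay_rate is not shown; 0.01 is used here purely for illustration):

decay_rate = 0.01           # assumed; not taken from the fixture
hours_passed = 2.5          # 12:00 to 14:30
vector_salience = 0.7
score = (1.0 - decay_rate) ** hours_passed + vector_salience
print(round(score, 4))      # 1.6752: recency term ~0.9752 plus salience 0.7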
stream
def stream(self, input: RouterInput, config: Optional[RunnableConfig] = None,
           **kwargs: Optional[Any]) -> Iterator[Output]:
    key = input['key']
    actual_input = input['input']
    if key not in self.runnables:
        raise ValueError(f"No runnable associated with key '{key}'")
    runnable = self.runnables[key]
    yield from runnable.stream(actual_input, config)
_stop
@property
def _stop(self) -> List[str]:
    return ['Observation:']
_import_outline
def _import_outline() -> Any:
    from langchain_community.utilities.outline import OutlineAPIWrapper
    return OutlineAPIWrapper
test_tongyi_call
"""Test valid call to tongyi.""" llm = Tongyi() output = llm('who are you') assert isinstance(output, str)
def test_tongyi_call() ->None: """Test valid call to tongyi.""" llm = Tongyi() output = llm('who are you') assert isinstance(output, str)
Test valid call to tongyi.
test_typeerror
def test_typeerror() -> None:
    assert dumps({(1, 2): 3}) == (
        '{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], '
        '"repr": "{(1, 2): 3}"}'
    )
on_tool_error
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
    pass
_file_types
"""Return supported file types."""
@property @abstractmethod def _file_types(self) ->Sequence[_FileType]: """Return supported file types."""
Return supported file types.
load
def load(self) -> Any:
    with open(self.persist_path, 'r') as fp:
        return json.load(fp)
__init__
"""Create a new HugeGraph wrapper instance.""" try: from hugegraph.connection import PyHugeGraph except ImportError: raise ValueError( 'Please install HugeGraph Python client first: `pip3 install hugegraph-python`' ) self.username = username self.password = password self.address = address self.port = port self.graph = graph self.client = PyHugeGraph(address, port, user=username, pwd=password, graph =graph) self.schema = '' try: self.refresh_schema() except Exception as e: raise ValueError(f'Could not refresh schema. Error: {e}')
def __init__(self, username: str='default', password: str='default', address: str='127.0.0.1', port: int=8081, graph: str='hugegraph') ->None: """Create a new HugeGraph wrapper instance.""" try: from hugegraph.connection import PyHugeGraph except ImportError: raise ValueError( 'Please install HugeGraph Python client first: `pip3 install hugegraph-python`' ) self.username = username self.password = password self.address = address self.port = port self.graph = graph self.client = PyHugeGraph(address, port, user=username, pwd=password, graph=graph) self.schema = '' try: self.refresh_schema() except Exception as e: raise ValueError(f'Could not refresh schema. Error: {e}')
Create a new HugeGraph wrapper instance.
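A minimal usage sketch for the constructor above, assuming the enclosing class is the HugeGraph graph wrapper and a HugeGraph server is reachable at the given address (all connection values below are placeholders):

# Hypothetical connection values; adjust for your deployment.
graph = HugeGraph(username='admin', password='secret',
                  address='127.0.0.1', port=8081, graph='hugegraph')
print(graph.schema)  # populated by refresh_schema() during __init__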
_import_mlflow_ai_gateway
def _import_mlflow_ai_gateway() -> Any:
    from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
    return MlflowAIGateway
_load_retrieval_qa_with_sources_chain
def _load_retrieval_qa_with_sources_chain(config: dict, **kwargs: Any
                                          ) -> RetrievalQAWithSourcesChain:
    if 'retriever' in kwargs:
        retriever = kwargs.pop('retriever')
    else:
        raise ValueError('`retriever` must be present.')
    if 'combine_documents_chain' in config:
        combine_documents_chain_config = config.pop('combine_documents_chain')
        combine_documents_chain = load_chain_from_config(
            combine_documents_chain_config)
    elif 'combine_documents_chain_path' in config:
        combine_documents_chain = load_chain(
            config.pop('combine_documents_chain_path'))
    else:
        raise ValueError(
            'One of `combine_documents_chain` or '
            '`combine_documents_chain_path` must be present.'
        )
    return RetrievalQAWithSourcesChain(
        combine_documents_chain=combine_documents_chain,
        retriever=retriever, **config)
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: search query text. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be in range 0.0 and 1.0, exclusive. If Node, uses service's default which is 0.05. Returns: List of Documents selected by maximal marginal relevance. """ query_embedding = self.embedding_model.embed_query(query) doc_tuples = self._search_with_score_and_embeddings_by_vector(query_embedding, fetch_k, filter, brute_force, fraction_lists_to_search) doc_embeddings = [d[1] for d in doc_tuples] mmr_doc_indexes = maximal_marginal_relevance(np.array(query_embedding), doc_embeddings, lambda_mult=lambda_mult, k=k) return [doc_tuples[i][0] for i in mmr_doc_indexes]
def max_marginal_relevance_search(self, query: str, k: int=DEFAULT_TOP_K, fetch_k: int=DEFAULT_TOP_K * 5, lambda_mult: float=0.5, filter: Optional[Dict[str, Any]]=None, brute_force: bool=False, fraction_lists_to_search: Optional[float]=None, **kwargs: Any) ->List[ Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: search query text. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be in range 0.0 and 1.0, exclusive. If Node, uses service's default which is 0.05. Returns: List of Documents selected by maximal marginal relevance. """ query_embedding = self.embedding_model.embed_query(query) doc_tuples = self._search_with_score_and_embeddings_by_vector( query_embedding, fetch_k, filter, brute_force, fraction_lists_to_search ) doc_embeddings = [d[1] for d in doc_tuples] mmr_doc_indexes = maximal_marginal_relevance(np.array(query_embedding), doc_embeddings, lambda_mult=lambda_mult, k=k) return [doc_tuples[i][0] for i in mmr_doc_indexes]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: search query text. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be in range 0.0 and 1.0, exclusive. If Node, uses service's default which is 0.05. Returns: List of Documents selected by maximal marginal relevance.
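A minimal usage sketch for the MMR method above, assuming `store` is an already-populated instance of this vector store (the variable name, query, and filter values are illustrative):

# Hypothetical store; fetch 20 candidates, return 4 diverse results.
docs = store.max_marginal_relevance_search(
    'renewable energy storage', k=4, fetch_k=20, lambda_mult=0.25,
    filter={'str_property': 'foo'})
for doc in docs:
    print(doc.page_content[:60])

Lower lambda_mult values favor diversity over raw similarity to the query.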
embeddings
@property
def embeddings(self) -> Optional[Embeddings]:
    return None
test_pdfminer_pdf_as_html_loader
"""Test PDFMinerPDFasHTMLLoader.""" file_path = Path(__file__).parent.parent / 'examples/hello.pdf' loader = PDFMinerPDFasHTMLLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__).parent.parent / 'examples/layout-parser-paper.pdf' loader = PDFMinerPDFasHTMLLoader(str(file_path)) docs = loader.load() assert len(docs) == 1
def test_pdfminer_pdf_as_html_loader() ->None: """Test PDFMinerPDFasHTMLLoader.""" file_path = Path(__file__).parent.parent / 'examples/hello.pdf' loader = PDFMinerPDFasHTMLLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__ ).parent.parent / 'examples/layout-parser-paper.pdf' loader = PDFMinerPDFasHTMLLoader(str(file_path)) docs = loader.load() assert len(docs) == 1
Test PDFMinerPDFasHTMLLoader.
OutputType
@property
def OutputType(self) -> Any:
    return self.input_type or Any
on_retriever_start_common
def on_retriever_start_common(self) -> None:
    self.starts += 1
    self.retriever_starts += 1
load
"""Load web pages.""" return list(self.lazy_load())
def load(self) ->List[Document]: """Load web pages.""" return list(self.lazy_load())
Load web pages.
InputType
@property
def InputType(self) -> Any:
    return self.input_type or Any
test_output_dict
def test_output_dict() -> None:
    runnable = RunnableLambda(lambda input: {'output': [AIMessage(
        content='you said: ' + '\n'.join(
            [str(m.content) for m in input['history']
             if isinstance(m, HumanMessage)] + [input['input']]))]})
    get_session_history = _get_get_session_history()
    with_history = RunnableWithMessageHistory(
        runnable, get_session_history, input_messages_key='input',
        history_messages_key='history', output_messages_key='output')
    config: RunnableConfig = {'configurable': {'session_id': '6'}}
    output = with_history.invoke({'input': 'hello'}, config)
    assert output == {'output': [AIMessage(content='you said: hello')]}
    output = with_history.invoke({'input': 'good bye'}, config)
    assert output == {'output': [AIMessage(content='you said: hello\ngood bye')]}
_validate_commands
"""Validate commands.""" commands = values.get('commands') if not isinstance(commands, list): values['commands'] = [commands] warnings.warn( 'The shell tool has no safeguards by default. Use at your own risk.') return values
@root_validator def _validate_commands(cls, values: dict) ->dict: """Validate commands.""" commands = values.get('commands') if not isinstance(commands, list): values['commands'] = [commands] warnings.warn( 'The shell tool has no safeguards by default. Use at your own risk.') return values
Validate commands.
_construct_json_body
def _construct_json_body(self, prompt: str, params: dict) -> dict:
    return {'prompt': prompt, **params}
prepare_inputs_for_autoembed
""" go over all the inputs and if something is either wrapped in _ToSelectFrom or _BasedOn, and if their inner values are not already _Embed, then wrap them in EmbedAndKeep while retaining their _ToSelectFrom or _BasedOn status """ next_inputs = inputs.copy() for k, v in next_inputs.items(): if isinstance(v, _ToSelectFrom) or isinstance(v, _BasedOn): if not isinstance(v.value, _Embed): next_inputs[k].value = EmbedAndKeep(v.value) return next_inputs
def prepare_inputs_for_autoembed(inputs: Dict[str, Any]) ->Dict[str, Any]: """ go over all the inputs and if something is either wrapped in _ToSelectFrom or _BasedOn, and if their inner values are not already _Embed, then wrap them in EmbedAndKeep while retaining their _ToSelectFrom or _BasedOn status """ next_inputs = inputs.copy() for k, v in next_inputs.items(): if isinstance(v, _ToSelectFrom) or isinstance(v, _BasedOn): if not isinstance(v.value, _Embed): next_inputs[k].value = EmbedAndKeep(v.value) return next_inputs
go over all the inputs and if something is either wrapped in _ToSelectFrom or _BasedOn, and if their inner values are not already _Embed, then wrap them in EmbedAndKeep while retaining their _ToSelectFrom or _BasedOn status
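A self-contained sketch of the wrapping behavior above, using stub classes in place of the real _BasedOn / _Embed / EmbedAndKeep types (the stub definitions are assumptions for illustration only):

# Stub types standing in for the real wrappers.
class _Embed:
    def __init__(self, value):
        self.value = value

class EmbedAndKeep(_Embed):
    pass

class _BasedOn:
    def __init__(self, value):
        self.value = value

inputs = {'user': _BasedOn('Tom'), 'note': 'plain value'}
for k, v in inputs.items():
    if isinstance(v, _BasedOn) and not isinstance(v.value, _Embed):
        v.value = EmbedAndKeep(v.value)  # mark for embedding, keep original value

print(type(inputs['user'].value).__name__)  # EmbedAndKeep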
load_evaluators
"""Load evaluators specified by a list of evaluator types. Parameters ---------- evaluators : Sequence[EvaluatorType] The list of evaluator types to load. llm : BaseLanguageModel, optional The language model to use for evaluation, if none is provided, a default ChatOpenAI gpt-4 model will be used. config : dict, optional A dictionary mapping evaluator types to additional keyword arguments, by default None **kwargs : Any Additional keyword arguments to pass to all evaluators. Returns ------- List[Chain] The loaded evaluators. Examples -------- >>> from langchain.evaluation import load_evaluators, EvaluatorType >>> evaluators = [EvaluatorType.QA, EvaluatorType.CRITERIA] >>> loaded_evaluators = load_evaluators(evaluators, criteria="helpfulness") """ loaded = [] for evaluator in evaluators: _kwargs = config.get(evaluator, {}) if config else {} loaded.append(load_evaluator(evaluator, llm=llm, **{**kwargs, **_kwargs})) return loaded
def load_evaluators(evaluators: Sequence[EvaluatorType], *, llm: Optional[ BaseLanguageModel]=None, config: Optional[dict]=None, **kwargs: Any ) ->List[Union[Chain, StringEvaluator]]: """Load evaluators specified by a list of evaluator types. Parameters ---------- evaluators : Sequence[EvaluatorType] The list of evaluator types to load. llm : BaseLanguageModel, optional The language model to use for evaluation, if none is provided, a default ChatOpenAI gpt-4 model will be used. config : dict, optional A dictionary mapping evaluator types to additional keyword arguments, by default None **kwargs : Any Additional keyword arguments to pass to all evaluators. Returns ------- List[Chain] The loaded evaluators. Examples -------- >>> from langchain.evaluation import load_evaluators, EvaluatorType >>> evaluators = [EvaluatorType.QA, EvaluatorType.CRITERIA] >>> loaded_evaluators = load_evaluators(evaluators, criteria="helpfulness") """ loaded = [] for evaluator in evaluators: _kwargs = config.get(evaluator, {}) if config else {} loaded.append(load_evaluator(evaluator, llm=llm, **{**kwargs, ** _kwargs})) return loaded
Load evaluators specified by a list of evaluator types. Parameters ---------- evaluators : Sequence[EvaluatorType] The list of evaluator types to load. llm : BaseLanguageModel, optional The language model to use for evaluation, if none is provided, a default ChatOpenAI gpt-4 model will be used. config : dict, optional A dictionary mapping evaluator types to additional keyword arguments, by default None **kwargs : Any Additional keyword arguments to pass to all evaluators. Returns ------- List[Chain] The loaded evaluators. Examples -------- >>> from langchain.evaluation import load_evaluators, EvaluatorType >>> evaluators = [EvaluatorType.QA, EvaluatorType.CRITERIA] >>> loaded_evaluators = load_evaluators(evaluators, criteria="helpfulness")
_call
"""Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = StochasticAI("Tell me a joke.") """ params = self.model_kwargs or {} params = {**params, **kwargs} response_post = requests.post(url=self.api_url, json={'prompt': prompt, 'params': params}, headers={'apiKey': f'{self.stochasticai_api_key.get_secret_value()}', 'Accept': 'application/json', 'Content-Type': 'application/json'}) response_post.raise_for_status() response_post_json = response_post.json() completed = False while not completed: response_get = requests.get(url=response_post_json['data'][ 'responseUrl'], headers={'apiKey': f'{self.stochasticai_api_key.get_secret_value()}', 'Accept': 'application/json', 'Content-Type': 'application/json'}) response_get.raise_for_status() response_get_json = response_get.json()['data'] text = response_get_json.get('completion') completed = text is not None time.sleep(0.5) text = text[0] if stop is not None: text = enforce_stop_tokens(text, stop) return text
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = StochasticAI("Tell me a joke.") """ params = self.model_kwargs or {} params = {**params, **kwargs} response_post = requests.post(url=self.api_url, json={'prompt': prompt, 'params': params}, headers={'apiKey': f'{self.stochasticai_api_key.get_secret_value()}', 'Accept': 'application/json', 'Content-Type': 'application/json'}) response_post.raise_for_status() response_post_json = response_post.json() completed = False while not completed: response_get = requests.get(url=response_post_json['data'][ 'responseUrl'], headers={'apiKey': f'{self.stochasticai_api_key.get_secret_value()}', 'Accept': 'application/json', 'Content-Type': 'application/json'}) response_get.raise_for_status() response_get_json = response_get.json()['data'] text = response_get_json.get('completion') completed = text is not None time.sleep(0.5) text = text[0] if stop is not None: text = enforce_stop_tokens(text, stop) return text
Call out to StochasticAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = StochasticAI("Tell me a joke.")
ideation_prompt
def ideation_prompt(self) -> ChatPromptTemplate:
    return ChatPromptTemplate.from_strings(self.get_prompt_strings('ideation'))
text_to_docs
"""Convert a string or list of strings to a list of Documents with metadata.""" from langchain.text_splitter import RecursiveCharacterTextSplitter if isinstance(text, str): text = [text] page_docs = [Document(page_content=page) for page in text] for i, doc in enumerate(page_docs): doc.metadata['page'] = i + 1 doc_chunks = [] for doc in page_docs: text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, separators=['\n\n', '\n', '.', '!', '?', ',', ' ', ''], chunk_overlap=20) chunks = text_splitter.split_text(doc.page_content) for i, chunk in enumerate(chunks): doc = Document(page_content=chunk, metadata={'page': doc.metadata[ 'page'], 'chunk': i}) doc.metadata['source' ] = f"{doc.metadata['page']}-{doc.metadata['chunk']}" doc_chunks.append(doc) return doc_chunks
def text_to_docs(text: Union[str, List[str]]) ->List[Document]: """Convert a string or list of strings to a list of Documents with metadata.""" from langchain.text_splitter import RecursiveCharacterTextSplitter if isinstance(text, str): text = [text] page_docs = [Document(page_content=page) for page in text] for i, doc in enumerate(page_docs): doc.metadata['page'] = i + 1 doc_chunks = [] for doc in page_docs: text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, separators=['\n\n', '\n', '.', '!', '?', ',', ' ', ''], chunk_overlap=20) chunks = text_splitter.split_text(doc.page_content) for i, chunk in enumerate(chunks): doc = Document(page_content=chunk, metadata={'page': doc. metadata['page'], 'chunk': i}) doc.metadata['source' ] = f"{doc.metadata['page']}-{doc.metadata['chunk']}" doc_chunks.append(doc) return doc_chunks
Convert a string or list of strings to a list of Documents with metadata.
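A minimal usage sketch for text_to_docs above; the two input strings stand in for page texts:

docs = text_to_docs(['First page of text...', 'Second page of text...'])
for d in docs:
    print(d.metadata)  # e.g. {'page': 1, 'chunk': 0, 'source': '1-0'}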
test_serialize_llmchain_env
@pytest.mark.requires('openai')
def test_serialize_llmchain_env() -> None:
    llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello')
    prompt = PromptTemplate.from_template('hello {name}!')
    chain = LLMChain(llm=llm, prompt=prompt)

    import os
    has_env = 'OPENAI_API_KEY' in os.environ
    if not has_env:
        os.environ['OPENAI_API_KEY'] = 'env_variable'
    llm_2 = OpenAI(model='davinci', temperature=0.5)
    prompt_2 = PromptTemplate.from_template('hello {name}!')
    chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)

    assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)

    if not has_env:
        del os.environ['OPENAI_API_KEY']
_client_params
"""Get the parameters used for the openai client.""" set_model_value = self.model if self.model_name is not None: set_model_value = self.model_name self.client.api_base = self.api_base self.client.organization = self.organization creds: Dict[str, Any] = {'model': set_model_value, 'force_timeout': self. request_timeout, 'api_base': self.api_base} return {**self._default_params, **creds}
@property def _client_params(self) ->Dict[str, Any]: """Get the parameters used for the openai client.""" set_model_value = self.model if self.model_name is not None: set_model_value = self.model_name self.client.api_base = self.api_base self.client.organization = self.organization creds: Dict[str, Any] = {'model': set_model_value, 'force_timeout': self.request_timeout, 'api_base': self.api_base} return {**self._default_params, **creds}
Get the parameters used for the openai client.
http_paths_and_methods
"""Return a args for every method in cached OpenAPI spec in test_specs.""" http_paths_and_methods = [] for test_spec in _get_test_specs(): spec_name = test_spec.parent.name if test_spec.suffix == '.json': with test_spec.open('r') as f: spec = json.load(f) else: with test_spec.open('r') as f: spec = yaml.safe_load(f.read()) parsed_spec = OpenAPISpec.from_file(test_spec) for path, method in _get_paths_and_methods_from_spec_dictionary(spec): http_paths_and_methods.append((spec_name, parsed_spec, path, method)) return http_paths_and_methods
def http_paths_and_methods() ->List[Tuple[str, OpenAPISpec, str, str]]: """Return a args for every method in cached OpenAPI spec in test_specs.""" http_paths_and_methods = [] for test_spec in _get_test_specs(): spec_name = test_spec.parent.name if test_spec.suffix == '.json': with test_spec.open('r') as f: spec = json.load(f) else: with test_spec.open('r') as f: spec = yaml.safe_load(f.read()) parsed_spec = OpenAPISpec.from_file(test_spec) for path, method in _get_paths_and_methods_from_spec_dictionary(spec): http_paths_and_methods.append((spec_name, parsed_spec, path, method)) return http_paths_and_methods
Return a args for every method in cached OpenAPI spec in test_specs.
connect_to_elasticsearch
@staticmethod
def connect_to_elasticsearch(*, es_url: Optional[str] = None,
                             cloud_id: Optional[str] = None,
                             api_key: Optional[str] = None,
                             username: Optional[str] = None,
                             password: Optional[str] = None) -> 'Elasticsearch':
    try:
        import elasticsearch
    except ImportError:
        raise ImportError(
            'Could not import elasticsearch python package. '
            'Please install it with `pip install elasticsearch`.'
        )
    if es_url and cloud_id:
        raise ValueError(
            'Both es_url and cloud_id are defined. Please provide only one.')

    connection_params: Dict[str, Any] = {}
    if es_url:
        connection_params['hosts'] = [es_url]
    elif cloud_id:
        connection_params['cloud_id'] = cloud_id
    else:
        raise ValueError('Please provide either elasticsearch_url or cloud_id.')

    if api_key:
        connection_params['api_key'] = api_key
    elif username and password:
        connection_params['basic_auth'] = username, password

    es_client = elasticsearch.Elasticsearch(
        **connection_params,
        headers={'user-agent': ElasticsearchChatMessageHistory.get_user_agent()})
    try:
        es_client.info()
    except Exception as err:
        logger.error(f'Error connecting to Elasticsearch: {err}')
        raise err
    return es_client
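A minimal usage sketch for the connector above. Exactly one of es_url or cloud_id may be supplied, plus either api_key or a username/password pair; the endpoint and credentials below are placeholders:

# Placeholder endpoint and credentials.
es = ElasticsearchChatMessageHistory.connect_to_elasticsearch(
    es_url='http://localhost:9200',
    username='elastic',
    password='changeme')
print(es.info()['version']['number'])  # the connection was already verified above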
load_memory_variables
"""Return baz variable.""" return {'baz': 'foo'}
def load_memory_variables(self, inputs: Optional[Dict[str, Any]]=None) ->Dict[ str, str]: """Return baz variable.""" return {'baz': 'foo'}
Return baz variable.
test_konko_streaming_callback_test
"""Evaluate streaming's token callback functionality.""" callback_instance = FakeCallbackHandler() callback_mgr = CallbackManager([callback_instance]) chat_instance = ChatKonko(max_tokens=10, streaming=True, temperature=0, callback_manager=callback_mgr, verbose=True) msg = HumanMessage(content='Hi') chat_response = chat_instance([msg]) assert callback_instance.llm_streams > 0 assert isinstance(chat_response, BaseMessage)
def test_konko_streaming_callback_test() ->None: """Evaluate streaming's token callback functionality.""" callback_instance = FakeCallbackHandler() callback_mgr = CallbackManager([callback_instance]) chat_instance = ChatKonko(max_tokens=10, streaming=True, temperature=0, callback_manager=callback_mgr, verbose=True) msg = HumanMessage(content='Hi') chat_response = chat_instance([msg]) assert callback_instance.llm_streams > 0 assert isinstance(chat_response, BaseMessage)
Evaluate streaming's token callback functionality.
from_llm
"""Initialize from llm using default template. Args: vectorstore: Vector store for storing web pages llm: llm for search question generation search: GoogleSearchAPIWrapper prompt: prompt to generating search questions num_search_results: Number of pages per Google search text_splitter: Text splitter for splitting web pages into chunks Returns: WebResearchRetriever """ if not prompt: QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(default_prompt= DEFAULT_SEARCH_PROMPT, conditionals=[(lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)]) prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm) llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser= QuestionListOutputParser()) return cls(vectorstore=vectorstore, llm_chain=llm_chain, search=search, num_search_results=num_search_results, text_splitter=text_splitter)
@classmethod def from_llm(cls, vectorstore: VectorStore, llm: BaseLLM, search: GoogleSearchAPIWrapper, prompt: Optional[BasePromptTemplate]=None, num_search_results: int=1, text_splitter: RecursiveCharacterTextSplitter=RecursiveCharacterTextSplitter( chunk_size=1500, chunk_overlap=150)) ->'WebResearchRetriever': """Initialize from llm using default template. Args: vectorstore: Vector store for storing web pages llm: llm for search question generation search: GoogleSearchAPIWrapper prompt: prompt to generating search questions num_search_results: Number of pages per Google search text_splitter: Text splitter for splitting web pages into chunks Returns: WebResearchRetriever """ if not prompt: QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(default_prompt =DEFAULT_SEARCH_PROMPT, conditionals=[(lambda llm: isinstance( llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)]) prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm) llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser= QuestionListOutputParser()) return cls(vectorstore=vectorstore, llm_chain=llm_chain, search=search, num_search_results=num_search_results, text_splitter=text_splitter)
Initialize from llm using default template. Args: vectorstore: Vector store for storing web pages llm: llm for search question generation search: GoogleSearchAPIWrapper prompt: prompt to generating search questions num_search_results: Number of pages per Google search text_splitter: Text splitter for splitting web pages into chunks Returns: WebResearchRetriever
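A minimal construction sketch for from_llm above, assuming the vector store, LLM, and Google search wrapper are already configured; every component below is a placeholder:

# Hypothetical, pre-configured components.
retriever = WebResearchRetriever.from_llm(
    vectorstore=my_vectorstore,   # any VectorStore instance
    llm=my_llm,                   # any BaseLLM
    search=my_google_search,      # GoogleSearchAPIWrapper with API keys set
    num_search_results=3)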
test_chroma_add_documents_mixed_metadata
def test_chroma_add_documents_mixed_metadata() -> None:
    db = Chroma(embedding_function=FakeEmbeddings())
    docs = [Document(page_content='foo'),
            Document(page_content='bar', metadata={'baz': 1})]
    ids = ['0', '1']
    actual_ids = db.add_documents(docs, ids=ids)
    assert actual_ids == ids
    search = db.similarity_search('foo bar')
    assert sorted(search, key=lambda d: d.page_content) == sorted(
        docs, key=lambda d: d.page_content)
delete_file
""" Deletes a file from the repo Parameters: file_path(str): Where the file is Returns: str: Success or failure message """ try: self.gitlab_repo_instance.files.delete(file_path, self.gitlab_branch, 'Delete ' + file_path) return 'Deleted file ' + file_path except Exception as e: return 'Unable to delete file due to error:\n' + str(e)
def delete_file(self, file_path: str) ->str: """ Deletes a file from the repo Parameters: file_path(str): Where the file is Returns: str: Success or failure message """ try: self.gitlab_repo_instance.files.delete(file_path, self. gitlab_branch, 'Delete ' + file_path) return 'Deleted file ' + file_path except Exception as e: return 'Unable to delete file due to error:\n' + str(e)
Deletes a file from the repo Parameters: file_path(str): Where the file is Returns: str: Success or failure message
model
"""For backwards compatibility.""" return self.llm
@property def model(self) ->BaseChatModel: """For backwards compatibility.""" return self.llm
For backwards compatibility.
is_anomalous
""" Detect if given text is anomalous from the dataset Args: query: Text to detect if it is anomaly Returns: True or False """ vcol = self._vector_index vtype = self._vector_type embeddings = self._embedding.embed_query(query) str_embeddings = [str(f) for f in embeddings] qv_comma = ','.join(str_embeddings) podstore = self._pod + '.' + self._store q = 'select anomalous(' + vcol + ", '" + qv_comma + "', 'type=" + vtype + "')" q += ' from ' + podstore js = self.run(q) if isinstance(js, list) and len(js) == 0: return False jd = json.loads(js[0]) if jd['anomalous'] == 'YES': return True return False
def is_anomalous(self, query: str, **kwargs: Any) ->bool: """ Detect if given text is anomalous from the dataset Args: query: Text to detect if it is anomaly Returns: True or False """ vcol = self._vector_index vtype = self._vector_type embeddings = self._embedding.embed_query(query) str_embeddings = [str(f) for f in embeddings] qv_comma = ','.join(str_embeddings) podstore = self._pod + '.' + self._store q = ('select anomalous(' + vcol + ", '" + qv_comma + "', 'type=" + vtype + "')") q += ' from ' + podstore js = self.run(q) if isinstance(js, list) and len(js) == 0: return False jd = json.loads(js[0]) if jd['anomalous'] == 'YES': return True return False
Detect if given text is anomalous from the dataset Args: query: Text to detect if it is anomaly Returns: True or False
setup_class
@classmethod
def setup_class(cls) -> None:
    if not os.getenv('OPENAI_API_KEY'):
        raise ValueError('OPENAI_API_KEY environment variable is not set')
results
def results(self, url: str) -> list:
    res = self._response_json(url)
    return self._filter_results(res)
is_lc_serializable
@classmethod
def is_lc_serializable(cls) -> bool:
    return False
refresh_schema
"""Refreshes the Kùzu graph schema information""" node_properties = [] node_table_names = self.conn._get_node_table_names() for table_name in node_table_names: current_table_schema = {'properties': [], 'label': table_name} properties = self.conn._get_node_property_names(table_name) for property_name in properties: property_type = properties[property_name]['type'] list_type_flag = '' if properties[property_name]['dimension'] > 0: if 'shape' in properties[property_name]: for s in properties[property_name]['shape']: list_type_flag += '[%s]' % s else: for i in range(properties[property_name]['dimension']): list_type_flag += '[]' property_type += list_type_flag current_table_schema['properties'].append((property_name, property_type)) node_properties.append(current_table_schema) relationships = [] rel_tables = self.conn._get_rel_table_names() for table in rel_tables: relationships.append('(:%s)-[:%s]->(:%s)' % (table['src'], table['name' ], table['dst'])) rel_properties = [] for table in rel_tables: current_table_schema = {'properties': [], 'label': table['name']} properties_text = self.conn._connection.get_rel_property_names(table[ 'name']).split('\n') for i, line in enumerate(properties_text): if i < 3: continue if not line: continue property_name, property_type = line.strip().split(' ') current_table_schema['properties'].append((property_name, property_type)) rel_properties.append(current_table_schema) self.schema = f"""Node properties: {node_properties} Relationships properties: {rel_properties} Relationships: {relationships} """
def refresh_schema(self) ->None: """Refreshes the Kùzu graph schema information""" node_properties = [] node_table_names = self.conn._get_node_table_names() for table_name in node_table_names: current_table_schema = {'properties': [], 'label': table_name} properties = self.conn._get_node_property_names(table_name) for property_name in properties: property_type = properties[property_name]['type'] list_type_flag = '' if properties[property_name]['dimension'] > 0: if 'shape' in properties[property_name]: for s in properties[property_name]['shape']: list_type_flag += '[%s]' % s else: for i in range(properties[property_name]['dimension']): list_type_flag += '[]' property_type += list_type_flag current_table_schema['properties'].append((property_name, property_type)) node_properties.append(current_table_schema) relationships = [] rel_tables = self.conn._get_rel_table_names() for table in rel_tables: relationships.append('(:%s)-[:%s]->(:%s)' % (table['src'], table[ 'name'], table['dst'])) rel_properties = [] for table in rel_tables: current_table_schema = {'properties': [], 'label': table['name']} properties_text = self.conn._connection.get_rel_property_names(table ['name']).split('\n') for i, line in enumerate(properties_text): if i < 3: continue if not line: continue property_name, property_type = line.strip().split(' ') current_table_schema['properties'].append((property_name, property_type)) rel_properties.append(current_table_schema) self.schema = f"""Node properties: {node_properties} Relationships properties: {rel_properties} Relationships: {relationships} """
Refreshes the Kùzu graph schema information
test_copy_file
"""Test the FileCopy tool.""" with TemporaryDirectory() as temp_dir: tool = CopyFileTool() source_file = Path(temp_dir) / 'source.txt' destination_file = Path(temp_dir) / 'destination.txt' source_file.write_text('Hello, world!') tool.run({'source_path': str(source_file), 'destination_path': str( destination_file)}) assert source_file.exists() assert destination_file.exists() assert source_file.read_text() == 'Hello, world!' assert destination_file.read_text() == 'Hello, world!'
def test_copy_file() ->None: """Test the FileCopy tool.""" with TemporaryDirectory() as temp_dir: tool = CopyFileTool() source_file = Path(temp_dir) / 'source.txt' destination_file = Path(temp_dir) / 'destination.txt' source_file.write_text('Hello, world!') tool.run({'source_path': str(source_file), 'destination_path': str( destination_file)}) assert source_file.exists() assert destination_file.exists() assert source_file.read_text() == 'Hello, world!' assert destination_file.read_text() == 'Hello, world!'
Test the FileCopy tool.
replace_imports
"""Replace imports in each Python code block with links to their documentation and append the import info in a comment""" all_imports = [] with open(file, 'r') as f: data = f.read() file_name = os.path.basename(file) _DOC_TITLE = _get_doc_title(data, file_name) def replacer(match): code = match.group(2) existing_comment_re = re.compile('^<!--IMPORTS:.*?-->\\n', re.MULTILINE) code = existing_comment_re.sub('', code) imports = [] for import_match in _IMPORT_RE.finditer(code): module = import_match.group(1) imports_str = import_match.group(3).replace('(\n', '').replace('\n)', '') imported_classes = [imp.strip() for imp in re.split(',\\s*', imports_str.replace('\n', '')) if imp.strip()] for class_name in imported_classes: try: module_path = get_full_module_name(module, class_name) except AttributeError as e: logger.warning(f'Could not find module for {class_name}, {e}') continue except ImportError as e: logger.warning(f'Failed to load for class {class_name}, {e}') continue url = _BASE_URL + module_path.split('.')[1 ] + '/' + module_path + '.' + class_name + '.html' imports.append({'imported': class_name, 'source': module, 'docs': url, 'title': _DOC_TITLE}) if imports: all_imports.extend(imports) import_comment = f'<!--IMPORTS:{json.dumps(imports)}-->' return match.group(1) + import_comment + '\n' + code + match.group(3) else: return match.group(0) data = code_block_re.sub(replacer, data) with open(file, 'w') as f: f.write(data) return all_imports
def replace_imports(file): """Replace imports in each Python code block with links to their documentation and append the import info in a comment""" all_imports = [] with open(file, 'r') as f: data = f.read() file_name = os.path.basename(file) _DOC_TITLE = _get_doc_title(data, file_name) def replacer(match): code = match.group(2) existing_comment_re = re.compile('^<!--IMPORTS:.*?-->\\n', re.MULTILINE ) code = existing_comment_re.sub('', code) imports = [] for import_match in _IMPORT_RE.finditer(code): module = import_match.group(1) imports_str = import_match.group(3).replace('(\n', '').replace( '\n)', '') imported_classes = [imp.strip() for imp in re.split(',\\s*', imports_str.replace('\n', '')) if imp.strip()] for class_name in imported_classes: try: module_path = get_full_module_name(module, class_name) except AttributeError as e: logger.warning( f'Could not find module for {class_name}, {e}') continue except ImportError as e: logger.warning( f'Failed to load for class {class_name}, {e}') continue url = _BASE_URL + module_path.split('.')[1 ] + '/' + module_path + '.' + class_name + '.html' imports.append({'imported': class_name, 'source': module, 'docs': url, 'title': _DOC_TITLE}) if imports: all_imports.extend(imports) import_comment = f'<!--IMPORTS:{json.dumps(imports)}-->' return match.group(1) + import_comment + '\n' + code + match.group( 3) else: return match.group(0) data = code_block_re.sub(replacer, data) with open(file, 'w') as f: f.write(data) return all_imports
Replace imports in each Python code block with links to their documentation and append the import info in a comment
__init__
def __init__(self, url: str, key: str, username: str = 'root',
             timeout: int = 10):
    self.url = url
    self.key = key
    self.username = username
    self.timeout = timeout
_create_chat_result
def _create_chat_result(self, response: Any) -> ChatResult:
    generations = []
    for res in response.choices:
        message = _convert_dict_to_message(res.message)
        finish_reason = res.finish_reason
        gen = ChatGeneration(message=message,
                             generation_info={'finish_reason': finish_reason})
        generations.append(gen)
        if finish_reason != 'stop':
            logger.warning('Giga generation stopped with reason: %s',
                           finish_reason)
        if self.verbose:
            logger.info('Giga response: %s', message.content)
    llm_output = {'token_usage': response.usage, 'model_name': response.model}
    return ChatResult(generations=generations, llm_output=llm_output)
test_debug_is_settable_via_setter
def test_debug_is_settable_via_setter() -> None:
    from langchain_core import globals
    from langchain_core.callbacks.manager import _get_debug

    previous_value = globals._debug
    previous_fn_reading = _get_debug()
    assert previous_value == previous_fn_reading

    set_debug(not previous_value)
    new_value = globals._debug
    new_fn_reading = _get_debug()
    try:
        assert new_value != previous_value
        assert new_value == new_fn_reading
        assert new_value == get_debug()
    finally:
        set_debug(previous_value)
_construct_initial_inputs
def _construct_initial_inputs(self, docs: List[Document],
                              **kwargs: Any) -> Dict[str, Any]:
    base_info = {'page_content': docs[0].page_content}
    base_info.update(docs[0].metadata)
    document_info = {k: base_info[k]
                     for k in self.document_prompt.input_variables}
    base_inputs: dict = {self.document_variable_name:
                         self.document_prompt.format(**document_info)}
    inputs = {**base_inputs, **kwargs}
    return inputs
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k, lambda_mult, filter)
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int= 20, lambda_mult: float=0.5, filter: Optional[dict]=None, **kwargs: Any ) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k, lambda_mult, filter)
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents selected by maximal marginal relevance.
test_agent_tool_return_direct_in_intermediate_steps
"""Test agent using tools that return directly.""" tool = 'Search' responses = [f"""FooBarBaz Action: {tool} Action Input: misalignment""", """Oh well Final Answer: curses foiled again"""] fake_llm = FakeListLLM(responses=responses) tools = [Tool(name='Search', func=lambda x: x, description= 'Useful for searching', return_direct=True)] agent = initialize_agent(tools, fake_llm, agent=AgentType. ZERO_SHOT_REACT_DESCRIPTION, return_intermediate_steps=True) resp = agent('when was langchain made') assert isinstance(resp, dict) assert resp['output'] == 'misalignment' assert len(resp['intermediate_steps']) == 1 action, _action_intput = resp['intermediate_steps'][0] assert action.tool == 'Search'
def test_agent_tool_return_direct_in_intermediate_steps() ->None: """Test agent using tools that return directly.""" tool = 'Search' responses = [f'FooBarBaz\nAction: {tool}\nAction Input: misalignment', """Oh well Final Answer: curses foiled again"""] fake_llm = FakeListLLM(responses=responses) tools = [Tool(name='Search', func=lambda x: x, description= 'Useful for searching', return_direct=True)] agent = initialize_agent(tools, fake_llm, agent=AgentType. ZERO_SHOT_REACT_DESCRIPTION, return_intermediate_steps=True) resp = agent('when was langchain made') assert isinstance(resp, dict) assert resp['output'] == 'misalignment' assert len(resp['intermediate_steps']) == 1 action, _action_intput = resp['intermediate_steps'][0] assert action.tool == 'Search'
Test agent using tools that return directly.
_embed_with_retry
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
    response = embeddings.client.create(**kwargs)
    return _check_response(response)
__init__
"""Initialize with a list of image data (bytes) or file paths Args: images: Either a single image or a list of images. Accepts image data (bytes) or file paths to images. blip_processor: The name of the pre-trained BLIP processor. blip_model: The name of the pre-trained BLIP model. """ if isinstance(images, (str, bytes)): self.images = [images] else: self.images = images self.blip_processor = blip_processor self.blip_model = blip_model
def __init__(self, images: Union[str, bytes, List[Union[str, bytes]]], blip_processor: str='Salesforce/blip-image-captioning-base', blip_model: str='Salesforce/blip-image-captioning-base'): """Initialize with a list of image data (bytes) or file paths Args: images: Either a single image or a list of images. Accepts image data (bytes) or file paths to images. blip_processor: The name of the pre-trained BLIP processor. blip_model: The name of the pre-trained BLIP model. """ if isinstance(images, (str, bytes)): self.images = [images] else: self.images = images self.blip_processor = blip_processor self.blip_model = blip_model
Initialize with a list of image data (bytes) or file paths Args: images: Either a single image or a list of images. Accepts image data (bytes) or file paths to images. blip_processor: The name of the pre-trained BLIP processor. blip_model: The name of the pre-trained BLIP model.
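A minimal usage sketch for the constructor above, assuming it belongs to an image-caption loader class (called ImageCaptionLoader here as an assumption; the file path is a placeholder):

# Hypothetical class name and file path.
loader = ImageCaptionLoader(images=['photos/cat.jpg'])
print(loader.images)      # ['photos/cat.jpg']: a single input is wrapped in a list
print(loader.blip_model)  # 'Salesforce/blip-image-captioning-base'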
input_mapper
def input_mapper(d: dict) -> str:
    return d['some_input']
load
"""Load Documents from bilibili url.""" results = [] for url in self.video_urls: transcript, video_info = self._get_bilibili_subs_and_info(url) doc = Document(page_content=transcript, metadata=video_info) results.append(doc) return results
def load(self) ->List[Document]: """Load Documents from bilibili url.""" results = [] for url in self.video_urls: transcript, video_info = self._get_bilibili_subs_and_info(url) doc = Document(page_content=transcript, metadata=video_info) results.append(doc) return results
Load Documents from bilibili url.
_on_llm_end
def _on_llm_end(self, run: Run) -> None:
    crumbs = self.get_breadcrumbs(run)
    self.function_callback(
        f"{get_colored_text('[llm/end]', color='blue')} "
        + get_bolded_text(
            f'[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n')
        + f"{try_json_stringify(run.outputs, '[response]')}")
similarity_search_with_score
"""Return documents from Marqo that are similar to the query as well as their scores. Args: query (str): The query to search with, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Tuple[Document, float]]: The matching documents and their scores, ordered by descending score. """ results = self.marqo_similarity_search(query=query, k=k) scored_documents = self._construct_documents_from_results_with_score(results) return scored_documents
def similarity_search_with_score(self, query: Union[str, Dict[str, float]], k: int=4) ->List[Tuple[Document, float]]: """Return documents from Marqo that are similar to the query as well as their scores. Args: query (str): The query to search with, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Tuple[Document, float]]: The matching documents and their scores, ordered by descending score. """ results = self.marqo_similarity_search(query=query, k=k) scored_documents = self._construct_documents_from_results_with_score( results) return scored_documents
Return documents from Marqo that are similar to the query as well as their scores. Args: query (str): The query to search with, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Tuple[Document, float]]: The matching documents and their scores, ordered by descending score.
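A minimal usage sketch for the weighted-query form accepted above, assuming `marqo_store` is an initialized Marqo vector store (the terms and weights are illustrative):

# Hypothetical store; positive and negative weights bias the search.
results = marqo_store.similarity_search_with_score(
    {'electric vehicles': 1.0, 'combustion engines': -0.5}, k=4)
for doc, score in results:
    print(round(score, 3), doc.page_content[:50])

Negative weights let a query move away from a concept, which is the point of the weighted form.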
test_structured_tool_from_function
"""Test that structured tools can be created from functions.""" def foo(bar: int, baz: str) ->str: """Docstring Args: bar: int baz: str """ raise NotImplementedError() structured_tool = StructuredTool.from_function(foo) assert structured_tool.name == 'foo' assert structured_tool.args == {'bar': {'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type': 'string'}} assert structured_tool.args_schema.schema() == {'title': 'fooSchemaSchema', 'type': 'object', 'properties': {'bar': {'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type': 'string'}}, 'required': [ 'bar', 'baz']} prefix = 'foo(bar: int, baz: str) -> str - ' assert foo.__doc__ is not None assert structured_tool.description == prefix + foo.__doc__.strip()
def test_structured_tool_from_function() ->None: """Test that structured tools can be created from functions.""" def foo(bar: int, baz: str) ->str: """Docstring Args: bar: int baz: str """ raise NotImplementedError() structured_tool = StructuredTool.from_function(foo) assert structured_tool.name == 'foo' assert structured_tool.args == {'bar': {'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type': 'string'}} assert structured_tool.args_schema.schema() == {'title': 'fooSchemaSchema', 'type': 'object', 'properties': {'bar': {'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type': 'string' }}, 'required': ['bar', 'baz']} prefix = 'foo(bar: int, baz: str) -> str - ' assert foo.__doc__ is not None assert structured_tool.description == prefix + foo.__doc__.strip()
Test that structured tools can be created from functions.
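The same factory can be exercised outside the test; a minimal sketch (import path assumed):

from langchain.tools import StructuredTool  # import path assumed

def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

tool = StructuredTool.from_function(multiply)
print(tool.args)  # schema inferred from the type hints of `multiply`
print(tool.run({'a': 6, 'b': 7}))  # 42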
_chain_type
"""Return the chain type.""" return 'vector_db_qa'
@property def _chain_type(self) ->str: """Return the chain type.""" return 'vector_db_qa'
Return the chain type.
test_long_context_reorder
"""Test Lost in the middle reordering get_relevant_docs.""" texts = ['Basquetball is a great sport.', 'Fly me to the moon is one of my favourite songs.', 'The Celtics are my favourite team.', 'This is a document about the Boston Celtics', 'I simply love going to the movies', 'The Boston Celtics won the game by 20 points', 'This is just a random text.', 'Elden Ring is one of the best games in the last 15 years.', 'L. Kornet is one of the best Celtics players.', 'Larry Bird was an iconic NBA player.'] embeddings = OpenAIEmbeddings() retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever( search_kwargs={'k': 10}) reordering = LongContextReorder() docs = retriever.get_relevant_documents('Tell me about the Celtics') actual = reordering.transform_documents(docs) first_and_last = list(actual[:2]) + list(actual[-2:]) assert len(actual) == 10 assert texts[2] in [d.page_content for d in first_and_last] assert texts[3] in [d.page_content for d in first_and_last] assert texts[5] in [d.page_content for d in first_and_last] assert texts[8] in [d.page_content for d in first_and_last]
def test_long_context_reorder() ->None: """Test Lost in the middle reordering get_relevant_docs.""" texts = ['Basquetball is a great sport.', 'Fly me to the moon is one of my favourite songs.', 'The Celtics are my favourite team.', 'This is a document about the Boston Celtics', 'I simply love going to the movies', 'The Boston Celtics won the game by 20 points', 'This is just a random text.', 'Elden Ring is one of the best games in the last 15 years.', 'L. Kornet is one of the best Celtics players.', 'Larry Bird was an iconic NBA player.'] embeddings = OpenAIEmbeddings() retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever( search_kwargs={'k': 10}) reordering = LongContextReorder() docs = retriever.get_relevant_documents('Tell me about the Celtics') actual = reordering.transform_documents(docs) first_and_last = list(actual[:2]) + list(actual[-2:]) assert len(actual) == 10 assert texts[2] in [d.page_content for d in first_and_last] assert texts[3] in [d.page_content for d in first_and_last] assert texts[5] in [d.page_content for d in first_and_last] assert texts[8] in [d.page_content for d in first_and_last]
Test Lost in the middle reordering get_relevant_docs.
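The reordering itself needs no embeddings or retriever; a self-contained sketch (import paths assumed):

from langchain_core.documents import Document  # import paths assumed
from langchain_community.document_transformers import LongContextReorder

docs = [Document(page_content=f'doc {i}') for i in range(6)]
reordered = LongContextReorder().transform_documents(docs)
# 'Lost in the middle': the highest-ranked inputs end up at the head and
# tail of the output list, the weakest in the middle.
print([d.page_content for d in reordered])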
_identifying_params
return {}
@property def _identifying_params(self) ->Dict[str, Any]: return {}
null
_deploy
"""Call to Beam.""" try: import beam if beam.__path__ == '': raise ImportError except ImportError: raise ImportError( 'Could not import beam python package. Please install it with `curl https://raw.githubusercontent.com/slai-labs/get-beam/main/get-beam.sh -sSfL | sh`.' ) self.app_creation() self.run_creation() process = subprocess.run('beam deploy app.py', shell=True, capture_output= True, text=True) if process.returncode == 0: output = process.stdout logger.info(output) lines = output.split('\n') for line in lines: if line.startswith(' i Send requests to: https://apps.beam.cloud/'): self.app_id = line.split('/')[-1] self.url = line.split(':')[1].strip() return self.app_id raise ValueError( f"""Failed to retrieve the appID from the deployment output. Deployment output: {output}""" ) else: raise ValueError(f'Deployment failed. Error: {process.stderr}')
def _deploy(self) ->str: """Call to Beam.""" try: import beam if beam.__path__ == '': raise ImportError except ImportError: raise ImportError( 'Could not import beam python package. Please install it with `curl https://raw.githubusercontent.com/slai-labs/get-beam/main/get-beam.sh -sSfL | sh`.' ) self.app_creation() self.run_creation() process = subprocess.run('beam deploy app.py', shell=True, capture_output=True, text=True) if process.returncode == 0: output = process.stdout logger.info(output) lines = output.split('\n') for line in lines: if line.startswith(' i Send requests to: https://apps.beam.cloud/' ): self.app_id = line.split('/')[-1] self.url = line.split(':')[1].strip() return self.app_id raise ValueError( f"""Failed to retrieve the appID from the deployment output. Deployment output: {output}""" ) else: raise ValueError(f'Deployment failed. Error: {process.stderr}')
Call to Beam.
get_runtime_environment
"""Get information about the LangChain runtime environment.""" from langchain import __version__ return {'library_version': __version__, 'library': 'langchain', 'platform': platform.platform(), 'runtime': 'python', 'runtime_version': platform. python_version()}
@lru_cache(maxsize=1) def get_runtime_environment() ->dict: """Get information about the LangChain runtime environment.""" from langchain import __version__ return {'library_version': __version__, 'library': 'langchain', 'platform': platform.platform(), 'runtime': 'python', 'runtime_version': platform.python_version()}
Get information about the LangChain runtime environment.
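Because the result is memoized with `lru_cache(maxsize=1)`, repeated calls return the same dict object; for example:

env = get_runtime_environment()
print(env['library'], env['library_version'])  # e.g. langchain 0.1.0
print(env['runtime'], env['runtime_version'])  # e.g. python 3.11.4
assert get_runtime_environment() is env  # cached: identical object on every call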
_get_tables_todo
"""Get the tables that still need to be queried.""" return [table for table in tables_todo if table not in self.schemas]
def _get_tables_todo(self, tables_todo: List[str]) ->List[str]: """Get the tables that still need to be queried.""" return [table for table in tables_todo if table not in self.schemas]
Get the tables that still need to be queried.
get_cleaned_operation_id
"""Get a cleaned operation id from an operation id.""" operation_id = operation.operationId if operation_id is None: path = re.sub('[^a-zA-Z0-9]', '_', path.lstrip('/')) operation_id = f'{path}_{method}' return operation_id.replace('-', '_').replace('.', '_').replace('/', '_')
@staticmethod def get_cleaned_operation_id(operation: Operation, path: str, method: str ) ->str: """Get a cleaned operation id from an operation id.""" operation_id = operation.operationId if operation_id is None: path = re.sub('[^a-zA-Z0-9]', '_', path.lstrip('/')) operation_id = f'{path}_{method}' return operation_id.replace('-', '_').replace('.', '_').replace('/', '_')
Get a cleaned operation id from an operation id.
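The fallback path (no `operationId` in the spec) can be reproduced standalone; a small sketch of the same cleaning rule:

import re

# Standalone sketch of the fallback when a spec has no operationId.
path, method = '/pets/{petId}', 'get'
operation_id = re.sub('[^a-zA-Z0-9]', '_', path.lstrip('/')) + f'_{method}'
print(operation_id.replace('-', '_').replace('.', '_').replace('/', '_'))
# -> pets__petId__get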
test_bigquery_loader_page_content_columns
loader = BigQueryLoader(
    'SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS a, 4 AS b',
    page_content_columns=['a'])
docs = loader.load()
assert len(docs) == 2
assert docs[0].page_content == 'a: 1'
assert docs[0].metadata == {}
assert docs[1].page_content == 'a: 3'
assert docs[1].metadata == {}
@pytest.mark.skipif(not bigquery_installed, reason='bigquery not installed') def test_bigquery_loader_page_content_columns() ->None: loader = BigQueryLoader( 'SELECT 1 AS a, 2 AS b UNION ALL SELECT 3 AS a, 4 AS b', page_content_columns=['a']) docs = loader.load() assert len(docs) == 2 assert docs[0].page_content == 'a: 1' assert docs[0].metadata == {} assert docs[1].page_content == 'a: 3' assert docs[1].metadata == {}
null
evaluation_name
return f'embedding_{self.distance_metric.value}_distance'
@property def evaluation_name(self) ->str: return f'embedding_{self.distance_metric.value}_distance'
null
test_singlestoredb
"""Test end to end construction and search.""" table_name = 'test_singlestoredb' drop(table_name) docsearch = SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name=table_name, host=TEST_SINGLESTOREDB_URL) output = docsearch.similarity_search('foo', k=1) assert output == TEST_SINGLE_RESULT drop(table_name)
@pytest.mark.skipif(not singlestoredb_installed, reason= 'singlestoredb not installed') def test_singlestoredb(texts: List[str]) ->None: """Test end to end construction and search.""" table_name = 'test_singlestoredb' drop(table_name) docsearch = SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name=table_name, host=TEST_SINGLESTOREDB_URL) output = docsearch.similarity_search('foo', k=1) assert output == TEST_SINGLE_RESULT drop(table_name)
Test end to end construction and search.
embed_query
"""Compute query embeddings using a TensorflowHub embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace('\n', ' ') embedding = self.embed([text]).numpy()[0] return embedding.tolist()
def embed_query(self, text: str) ->List[float]: """Compute query embeddings using a TensorflowHub embedding model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace('\n', ' ') embedding = self.embed([text]).numpy()[0] return embedding.tolist()
Compute query embeddings using a TensorflowHub embedding model. Args: text: The text to embed. Returns: Embeddings for the text.
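A hedged usage sketch, assuming this method lives on an embeddings class such as TensorflowHubEmbeddings (class name and default hub model are assumptions):

# Hypothetical usage; class name and default model are assumptions.
from langchain_community.embeddings import TensorflowHubEmbeddings

embeddings = TensorflowHubEmbeddings()  # downloads the hub model on first use
vector = embeddings.embed_query('hello world')
print(len(vector))  # embedding dimensionality, e.g. 512 for a USE-style model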
kv_singleio_dataset_name
import pandas as pd client = Client() df = pd.DataFrame({'the wackiest input': [ "What's the capital of California?", "What's the capital of Nevada?", "What's the capital of Oregon?", "What's the capital of Washington?"], 'unthinkable output': ['Sacramento', 'Carson City', 'Salem', 'Olympia']}) uid = str(uuid4())[-8:] _dataset_name = f'lcp singleio kv dataset integration tests - {uid}' client.upload_dataframe(df, name=_dataset_name, input_keys=[ 'the wackiest input'], output_keys=['unthinkable output'], description= 'Integration test dataset') yield _dataset_name
@pytest.fixture(scope='module') def kv_singleio_dataset_name() ->Iterator[str]: import pandas as pd client = Client() df = pd.DataFrame({'the wackiest input': [ "What's the capital of California?", "What's the capital of Nevada?", "What's the capital of Oregon?", "What's the capital of Washington?"], 'unthinkable output': [ 'Sacramento', 'Carson City', 'Salem', 'Olympia']}) uid = str(uuid4())[-8:] _dataset_name = f'lcp singleio kv dataset integration tests - {uid}' client.upload_dataframe(df, name=_dataset_name, input_keys=[ 'the wackiest input'], output_keys=['unthinkable output'], description='Integration test dataset') yield _dataset_name
null
_transform
for chunk in input: if isinstance(chunk, BaseMessage): yield self.parse_result([ChatGeneration(message=chunk)]) else: yield self.parse_result([Generation(text=chunk)])
def _transform(self, input: Iterator[Union[str, BaseMessage]]) ->Iterator[T]: for chunk in input: if isinstance(chunk, BaseMessage): yield self.parse_result([ChatGeneration(message=chunk)]) else: yield self.parse_result([Generation(text=chunk)])
null
_init_resp
return {k: None for k in self.callback_columns}
def _init_resp(self) ->Dict: return {k: None for k in self.callback_columns}
null
test_pickbest_textembedder_missing_context_throws
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_action = {'action': ['0', '1', '2']} event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_action, based_on={}) with pytest.raises(ValueError): feature_embedder.format(event)
@pytest.mark.requires('vowpal_wabbit_next') def test_pickbest_textembedder_missing_context_throws() ->None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder()) named_action = {'action': ['0', '1', '2']} event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_action, based_on={}) with pytest.raises(ValueError): feature_embedder.format(event)
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
similarity_search_with_score
"""Run similarity search with Chroma with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text with distance in float. """ embedding = self._embedding.embed_query(query) payload = {'queryVector': embedding, 'column': 'embedding', 'size': k} if filter: payload['filter'] = filter r = self._client.data().vector_search(self._table_name, payload=payload) if r.status_code != 200: raise Exception(f'Error running similarity search: {r.status_code} {r}') hits = r['records'] docs_and_scores = [(Document(page_content=hit['content'], metadata=self. _extractMetadata(hit)), hit['xata']['score']) for hit in hits] return docs_and_scores
def similarity_search_with_score(self, query: str, k: int=4, filter: Optional[dict]=None, **kwargs: Any) ->List[Tuple[Document, float]]: """Run similarity search with Chroma with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text with distance in float. """ embedding = self._embedding.embed_query(query) payload = {'queryVector': embedding, 'column': 'embedding', 'size': k} if filter: payload['filter'] = filter r = self._client.data().vector_search(self._table_name, payload=payload) if r.status_code != 200: raise Exception(f'Error running similarity search: {r.status_code} {r}' ) hits = r['records'] docs_and_scores = [(Document(page_content=hit['content'], metadata=self ._extractMetadata(hit)), hit['xata']['score']) for hit in hits] return docs_and_scores
Run similarity search with Chroma with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text with distance in float.
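A hedged call pattern; `vector_store` is assumed to be an initialized Xata-backed vector store, and the filter syntax is illustrative:

# `vector_store` is a hypothetical, pre-initialized XataVectorStore instance.
results = vector_store.similarity_search_with_score('quarterly revenue', k=2,
    filter={'department': 'finance'})  # filter keys are illustrative
for doc, score in results:
    print(score, doc.page_content[:50])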
similarity_search_with_score
embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_by_vector(embedding_vector, k, filter=filter)
def similarity_search_with_score(self, query: str, k: int=4, filter: Optional[Dict[str, str]]=None) ->List[Tuple[Document, float]]: embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_by_vector(embedding_vector, k, filter=filter)
null
astream
async def input_aiter() ->AsyncIterator[Input]: yield input return self.atransform(input_aiter(), config, **kwargs)
def astream(self, input: Input, config: Optional[RunnableConfig]=None, ** kwargs: Any) ->AsyncIterator[Output]: async def input_aiter() ->AsyncIterator[Input]: yield input return self.atransform(input_aiter(), config, **kwargs)
null
add_texts
raise NotImplementedError
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, **kwargs: Any) ->List[str]: raise NotImplementedError
null
_completion_with_retry
return llm.client.create(**kwargs)
@retry_decorator def _completion_with_retry(**kwargs: Any) ->Any: return llm.client.create(**kwargs)
null
get_table_info
"""Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper. """ all_table_names = self.get_usable_table_names() if table_names is not None: missing_tables = set(table_names).difference(all_table_names) if missing_tables: raise ValueError(f'table_names {missing_tables} not found in database') all_table_names = table_names meta_tables = [tbl for tbl in self._metadata.sorted_tables if tbl.name in set(all_table_names) and not (self.dialect == 'sqlite' and tbl.name. startswith('sqlite_'))] tables = [] for table in meta_tables: if self._custom_table_info and table.name in self._custom_table_info: tables.append(self._custom_table_info[table.name]) continue for k, v in table.columns.items(): if type(v.type) is NullType: table._columns.remove(v) create_table = str(CreateTable(table).compile(self._engine)) table_info = f'{create_table.rstrip()}' has_extra_info = (self._indexes_in_table_info or self. _sample_rows_in_table_info) if has_extra_info: table_info += '\n\n/*' if self._indexes_in_table_info: table_info += f'\n{self._get_table_indexes(table)}\n' if self._sample_rows_in_table_info: table_info += f'\n{self._get_sample_rows(table)}\n' if has_extra_info: table_info += '*/' tables.append(table_info) tables.sort() final_str = '\n\n'.join(tables) return final_str
def get_table_info(self, table_names: Optional[List[str]]=None) ->str: """Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper. """ all_table_names = self.get_usable_table_names() if table_names is not None: missing_tables = set(table_names).difference(all_table_names) if missing_tables: raise ValueError( f'table_names {missing_tables} not found in database') all_table_names = table_names meta_tables = [tbl for tbl in self._metadata.sorted_tables if tbl.name in set(all_table_names) and not (self.dialect == 'sqlite' and tbl.name .startswith('sqlite_'))] tables = [] for table in meta_tables: if self._custom_table_info and table.name in self._custom_table_info: tables.append(self._custom_table_info[table.name]) continue for k, v in table.columns.items(): if type(v.type) is NullType: table._columns.remove(v) create_table = str(CreateTable(table).compile(self._engine)) table_info = f'{create_table.rstrip()}' has_extra_info = (self._indexes_in_table_info or self. _sample_rows_in_table_info) if has_extra_info: table_info += '\n\n/*' if self._indexes_in_table_info: table_info += f'\n{self._get_table_indexes(table)}\n' if self._sample_rows_in_table_info: table_info += f'\n{self._get_sample_rows(table)}\n' if has_extra_info: table_info += '*/' tables.append(table_info) tables.sort() final_str = '\n\n'.join(tables) return final_str
Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper.
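In practice this is reached through the SQLDatabase wrapper; a minimal sketch (import path assumed, database file hypothetical):

from langchain_community.utilities import SQLDatabase  # import path assumed

db = SQLDatabase.from_uri('sqlite:///example.db', sample_rows_in_table_info=2)
print(db.get_table_info(table_names=['users']))  # CREATE TABLE ... plus 2 sample rows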
load
with open(self.persist_path, 'rb') as fp: return self.bson.loads(fp.read())
def load(self) ->Any: with open(self.persist_path, 'rb') as fp: return self.bson.loads(fp.read())
null
test_document_not_found
"""Test when document is not found.""" _dict = {'foo': Document(page_content='bar')} docstore = InMemoryDocstore(_dict) output = docstore.search('bar') assert output == 'ID bar not found.'
def test_document_not_found() ->None: """Test when document is not found.""" _dict = {'foo': Document(page_content='bar')} docstore = InMemoryDocstore(_dict) output = docstore.search('bar') assert output == 'ID bar not found.'
Test when document is not found.
validate_init_args
"""Validates proper combinations of init arguments""" errors = [] if url is None: errors.append('Must provide `base_url`') if api_key and not username or username and not api_key: errors.append( 'If one of `api_key` or `username` is provided, the other must be as well.' ) non_null_creds = list(x is not None for x in (api_key or username, session, oauth2, token)) if sum(non_null_creds) > 1: all_names = '(api_key, username)', 'session', 'oath2', 'token' provided = tuple(n for x, n in zip(non_null_creds, all_names) if x) errors.append( f'Cannot provide a value for more than one of: {all_names}. Received values for: {provided}' ) if oauth2 and set(oauth2.keys()) != {'access_token', 'access_token_secret', 'consumer_key', 'key_cert'}: errors.append( "You have either omitted require keys or added extra keys to the oauth2 dictionary. key values should be `['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`" ) return errors or None
@staticmethod def validate_init_args(url: Optional[str]=None, api_key: Optional[str]=None, username: Optional[str]=None, session: Optional[requests.Session]=None, oauth2: Optional[dict]=None, token: Optional[str]=None) ->Union[List, None ]: """Validates proper combinations of init arguments""" errors = [] if url is None: errors.append('Must provide `base_url`') if api_key and not username or username and not api_key: errors.append( 'If one of `api_key` or `username` is provided, the other must be as well.' ) non_null_creds = list(x is not None for x in (api_key or username, session, oauth2, token)) if sum(non_null_creds) > 1: all_names = '(api_key, username)', 'session', 'oath2', 'token' provided = tuple(n for x, n in zip(non_null_creds, all_names) if x) errors.append( f'Cannot provide a value for more than one of: {all_names}. Received values for: {provided}' ) if oauth2 and set(oauth2.keys()) != {'access_token', 'access_token_secret', 'consumer_key', 'key_cert'}: errors.append( "You have either omitted require keys or added extra keys to the oauth2 dictionary. key values should be `['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`" ) return errors or None
Validates proper combinations of init arguments
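A sketch of the validation behavior, assuming this staticmethod lives on a loader such as ConfluenceLoader (class name assumed):

# Hypothetical call; only the class name is an assumption, the logic is as shown above.
errors = ConfluenceLoader.validate_init_args(
    url='https://example.atlassian.net', api_key='secret-key')  # username missing
print(errors)
# ['If one of `api_key` or `username` is provided, the other must be as well.']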
_stream_response_to_chat_generation_chunk
"""Convert a stream response to a generation chunk.""" parsed_response = json.loads(stream_response) generation_info = parsed_response if parsed_response.get('done' ) is True else None return ChatGenerationChunk(message=AIMessageChunk(content=parsed_response. get('response', '')), generation_info=generation_info)
@deprecated('0.0.3', alternative= '_chat_stream_response_to_chat_generation_chunk') def _stream_response_to_chat_generation_chunk(stream_response: str ) ->ChatGenerationChunk: """Convert a stream response to a generation chunk.""" parsed_response = json.loads(stream_response) generation_info = parsed_response if parsed_response.get('done' ) is True else None return ChatGenerationChunk(message=AIMessageChunk(content= parsed_response.get('response', '')), generation_info=generation_info)
Convert a stream response to a generation chunk.
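Fed one line of an Ollama-style NDJSON stream, the helper behaves like this (payload values illustrative):

chunk = _stream_response_to_chat_generation_chunk('{"response": "Hi", "done": false}')
print(chunk.message.content)  # 'Hi'
print(chunk.generation_info)  # None until the final chunk, where done is true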
__init__
"""Initialize with a path. Args: path: Path to the directory containing the Obsidian files. encoding: Charset encoding, defaults to "UTF-8" collect_metadata: Whether to collect metadata from the front matter. Defaults to True. """ self.file_path = path self.encoding = encoding self.collect_metadata = collect_metadata
def __init__(self, path: str, encoding: str='UTF-8', collect_metadata: bool =True): """Initialize with a path. Args: path: Path to the directory containing the Obsidian files. encoding: Charset encoding, defaults to "UTF-8" collect_metadata: Whether to collect metadata from the front matter. Defaults to True. """ self.file_path = path self.encoding = encoding self.collect_metadata = collect_metadata
Initialize with a path. Args: path: Path to the directory containing the Obsidian files. encoding: Charset encoding, defaults to "UTF-8" collect_metadata: Whether to collect metadata from the front matter. Defaults to True.
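A minimal usage sketch, assuming this initializer belongs to ObsidianLoader (import path assumed, vault path hypothetical):

from langchain_community.document_loaders import ObsidianLoader  # import path assumed

loader = ObsidianLoader('/path/to/vault', collect_metadata=True)
docs = loader.load()  # one Document per note; front matter lands in metadata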
test_openai_embedding_documents_multiple
"""Test openai embeddings.""" documents = ['foo bar', 'bar foo', 'foo'] embedding = OpenAIEmbeddings(chunk_size=2) embedding.embedding_ctx_length = 8191 output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536
@pytest.mark.scheduled def test_openai_embedding_documents_multiple() ->None: """Test openai embeddings.""" documents = ['foo bar', 'bar foo', 'foo'] embedding = OpenAIEmbeddings(chunk_size=2) embedding.embedding_ctx_length = 8191 output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536
Test openai embeddings.
create_stuff_documents_chain
"""Create a chain for passing a list of Documents to a model. Args: llm: Language model. prompt: Prompt template. Must contain input variable "context", which will be used for passing in the formatted documents. output_parser: Output parser. Defaults to StrOutputParser. document_prompt: Prompt used for formatting each document into a string. Input variables can be "page_content" or any metadata keys that are in all documents. "page_content" will automatically retrieve the `Document.page_content`, and all other inputs variables will be automatically retrieved from the `Document.metadata` dictionary. Default to a prompt that only contains `Document.page_content`. document_separator: String separator to use between formatted document strings. Returns: An LCEL Runnable. The input is a dictionary that must have a "context" key that maps to a List[Document], and any other input variables expected in the prompt. The Runnable return type depends on output_parser used. Example: .. code-block:: python # pip install -U langchain langchain-community from langchain_community.chat_models import ChatOpenAI from langchain_core.documents import Document from langchain_core.prompts import ChatPromptTemplate from langchain.chains.combine_documents import create_stuff_documents_chain prompt = ChatPromptTemplate.from_messages( [("system", "What are everyone's favorite colors: {context}")] ) llm = ChatOpenAI(model_name="gpt-3.5-turbo") chain = create_stuff_documents_chain(llm, prompt) docs = [ Document(page_content="Jesse loves red but not yellow"), Document(page_content = "Jamal loves green but not as much as he loves orange") ] chain.invoke({"context": docs}) """ _validate_prompt(prompt) _document_prompt = document_prompt or DEFAULT_DOCUMENT_PROMPT _output_parser = output_parser or StrOutputParser() def format_docs(inputs: dict) ->str: return document_separator.join(format_document(doc, _document_prompt) for doc in inputs[DOCUMENTS_KEY]) return (RunnablePassthrough.assign(**{DOCUMENTS_KEY: format_docs}). with_config(run_name='format_inputs') | prompt | llm | _output_parser ).with_config(run_name='stuff_documents_chain')
def create_stuff_documents_chain(llm: LanguageModelLike, prompt: BasePromptTemplate, *, output_parser: Optional[BaseOutputParser]=None, document_prompt: Optional[BasePromptTemplate]=None, document_separator: str=DEFAULT_DOCUMENT_SEPARATOR) ->Runnable[Dict[str, Any], Any]: """Create a chain for passing a list of Documents to a model. Args: llm: Language model. prompt: Prompt template. Must contain input variable "context", which will be used for passing in the formatted documents. output_parser: Output parser. Defaults to StrOutputParser. document_prompt: Prompt used for formatting each document into a string. Input variables can be "page_content" or any metadata keys that are in all documents. "page_content" will automatically retrieve the `Document.page_content`, and all other inputs variables will be automatically retrieved from the `Document.metadata` dictionary. Default to a prompt that only contains `Document.page_content`. document_separator: String separator to use between formatted document strings. Returns: An LCEL Runnable. The input is a dictionary that must have a "context" key that maps to a List[Document], and any other input variables expected in the prompt. The Runnable return type depends on output_parser used. Example: .. code-block:: python # pip install -U langchain langchain-community from langchain_community.chat_models import ChatOpenAI from langchain_core.documents import Document from langchain_core.prompts import ChatPromptTemplate from langchain.chains.combine_documents import create_stuff_documents_chain prompt = ChatPromptTemplate.from_messages( [("system", "What are everyone's favorite colors: {context}")] ) llm = ChatOpenAI(model_name="gpt-3.5-turbo") chain = create_stuff_documents_chain(llm, prompt) docs = [ Document(page_content="Jesse loves red but not yellow"), Document(page_content = "Jamal loves green but not as much as he loves orange") ] chain.invoke({"context": docs}) """ _validate_prompt(prompt) _document_prompt = document_prompt or DEFAULT_DOCUMENT_PROMPT _output_parser = output_parser or StrOutputParser() def format_docs(inputs: dict) ->str: return document_separator.join(format_document(doc, _document_prompt) for doc in inputs[DOCUMENTS_KEY]) return (RunnablePassthrough.assign(**{DOCUMENTS_KEY: format_docs}). with_config(run_name='format_inputs') | prompt | llm | _output_parser ).with_config(run_name='stuff_documents_chain')
Create a chain for passing a list of Documents to a model. Args: llm: Language model. prompt: Prompt template. Must contain input variable "context", which will be used for passing in the formatted documents. output_parser: Output parser. Defaults to StrOutputParser. document_prompt: Prompt used for formatting each document into a string. Input variables can be "page_content" or any metadata keys that are in all documents. "page_content" will automatically retrieve the `Document.page_content`, and all other inputs variables will be automatically retrieved from the `Document.metadata` dictionary. Default to a prompt that only contains `Document.page_content`. document_separator: String separator to use between formatted document strings. Returns: An LCEL Runnable. The input is a dictionary that must have a "context" key that maps to a List[Document], and any other input variables expected in the prompt. The Runnable return type depends on output_parser used. Example: .. code-block:: python # pip install -U langchain langchain-community from langchain_community.chat_models import ChatOpenAI from langchain_core.documents import Document from langchain_core.prompts import ChatPromptTemplate from langchain.chains.combine_documents import create_stuff_documents_chain prompt = ChatPromptTemplate.from_messages( [("system", "What are everyone's favorite colors: {context}")] ) llm = ChatOpenAI(model_name="gpt-3.5-turbo") chain = create_stuff_documents_chain(llm, prompt) docs = [ Document(page_content="Jesse loves red but not yellow"), Document(page_content = "Jamal loves green but not as much as he loves orange") ] chain.invoke({"context": docs})
run
"""Run query through WolframAlpha and parse result.""" res = self.wolfram_client.query(query) try: assumption = next(res.pods).text answer = next(res.results).text except StopIteration: return "Wolfram Alpha wasn't able to answer it" if answer is None or answer == '': return 'No good Wolfram Alpha Result was found' else: return f'Assumption: {assumption} \nAnswer: {answer}'
def run(self, query: str) ->str: """Run query through WolframAlpha and parse result.""" res = self.wolfram_client.query(query) try: assumption = next(res.pods).text answer = next(res.results).text except StopIteration: return "Wolfram Alpha wasn't able to answer it" if answer is None or answer == '': return 'No good Wolfram Alpha Result was found' else: return f'Assumption: {assumption} \nAnswer: {answer}'
Run query through WolframAlpha and parse result.
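A hedged usage sketch, assuming this `run` belongs to the WolframAlphaAPIWrapper utility (name assumed) and that a WOLFRAM_ALPHA_APPID is set in the environment:

# Assumes WOLFRAM_ALPHA_APPID is set; wrapper name and import path assumed.
from langchain_community.utilities import WolframAlphaAPIWrapper

wolfram = WolframAlphaAPIWrapper()
print(wolfram.run('What is 2x + 5 = -3x + 7?'))
# Assumption: 2 x + 5 = -3 x + 7
# Answer: x = 2/5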
_get_folder
req_folder = urllib.request.Request(self._get_folder_url.format(id=folder_id)) with urllib.request.urlopen(req_folder) as response: json_data = json.loads(response.read().decode()) return json_data['title']
def _get_folder(self, folder_id: str) ->str:
    req_folder = urllib.request.Request(
        self._get_folder_url.format(id=folder_id))
    with urllib.request.urlopen(req_folder) as response:
        json_data = json.loads(response.read().decode())
        return json_data['title']
null
_import_vllm_openai
from langchain_community.llms.vllm import VLLMOpenAI return VLLMOpenAI
def _import_vllm_openai() ->Any: from langchain_community.llms.vllm import VLLMOpenAI return VLLMOpenAI
null
_import_nlpcloud
from langchain_community.llms.nlpcloud import NLPCloud return NLPCloud
def _import_nlpcloud() ->Any: from langchain_community.llms.nlpcloud import NLPCloud return NLPCloud
null
pytest_collection_modifyitems
"""Add implementations for handling custom markers. At the moment, this adds support for a custom `requires` marker. The `requires` marker is used to denote tests that require one or more packages to be installed to run. If the package is not installed, the test is skipped. The `requires` marker syntax is: .. code-block:: python @pytest.mark.requires("package1", "package2") def test_something(): ... """ required_pkgs_info: Dict[str, bool] = {} only_extended = config.getoption('--only-extended') or False only_core = config.getoption('--only-core') or False if only_extended and only_core: raise ValueError('Cannot specify both `--only-extended` and `--only-core`.' ) for item in items: requires_marker = item.get_closest_marker('requires') if requires_marker is not None: if only_core: item.add_marker(pytest.mark.skip(reason= 'Skipping not a core test.')) continue required_pkgs = requires_marker.args for pkg in required_pkgs: if pkg not in required_pkgs_info: try: installed = util.find_spec(pkg) is not None except Exception: installed = False required_pkgs_info[pkg] = installed if not required_pkgs_info[pkg]: if only_extended: pytest.fail( f'Package `{pkg}` is not installed but is required for extended tests. Please install the given package and try again.' ) else: item.add_marker(pytest.mark.skip(reason= f'Requires pkg: `{pkg}`')) break elif only_extended: item.add_marker(pytest.mark.skip(reason= 'Skipping not an extended test.'))
def pytest_collection_modifyitems(config: Config, items: Sequence[Function] ) ->None: """Add implementations for handling custom markers. At the moment, this adds support for a custom `requires` marker. The `requires` marker is used to denote tests that require one or more packages to be installed to run. If the package is not installed, the test is skipped. The `requires` marker syntax is: .. code-block:: python @pytest.mark.requires("package1", "package2") def test_something(): ... """ required_pkgs_info: Dict[str, bool] = {} only_extended = config.getoption('--only-extended') or False only_core = config.getoption('--only-core') or False if only_extended and only_core: raise ValueError( 'Cannot specify both `--only-extended` and `--only-core`.') for item in items: requires_marker = item.get_closest_marker('requires') if requires_marker is not None: if only_core: item.add_marker(pytest.mark.skip(reason= 'Skipping not a core test.')) continue required_pkgs = requires_marker.args for pkg in required_pkgs: if pkg not in required_pkgs_info: try: installed = util.find_spec(pkg) is not None except Exception: installed = False required_pkgs_info[pkg] = installed if not required_pkgs_info[pkg]: if only_extended: pytest.fail( f'Package `{pkg}` is not installed but is required for extended tests. Please install the given package and try again.' ) else: item.add_marker(pytest.mark.skip(reason= f'Requires pkg: `{pkg}`')) break elif only_extended: item.add_marker(pytest.mark.skip(reason= 'Skipping not an extended test.'))
Add implementations for handling custom markers. At the moment, this adds support for a custom `requires` marker. The `requires` marker is used to denote tests that require one or more packages to be installed to run. If the package is not installed, the test is skipped. The `requires` marker syntax is: .. code-block:: python @pytest.mark.requires("package1", "package2") def test_something(): ...
visit_operation
args = [arg.accept(self) for arg in operation.arguments] return {'bool': {self._format_func(operation.operator): args}}
def visit_operation(self, operation: Operation) ->Dict: args = [arg.accept(self) for arg in operation.arguments] return {'bool': {self._format_func(operation.operator): args}}
null
check_ruff
"""Run ruff check on a file.""" subprocess.check_output(f'ruff check {filepath}', stderr=subprocess.STDOUT, shell=True, timeout=3, text=True)
def check_ruff(filepath: str):
    """Run ruff check on a file."""
    subprocess.check_output(f'ruff check {filepath}', stderr=subprocess.STDOUT,
        shell=True, timeout=3, text=True)
Run ruff check on a file.
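Since `check_output` raises on a non-zero exit code, callers can surface ruff's findings like this (file path hypothetical):

import subprocess

try:
    check_ruff('my_module.py')  # hypothetical path
except subprocess.CalledProcessError as err:
    print(err.output)  # ruff's findings, since stderr is folded into stdout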
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
_import_yandex_gpt
from langchain_community.llms.yandex import YandexGPT return YandexGPT
def _import_yandex_gpt() ->Any: from langchain_community.llms.yandex import YandexGPT return YandexGPT
null
lazy_load
query_result = self._execute_query() if isinstance(query_result, Exception): print(f'An error occurred during the query: {query_result}') return [] page_content_columns, metadata_columns = self._get_columns(query_result) if '*' in page_content_columns: page_content_columns = list(query_result[0].keys()) for row in query_result: page_content = '\n'.join(f'{k}: {v}' for k, v in row.items() if k in page_content_columns) metadata = {k: v for k, v in row.items() if k in metadata_columns} doc = Document(page_content=page_content, metadata=metadata) yield doc
def lazy_load(self) ->Iterator[Document]: query_result = self._execute_query() if isinstance(query_result, Exception): print(f'An error occurred during the query: {query_result}') return [] page_content_columns, metadata_columns = self._get_columns(query_result) if '*' in page_content_columns: page_content_columns = list(query_result[0].keys()) for row in query_result: page_content = '\n'.join(f'{k}: {v}' for k, v in row.items() if k in page_content_columns) metadata = {k: v for k, v in row.items() if k in metadata_columns} doc = Document(page_content=page_content, metadata=metadata) yield doc
null