Dataset schema (four string columns per record; docstring is null when the method has none):
method_name: string, lengths 1 to 78
method_body: string, lengths 3 to 9.66k
full_code: string, lengths 31 to 10.7k
docstring: string, lengths 4 to 4.74k
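Each record below lists a method's name, its body, its full definition, and its docstring (or null). A minimal sketch for consuming such a dump programmatically, assuming it is stored as newline-delimited JSON with these four fields (the file name is hypothetical):

import json

# Hypothetical file name; the dump's actual storage location is not given here.
with open('methods.jsonl', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        if record['docstring'] is not None:  # null for undocumented methods
            print(record['method_name'], '->', record['docstring'].splitlines()[0])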
test__convert_message_to_dict_ai
message = AIMessage(content='foo') result = _convert_message_to_dict(message) expected_output = {'role': 'assistant', 'content': 'foo'} assert result == expected_output
def test__convert_message_to_dict_ai() ->None: message = AIMessage(content='foo') result = _convert_message_to_dict(message) expected_output = {'role': 'assistant', 'content': 'foo'} assert result == expected_output
null
__init__
"""Initialize with a file path.""" self.file_path = file_path
def __init__(self, file_path: str): """Initialize with a file path.""" self.file_path = file_path
Initialize with a file path.
get_default_prompt
base_prompt = ChatPromptTemplate.from_template(agent_instructions) return base_prompt + AIMessagePromptTemplate.from_template( '{intermediate_steps}')
@staticmethod def get_default_prompt() ->ChatPromptTemplate: base_prompt = ChatPromptTemplate.from_template(agent_instructions) return base_prompt + AIMessagePromptTemplate.from_template( '{intermediate_steps}')
null
test_llamacpp_model_kwargs
llm = LlamaCpp(model_path=get_model(), model_kwargs={'n_gqa': None}) assert llm.model_kwargs == {'n_gqa': None}
def test_llamacpp_model_kwargs() ->None: llm = LlamaCpp(model_path=get_model(), model_kwargs={'n_gqa': None}) assert llm.model_kwargs == {'n_gqa': None}
null
load
"""Load documents.""" p = Path(self.file_path) text_content = '' with open(p, encoding='utf8') as f: lines = f.readlines() message_line_regex = """ \\[? ( \\d{1,4} [\\/.] \\d{1,2} [\\/.] \\d{1,4} ,\\s \\d{1,2} :\\d{2} (?: :\\d{2} )? (?:[\\s_](?:AM|PM))? ) \\]? [\\s-]* ([~\\w\\s]+) [:]+ \\s (.+) """ ignore_lines = ['This message was deleted', '<Media omitted>'] for line in lines: result = re.match(message_line_regex, line.strip(), flags=re.VERBOSE | re.IGNORECASE) if result: date, sender, text = result.groups() if text not in ignore_lines: text_content += concatenate_rows(date, sender, text) metadata = {'source': str(p)} return [Document(page_content=text_content, metadata=metadata)]
def load(self) ->List[Document]: """Load documents.""" p = Path(self.file_path) text_content = '' with open(p, encoding='utf8') as f: lines = f.readlines() message_line_regex = """ \\[? ( \\d{1,4} [\\/.] \\d{1,2} [\\/.] \\d{1,4} ,\\s \\d{1,2} :\\d{2} (?: :\\d{2} )? (?:[\\s_](?:AM|PM))? ) \\]? [\\s-]* ([~\\w\\s]+) [:]+ \\s (.+) """ ignore_lines = ['This message was deleted', '<Media omitted>'] for line in lines: result = re.match(message_line_regex, line.strip(), flags=re.VERBOSE | re.IGNORECASE) if result: date, sender, text = result.groups() if text not in ignore_lines: text_content += concatenate_rows(date, sender, text) metadata = {'source': str(p)} return [Document(page_content=text_content, metadata=metadata)]
Load documents.
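For orientation, the verbose regex above targets standard WhatsApp export lines; a minimal sketch against an invented sample line (the compact pattern below restates the verbose one without re.VERBOSE):

import re

MESSAGE_RE = re.compile(
    r'\[?(\d{1,4}[\/.]\d{1,2}[\/.]\d{1,4},\s\d{1,2}:\d{2}(?::\d{2})?'
    r'(?:[\s_](?:AM|PM))?)\]?[\s-]*([~\w\s]+)[:]+\s(.+)',
    re.IGNORECASE,
)

sample = '1/23/23, 3:19 PM - Alice: Hello there'  # invented export line
match = MESSAGE_RE.match(sample)
assert match is not None
date, sender, text = match.groups()  # ('1/23/23, 3:19 PM', 'Alice', 'Hello there')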
_import_vllm_openai
from langchain_community.llms.vllm import VLLMOpenAI return VLLMOpenAI
def _import_vllm_openai() ->Any: from langchain_community.llms.vllm import VLLMOpenAI return VLLMOpenAI
null
max_marginal_relevance_search_by_vector
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ from momento.requests.vector_index import ALL_METADATA from momento.responses.vector_index import SearchAndFetchVectors filter_expression = kwargs.get('filter_expression', None) response = self._client.search_and_fetch_vectors(self.index_name, embedding, top_k=fetch_k, metadata_fields=ALL_METADATA, filter_expression= filter_expression) if isinstance(response, SearchAndFetchVectors.Success): pass elif isinstance(response, SearchAndFetchVectors.Error): logger.error(f'Error searching and fetching vectors: {response}') return [] else: logger.error(f'Unexpected response: {response}') raise Exception(f'Unexpected response: {response}') mmr_selected = maximal_marginal_relevance(query_embedding=np.array([ embedding], dtype=np.float32), embedding_list=[hit.vector for hit in response.hits], lambda_mult=lambda_mult, k=k) selected = [response.hits[i].metadata for i in mmr_selected] return [Document(page_content=metadata.pop(self.text_field, ''), metadata= metadata) for metadata in selected]
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, **kwargs: Any) ->List[ Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ from momento.requests.vector_index import ALL_METADATA from momento.responses.vector_index import SearchAndFetchVectors filter_expression = kwargs.get('filter_expression', None) response = self._client.search_and_fetch_vectors(self.index_name, embedding, top_k=fetch_k, metadata_fields=ALL_METADATA, filter_expression=filter_expression) if isinstance(response, SearchAndFetchVectors.Success): pass elif isinstance(response, SearchAndFetchVectors.Error): logger.error(f'Error searching and fetching vectors: {response}') return [] else: logger.error(f'Unexpected response: {response}') raise Exception(f'Unexpected response: {response}') mmr_selected = maximal_marginal_relevance(query_embedding=np.array([ embedding], dtype=np.float32), embedding_list=[hit.vector for hit in response.hits], lambda_mult=lambda_mult, k=k) selected = [response.hits[i].metadata for i in mmr_selected] return [Document(page_content=metadata.pop(self.text_field, ''), metadata=metadata) for metadata in selected]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance.
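The lambda_mult trade-off described above is easiest to see in a standalone sketch of the greedy MMR loop (plain numpy cosine similarity; this mirrors the usual formulation of the algorithm, not the exact maximal_marginal_relevance helper called here):

import numpy as np

def mmr_select(query_vec, candidates, k=4, lambda_mult=0.5):
    """Greedily pick k candidate indices, balancing relevance and diversity."""
    def cos(a, b):
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
    selected = []
    while len(selected) < min(k, len(candidates)):
        best_i, best_score = -1, -np.inf
        for i, cand in enumerate(candidates):
            if i in selected:
                continue
            relevance = cos(query_vec, cand)
            redundancy = max((cos(cand, candidates[j]) for j in selected), default=0.0)
            # lambda_mult=1 rewards pure relevance; lambda_mult=0 rewards pure diversity.
            score = lambda_mult * relevance - (1 - lambda_mult) * redundancy
            if score > best_score:
                best_i, best_score = i, score
        selected.append(best_i)
    return selected

docs = [np.array(v, dtype=float) for v in ([1, 0], [0.99, 0.1], [0, 1])]
query = np.array([1.0, 0.0])
assert mmr_select(query, docs, k=2, lambda_mult=1.0) == [0, 1]  # pure relevance: near-duplicate wins
assert mmr_select(query, docs, k=2, lambda_mult=0.4) == [0, 2]  # diversity pushes out the duplicate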
__init__
"""Define a parser that uses mime-types to determine how to parse a blob. Args: handlers: A mapping from mime-types to functions that take a blob, parse it and return a document. fallback_parser: A fallback_parser parser to use if the mime-type is not found in the handlers. If provided, this parser will be used to parse blobs with all mime-types not found in the handlers. If not provided, a ValueError will be raised if the mime-type is not found in the handlers. """ self.handlers = handlers self.fallback_parser = fallback_parser
def __init__(self, handlers: Mapping[str, BaseBlobParser], *, fallback_parser: Optional[BaseBlobParser]=None) ->None: """Define a parser that uses mime-types to determine how to parse a blob. Args: handlers: A mapping from mime-types to functions that take a blob, parse it and return a document. fallback_parser: A fallback_parser parser to use if the mime-type is not found in the handlers. If provided, this parser will be used to parse blobs with all mime-types not found in the handlers. If not provided, a ValueError will be raised if the mime-type is not found in the handlers. """ self.handlers = handlers self.fallback_parser = fallback_parser
Define a parser that uses mime-types to determine how to parse a blob. Args: handlers: A mapping from mime-types to functions that take a blob, parse it and return a document. fallback_parser: A fallback_parser parser to use if the mime-type is not found in the handlers. If provided, this parser will be used to parse blobs with all mime-types not found in the handlers. If not provided, a ValueError will be raised if the mime-type is not found in the handlers.
test_johnsnowlabs_embed_query
"""Test johnsnowlabs embeddings.""" document = 'foo bar' embedding = JohnSnowLabsEmbeddings() output = embedding.embed_query(document) assert len(output) == 128
def test_johnsnowlabs_embed_query() ->None: """Test johnsnowlabs embeddings.""" document = 'foo bar' embedding = JohnSnowLabsEmbeddings() output = embedding.embed_query(document) assert len(output) == 128
Test johnsnowlabs embeddings.
_load_document_from_id
"""Load a document from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.errors import HttpError from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build('drive', 'v3', credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True, fields= 'modifiedTime,name').execute() request = service.files().export_media(fileId=id, mimeType='text/plain') fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False try: while done is False: status, done = downloader.next_chunk() except HttpError as e: if e.resp.status == 404: print('File not found: {}'.format(id)) else: print('An error occurred: {}'.format(e)) text = fh.getvalue().decode('utf-8') metadata = {'source': f'https://docs.google.com/document/d/{id}/edit', 'title': f"{file.get('name')}", 'when': f"{file.get('modifiedTime')}"} return Document(page_content=text, metadata=metadata)
def _load_document_from_id(self, id: str) ->Document: """Load a document from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.errors import HttpError from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build('drive', 'v3', credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True, fields= 'modifiedTime,name').execute() request = service.files().export_media(fileId=id, mimeType='text/plain') fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False try: while done is False: status, done = downloader.next_chunk() except HttpError as e: if e.resp.status == 404: print('File not found: {}'.format(id)) else: print('An error occurred: {}'.format(e)) text = fh.getvalue().decode('utf-8') metadata = {'source': f'https://docs.google.com/document/d/{id}/edit', 'title': f"{file.get('name')}", 'when': f"{file.get('modifiedTime')}"} return Document(page_content=text, metadata=metadata)
Load a document from an ID.
_lambda
if x == 1: raise ValueError('x is 1') elif x == 2: raise RuntimeError('x is 2') else: return x
def _lambda(x: int) ->Union[int, Runnable]: if x == 1: raise ValueError('x is 1') elif x == 2: raise RuntimeError('x is 2') else: return x
null
_import_titan_takeoff_pro
from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro return TitanTakeoffPro
def _import_titan_takeoff_pro() ->Any: from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro return TitanTakeoffPro
null
__init__
"""Initialize with a file path.""" try: from pdfminer.high_level import extract_text_to_fp except ImportError: raise ImportError( '`pdfminer` package not found, please install it with `pip install pdfminer.six`' ) super().__init__(file_path, headers=headers)
def __init__(self, file_path: str, *, headers: Optional[Dict]=None): """Initialize with a file path.""" try: from pdfminer.high_level import extract_text_to_fp except ImportError: raise ImportError( '`pdfminer` package not found, please install it with `pip install pdfminer.six`' ) super().__init__(file_path, headers=headers)
Initialize with a file path.
_import_python_tool_PythonAstREPLTool
raise ImportError( "This tool has been moved to langchain experimental. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imports replacing 'langchain' with 'langchain_experimental'" )
def _import_python_tool_PythonAstREPLTool() ->Any: raise ImportError( "This tool has been moved to langchain experimental. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imports replacing 'langchain' with 'langchain_experimental'" )
null
_import_yellowbrick
from langchain_community.vectorstores.yellowbrick import Yellowbrick return Yellowbrick
def _import_yellowbrick() ->Any: from langchain_community.vectorstores.yellowbrick import Yellowbrick return Yellowbrick
null
on_llm_start
"""Log the prompts to Infino, and set start time and error flag.""" for prompt in prompts: self._send_to_infino('prompt', prompt, is_ts=False) self.error = 0 self.start_time = time.time()
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], ** kwargs: Any) ->None: """Log the prompts to Infino, and set start time and error flag.""" for prompt in prompts: self._send_to_infino('prompt', prompt, is_ts=False) self.error = 0 self.start_time = time.time()
Log the prompts to Infino, and set start time and error flag.
add_texts
from qdrant_client import QdrantClient added_ids = [] client = cast(QdrantClient, self.client) for batch_ids, points in self._generate_rest_batches(texts, metadatas, ids, batch_size): client.upsert(self.collection_name, points=points, **kwargs) added_ids.extend(batch_ids) return added_ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[Sequence[str]]=None, batch_size: int=64, **kwargs: Any ) ->List[str]: from qdrant_client import QdrantClient added_ids = [] client = cast(QdrantClient, self.client) for batch_ids, points in self._generate_rest_batches(texts, metadatas, ids, batch_size): client.upsert(self.collection_name, points=points, **kwargs) added_ids.extend(batch_ids) return added_ids
null
test_convert_pydantic_to_openai_function
class Data(BaseModel): """The data to return.""" key: str = Field(..., description='API key') days: int = Field(default=0, description='Number of days to forecast') actual = convert_pydantic_to_openai_function(Data) expected = {'name': 'Data', 'description': 'The data to return.', 'parameters': {'title': 'Data', 'description': 'The data to return.', 'type': 'object', 'properties': {'key': {'title': 'Key', 'description': 'API key', 'type': 'string'}, 'days': {'title': 'Days', 'description': 'Number of days to forecast', 'default': 0, 'type': 'integer'}}, 'required': ['key']}} assert actual == expected
def test_convert_pydantic_to_openai_function() ->None: class Data(BaseModel): """The data to return.""" key: str = Field(..., description='API key') days: int = Field(default=0, description='Number of days to forecast') actual = convert_pydantic_to_openai_function(Data) expected = {'name': 'Data', 'description': 'The data to return.', 'parameters': {'title': 'Data', 'description': 'The data to return.', 'type': 'object', 'properties': {'key': { 'title': 'Key', 'description': 'API key', 'type': 'string'}, 'days': {'title': 'Days', 'description': 'Number of days to forecast', 'default': 0, 'type': 'integer'}}, 'required': ['key']}} assert actual == expected
null
test_spliting_answer_into_answer_and_sources
qa_chain = QAWithSourcesChain.from_llm(FakeLLM()) generated_answer, generated_sources = qa_chain._split_sources(text) assert generated_answer == answer assert generated_sources == sources
@pytest.mark.parametrize('text,answer,sources', [( """This Agreement is governed by English law. SOURCES: 28-pl""", 'This Agreement is governed by English law.\n', '28-pl'), ( """This Agreement is governed by English law. Sources: 28-pl""", 'This Agreement is governed by English law.\n', '28-pl'), ( """This Agreement is governed by English law. source: 28-pl""", 'This Agreement is governed by English law.\n', '28-pl'), ( """This Agreement is governed by English law. Source: 28-pl""", 'This Agreement is governed by English law.\n', '28-pl'), ( """According to the sources the agreement is governed by English law. Source: 28-pl""" , 'According to the sources the agreement is governed by English law.\n', '28-pl'), ( """This Agreement is governed by English law. SOURCES: 28-pl QUESTION: Which state/country's law governs the interpretation of the contract? FINAL ANSWER: This Agreement is governed by English law. SOURCES: 28-pl""" , 'This Agreement is governed by English law.\n', '28-pl'), ( """The president did not mention Michael Jackson in the provided content. SOURCES: Note: Since the content provided does not contain any information about Michael Jackson, there are no sources to cite for this specific question.""" , """The president did not mention Michael Jackson in the provided content. """ , ''), ( """To diagnose the problem, please answer the following questions and send them in one message to IT: A1. Are you connected to the office network? VPN will not work from the office network. A2. Are you sure about your login/password? A3. Are you using any other VPN (e.g. from a client)? A4. When was the last time you used the company VPN? SOURCES: 1 ALTERNATIVE OPTION: Another option is to run the VPN in CLI, but keep in mind that DNS settings may not work and there may be a need for manual modification of the local resolver or /etc/hosts and/or ~/.ssh/config files to be able to connect to machines in the company. With the appropriate packages installed, the only thing needed to establish a connection is to run the command: sudo openvpn --config config.ovpn We will be asked for a username and password - provide the login details, the same ones that have been used so far for VPN connection, connecting to the company's WiFi, or printers (in the Warsaw office). Finally, just use the VPN connection. SOURCES: 2 ALTERNATIVE OPTION (for Windows): Download theOpenVPN client application version 2.6 or newer from the official website: https://openvpn.net/community-downloads/ SOURCES: 3""" , """To diagnose the problem, please answer the following questions and send them in one message to IT: A1. Are you connected to the office network? VPN will not work from the office network. A2. Are you sure about your login/password? A3. Are you using any other VPN (e.g. from a client)? A4. When was the last time you used the company VPN? """ , '1')]) def test_spliting_answer_into_answer_and_sources(text: str, answer: str, sources: str) ->None: qa_chain = QAWithSourcesChain.from_llm(FakeLLM()) generated_answer, generated_sources = qa_chain._split_sources(text) assert generated_answer == answer assert generated_sources == sources
null
_parse_list
"""Parse a newline-separated string into a list of strings.""" lines = re.split('\\n', text.strip()) return [re.sub('^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]
@staticmethod def _parse_list(text: str) ->List[str]: """Parse a newline-separated string into a list of strings.""" lines = re.split('\\n', text.strip()) return [re.sub('^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]
Parse a newline-separated string into a list of strings.
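A quick check of the behavior above on an invented numbered list (a standalone copy of the two regex steps):

import re
from typing import List

def parse_list(text: str) -> List[str]:
    lines = re.split('\n', text.strip())
    return [re.sub(r'^\s*\d+\.\s*', '', line).strip() for line in lines]

assert parse_list('1. alpha\n 2.  beta\ngamma') == ['alpha', 'beta', 'gamma']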
regex_match_string_evaluator_ignore_case
"""Create a RegexMatchStringEvaluator with IGNORECASE flag.""" return RegexMatchStringEvaluator(flags=re.IGNORECASE)
@pytest.fixture def regex_match_string_evaluator_ignore_case() ->RegexMatchStringEvaluator: """Create a RegexMatchStringEvaluator with IGNORECASE flag.""" return RegexMatchStringEvaluator(flags=re.IGNORECASE)
Create a RegexMatchStringEvaluator with IGNORECASE flag.
test_load_fail_wrong_split_name
"""Test that fails to load""" with pytest.raises(ValidationError) as exc_info: TensorflowDatasets(dataset_name='mlqa/en', split_name= 'wrong_split_name', load_max_docs=MAX_DOCS, sample_to_document_function=mlqaen_example_to_document) assert 'Unknown split' in str(exc_info.value)
def test_load_fail_wrong_split_name() ->None: """Test that fails to load""" with pytest.raises(ValidationError) as exc_info: TensorflowDatasets(dataset_name='mlqa/en', split_name= 'wrong_split_name', load_max_docs=MAX_DOCS, sample_to_document_function=mlqaen_example_to_document) assert 'Unknown split' in str(exc_info.value)
Test that fails to load
_identifying_params
"""Get the identifying parameters.""" return {**{'models_priority_list': self.models_priority_list}, **self. _default_params}
@property def _identifying_params(self) ->Dict[str, Any]: """Get the identifying parameters.""" return {**{'models_priority_list': self.models_priority_list}, **self. _default_params}
Get the identifying parameters.
get_function_first_arg_dict_keys
"""Get the keys of the first argument of a function if it is a dict.""" try: code = inspect.getsource(func) tree = ast.parse(textwrap.dedent(code)) visitor = IsFunctionArgDict() visitor.visit(tree) return list(visitor.keys) if visitor.keys else None except (SyntaxError, TypeError, OSError): return None
def get_function_first_arg_dict_keys(func: Callable) ->Optional[List[str]]: """Get the keys of the first argument of a function if it is a dict.""" try: code = inspect.getsource(func) tree = ast.parse(textwrap.dedent(code)) visitor = IsFunctionArgDict() visitor.visit(tree) return list(visitor.keys) if visitor.keys else None except (SyntaxError, TypeError, OSError): return None
Get the keys of the first argument of a function if it is a dict.
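The idea generalizes to a small self-contained visitor; a sketch that inlines a simplified stand-in for the internal IsFunctionArgDict (Python 3.9+ AST shapes assumed, and keys are returned sorted rather than in encounter order):

import ast
import inspect
import textwrap

def first_arg_dict_keys(func):
    """Return the string keys subscripted off the function's first argument."""
    tree = ast.parse(textwrap.dedent(inspect.getsource(func)))
    fn = tree.body[0]
    arg_name = fn.args.args[0].arg
    keys = set()
    for node in ast.walk(fn):
        if (isinstance(node, ast.Subscript) and isinstance(node.value, ast.Name)
                and node.value.id == arg_name
                and isinstance(node.slice, ast.Constant)
                and isinstance(node.slice.value, str)):
            keys.add(node.slice.value)
    return sorted(keys)

def route(payload):
    return payload['question'] + payload['context']

assert first_arg_dict_keys(route) == ['context', 'question']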
from_url
"""Instantiate AIPlugin from a URL.""" response = requests.get(url).json() return cls(**response)
@classmethod def from_url(cls, url: str) ->AIPlugin: """Instantiate AIPlugin from a URL.""" response = requests.get(url).json() return cls(**response)
Instantiate AIPlugin from a URL.
_parse_attributedBody
""" Parse the attributedBody field of the message table for the text content of the message. The attributedBody field is a binary blob that contains the message content after the byte string b"NSString": 5 bytes 1-3 bytes `len` bytes ... | b"NSString" | preamble | `len` | contents | ... The 5 preamble bytes are always b"”„+" The size of `len` is either 1 byte or 3 bytes: - If the first byte in `len` is b"" then `len` is 3 bytes long. So the message length is the 2 bytes after, in little Endian. - Otherwise, the size of `len` is 1 byte, and the message length is that byte. Args: attributedBody (bytes): attributedBody field of the message table. Return: str: Text content of the message. """ content = attributedBody.split(b'NSString')[1][5:] length, start = content[0], 1 if content[0] == 129: length, start = int.from_bytes(content[1:3], 'little'), 3 return content[start:start + length].decode('utf-8', errors='ignore')
def _parse_attributedBody(self, attributedBody: bytes) ->str: """ Parse the attributedBody field of the message table for the text content of the message. The attributedBody field is a binary blob that contains the message content after the byte string b"NSString": 5 bytes 1-3 bytes `len` bytes ... | b"NSString" | preamble | `len` | contents | ... The 5 preamble bytes are always b"”„+" The size of `len` is either 1 byte or 3 bytes: - If the first byte in `len` is b"" then `len` is 3 bytes long. So the message length is the 2 bytes after, in little Endian. - Otherwise, the size of `len` is 1 byte, and the message length is that byte. Args: attributedBody (bytes): attributedBody field of the message table. Return: str: Text content of the message. """ content = attributedBody.split(b'NSString')[1][5:] length, start = content[0], 1 if content[0] == 129: length, start = int.from_bytes(content[1:3], 'little'), 3 return content[start:start + length].decode('utf-8', errors='ignore')
Parse the attributedBody field of the message table for the text content of the message. The attributedBody field is a binary blob that contains the message content after the byte string b"NSString": 5 bytes 1-3 bytes `len` bytes ... | b"NSString" | preamble | `len` | contents | ... The 5 preamble bytes are always b"”„+" The size of `len` is either 1 byte or 3 bytes: - If the first byte in `len` is b"" then `len` is 3 bytes long. So the message length is the 2 bytes after, in little Endian. - Otherwise, the size of `len` is 1 byte, and the message length is that byte. Args: attributedBody (bytes): attributedBody field of the message table. Return: str: Text content of the message.
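Both length encodings can be exercised directly; a minimal sketch with constructed blobs following the layout described above:

def parse_attributed_body(attributed_body: bytes) -> str:
    content = attributed_body.split(b'NSString')[1][5:]
    length, start = content[0], 1
    if content[0] == 129:  # 0x81 marker: length is the next 2 bytes, little-endian
        length, start = int.from_bytes(content[1:3], 'little'), 3
    return content[start:start + length].decode('utf-8', errors='ignore')

PREAMBLE = b'\x01\x94\x84\x01+'
short_blob = b'junk' + b'NSString' + PREAMBLE + bytes([5]) + b'hello'
assert parse_attributed_body(short_blob) == 'hello'
long_blob = b'NSString' + PREAMBLE + b'\x81' + (200).to_bytes(2, 'little') + b'x' * 200
assert parse_attributed_body(long_blob) == 'x' * 200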
input_keys
"""Input keys. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Input keys. :meta private: """ return [self.input_key]
Input keys. :meta private:
_import_clickhouse
from langchain_community.vectorstores.clickhouse import Clickhouse return Clickhouse
def _import_clickhouse() ->Any: from langchain_community.vectorstores.clickhouse import Clickhouse return Clickhouse
null
api_client
return WikipediaAPIWrapper()
@pytest.fixture def api_client() ->WikipediaAPIWrapper: return WikipediaAPIWrapper()
null
_Tuple
self.write('(') if len(t.elts) == 1: elt = t.elts[0] self.dispatch(elt) self.write(',') else: interleave(lambda: self.write(', '), self.dispatch, t.elts) self.write(')')
def _Tuple(self, t): self.write('(') if len(t.elts) == 1: elt = t.elts[0] self.dispatch(elt) self.write(',') else: interleave(lambda: self.write(', '), self.dispatch, t.elts) self.write(')')
null
validate_environment
"""Validate that api key exists in environment.""" _ = get_from_dict_or_env(values, 'eleven_api_key', 'ELEVEN_API_KEY') return values
@root_validator(pre=True) def validate_environment(cls, values: Dict) ->Dict: """Validate that api key exists in environment.""" _ = get_from_dict_or_env(values, 'eleven_api_key', 'ELEVEN_API_KEY') return values
Validate that api key exists in environment.
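For context, get_from_dict_or_env prefers an explicit value and falls back to the environment; a simplified sketch of that behavior (the real langchain utility also accepts a default argument):

import os
from typing import Dict

def get_from_dict_or_env(values: Dict, key: str, env_key: str) -> str:
    """Simplified: return values[key] if set, else the env var, else raise."""
    if values.get(key):
        return values[key]
    if os.environ.get(env_key):
        return os.environ[env_key]
    raise ValueError(f'Did not find {key}; pass it or set the {env_key} environment variable.')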
__init__
""" Initialize an instance of UpstashRedisCache. This method initializes an object with Upstash Redis caching capabilities. It takes a `redis_` parameter, which should be an instance of an Upstash Redis client class, allowing the object to interact with Upstash Redis server for caching purposes. Parameters: redis_: An instance of Upstash Redis client class (e.g., Redis) used for caching. This allows the object to communicate with Redis server for caching operations on. ttl (int, optional): Time-to-live (TTL) for cached items in seconds. If provided, it sets the time duration for how long cached items will remain valid. If not provided, cached items will not have an automatic expiration. """ try: from upstash_redis import Redis except ImportError: raise ValueError( 'Could not import upstash_redis python package. Please install it with `pip install upstash_redis`.' ) if not isinstance(redis_, Redis): raise ValueError('Please pass in Upstash Redis object.') self.redis = redis_ self.ttl = ttl
def __init__(self, redis_: Any, *, ttl: Optional[int]=None): """ Initialize an instance of UpstashRedisCache. This method initializes an object with Upstash Redis caching capabilities. It takes a `redis_` parameter, which should be an instance of an Upstash Redis client class, allowing the object to interact with Upstash Redis server for caching purposes. Parameters: redis_: An instance of Upstash Redis client class (e.g., Redis) used for caching. This allows the object to communicate with Redis server for caching operations on. ttl (int, optional): Time-to-live (TTL) for cached items in seconds. If provided, it sets the time duration for how long cached items will remain valid. If not provided, cached items will not have an automatic expiration. """ try: from upstash_redis import Redis except ImportError: raise ValueError( 'Could not import upstash_redis python package. Please install it with `pip install upstash_redis`.' ) if not isinstance(redis_, Redis): raise ValueError('Please pass in Upstash Redis object.') self.redis = redis_ self.ttl = ttl
Initialize an instance of UpstashRedisCache. This method initializes an object with Upstash Redis caching capabilities. It takes a `redis_` parameter, which should be an instance of an Upstash Redis client class, allowing the object to interact with Upstash Redis server for caching purposes. Parameters: redis_: An instance of Upstash Redis client class (e.g., Redis) used for caching. This allows the object to communicate with Redis server for caching operations on. ttl (int, optional): Time-to-live (TTL) for cached items in seconds. If provided, it sets the time duration for how long cached items will remain valid. If not provided, cached items will not have an automatic expiration.
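In use, the cache is constructed from an Upstash Redis client; a hedged sketch, assuming Upstash REST credentials are present in the environment (the import path for UpstashRedisCache varies across langchain versions):

from upstash_redis import Redis
from langchain.cache import UpstashRedisCache  # import path may differ by version

redis_client = Redis.from_env()  # expects UPSTASH_REDIS_REST_URL / UPSTASH_REDIS_REST_TOKEN
cache = UpstashRedisCache(redis_=redis_client, ttl=3600)  # entries expire after one hour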
test_openllm_with_kwargs
llm = OpenLLM(model_name='flan-t5', model_id='google/flan-t5-small', temperature=0.84) output = llm('Say bar:') assert isinstance(output, str)
def test_openllm_with_kwargs() ->None: llm = OpenLLM(model_name='flan-t5', model_id='google/flan-t5-small', temperature=0.84) output = llm('Say bar:') assert isinstance(output, str)
null
create_hnsw_index
create_index_query = sqlalchemy.text( 'CREATE INDEX IF NOT EXISTS langchain_pg_embedding_idx ON langchain_pg_embedding USING hnsw (embedding) WITH (maxelements = {}, dims = {}, m = {}, efconstruction = {}, efsearch = {});' .format(max_elements, dims, m, ef_construction, ef_search)) try: with Session(self._conn) as session: session.execute(create_index_query) session.commit() print('HNSW extension and index created successfully.') except Exception as e: print(f'Failed to create HNSW extension or index: {e}')
def create_hnsw_index(self, max_elements: int=10000, dims: int= ADA_TOKEN_COUNT, m: int=8, ef_construction: int=16, ef_search: int=16 ) ->None: create_index_query = sqlalchemy.text( 'CREATE INDEX IF NOT EXISTS langchain_pg_embedding_idx ON langchain_pg_embedding USING hnsw (embedding) WITH (maxelements = {}, dims = {}, m = {}, efconstruction = {}, efsearch = {});' .format(max_elements, dims, m, ef_construction, ef_search)) try: with Session(self._conn) as session: session.execute(create_index_query) session.commit() print('HNSW extension and index created successfully.') except Exception as e: print(f'Failed to create HNSW extension or index: {e}')
null
toxicity_callback
return self.on_after_toxicity.__func__ is not BaseModerationCallbackHandler.on_after_toxicity
@property def toxicity_callback(self) ->bool: return (self.on_after_toxicity.__func__ is not BaseModerationCallbackHandler.on_after_toxicity)
null
_validate_tools
super()._validate_tools(tools) validate_tools_single_input(cls.__name__, tools)
@classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) ->None: super()._validate_tools(tools) validate_tools_single_input(cls.__name__, tools)
null
inputs
self._inputs = self.agent_executor.prep_inputs(inputs)
@inputs.setter def inputs(self, inputs: Any) ->None: self._inputs = self.agent_executor.prep_inputs(inputs)
null
test_gpt2_call
"""Test valid call to GPT2.""" llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv( 'OSS_ENDPOINT_API_KEY'), endpoint_url=os.getenv('OSS_ENDPOINT_URL'), deployment_name=os.getenv('OSS_DEPLOYMENT_NAME'), content_formatter= OSSContentFormatter()) output = llm('Foo') assert isinstance(output, str)
def test_gpt2_call() ->None: """Test valid call to GPT2.""" llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv( 'OSS_ENDPOINT_API_KEY'), endpoint_url=os.getenv('OSS_ENDPOINT_URL'), deployment_name=os.getenv('OSS_DEPLOYMENT_NAME'), content_formatter =OSSContentFormatter()) output = llm('Foo') assert isinstance(output, str)
Test valid call to GPT2.
__init__
""" Initialize a new LangSmithChatDatasetLoader instance. :param dataset_name: The name of the LangSmith dataset. :param client: An instance of LangSmith client; if not provided, a new client instance will be created. """ try: from langsmith.client import Client except ImportError as e: raise ImportError( """The LangSmith client is required to load LangSmith datasets. Please install it with `pip install langsmith`""" ) from e self.dataset_name = dataset_name self.client = client or Client()
def __init__(self, *, dataset_name: str, client: Optional['Client']=None): """ Initialize a new LangSmithChatDatasetLoader instance. :param dataset_name: The name of the LangSmith dataset. :param client: An instance of LangSmith client; if not provided, a new client instance will be created. """ try: from langsmith.client import Client except ImportError as e: raise ImportError( """The LangSmith client is required to load LangSmith datasets. Please install it with `pip install langsmith`""" ) from e self.dataset_name = dataset_name self.client = client or Client()
Initialize a new LangSmithChatDatasetLoader instance. :param dataset_name: The name of the LangSmith dataset. :param client: An instance of LangSmith client; if not provided, a new client instance will be created.
mock_post
with patch('langchain_community.tools.edenai.edenai_base_tool.requests.post' ) as mock: yield mock
@pytest.fixture def mock_post() ->Generator: with patch( 'langchain_community.tools.edenai.edenai_base_tool.requests.post' ) as mock: yield mock
null
test_no_message
with pytest.raises(ValueError) as info: model.predict_messages([]) assert info.value.args[0] == 'at least one HumanMessage must be provided'
def test_no_message(model: Llama2Chat) ->None: with pytest.raises(ValueError) as info: model.predict_messages([]) assert info.value.args[0] == 'at least one HumanMessage must be provided'
null
_get_example_memories
return [Document(page_content='foo', metadata={'buffer_idx': i, 'last_accessed_at': datetime(2023, 4, 14, 12, 0)}) for i in range(k)]
def _get_example_memories(k: int=4) ->List[Document]: return [Document(page_content='foo', metadata={'buffer_idx': i, 'last_accessed_at': datetime(2023, 4, 14, 12, 0)}) for i in range(k)]
null
convert_pydantic_to_ernie_function
"""Converts a Pydantic model to a function description for the Ernie API.""" schema = dereference_refs(model.schema()) schema.pop('definitions', None) return {'name': name or schema['title'], 'description': description or schema['description'], 'parameters': schema}
def convert_pydantic_to_ernie_function(model: Type[BaseModel], *, name: Optional[str]=None, description: Optional[str]=None) ->FunctionDescription: """Converts a Pydantic model to a function description for the Ernie API.""" schema = dereference_refs(model.schema()) schema.pop('definitions', None) return {'name': name or schema['title'], 'description': description or schema['description'], 'parameters': schema}
Converts a Pydantic model to a function description for the Ernie API.
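Applied to a small model, the same three steps (take the schema, drop definitions, wrap) yield the familiar function-calling payload; a sketch assuming the pydantic v1 .schema() API used above:

from pydantic import BaseModel, Field  # pydantic v1 API assumed

class Weather(BaseModel):
    """Get the current weather for a city."""
    city: str = Field(..., description='City name')

schema = Weather.schema()
schema.pop('definitions', None)
fn = {'name': schema['title'], 'description': schema['description'], 'parameters': schema}
assert fn['parameters']['required'] == ['city']
assert fn['parameters']['properties']['city']['type'] == 'string'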
_build_youtube_client
try: from googleapiclient.discovery import build from youtube_transcript_api import YouTubeTranscriptApi except ImportError: raise ImportError( 'You must run `pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib youtube-transcript-api` to use the Google Drive loader' ) return build('youtube', 'v3', credentials=creds)
def _build_youtube_client(self, creds: Any) ->Any: try: from googleapiclient.discovery import build from youtube_transcript_api import YouTubeTranscriptApi except ImportError: raise ImportError( 'You must run `pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib youtube-transcript-api` to use the Google Drive loader' ) return build('youtube', 'v3', credentials=creds)
null
score_response
...
@abstractmethod def score_response(self, inputs: Dict[str, Any], llm_response: str, event: TEvent) ->float: ...
null
test_get_relevant_documents
retriever = YouRetriever() actual = retriever.get_relevant_documents('test') assert len(actual) > 0
def test_get_relevant_documents(self) ->None: retriever = YouRetriever() actual = retriever.get_relevant_documents('test') assert len(actual) > 0
null
config_specs
return get_unique_config_specs(spec for dep in self.deps for spec in dep.config_specs)
@property def config_specs(self) ->List[ConfigurableFieldSpec]: return get_unique_config_specs(spec for dep in self.deps for spec in dep.config_specs)
null
on_llm_end
"""On LLM end, send data to Arthur.""" try: import pytz except ImportError as e: raise ImportError( "Could not import pytz. Please install it with 'pip install pytz'." ) from e run_id = kwargs['run_id'] try: run_map_data = self.run_map[run_id] except KeyError as e: raise KeyError( 'This function has been called with a run_id that was never registered in on_llm_start(). Restart and try running the LLM again' ) from e time_from_start_to_end = time() - run_map_data['start_time'] inferences = [] for i, generations in enumerate(response.generations): for generation in generations: inference = {'partner_inference_id': str(uuid.uuid4()), 'inference_timestamp': datetime.now(tz=pytz.UTC), self. input_attr: run_map_data['input_texts'][i], self.output_attr: generation.text} if generation.generation_info is not None: if (FINISH_REASON in generation.generation_info and FINISH_REASON in self.attr_names): inference[FINISH_REASON] = generation.generation_info[ FINISH_REASON] logprobs_data = generation.generation_info['logprobs'] if (logprobs_data is not None and self.token_likelihood_attr is not None): logprobs = logprobs_data['top_logprobs'] likelihoods = [{k: np.exp(v) for k, v in logprobs[i].items( )} for i in range(len(logprobs))] inference[self.token_likelihood_attr] = likelihoods if isinstance(response.llm_output, dict ) and TOKEN_USAGE in response.llm_output: token_usage = response.llm_output[TOKEN_USAGE] if (PROMPT_TOKENS in token_usage and PROMPT_TOKENS in self. attr_names): inference[PROMPT_TOKENS] = token_usage[PROMPT_TOKENS] if (COMPLETION_TOKENS in token_usage and COMPLETION_TOKENS in self.attr_names): inference[COMPLETION_TOKENS] = token_usage[COMPLETION_TOKENS] if DURATION in self.attr_names: inference[DURATION] = time_from_start_to_end inferences.append(inference) self.arthur_model.send_inferences(inferences)
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None: """On LLM end, send data to Arthur.""" try: import pytz except ImportError as e: raise ImportError( "Could not import pytz. Please install it with 'pip install pytz'." ) from e run_id = kwargs['run_id'] try: run_map_data = self.run_map[run_id] except KeyError as e: raise KeyError( 'This function has been called with a run_id that was never registered in on_llm_start(). Restart and try running the LLM again' ) from e time_from_start_to_end = time() - run_map_data['start_time'] inferences = [] for i, generations in enumerate(response.generations): for generation in generations: inference = {'partner_inference_id': str(uuid.uuid4()), 'inference_timestamp': datetime.now(tz=pytz.UTC), self. input_attr: run_map_data['input_texts'][i], self. output_attr: generation.text} if generation.generation_info is not None: if (FINISH_REASON in generation.generation_info and FINISH_REASON in self.attr_names): inference[FINISH_REASON] = generation.generation_info[ FINISH_REASON] logprobs_data = generation.generation_info['logprobs'] if (logprobs_data is not None and self. token_likelihood_attr is not None): logprobs = logprobs_data['top_logprobs'] likelihoods = [{k: np.exp(v) for k, v in logprobs[i]. items()} for i in range(len(logprobs))] inference[self.token_likelihood_attr] = likelihoods if isinstance(response.llm_output, dict ) and TOKEN_USAGE in response.llm_output: token_usage = response.llm_output[TOKEN_USAGE] if (PROMPT_TOKENS in token_usage and PROMPT_TOKENS in self. attr_names): inference[PROMPT_TOKENS] = token_usage[PROMPT_TOKENS] if (COMPLETION_TOKENS in token_usage and COMPLETION_TOKENS in self.attr_names): inference[COMPLETION_TOKENS] = token_usage[ COMPLETION_TOKENS] if DURATION in self.attr_names: inference[DURATION] = time_from_start_to_end inferences.append(inference) self.arthur_model.send_inferences(inferences)
On LLM end, send data to Arthur.
run
plan = self.chat_planner.plan(inputs={'input': input, 'hf_tools': self.tools}) self.task_executor = TaskExecutor(plan) self.task_executor.run() response = self.response_generator.generate({'task_execution': self.task_executor}) return response
def run(self, input: str) ->str: plan = self.chat_planner.plan(inputs={'input': input, 'hf_tools': self.tools}) self.task_executor = TaskExecutor(plan) self.task_executor.run() response = self.response_generator.generate({'task_execution': self.task_executor}) return response
null
_select_relevance_score_fn
if self.relevance_score_fn: return self.relevance_score_fn metric_map = {'COSINE': self._cosine_relevance_score_fn, 'IP': self._max_inner_product_relevance_score_fn, 'L2': self._euclidean_relevance_score_fn} try: return metric_map[self._schema.content_vector.distance_metric] except KeyError: return _default_relevance_score
def _select_relevance_score_fn(self) ->Callable[[float], float]: if self.relevance_score_fn: return self.relevance_score_fn metric_map = {'COSINE': self._cosine_relevance_score_fn, 'IP': self._max_inner_product_relevance_score_fn, 'L2': self._euclidean_relevance_score_fn} try: return metric_map[self._schema.content_vector.distance_metric] except KeyError: return _default_relevance_score
null
test_calls_convert_agent_action_to_messages
additional_kwargs1 = {'tool_calls': [{'id': 'call_abcd12345', 'function': { 'arguments': '{"a": 3, "b": 5}', 'name': 'add'}, 'type': 'function'}]} message1 = AIMessage(content='', additional_kwargs=additional_kwargs1) actions1 = parse_ai_message_to_openai_tool_action(message1) additional_kwargs2 = {'tool_calls': [{'id': 'call_abcd54321', 'function': { 'arguments': '{"a": 3, "b": 5}', 'name': 'subtract'}, 'type': 'function'}]} message2 = AIMessage(content='', additional_kwargs=additional_kwargs2) actions2 = parse_ai_message_to_openai_tool_action(message2) additional_kwargs3 = {'tool_calls': [{'id': 'call_abcd67890', 'function': { 'arguments': '{"a": 3, "b": 5}', 'name': 'multiply'}, 'type': 'function'}, {'id': 'call_abcd09876', 'function': {'arguments': '{"a": 3, "b": 5}', 'name': 'divide'}, 'type': 'function'}]} message3 = AIMessage(content='', additional_kwargs=additional_kwargs3) actions3 = parse_ai_message_to_openai_tool_action(message3) assert isinstance(actions1, list) assert isinstance(actions2, list) assert isinstance(actions3, list) intermediate_steps = [(actions1[0], 'observation1'), (actions2[0], 'observation2'), (actions3[0], 'observation3'), (actions3[1], 'observation4')] expected_messages = [message1, ToolMessage(tool_call_id='call_abcd12345', content='observation1', additional_kwargs={'name': 'add'}), message2, ToolMessage(tool_call_id='call_abcd54321', content='observation2', additional_kwargs={'name': 'subtract'}), message3, ToolMessage( tool_call_id='call_abcd67890', content='observation3', additional_kwargs={'name': 'multiply'}), ToolMessage(tool_call_id= 'call_abcd09876', content='observation4', additional_kwargs={'name': 'divide'})] output = format_to_openai_tool_messages(intermediate_steps) assert output == expected_messages
def test_calls_convert_agent_action_to_messages() ->None: additional_kwargs1 = {'tool_calls': [{'id': 'call_abcd12345', 'function': {'arguments': '{"a": 3, "b": 5}', 'name': 'add'}, 'type': 'function'}]} message1 = AIMessage(content='', additional_kwargs=additional_kwargs1) actions1 = parse_ai_message_to_openai_tool_action(message1) additional_kwargs2 = {'tool_calls': [{'id': 'call_abcd54321', 'function': {'arguments': '{"a": 3, "b": 5}', 'name': 'subtract'}, 'type': 'function'}]} message2 = AIMessage(content='', additional_kwargs=additional_kwargs2) actions2 = parse_ai_message_to_openai_tool_action(message2) additional_kwargs3 = {'tool_calls': [{'id': 'call_abcd67890', 'function': {'arguments': '{"a": 3, "b": 5}', 'name': 'multiply'}, 'type': 'function'}, {'id': 'call_abcd09876', 'function': { 'arguments': '{"a": 3, "b": 5}', 'name': 'divide'}, 'type': 'function'}]} message3 = AIMessage(content='', additional_kwargs=additional_kwargs3) actions3 = parse_ai_message_to_openai_tool_action(message3) assert isinstance(actions1, list) assert isinstance(actions2, list) assert isinstance(actions3, list) intermediate_steps = [(actions1[0], 'observation1'), (actions2[0], 'observation2'), (actions3[0], 'observation3'), (actions3[1], 'observation4')] expected_messages = [message1, ToolMessage(tool_call_id= 'call_abcd12345', content='observation1', additional_kwargs={'name': 'add'}), message2, ToolMessage(tool_call_id='call_abcd54321', content='observation2', additional_kwargs={'name': 'subtract'}), message3, ToolMessage(tool_call_id='call_abcd67890', content= 'observation3', additional_kwargs={'name': 'multiply'}), ToolMessage(tool_call_id='call_abcd09876', content='observation4', additional_kwargs={'name': 'divide'})] output = format_to_openai_tool_messages(intermediate_steps) assert output == expected_messages
null
split_text_from_file
"""Split HTML file Args: file: HTML file """ try: from lxml import etree except ImportError as e: raise ImportError( 'Unable to import lxml, please install with `pip install lxml`.' ) from e parser = etree.HTMLParser() tree = etree.parse_folder(file, parser) xslt_path = pathlib.Path(__file__ ).parent / 'document_transformers/xsl/html_chunks_with_headers.xslt' xslt_tree = etree.parse_folder(xslt_path) transform = etree.XSLT(xslt_tree) result = transform(tree) result_dom = etree.fromstring(str(result)) header_filter = [header[0] for header in self.headers_to_split_on] header_mapping = dict(self.headers_to_split_on) ns_map = {'h': 'http://www.w3.org/1999/xhtml'} elements = [] for element in result_dom.findall('*//*', ns_map): if element.findall("*[@class='headers']") or element.findall( "*[@class='chunk']"): elements.append(ElementType(url=file, xpath=''.join([node.text for node in element.findall("*[@class='xpath']", ns_map)]), content =''.join([node.text for node in element.findall( "*[@class='chunk']", ns_map)]), metadata={header_mapping[node. tag]: node.text for node in filter(lambda x: x.tag in header_filter, element.findall("*[@class='headers']/*", ns_map))})) if not self.return_each_element: return self.aggregate_elements_to_chunks(elements) else: return [Document(page_content=chunk['content'], metadata=chunk[ 'metadata']) for chunk in elements]
def split_text_from_file(self, file: Any) ->List[Document]: """Split HTML file Args: file: HTML file """ try: from lxml import etree except ImportError as e: raise ImportError( 'Unable to import lxml, please install with `pip install lxml`.' ) from e parser = etree.HTMLParser() tree = etree.parse_folder(file, parser) xslt_path = pathlib.Path(__file__ ).parent / 'document_transformers/xsl/html_chunks_with_headers.xslt' xslt_tree = etree.parse_folder(xslt_path) transform = etree.XSLT(xslt_tree) result = transform(tree) result_dom = etree.fromstring(str(result)) header_filter = [header[0] for header in self.headers_to_split_on] header_mapping = dict(self.headers_to_split_on) ns_map = {'h': 'http://www.w3.org/1999/xhtml'} elements = [] for element in result_dom.findall('*//*', ns_map): if element.findall("*[@class='headers']") or element.findall( "*[@class='chunk']"): elements.append(ElementType(url=file, xpath=''.join([node.text for node in element.findall("*[@class='xpath']", ns_map)]), content=''.join([node.text for node in element.findall( "*[@class='chunk']", ns_map)]), metadata={header_mapping[ node.tag]: node.text for node in filter(lambda x: x.tag in header_filter, element.findall("*[@class='headers']/*", ns_map))})) if not self.return_each_element: return self.aggregate_elements_to_chunks(elements) else: return [Document(page_content=chunk['content'], metadata=chunk[ 'metadata']) for chunk in elements]
Split HTML file Args: file: HTML file
Embed
if isinstance(anything, _ToSelectFrom): return ToSelectFrom(Embed(anything.value, keep=keep)) elif isinstance(anything, _BasedOn): return BasedOn(Embed(anything.value, keep=keep)) if isinstance(anything, list): return [Embed(v, keep=keep) for v in anything] elif isinstance(anything, dict): return {k: Embed(v, keep=keep) for k, v in anything.items()} elif isinstance(anything, _Embed): return anything return _Embed(anything, keep=keep)
def Embed(anything: Any, keep: bool=False) ->Any: if isinstance(anything, _ToSelectFrom): return ToSelectFrom(Embed(anything.value, keep=keep)) elif isinstance(anything, _BasedOn): return BasedOn(Embed(anything.value, keep=keep)) if isinstance(anything, list): return [Embed(v, keep=keep) for v in anything] elif isinstance(anything, dict): return {k: Embed(v, keep=keep) for k, v in anything.items()} elif isinstance(anything, _Embed): return anything return _Embed(anything, keep=keep)
null
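The recursive wrapping is easier to see on a concrete value; a self-contained sketch with a stand-in _Embed class (the real _Embed/_BasedOn/_ToSelectFrom markers live in the rl_chain module, so the _BasedOn and _ToSelectFrom branches are omitted here):

from typing import Any

class _Embed:
    def __init__(self, value: Any, keep: bool = False):
        self.value, self.keep = value, keep

def Embed(anything: Any, keep: bool = False) -> Any:
    """Recursively wrap a value, including the members of lists and dicts."""
    if isinstance(anything, list):
        return [Embed(v, keep=keep) for v in anything]
    if isinstance(anything, dict):
        return {k: Embed(v, keep=keep) for k, v in anything.items()}
    if isinstance(anything, _Embed):
        return anything  # already wrapped: Embed is idempotent
    return _Embed(anything, keep=keep)

wrapped = Embed({'user': 'alice', 'actions': ['a', 'b']})
assert isinstance(wrapped['user'], _Embed)
assert all(isinstance(v, _Embed) for v in wrapped['actions'])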
test_sql_database_sequential_chain_run
"""Test that commands can be run successfully SEQUENTIALLY and returned in correct format.""" engine = create_engine('sqlite:///:memory:') metadata_obj.create_all(engine) stmt = insert(user).values(user_id=13, user_name='Harrison', user_company='Foo' ) with engine.connect() as conn: conn.execute(stmt) db = SQLDatabase(engine) db_chain = SQLDatabaseSequentialChain.from_llm(OpenAI(temperature=0), db) output = db_chain.run('What company does Harrison work at?') expected_output = ' Harrison works at Foo.' assert output == expected_output
def test_sql_database_sequential_chain_run() ->None: """Test that commands can be run successfully SEQUENTIALLY and returned in correct format.""" engine = create_engine('sqlite:///:memory:') metadata_obj.create_all(engine) stmt = insert(user).values(user_id=13, user_name='Harrison', user_company='Foo') with engine.connect() as conn: conn.execute(stmt) db = SQLDatabase(engine) db_chain = SQLDatabaseSequentialChain.from_llm(OpenAI(temperature=0), db) output = db_chain.run('What company does Harrison work at?') expected_output = ' Harrison works at Foo.' assert output == expected_output
Test that commands can be run successfully SEQUENTIALLY and returned in correct format.
output_keys
"""Will always return text key. :meta private: """ if self.return_final_only: return [self.output_key] else: return [self.output_key, 'full_generation']
@property def output_keys(self) ->List[str]: """Will always return text key. :meta private: """ if self.return_final_only: return [self.output_key] else: return [self.output_key, 'full_generation']
Will always return text key. :meta private:
delete
"""Delete by vector IDs or filter. Args: ids: List of ids to delete. filter: Dictionary of conditions to filter vectors to delete. """ if namespace is None: namespace = self._namespace if delete_all: self._index.delete(delete_all=True, namespace=namespace, **kwargs) elif ids is not None: chunk_size = 1000 for i in range(0, len(ids), chunk_size): chunk = ids[i:i + chunk_size] self._index.delete(ids=chunk, namespace=namespace, **kwargs) elif filter is not None: self._index.delete(filter=filter, namespace=namespace, **kwargs) else: raise ValueError('Either ids, delete_all, or filter must be provided.') return None
def delete(self, ids: Optional[List[str]]=None, delete_all: Optional[bool]= None, namespace: Optional[str]=None, filter: Optional[dict]=None, ** kwargs: Any) ->None: """Delete by vector IDs or filter. Args: ids: List of ids to delete. filter: Dictionary of conditions to filter vectors to delete. """ if namespace is None: namespace = self._namespace if delete_all: self._index.delete(delete_all=True, namespace=namespace, **kwargs) elif ids is not None: chunk_size = 1000 for i in range(0, len(ids), chunk_size): chunk = ids[i:i + chunk_size] self._index.delete(ids=chunk, namespace=namespace, **kwargs) elif filter is not None: self._index.delete(filter=filter, namespace=namespace, **kwargs) else: raise ValueError('Either ids, delete_all, or filter must be provided.') return None
Delete by vector IDs or filter. Args: ids: List of ids to delete. filter: Dictionary of conditions to filter vectors to delete.
_file_types
"""Return supported file types.""" return _FileType.DOC, _FileType.DOCX, _FileType.PDF
@property def _file_types(self) ->Sequence[_FileType]: """Return supported file types.""" return _FileType.DOC, _FileType.DOCX, _FileType.PDF
Return supported file types.
get_input_schema
"""The tool's input schema.""" if self.args_schema is not None: return self.args_schema else: return create_schema_from_function(self.name, self._run)
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[ BaseModel]: """The tool's input schema.""" if self.args_schema is not None: return self.args_schema else: return create_schema_from_function(self.name, self._run)
The tool's input schema.
empty_tool_input
"""Return a constant.""" return 'the empty result'
@tool def empty_tool_input() ->str: """Return a constant.""" return 'the empty result'
Return a constant.
similarity_search_with_relevance_scores
"""Perform a similarity search with StarRocks Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of documents """ q_str = self._build_query_sql(self.embedding_function.embed_query(query), k, where_str) try: return [(Document(page_content=r[self.config.column_map['document']], metadata=json.loads(r[self.config.column_map['metadata']])), r[ 'dist']) for r in get_named_result(self.connection, q_str)] except Exception as e: logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m') return []
def similarity_search_with_relevance_scores(self, query: str, k: int=4, where_str: Optional[str]=None, **kwargs: Any) ->List[Tuple[Document, float] ]: """Perform a similarity search with StarRocks Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of documents """ q_str = self._build_query_sql(self.embedding_function.embed_query(query ), k, where_str) try: return [(Document(page_content=r[self.config.column_map['document'] ], metadata=json.loads(r[self.config.column_map['metadata']])), r['dist']) for r in get_named_result(self.connection, q_str)] except Exception as e: logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m' ) return []
Perform a similarity search with StarRocks Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of documents
test_visit_operation
from timescale_vector import client op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=Comparator.LT, attribute='foo', value=2), Comparison(comparator=Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=Comparator.GT, attribute='abc', value=2.0)]) expected = client.Predicates(client.Predicates(('foo', '<', 2)), client.Predicates(('bar', '==', 'baz')), client.Predicates(('abc', '>', 2.0))) actual = DEFAULT_TRANSLATOR.visit_operation(op) assert expected == actual
@pytest.mark.requires('timescale_vector') def test_visit_operation() ->None: from timescale_vector import client op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=Comparator.LT, attribute='foo', value=2), Comparison(comparator=Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=Comparator.GT, attribute='abc', value=2.0)]) expected = client.Predicates(client.Predicates(('foo', '<', 2)), client.Predicates(('bar', '==', 'baz')), client.Predicates(('abc', '>', 2.0))) actual = DEFAULT_TRANSLATOR.visit_operation(op) assert expected == actual
null
get_api_chain
"""Fake LLM API chain for testing.""" data = get_test_api_data() test_api_docs = data['api_docs'] test_question = data['question'] test_url = data['api_url'] test_api_response = data['api_response'] test_api_summary = data['api_summary'] api_url_query_prompt = API_URL_PROMPT.format(api_docs=test_api_docs, question=test_question) api_response_prompt = API_RESPONSE_PROMPT.format(api_docs=test_api_docs, question=test_question, api_url=test_url, api_response=test_api_response) queries = {api_url_query_prompt: test_url, api_response_prompt: test_api_summary} fake_llm = FakeLLM(queries=queries) api_request_chain = LLMChain(llm=fake_llm, prompt=API_URL_PROMPT) api_answer_chain = LLMChain(llm=fake_llm, prompt=API_RESPONSE_PROMPT) requests_wrapper = FakeRequestsChain(output=test_api_response) return APIChain(api_request_chain=api_request_chain, api_answer_chain= api_answer_chain, requests_wrapper=requests_wrapper, api_docs= test_api_docs, **kwargs)
def get_api_chain(**kwargs: Any) ->APIChain: """Fake LLM API chain for testing.""" data = get_test_api_data() test_api_docs = data['api_docs'] test_question = data['question'] test_url = data['api_url'] test_api_response = data['api_response'] test_api_summary = data['api_summary'] api_url_query_prompt = API_URL_PROMPT.format(api_docs=test_api_docs, question=test_question) api_response_prompt = API_RESPONSE_PROMPT.format(api_docs=test_api_docs, question=test_question, api_url=test_url, api_response= test_api_response) queries = {api_url_query_prompt: test_url, api_response_prompt: test_api_summary} fake_llm = FakeLLM(queries=queries) api_request_chain = LLMChain(llm=fake_llm, prompt=API_URL_PROMPT) api_answer_chain = LLMChain(llm=fake_llm, prompt=API_RESPONSE_PROMPT) requests_wrapper = FakeRequestsChain(output=test_api_response) return APIChain(api_request_chain=api_request_chain, api_answer_chain= api_answer_chain, requests_wrapper=requests_wrapper, api_docs= test_api_docs, **kwargs)
Fake LLM API chain for testing.
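Because the FakeLLM is keyed on the exact prompt strings built above, a test can exercise the chain end to end without network access. A sketch under that assumption (the test name is hypothetical; `chain.run` follows the standard Chain interface used by APIChain):

def test_api_chain_answers_question() ->None:
    chain = get_api_chain()
    data = get_test_api_data()
    # The FakeLLM only answers the two registered prompts, so the chain
    # deterministically returns the canned API summary.
    output = chain.run(data['question'])
    assert output == data['api_summary']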
similarity_search
"""Return MongoDB documents most similar to the given query. Uses the knnBeta Operator available in MongoDB Atlas Search. This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is not recommended for production deployments as we may introduce breaking changes. For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta Args: query: Text to look up documents similar to. k: (Optional) number of documents to return. Defaults to 4. pre_filter: (Optional) dictionary of argument(s) to prefilter document fields on. post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages following the knnBeta vector search. Returns: List of documents most similar to the query and their scores. """ docs_and_scores = self.similarity_search_with_score(query, k=k, pre_filter= pre_filter, post_filter_pipeline=post_filter_pipeline) return [doc for doc, _ in docs_and_scores]
def similarity_search(self, query: str, k: int=4, pre_filter: Optional[Dict
    ]=None, post_filter_pipeline: Optional[List[Dict]]=None, **kwargs: Any
    ) ->List[Document]:
    """Return MongoDB documents most similar to the given query.

    Uses the knnBeta Operator available in MongoDB Atlas Search.
    This feature is in early access and available only for evaluation
    purposes, to validate functionality, and to gather feedback from a small
    closed group of early access users. It is not recommended for production
    deployments as we may introduce breaking changes.
    For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta

    Args:
        query: Text to look up documents similar to.
        k: (Optional) number of documents to return. Defaults to 4.
        pre_filter: (Optional) dictionary of argument(s) to prefilter
            document fields on.
        post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation
            stages following the knnBeta vector search.

    Returns:
        List of documents most similar to the query.
    """
    docs_and_scores = self.similarity_search_with_score(query, k=k,
        pre_filter=pre_filter, post_filter_pipeline=post_filter_pipeline)
    return [doc for doc, _ in docs_and_scores]
Return MongoDB documents most similar to the given query.

Uses the knnBeta Operator available in MongoDB Atlas Search.
This feature is in early access and available only for evaluation purposes,
to validate functionality, and to gather feedback from a small closed group
of early access users. It is not recommended for production deployments as
we may introduce breaking changes.
For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta

Args:
    query: Text to look up documents similar to.
    k: (Optional) number of documents to return. Defaults to 4.
    pre_filter: (Optional) dictionary of argument(s) to prefilter document
        fields on.
    post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages
        following the knnBeta vector search.

Returns:
    List of documents most similar to the query.
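A hedged usage sketch for the knnBeta search above. `vectorstore` is assumed to be an already-initialized MongoDB Atlas vector store, and the `pre_filter` shape is an assumption based on Atlas Search filter syntax rather than anything fixed by this method:

# Sketch only: `vectorstore` wraps a populated Atlas collection.
docs = vectorstore.similarity_search('how do I rotate an API key?', k=4,
    # Narrow the candidate set before the vector stage (shape assumed).
    pre_filter={'range': {'path': 'year', 'gte': 2022}},
    # Then strip raw embeddings with an ordinary aggregation stage.
    post_filter_pipeline=[{'$project': {'embedding': 0}}])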
test_causal_mediator
""" Test CPAL approach on causal mediator. """ narrative_input = ( 'jan has three times the number of pets as marcia.marcia has two more pets than cindy.If marcia has ten pets, how many pets does jan have?' ) llm = OpenAI(temperature=0, max_tokens=512) cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True) answer = cpal_chain.run(narrative_input) self.assertEqual(answer, 30.0)
def test_causal_mediator(self) ->None:
    """
    Test CPAL approach on causal mediator.
    """
    narrative_input = (
        'jan has three times the number of pets as marcia. marcia has two more pets than cindy. If marcia has ten pets, how many pets does jan have?'
        )
    llm = OpenAI(temperature=0, max_tokens=512)
    cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
    answer = cpal_chain.run(narrative_input)
    self.assertEqual(answer, 30.0)
Test CPAL approach on causal mediator.
test_load_returns_limited_docs
"""Test that returns several docs""" expected_docs = 2 loader = ArxivLoader(query='ChatGPT', load_max_docs=expected_docs) docs = loader.load() assert len(docs) == expected_docs assert_docs(docs)
def test_load_returns_limited_docs() ->None: """Test that returns several docs""" expected_docs = 2 loader = ArxivLoader(query='ChatGPT', load_max_docs=expected_docs) docs = loader.load() assert len(docs) == expected_docs assert_docs(docs)
Test that returns several docs
_stream
"""Call out to Titan Takeoff stream endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Yields: A dictionary like object containing a string token. Example: .. code-block:: python prompt = "What is the capital of the United Kingdom?" response = model(prompt) """ url = f'{self.base_url}/generate_stream' params = {'text': prompt, **self._default_params} response = requests.post(url, json=params, stream=True) response.encoding = 'utf-8' for text in response.iter_content(chunk_size=1, decode_unicode=True): if text: chunk = GenerationChunk(text=text) yield chunk if run_manager: run_manager.on_llm_new_token(token=chunk.text)
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
    GenerationChunk]:
    """Call out to Titan Takeoff stream endpoint.

    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.

    Yields:
        GenerationChunk objects, each wrapping one decoded token of the
        generated text.

    Example:
        .. code-block:: python

            prompt = "What is the capital of the United Kingdom?"
            for chunk in model.stream(prompt):
                print(chunk)

    """
    url = f'{self.base_url}/generate_stream'
    params = {'text': prompt, **self._default_params}
    response = requests.post(url, json=params, stream=True)
    response.encoding = 'utf-8'
    for text in response.iter_content(chunk_size=1, decode_unicode=True):
        if text:
            chunk = GenerationChunk(text=text)
            yield chunk
            if run_manager:
                run_manager.on_llm_new_token(token=chunk.text)
Call out to Titan Takeoff stream endpoint.

Args:
    prompt: The prompt to pass into the model.
    stop: Optional list of stop words to use when generating.

Yields:
    GenerationChunk objects, each wrapping one decoded token of the
    generated text.

Example:
    .. code-block:: python

        prompt = "What is the capital of the United Kingdom?"
        for chunk in model.stream(prompt):
            print(chunk)
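Since each chunk carries one decoded token from the server's streaming response, a caller typically consumes the public streaming interface rather than `_stream` directly. A sketch, assuming a Takeoff server on localhost and that the surrounding class is instantiable as shown (the class name and default URL are assumptions taken from the endpoint description):

llm = TitanTakeoff(base_url='http://localhost:8000')  # assumed constructor
for token in llm.stream('What is the capital of the United Kingdom?'):
    # Tokens arrive one at a time; flush so output appears incrementally.
    print(token, end='', flush=True)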
_identifying_params
"""Get the identifying parameters.""" return {'model_name': self.model_name, 'name': self.name, 'cpu': self.cpu, 'memory': self.memory, 'gpu': self.gpu, 'python_version': self. python_version, 'python_packages': self.python_packages, 'max_length': self.max_length, 'model_kwargs': self.model_kwargs}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {'model_name': self.model_name, 'name': self.name, 'cpu': self. cpu, 'memory': self.memory, 'gpu': self.gpu, 'python_version': self .python_version, 'python_packages': self.python_packages, 'max_length': self.max_length, 'model_kwargs': self.model_kwargs}
Get the identifying parameters.
similarity_search_with_score
"""Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of documents most similar to the query text with L2 distance in float. Lower score represents more similarity. """ embedding = self.embedding.embed_query(query) docs = self.similarity_search_with_score_by_vector(embedding, k, filter= filter, fetch_k=fetch_k, **kwargs) return docs
def similarity_search_with_score(self, query: str, k: int=4, filter: Optional[Dict[str, Any]]=None, fetch_k: int=20, **kwargs: Any) ->List[Tuple [Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of documents most similar to the query text with L2 distance in float. Lower score represents more similarity. """ embedding = self.embedding.embed_query(query) docs = self.similarity_search_with_score_by_vector(embedding, k, filter =filter, fetch_k=fetch_k, **kwargs) return docs
Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of documents most similar to the query text with L2 distance in float. Lower score represents more similarity.
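Because the docstring fixes the convention (L2 distance, lower means more similar), callers can threshold on the returned score. A sketch, with `db` assumed to be an initialized vector store of this class and 0.5 an arbitrary example cutoff:

results = db.similarity_search_with_score('customer refund policy', k=4,
    fetch_k=20)
for doc, score in results:
    # Keep only sufficiently close hits; the cutoff is illustrative.
    if score < 0.5:
        print(score, doc.metadata)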
lazy_parse
"""Lazily parse the blob.""" import io try: from pydub import AudioSegment except ImportError: raise ImportError( 'pydub package not found, please install it with `pip install pydub`') try: import librosa except ImportError: raise ImportError( 'librosa package not found, please install it with `pip install librosa`' ) audio = AudioSegment.from_file(blob.path) file_obj = io.BytesIO(audio.export(format='mp3').read()) print(f'Transcribing part {blob.path}!') y, sr = librosa.load(file_obj, sr=16000) prediction = self.pipe(y.copy(), batch_size=8)['text'] yield Document(page_content=prediction, metadata={'source': blob.source})
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Lazily parse the blob.""" import io try: from pydub import AudioSegment except ImportError: raise ImportError( 'pydub package not found, please install it with `pip install pydub`' ) try: import librosa except ImportError: raise ImportError( 'librosa package not found, please install it with `pip install librosa`' ) audio = AudioSegment.from_file(blob.path) file_obj = io.BytesIO(audio.export(format='mp3').read()) print(f'Transcribing part {blob.path}!') y, sr = librosa.load(file_obj, sr=16000) prediction = self.pipe(y.copy(), batch_size=8)['text'] yield Document(page_content=prediction, metadata={'source': blob.source})
Lazily parse the blob.
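A sketch of driving the parser above on a local file. `parser` stands for an instance of the surrounding class with `self.pipe` set to a Hugging Face ASR pipeline (both assumptions); `Blob.from_path` is the standard LangChain helper for wrapping a file path, though the exact import path can vary by release:

from langchain_community.document_loaders.blob_loaders import Blob

blob = Blob.from_path('interview.mp3')  # any pydub-readable audio file
for doc in parser.lazy_parse(blob):
    # One Document per blob, with the transcription as page_content.
    print(doc.metadata['source'], doc.page_content[:100])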
test_api_request_body_from_request_body_with_schema
"""Test instantiating APIRequestBody from RequestBody with a schema.""" from openapi_pydantic import MediaType, RequestBody, Schema request_body = RequestBody(content={'application/json': MediaType(schema= Schema(type='object', properties={'foo': Schema(type='string')}))}) api_request_body = APIRequestBody.from_request_body(request_body, raw_spec) assert api_request_body.properties == [APIRequestBodyProperty(name='foo', required=False, type='string', default=None, description=None, properties=[], references_used=[])] assert api_request_body.media_type == 'application/json'
@pytest.mark.requires('openapi_pydantic') def test_api_request_body_from_request_body_with_schema(raw_spec: OpenAPISpec ) ->None: """Test instantiating APIRequestBody from RequestBody with a schema.""" from openapi_pydantic import MediaType, RequestBody, Schema request_body = RequestBody(content={'application/json': MediaType( schema=Schema(type='object', properties={'foo': Schema(type= 'string')}))}) api_request_body = APIRequestBody.from_request_body(request_body, raw_spec) assert api_request_body.properties == [APIRequestBodyProperty(name= 'foo', required=False, type='string', default=None, description= None, properties=[], references_used=[])] assert api_request_body.media_type == 'application/json'
Test instantiating APIRequestBody from RequestBody with a schema.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
_start_chat
if not self.is_codey_model: return self.client.start_chat(context=history.context, message_history= history.history, **kwargs) else: return self.client.start_chat(message_history=history.history, **kwargs)
def _start_chat(self, history: _ChatHistory, **kwargs: Any) ->Union[ ChatSession, CodeChatSession]: if not self.is_codey_model: return self.client.start_chat(context=history.context, message_history=history.history, **kwargs) else: return self.client.start_chat(message_history=history.history, **kwargs )
null
test_embedding_documents
"""Test embeddings for documents.""" documents = ['foo', 'bar'] embedding = VolcanoEmbeddings() output = embedding.embed_documents(documents) assert len(output) == 2 assert len(output[0]) == 1024
def test_embedding_documents() ->None: """Test embeddings for documents.""" documents = ['foo', 'bar'] embedding = VolcanoEmbeddings() output = embedding.embed_documents(documents) assert len(output) == 2 assert len(output[0]) == 1024
Test embeddings for documents.
test_qdrant_from_texts_raises_error_on_different_dimensionality
"""Test if Qdrant.from_texts raises an exception if dimensionality does not match""" collection_name = uuid.uuid4().hex with tempfile.TemporaryDirectory() as tmpdir: vec_store = Qdrant.from_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet' ], ConsistentFakeEmbeddings(dimensionality=10), collection_name= collection_name, path=str(tmpdir), vector_name=vector_name) del vec_store with pytest.raises(QdrantException): Qdrant.from_texts(['foo', 'bar'], ConsistentFakeEmbeddings( dimensionality=5), collection_name=collection_name, path=str( tmpdir), vector_name=vector_name)
@pytest.mark.parametrize('vector_name', [None, 'custom-vector']) def test_qdrant_from_texts_raises_error_on_different_dimensionality(vector_name : Optional[str]) ->None: """Test if Qdrant.from_texts raises an exception if dimensionality does not match""" collection_name = uuid.uuid4().hex with tempfile.TemporaryDirectory() as tmpdir: vec_store = Qdrant.from_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet'], ConsistentFakeEmbeddings(dimensionality=10), collection_name=collection_name, path=str(tmpdir), vector_name= vector_name) del vec_store with pytest.raises(QdrantException): Qdrant.from_texts(['foo', 'bar'], ConsistentFakeEmbeddings( dimensionality=5), collection_name=collection_name, path= str(tmpdir), vector_name=vector_name)
Test if Qdrant.from_texts raises an exception if dimensionality does not match
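When the mismatch is expected (for example after switching embedding models), the collection must be rebuilt rather than reused. A sketch relying on the `force_recreate` flag accepted by Qdrant.from_texts in recent releases (treat the flag as an assumption if targeting an older version):

Qdrant.from_texts(['foo', 'bar'], ConsistentFakeEmbeddings(dimensionality=5),
    collection_name=collection_name, path=str(tmpdir),
    vector_name=vector_name,
    force_recreate=True)  # drop and recreate instead of raising QdrantException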
test_system_message_multiple_tools
prompt: Any = StructuredChatAgent.create_prompt([Tool(name='foo', description='Test tool FOO', func=lambda x: x), Tool(name='bar', description='Test tool BAR', func=lambda x: x)]) actual = prompt.messages[0].prompt.format() expected = dedent( """ Respond to the human as helpfully and accurately as possible. You have access to the following tools: foo: Test tool FOO, args: {'tool_input': {'type': 'string'}} bar: Test tool BAR, args: {'tool_input': {'type': 'string'}} Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). Valid "action" values: "Final Answer" or foo, bar Provide only ONE action per $JSON_BLOB, as shown: ``` { "action": $TOOL_NAME, "action_input": $INPUT } ``` Follow this format: Question: input question to answer Thought: consider previous and subsequent steps Action: ``` $JSON_BLOB ``` Observation: action result ... (repeat Thought/Action/Observation N times) Thought: I know what to respond Action: ``` { "action": "Final Answer", "action_input": "Final response to human" } ``` Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Thought: """ ).strip() assert actual == expected
def test_system_message_multiple_tools(self) ->None: prompt: Any = StructuredChatAgent.create_prompt([Tool(name='foo', description='Test tool FOO', func=lambda x: x), Tool(name='bar', description='Test tool BAR', func=lambda x: x)]) actual = prompt.messages[0].prompt.format() expected = dedent( """ Respond to the human as helpfully and accurately as possible. You have access to the following tools: foo: Test tool FOO, args: {'tool_input': {'type': 'string'}} bar: Test tool BAR, args: {'tool_input': {'type': 'string'}} Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). Valid "action" values: "Final Answer" or foo, bar Provide only ONE action per $JSON_BLOB, as shown: ``` { "action": $TOOL_NAME, "action_input": $INPUT } ``` Follow this format: Question: input question to answer Thought: consider previous and subsequent steps Action: ``` $JSON_BLOB ``` Observation: action result ... (repeat Thought/Action/Observation N times) Thought: I know what to respond Action: ``` { "action": "Final Answer", "action_input": "Final response to human" } ``` Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Thought: """ ).strip() assert actual == expected
null
_insert
_insert_query = self._build_insert_sql(transac, column_names) debug_output(_insert_query) get_named_result(self.connection, _insert_query)
def _insert(self, transac: Iterable, column_names: Iterable[str]) ->None: _insert_query = self._build_insert_sql(transac, column_names) debug_output(_insert_query) get_named_result(self.connection, _insert_query)
null
visit_Call
if (isinstance(node.func, ast.Attribute) and
        isinstance(node.func.value, ast.Name) and
        node.func.value.id == self.name and
        node.func.attr == 'get' and
        len(node.args) in (1, 2) and
        isinstance(node.args[0], ast.Constant) and
        isinstance(node.args[0].value, str)):
    self.keys.add(node.args[0].value)
def visit_Call(self, node: ast.Call) ->Any:
    if (isinstance(node.func, ast.Attribute) and
            isinstance(node.func.value, ast.Name) and
            node.func.value.id == self.name and
            node.func.attr == 'get' and
            len(node.args) in (1, 2) and
            isinstance(node.args[0], ast.Constant) and
            isinstance(node.args[0].value, str)):
        self.keys.add(node.args[0].value)
null
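The method above only makes sense inside an `ast.NodeVisitor` that tracks a variable name and collects the string keys read via `.get(...)`. A self-contained sketch of such a visitor; the class and attribute names are assumptions, and the `generic_visit` call is added here so nested expressions are still walked:

import ast
from typing import Any, Set


class GetKeyCollector(ast.NodeVisitor):
    """Collect string keys accessed as `<name>.get('key')` (hypothetical)."""

    def __init__(self, name: str) ->None:
        self.name = name
        self.keys: Set[str] = set()

    def visit_Call(self, node: ast.Call) ->Any:
        # Same predicate as the method above: `<name>.get('key'[, default])`
        # with a string-literal first argument.
        if (isinstance(node.func, ast.Attribute) and
                isinstance(node.func.value, ast.Name) and
                node.func.value.id == self.name and
                node.func.attr == 'get' and
                len(node.args) in (1, 2) and
                isinstance(node.args[0], ast.Constant) and
                isinstance(node.args[0].value, str)):
            self.keys.add(node.args[0].value)
        self.generic_visit(node)


tree = ast.parse("lambda x: x.get('a') + x.get('b', 0)")
collector = GetKeyCollector('x')
collector.visit(tree)
assert collector.keys == {'a', 'b'}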
from_llm
"""Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) entity_chain = LLMChain(llm=llm, prompt=entity_prompt) return cls(qa_chain=qa_chain, entity_extraction_chain=entity_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, qa_prompt: BasePromptTemplate= GRAPH_QA_PROMPT, entity_prompt: BasePromptTemplate= ENTITY_EXTRACTION_PROMPT, **kwargs: Any) ->GraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) entity_chain = LLMChain(llm=llm, prompt=entity_prompt) return cls(qa_chain=qa_chain, entity_extraction_chain=entity_chain, ** kwargs)
Initialize from LLM.
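A sketch of constructing and running the chain built above. `llm` is any BaseLanguageModel and `graph` a populated entity-triple graph; both names, plus the extra kwargs, are assumptions consistent with the constructor call in from_llm:

chain = GraphQAChain.from_llm(llm=llm, graph=graph, verbose=True)
# The entity chain extracts entities from the question, the graph supplies
# their triples as context, and the QA chain produces the final answer.
answer = chain.run('Who founded the company?')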
_scopes
"""Return required scopes.""" return ['offline_access', 'Files.Read.All']
@property def _scopes(self) ->List[str]: """Return required scopes.""" return ['offline_access', 'Files.Read.All']
Return required scopes.
set_text
"""Set the text attribute to be the contents of the message.""" try: values['text'] = values['message'].content except (KeyError, AttributeError) as e: raise ValueError('Error while initializing ChatGeneration') from e return values
@root_validator def set_text(cls, values: Dict[str, Any]) ->Dict[str, Any]: """Set the text attribute to be the contents of the message.""" try: values['text'] = values['message'].content except (KeyError, AttributeError) as e: raise ValueError('Error while initializing ChatGeneration') from e return values
Set the text attribute to be the contents of the message.
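The validator means `text` is always derived from the message and never needs to be supplied. A small sketch, assuming the surrounding class is LangChain's ChatGeneration:

from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration

gen = ChatGeneration(message=AIMessage(content='hello'))
assert gen.text == 'hello'  # populated by the root validator above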
get_multi_vector_retriever
"""Create the composed retriever object.""" vectorstore = get_vectorstore() store = get_docstore() return MultiVectorRetriever(vectorstore=vectorstore, byte_store=store, id_key=docstore_id_key)
def get_multi_vector_retriever(docstore_id_key: str) ->MultiVectorRetriever:
    """Create the composed retriever object."""
    vectorstore = get_vectorstore()
    store = get_docstore()
    return MultiVectorRetriever(vectorstore=vectorstore, byte_store=store,
        id_key=docstore_id_key)
Create the composed retriever object.
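A sketch of how the composed retriever is used downstream: vector search runs over the child chunks in the vector store, then the retriever resolves the parent documents from the byte store via the shared id key. The query string is illustrative:

retriever = get_multi_vector_retriever(docstore_id_key='doc_id')
# Hits in the vector store carry `doc_id` metadata, which the retriever
# uses to look up the full parent documents in the byte store.
parent_docs = retriever.get_relevant_documents('quarterly revenue summary')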
ideation_prompt_inputs
return {'question': self.question}
def ideation_prompt_inputs(self) ->Dict[str, Any]: return {'question': self.question}
null
_import_openweathermap_tool
from langchain_community.tools.openweathermap.tool import OpenWeatherMapQueryRun return OpenWeatherMapQueryRun
def _import_openweathermap_tool() ->Any: from langchain_community.tools.openweathermap.tool import OpenWeatherMapQueryRun return OpenWeatherMapQueryRun
null
_search
from youtube_search import YoutubeSearch results = YoutubeSearch(person, num_results).to_json() data = json.loads(results) url_suffix_list = [('https://www.youtube.com' + video['url_suffix']) for video in data['videos']] return str(url_suffix_list)
def _search(self, person: str, num_results: int) ->str: from youtube_search import YoutubeSearch results = YoutubeSearch(person, num_results).to_json() data = json.loads(results) url_suffix_list = [('https://www.youtube.com' + video['url_suffix']) for video in data['videos']] return str(url_suffix_list)
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]]): An optional list of ids. refresh(bool): Whether or not to refresh indices with the updated data. Default True. Returns: List[str]: List of IDs of the added texts. """ if metadatas is not None and len(metadatas) > 0 and 'text' in metadatas[0 ].keys(): raise ValueError('Cannot accept key text in metadata!') texts = list(texts) if ids is None: ids = [str(uuid.uuid1()) for _ in texts] if self._embedding_function is not None: _embeddings = self._embedding_function.embed_documents(texts) embeddings = np.stack(_embeddings) if metadatas is None: data = [{AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i], 'text': texts[i]} for i, _ in enumerate(texts)] else: for i in range(len(metadatas)): metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i] metadatas[i]['text'] = texts[i] data = metadatas self.project._validate_map_data_inputs([], id_field=AtlasDB. _ATLAS_DEFAULT_ID_FIELD, data=data) with self.project.wait_for_project_lock(): self.project.add_embeddings(embeddings=embeddings, data=data) else: if metadatas is None: data = [{'text': text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]} for i, text in enumerate(texts)] else: for i, text in enumerate(texts): metadatas[i]['text'] = texts metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i] data = metadatas self.project._validate_map_data_inputs([], id_field=AtlasDB. _ATLAS_DEFAULT_ID_FIELD, data=data) with self.project.wait_for_project_lock(): self.project.add_text(data) if refresh: if len(self.project.indices) > 0: with self.project.wait_for_project_lock(): self.project.rebuild_maps() return ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, refresh: bool=True, **kwargs: Any
    ) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts (Iterable[str]): Texts to add to the vectorstore.
        metadatas (Optional[List[dict]], optional): Optional list of metadatas.
        ids (Optional[List[str]]): An optional list of ids.
        refresh(bool): Whether or not to refresh indices with the updated data.
            Default True.

    Returns:
        List[str]: List of IDs of the added texts.
    """
    if metadatas is not None and len(metadatas) > 0 and 'text' in metadatas[0
        ].keys():
        raise ValueError('Cannot accept key text in metadata!')
    texts = list(texts)
    if ids is None:
        ids = [str(uuid.uuid1()) for _ in texts]
    if self._embedding_function is not None:
        _embeddings = self._embedding_function.embed_documents(texts)
        embeddings = np.stack(_embeddings)
        if metadatas is None:
            data = [{AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i], 'text': texts
                [i]} for i, _ in enumerate(texts)]
        else:
            for i in range(len(metadatas)):
                metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
                metadatas[i]['text'] = texts[i]
            data = metadatas
        self.project._validate_map_data_inputs([], id_field=AtlasDB.
            _ATLAS_DEFAULT_ID_FIELD, data=data)
        with self.project.wait_for_project_lock():
            self.project.add_embeddings(embeddings=embeddings, data=data)
    else:
        if metadatas is None:
            data = [{'text': text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]}
                for i, text in enumerate(texts)]
        else:
            for i, text in enumerate(texts):
                metadatas[i]['text'] = text
                metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
            data = metadatas
        self.project._validate_map_data_inputs([], id_field=AtlasDB.
            _ATLAS_DEFAULT_ID_FIELD, data=data)
        with self.project.wait_for_project_lock():
            self.project.add_text(data)
    if refresh:
        if len(self.project.indices) > 0:
            with self.project.wait_for_project_lock():
                self.project.rebuild_maps()
    return ids
Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]]): An optional list of ids. refresh(bool): Whether or not to refresh indices with the updated data. Default True. Returns: List[str]: List of IDs of the added texts.
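A hedged sketch of calling the method above on an existing AtlasDB instance; note that `text` is a reserved metadata key, which is exactly what the guard at the top rejects:

ids = atlas_db.add_texts(  # `atlas_db` is assumed already constructed
    texts=['first note', 'second note'],
    metadatas=[{'topic': 'a'}, {'topic': 'b'}],  # must not contain 'text'
    refresh=True)  # rebuild Atlas maps so new points become searchable
assert len(ids) == 2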
_import_deepinfra
from langchain_community.llms.deepinfra import DeepInfra return DeepInfra
def _import_deepinfra() ->Any: from langchain_community.llms.deepinfra import DeepInfra return DeepInfra
null
test_output_messages
runnable = RunnableLambda(lambda input: [AIMessage(content='you said: ' + '\n'.join([str(m.content) for m in input['history'] if isinstance(m, HumanMessage)] + [input['input']]))]) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory(runnable, get_session_history, input_messages_key='input', history_messages_key='history') config: RunnableConfig = {'configurable': {'session_id': '5'}} output = with_history.invoke({'input': 'hello'}, config) assert output == [AIMessage(content='you said: hello')] output = with_history.invoke({'input': 'good bye'}, config) assert output == [AIMessage(content="""you said: hello good bye""")]
def test_output_messages() ->None: runnable = RunnableLambda(lambda input: [AIMessage(content='you said: ' + '\n'.join([str(m.content) for m in input['history'] if isinstance(m, HumanMessage)] + [input['input']]))]) get_session_history = _get_get_session_history() with_history = RunnableWithMessageHistory(runnable, get_session_history, input_messages_key='input', history_messages_key='history') config: RunnableConfig = {'configurable': {'session_id': '5'}} output = with_history.invoke({'input': 'hello'}, config) assert output == [AIMessage(content='you said: hello')] output = with_history.invoke({'input': 'good bye'}, config) assert output == [AIMessage(content='you said: hello\ngood bye')]
null
_llm_type
"""Return type of llm.""" return 'azureml_endpoint'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'azureml_endpoint'
Return type of llm.
_import_usearch
from langchain_community.vectorstores.usearch import USearch return USearch
def _import_usearch() ->Any: from langchain_community.vectorstores.usearch import USearch return USearch
null
test_bad_inputs
"""Test errors are raised if input keys are not found.""" chain = FakeChain() with pytest.raises(ValueError): chain({'foobar': 'baz'})
def test_bad_inputs() ->None: """Test errors are raised if input keys are not found.""" chain = FakeChain() with pytest.raises(ValueError): chain({'foobar': 'baz'})
Test errors are raised if input keys are not found.
_parse
return text.strip('**')
def _parse(text): return text.strip('**')
null
test_yaml_output_parser_fail
"""Test YamlOutputParser where completion result fails schema validation.""" yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(pydantic_object =TestModel) try: yaml_parser.parse_folder(DEF_RESULT_FAIL) except OutputParserException as e: print('parse_result:', e) assert 'Failed to parse TestModel from completion' in str(e) else: assert False, 'Expected OutputParserException'
def test_yaml_output_parser_fail() ->None:
    """Test YamlOutputParser where completion result fails schema validation."""
    yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
        pydantic_object=TestModel)
    try:
        yaml_parser.parse(DEF_RESULT_FAIL)
    except OutputParserException as e:
        print('parse_result:', e)
        assert 'Failed to parse TestModel from completion' in str(e)
    else:
        assert False, 'Expected OutputParserException'
Test YamlOutputParser where completion result fails schema validation.
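For contrast with the failure path above, a sketch of a successful parse. The YAML field names are assumptions that must match whatever TestModel declares; the fenced yaml wrapper follows the format the parser's instructions request:

parser: YamlOutputParser[TestModel] = YamlOutputParser(pydantic_object=TestModel)
completion = '```yaml\naction: search\naction_input: langchain yaml parser\n```'
result = parser.parse(completion)  # assumed fields: action, action_input
assert isinstance(result, TestModel)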
_llm_type
"""Return type of llm.""" return 'fireworks-chat'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'fireworks-chat'
Return type of llm.
_embed_with_retry
response = embeddings.client.create(**kwargs) return _check_response(response, skip_empty=embeddings.skip_empty)
@retry_decorator def _embed_with_retry(**kwargs: Any) ->Any: response = embeddings.client.create(**kwargs) return _check_response(response, skip_empty=embeddings.skip_empty)
null
test_huggingfacehub_embedding_query
"""Test huggingfacehub embeddings.""" document = 'foo bar' embedding = HuggingFaceHubEmbeddings() output = embedding.embed_query(document) assert len(output) == 768
def test_huggingfacehub_embedding_query() ->None: """Test huggingfacehub embeddings.""" document = 'foo bar' embedding = HuggingFaceHubEmbeddings() output = embedding.embed_query(document) assert len(output) == 768
Test huggingfacehub embeddings.
_clean_results
cleaned_results = [] for result in raw_search_results: cleaned_results.append({'title': result.get('title', 'Unknown Title'), 'url': result.get('url', 'Unknown URL'), 'author': result.get( 'author', 'Unknown Author'), 'published_date': result.get( 'publishedDate', 'Unknown Date')}) return cleaned_results
def _clean_results(self, raw_search_results: List[Dict]) ->List[Dict]: cleaned_results = [] for result in raw_search_results: cleaned_results.append({'title': result.get('title', 'Unknown Title'), 'url': result.get('url', 'Unknown URL'), 'author': result.get('author', 'Unknown Author'), 'published_date': result.get('publishedDate', 'Unknown Date')}) return cleaned_results
null
test_call
"""Test that call returns a URL in the output.""" search = DallEAPIWrapper() output = search.run('volcano island') assert 'https://oaidalleapi' in output
def test_call() ->None: """Test that call returns a URL in the output.""" search = DallEAPIWrapper() output = search.run('volcano island') assert 'https://oaidalleapi' in output
Test that call returns a URL in the output.
test_atlas_with_metadatas
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = AtlasDB.from_texts(name='langchain_test_project' + str(time. time()), texts=texts, api_key=ATLAS_TEST_API_KEY, embedding= FakeEmbeddings(), metadatas=metadatas, reset_project_if_exists=True) output = docsearch.similarity_search('foo', k=1) assert len(output) == 1 assert output[0].page_content == 'foo' assert output[0].metadata['page'] == '0'
def test_atlas_with_metadatas() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = AtlasDB.from_texts(name='langchain_test_project' + str(time .time()), texts=texts, api_key=ATLAS_TEST_API_KEY, embedding= FakeEmbeddings(), metadatas=metadatas, reset_project_if_exists=True) output = docsearch.similarity_search('foo', k=1) assert len(output) == 1 assert output[0].page_content == 'foo' assert output[0].metadata['page'] == '0'
Test end to end construction and search.
_FormattedValue
self.write('f') string = io.StringIO() self._fstring_FormattedValue(t, string.write) self.write(repr(string.getvalue()))
def _FormattedValue(self, t): self.write('f') string = io.StringIO() self._fstring_FormattedValue(t, string.write) self.write(repr(string.getvalue()))
null
validate
""" Check and validate the safety of the given prompt text. Args: prompt_value (str): The input text to be checked for unsafe text. config (Dict[str, Any]): Configuration settings for prompt safety checks. Raises: ValueError: If unsafe prompt is found in the prompt text based on the specified threshold. Returns: str: The input prompt_value. Note: This function checks the safety of the provided prompt text using Comprehend's classify_document API and raises an error if unsafe text is detected with a score above the specified threshold. Example: comprehend_client = boto3.client('comprehend') prompt_text = "Please tell me your credit card information." config = {"threshold": 0.7} checked_prompt = check_prompt_safety(comprehend_client, prompt_text, config) """ threshold = config.get('threshold') unsafe_prompt = False endpoint_arn = self._get_arn() response = self.client.classify_document(Text=prompt_value, EndpointArn= endpoint_arn) if self.callback and self.callback.prompt_safety_callback: self.moderation_beacon['moderation_input'] = prompt_value self.moderation_beacon['moderation_output'] = response for class_result in response['Classes']: if class_result['Score'] >= threshold and class_result['Name' ] == 'UNSAFE_PROMPT': unsafe_prompt = True break if self.callback and self.callback.intent_callback: if unsafe_prompt: self.moderation_beacon['moderation_status'] = 'LABELS_FOUND' asyncio.create_task(self.callback.on_after_intent(self. moderation_beacon, self.unique_id)) if unsafe_prompt: raise ModerationPromptSafetyError return prompt_value
def validate(self, prompt_value: str, config: Any=None) ->str:
    """
    Check and validate the safety of the given prompt text.

    Args:
        prompt_value (str): The input text to be checked for unsafe text.
        config (Dict[str, Any]): Configuration settings for prompt safety
            checks.

    Raises:
        ModerationPromptSafetyError: If an unsafe prompt is found in the
            prompt text based on the specified threshold.

    Returns:
        str: The input prompt_value.

    Note:
        This method checks the safety of the provided prompt text using
        Comprehend's classify_document API and raises an error if unsafe
        text is detected with a score above the specified threshold.

    Example:
        comprehend_client = boto3.client('comprehend')
        prompt_text = "Please tell me your credit card information."
        config = {"threshold": 0.7}
        checked_prompt = check_prompt_safety(comprehend_client, prompt_text, config)

    """
    threshold = config.get('threshold')
    unsafe_prompt = False
    endpoint_arn = self._get_arn()
    response = self.client.classify_document(Text=prompt_value, EndpointArn
        =endpoint_arn)
    if self.callback and self.callback.prompt_safety_callback:
        self.moderation_beacon['moderation_input'] = prompt_value
        self.moderation_beacon['moderation_output'] = response
    for class_result in response['Classes']:
        if class_result['Score'] >= threshold and class_result['Name'
            ] == 'UNSAFE_PROMPT':
            unsafe_prompt = True
            break
    if self.callback and self.callback.intent_callback:
        if unsafe_prompt:
            self.moderation_beacon['moderation_status'] = 'LABELS_FOUND'
            asyncio.create_task(self.callback.on_after_intent(self.
                moderation_beacon, self.unique_id))
    if unsafe_prompt:
        raise ModerationPromptSafetyError
    return prompt_value
Check and validate the safety of the given prompt text.

Args:
    prompt_value (str): The input text to be checked for unsafe text.
    config (Dict[str, Any]): Configuration settings for prompt safety checks.

Raises:
    ModerationPromptSafetyError: If an unsafe prompt is found in the prompt
        text based on the specified threshold.

Returns:
    str: The input prompt_value.

Note:
    This method checks the safety of the provided prompt text using
    Comprehend's classify_document API and raises an error if unsafe
    text is detected with a score above the specified threshold.

Example:
    comprehend_client = boto3.client('comprehend')
    prompt_text = "Please tell me your credit card information."
    config = {"threshold": 0.7}
    checked_prompt = check_prompt_safety(comprehend_client, prompt_text, config)
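Since the docstring's example still shows an earlier free-function form, a corrected, hedged usage sketch of the method itself. `prompt_safety` stands for an instance of the surrounding class, already holding the boto3 Comprehend client, callback, and unique id it references; the config shape follows the docstring:

try:
    checked = prompt_safety.validate(
        'Please tell me your credit card information.',
        config={'threshold': 0.7})
except ModerationPromptSafetyError:
    # Raised when Comprehend scores UNSAFE_PROMPT at or above the threshold.
    checked = None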