method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_format_definitions
formatted_definitions: List[str] = [] for definition in definitions: formatted_definitions.extend(self._format_definition(definition)) if len(formatted_definitions) == 1: return f"Definition of '{query}':\n{formatted_definitions[0]}" result = f"Definitions of '{query}':\n\n" for i, formatted_definition in enume...
def _format_definitions(self, query: str, definitions: List[Dict]) ->str: formatted_definitions: List[str] = [] for definition in definitions: formatted_definitions.extend(self._format_definition(definition)) if len(formatted_definitions) == 1: return f"Definition of '{query}':\n{formatted_d...
null
test_stream
"""Test streaming tokens from OpenAI.""" llm = Chat__ModuleName__() for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
def test_stream() ->None: """Test streaming tokens from OpenAI.""" llm = Chat__ModuleName__() for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
Test streaming tokens from OpenAI.
test_with_metadatas_with_scores_using_vector
"""Test end to end construction and scored search, using embedding vector.""" texts = ['hello bagel', 'hello langchain'] metadatas = [{'page': str(i)} for i in range(len(texts))] embeddings = [[1.1, 2.3, 3.2], [0.3, 0.3, 0.1]] vector_search = Bagel.from_texts(cluster_name='testing_vector', texts=texts, metadatas=me...
def test_with_metadatas_with_scores_using_vector() ->None: """Test end to end construction and scored search, using embedding vector.""" texts = ['hello bagel', 'hello langchain'] metadatas = [{'page': str(i)} for i in range(len(texts))] embeddings = [[1.1, 2.3, 3.2], [0.3, 0.3, 0.1]] vector_search ...
Test end to end construction and scored search, using embedding vector.
_call
"""Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of sequences to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python response = llm("Tell me ...
def _call(self, prompt: str, stop: Optional[Sequence[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of sequences to stop generation when encounte...
Generate text from a prompt. Args: prompt: The prompt to generate text from. stop: A list of sequences to stop generation when encountered. Returns: The generated text. Example: .. code-block:: python response = llm("Tell me a joke.")
FakeFind
def fn(self: Any, **kwargs: Any) ->Any: return attrdict({'resources': {'123': attrdict({'fields': {'456': attrdict({'paragraphs': {'123/t/text/0-14': attrdict({'text': 'This is a test', 'order': 0})}})}, 'data': {'texts': {'text': { 'body': 'This is a test'}}}, 'extra': attrdict({'metadata':...
def FakeFind(**args: Any) ->Any: def fn(self: Any, **kwargs: Any) ->Any: return attrdict({'resources': {'123': attrdict({'fields': {'456': attrdict({'paragraphs': {'123/t/text/0-14': attrdict({'text': 'This is a test', 'order': 0})}})}, 'data': {'texts': {'text': {'body'...
null
_get_attachments
"""Get all attachments from a page. Args: soup: BeautifulSoup4 soup object. Returns: List of attachments. """ from bs4 import BeautifulSoup, Tag content_list = soup.find('ul', {'class': 'contentList'}) if content_list is None: raise ValueError('No content list found...
def _get_attachments(self, soup: Any) ->List[str]: """Get all attachments from a page. Args: soup: BeautifulSoup4 soup object. Returns: List of attachments. """ from bs4 import BeautifulSoup, Tag content_list = soup.find('ul', {'class': 'contentList'}) i...
Get all attachments from a page. Args: soup: BeautifulSoup4 soup object. Returns: List of attachments.
on_tool_start
"""Do nothing when tool starts.""" pass
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, ** kwargs: Any) ->None: """Do nothing when tool starts.""" pass
Do nothing when tool starts.
requires_input
""" This evaluator does not require input. """ return False
@property def requires_input(self) ->bool: """ This evaluator does not require input. """ return False
This evaluator does not require input.
delete_by_document_id
""" Given this is a "similarity search" cache, an invalidation pattern that makes sense is first a lookup to get an ID, and then deleting with that ID. This is for the second step. """ self.collection.delete_one(document_id)
def delete_by_document_id(self, document_id: str) ->None: """ Given this is a "similarity search" cache, an invalidation pattern that makes sense is first a lookup to get an ID, and then deleting with that ID. This is for the second step. """ self.collection.delete_one(document_i...
Given this is a "similarity search" cache, an invalidation pattern that makes sense is first a lookup to get an ID, and then deleting with that ID. This is for the second step.
test_with_metadatas
"""Test end to end construction and search.""" texts = ['hello bagel', 'hello langchain'] metadatas = [{'metadata': str(i)} for i in range(len(texts))] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts, metadatas=metadatas) output = txt_search.similarity_search('hello bagel', k=1) assert output == [...
def test_with_metadatas() ->None: """Test end to end construction and search.""" texts = ['hello bagel', 'hello langchain'] metadatas = [{'metadata': str(i)} for i in range(len(texts))] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts, metadatas=metadatas) output = txt_searc...
Test end to end construction and search.
test_stream
"""Test that stream works.""" chat = PaiEasChatEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'), eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) response = chat(messages=[HumanMessage(content='Hell...
def test_stream() ->None: """Test that stream works.""" chat = PaiEasChatEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'), eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) re...
Test that stream works.
on_llm_new_token
"""Do nothing when a new token is generated.""" pass
def on_llm_new_token(self, token: str, **kwargs: Any) ->None: """Do nothing when a new token is generated.""" pass
Do nothing when a new token is generated.
test_load_valid_numeric_content
file_path = '/workspaces/langchain/test.json' expected_docs = [Document(page_content='99', metadata={'source': file_path, 'seq_num': 1}), Document(page_content='99.5', metadata={'source': file_path, 'seq_num': 2})] mocker.patch('builtins.open', mocker.mock_open()) mocker.patch('pathlib.Path.read_text', return_v...
def test_load_valid_numeric_content(mocker: MockerFixture) ->None: file_path = '/workspaces/langchain/test.json' expected_docs = [Document(page_content='99', metadata={'source': file_path, 'seq_num': 1}), Document(page_content='99.5', metadata={ 'source': file_path, 'seq_num': 2})] mocker.pa...
null
concatenate_rows
"""Combine message information in a readable format ready to be used. Args: row: dictionary containing message information. """ sender = row['sender_name'] text = row['content'] date = datetime.datetime.fromtimestamp(row['timestamp_ms'] / 1000).strftime( '%Y-%m-%d %H:%M:%S') return f'{sender} on {d...
def concatenate_rows(row: dict) ->str: """Combine message information in a readable format ready to be used. Args: row: dictionary containing message information. """ sender = row['sender_name'] text = row['content'] date = datetime.datetime.fromtimestamp(row['timestamp_ms'] / 1000 ...
Combine message information in a readable format ready to be used. Args: row: dictionary containing message information.
get_count_value
return result.get(key, 0) or 0
def get_count_value(key: str, result: Dict[str, Any]) ->int: return result.get(key, 0) or 0
null
combine_docs
"""Combine multiple documents recursively. Args: docs: List of documents to combine, assumed that each one is less than `token_max`. token_max: Recursively creates groups of documents less than this number of tokens. callbacks: Callbacks to be...
def combine_docs(self, docs: List[Document], token_max: Optional[int]=None, callbacks: Callbacks=None, **kwargs: Any) ->Tuple[str, dict]: """Combine multiple documents recursively. Args: docs: List of documents to combine, assumed that each one is less than `token_max`. ...
Combine multiple documents recursively. Args: docs: List of documents to combine, assumed that each one is less than `token_max`. token_max: Recursively creates groups of documents less than this number of tokens. callbacks: Callbacks to be passed through **kwargs: additional parameters...
from_embeddings
return cls(model=model, distance_func_name=distance_func_name, **kwargs)
@classmethod def from_embeddings(cls, model: Embeddings, distance_func_name: str= 'distance', **kwargs: Any) ->BaseOutputParser: return cls(model=model, distance_func_name=distance_func_name, **kwargs)
null
lazy_load
"""Lazy load text from the url(s) in web_path.""" for doc in self.load(): yield doc
def lazy_load(self) ->Iterator[Document]: """Lazy load text from the url(s) in web_path.""" for doc in self.load(): yield doc
Lazy load text from the url(s) in web_path.
test_json_equality_evaluator_requires_reference
assert json_equality_evaluator.requires_reference is True
def test_json_equality_evaluator_requires_reference(json_equality_evaluator: JsonEqualityEvaluator) ->None: assert json_equality_evaluator.requires_reference is True
null
get_title
"""Document title."""
@abstractmethod def get_title(self) ->str: """Document title."""
Document title.
test_scann_vector_sim
"""Test vector similarity.""" texts = ['foo', 'bar', 'baz'] docsearch = ScaNN.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content ='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]: Docume...
def test_scann_vector_sim() ->None: """Test vector similarity.""" texts = ['foo', 'bar', 'baz'] docsearch = ScaNN.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore({index_to_id[0]: Document( page_content='foo'), index_to_id[...
Test vector similarity.
chain
""" Process the given text to extract graph data and constructs a graph document from the extracted information. The constructed graph document is then added to the graph. Parameters: - text (str): The input text from which the information will be extracted to construct the graph. - allowed_nodes (...
def chain(text: str, allowed_nodes: Optional[List[str]]=None, allowed_relationships: Optional[List[str]]=None) ->str: """ Process the given text to extract graph data and constructs a graph document from the extracted information. The constructed graph document is then added to the graph. Parameter...
Process the given text to extract graph data and constructs a graph document from the extracted information. The constructed graph document is then added to the graph. Parameters: - text (str): The input text from which the information will be extracted to construct the graph. - allowed_nodes (Optional[List[str]]): A ...
test_get_action_and_input
"""Test getting an action from text.""" llm_output = """Thought: I need to search for NBA Action: Search Action Input: NBA""" action, action_input = get_action_and_input(llm_output) assert action == 'Search' assert action_input == 'NBA'
def test_get_action_and_input() ->None: """Test getting an action from text.""" llm_output = ( 'Thought: I need to search for NBA\nAction: Search\nAction Input: NBA') action, action_input = get_action_and_input(llm_output) assert action == 'Search' assert action_input == 'NBA'
Test getting an action from text.
_search
"""Return Elasticsearch documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. query_vector: Embedding to look up documents similar to. fetch_k: Number of candidates ...
def _search(self, query: Optional[str]=None, k: int=4, query_vector: Union[ List[float], None]=None, fetch_k: int=50, fields: Optional[List[str]]= None, filter: Optional[List[dict]]=None, custom_query: Optional[ Callable[[Dict, Union[str, None]], Dict]]=None, doc_builder: Optional[ Callable[[Dict], Docu...
Return Elasticsearch documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. query_vector: Embedding to look up documents similar to. fetch_k: Number of candidates to fetch from each shard. Defau...
__new__
"""Initialize the OpenAI object.""" model_name = data.get('model_name', '') if (model_name.startswith('gpt-3.5-turbo') or model_name.startswith('gpt-4') ) and '-instruct' not in model_name: warnings.warn( 'You are trying to use a chat model. This way of initializing it is no longer supported. Instead, p...
def __new__(cls, **data: Any) ->Union[OpenAIChat, BaseOpenAI]: """Initialize the OpenAI object.""" model_name = data.get('model_name', '') if (model_name.startswith('gpt-3.5-turbo') or model_name.startswith( 'gpt-4')) and '-instruct' not in model_name: warnings.warn( 'You are try...
Initialize the OpenAI object.
__init__
"""Initialize with Vectara API.""" self._vectara_customer_id = vectara_customer_id or os.environ.get( 'VECTARA_CUSTOMER_ID') self._vectara_corpus_id = vectara_corpus_id or os.environ.get( 'VECTARA_CORPUS_ID') self._vectara_api_key = vectara_api_key or os.environ.get('VECTARA_API_KEY') if self._vectara_customer_...
def __init__(self, vectara_customer_id: Optional[str]=None, vectara_corpus_id: Optional[str]=None, vectara_api_key: Optional[str]= None, vectara_api_timeout: int=120, source: str='langchain'): """Initialize with Vectara API.""" self._vectara_customer_id = vectara_customer_id or os.environ.get( '...
Initialize with Vectara API.
_stop
return ['\nObservation:']
@property def _stop(self) ->List[str]: return ['\nObservation:']
null
load
"""Load a TileDB index from a URI. Args: index_uri: The URI of the TileDB vector index. embedding: Embeddings to use when generating queries. metric: Optional, Metric to use for indexing. Defaults to "euclidean". config: Optional, TileDB config timest...
@classmethod def load(cls, index_uri: str, embedding: Embeddings, *, metric: str= DEFAULT_METRIC, config: Optional[Mapping[str, Any]]=None, timestamp: Any=None, **kwargs: Any) ->TileDB: """Load a TileDB index from a URI. Args: index_uri: The URI of the TileDB vector index. e...
Load a TileDB index from a URI. Args: index_uri: The URI of the TileDB vector index. embedding: Embeddings to use when generating queries. metric: Optional, Metric to use for indexing. Defaults to "euclidean". config: Optional, TileDB config timestamp: Optional, timestamp to use for opening the arr...
_create_message_dicts
message_dicts = [convert_message_to_dict(m) for m in messages] return message_dicts
def _create_message_dicts(self, messages: List[BaseMessage]) ->List[Dict[ str, Any]]: message_dicts = [convert_message_to_dict(m) for m in messages] return message_dicts
null
input_keys
"""Return the input keys. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Return the input keys. :meta private: """ return [self.input_key]
Return the input keys. :meta private:
_load_documents_from_folder
"""Load documents from a Dropbox folder.""" dbx = self._create_dropbox_client() try: from dropbox import exceptions from dropbox.files import FileMetadata except ImportError: raise ImportError('You must run `pip install dropbox') try: results = dbx.files_list_folder(folder_path, recursive=self.recursive...
def _load_documents_from_folder(self, folder_path: str) ->List[Document]: """Load documents from a Dropbox folder.""" dbx = self._create_dropbox_client() try: from dropbox import exceptions from dropbox.files import FileMetadata except ImportError: raise ImportError('You must run...
Load documents from a Dropbox folder.
_create_retry_decorator
"""Define retry mechanism.""" import fireworks.client errors = [fireworks.client.error.RateLimitError, fireworks.client.error. InternalServerError, fireworks.client.error.BadGatewayError, fireworks. client.error.ServiceUnavailableError] return create_base_retry_decorator(error_types=errors, max_retries=llm. ...
def _create_retry_decorator(llm: ChatFireworks, run_manager: Optional[Union [AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None ) ->Callable[[Any], Any]: """Define retry mechanism.""" import fireworks.client errors = [fireworks.client.error.RateLimitError, fireworks.client.error ...
Define retry mechanism.
_continuous_recognize
done = False text = '' def stop_cb(evt: Any) ->None: """callback that stop continuous recognition""" speech_recognizer.stop_continuous_recognition_async() nonlocal done done = True def retrieve_cb(evt: Any) ->None: """callback that retrieves the intermediate recognition results""" nonlocal text ...
def _continuous_recognize(self, speech_recognizer: Any) ->str: done = False text = '' def stop_cb(evt: Any) ->None: """callback that stop continuous recognition""" speech_recognizer.stop_continuous_recognition_async() nonlocal done done = True def retrieve_cb(evt: Any) ...
null
convert_to_openai_function
"""Convert a raw function/class to an OpenAI function. Args: function: Either a dictionary, a pydantic.BaseModel class, or a Python function. If a dictionary is passed in, it is assumed to already be a valid OpenAI function. Returns: A dict version of the passed in func...
def convert_to_openai_function(function: Union[Dict[str, Any], Type[ BaseModel], Callable]) ->Dict[str, Any]: """Convert a raw function/class to an OpenAI function. Args: function: Either a dictionary, a pydantic.BaseModel class, or a Python function. If a dictionary is passed in, it is...
Convert a raw function/class to an OpenAI function. Args: function: Either a dictionary, a pydantic.BaseModel class, or a Python function. If a dictionary is passed in, it is assumed to already be a valid OpenAI function. Returns: A dict version of the passed in function which is compatible wi...
test__collapse_docs_metadata
"""Test collapse documents functionality when metadata exists.""" metadata1 = {'source': 'a', 'foo': 2, 'bar': '1', 'extra1': 'foo'} metadata2 = {'source': 'b', 'foo': '3', 'bar': 2, 'extra2': 'bar'} docs = [Document(page_content='foo', metadata=metadata1), Document( page_content='bar', metadata=metadata2)] output ...
def test__collapse_docs_metadata() ->None: """Test collapse documents functionality when metadata exists.""" metadata1 = {'source': 'a', 'foo': 2, 'bar': '1', 'extra1': 'foo'} metadata2 = {'source': 'b', 'foo': '3', 'bar': 2, 'extra2': 'bar'} docs = [Document(page_content='foo', metadata=metadata1), Doc...
Test collapse documents functionality when metadata exists.
test_timescalevector_with_filter_distant_match
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVI...
def test_timescalevector_with_filter_distant_match() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection_filter', embed...
Test end to end construction and search.
process_thread_images
text = '' try: from PIL import Image from pytesseract import pytesseract except ImportError: raise ImportError( '`Pillow or pytesseract` package not found, please run `pip install Pillow` or `pip install pytesseract`' ) for img in tree.iter('img'): src = img.get('src') if not src or ...
def process_thread_images(self, tree: ElementTree) ->str: text = '' try: from PIL import Image from pytesseract import pytesseract except ImportError: raise ImportError( '`Pillow or pytesseract` package not found, please run `pip install Pillow` or `pip install pytesserac...
null
run
"""Run query through Google Trends with Serpapi""" serpapi_api_key = cast(SecretStr, self.serp_api_key) params = {'engine': 'google_lens', 'api_key': serpapi_api_key. get_secret_value(), 'url': query} queryURL = ( f"https://serpapi.com/search?engine={params['engine']}&api_key={params['api_key']}&url={params['ur...
def run(self, query: str) ->str: """Run query through Google Trends with Serpapi""" serpapi_api_key = cast(SecretStr, self.serp_api_key) params = {'engine': 'google_lens', 'api_key': serpapi_api_key. get_secret_value(), 'url': query} queryURL = ( f"https://serpapi.com/search?engine={para...
Run query through Google Trends with Serpapi
_get_channel_id_map
"""Get a dictionary mapping channel names to their respective IDs.""" with zipfile.ZipFile(zip_path, 'r') as zip_file: try: with zip_file.open('channels.json', 'r') as f: channels = json.load(f) return {channel['name']: channel['id'] for channel in channels} except KeyError: ...
@staticmethod def _get_channel_id_map(zip_path: Path) ->Dict[str, str]: """Get a dictionary mapping channel names to their respective IDs.""" with zipfile.ZipFile(zip_path, 'r') as zip_file: try: with zip_file.open('channels.json', 'r') as f: channels = json.load(f) ...
Get a dictionary mapping channel names to their respective IDs.
parse_llm_output
""" Based on the prompt we expect the result to be a string that looks like: '[{"row_start": 12, "row_end": 19, "col_start": 1, "col_end": 12, "contents": "Entity ID"}]' We'll load that JSON and turn it into a Pydantic model """ return [LLMPlateResponse(**plate_r) for plate_r in json.loads(result)...
def parse_llm_output(result: str): """ Based on the prompt we expect the result to be a string that looks like: '[{"row_start": 12, "row_end": 19, "col_start": 1, "col_end": 12, "contents": "Entity ID"}]' We'll load that JSON and turn it into a Pydantic model """ return [LLMPlateResponse(*...
Based on the prompt we expect the result to be a string that looks like: '[{"row_start": 12, "row_end": 19, "col_start": 1, "col_end": 12, "contents": "Entity ID"}]' We'll load that JSON and turn it into a Pydantic model
_call
known_values = inputs.copy() _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() for i, chain in enumerate(self.chains): callbacks = _run_manager.get_child() outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks ) known_values.update(outputs) return {k: ...
def _call(self, inputs: Dict[str, str], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, str]: known_values = inputs.copy() _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() for i, chain in enumerate(self.chains): callbacks = _run_manager.get_child()...
null
test_serialization_of_wellknown_objects
"""Test that pydantic is able to serialize and deserialize well known objects.""" class WellKnownLCObject(BaseModel): """A well known LangChain object.""" __root__: Union[Document, HumanMessage, SystemMessage, ChatMessage, FunctionMessage, AIMessage, HumanMessageChunk, SystemMessageChunk, ChatMe...
def test_serialization_of_wellknown_objects() ->None: """Test that pydantic is able to serialize and deserialize well known objects.""" class WellKnownLCObject(BaseModel): """A well known LangChain object.""" __root__: Union[Document, HumanMessage, SystemMessage, ChatMessage, Funct...
Test that pydantic is able to serialize and deserialize well known objects.
get_prompt_input_key
""" Get the prompt input key. Args: inputs: Dict[str, Any] memory_variables: List[str] Returns: A prompt input key. """ prompt_input_keys = list(set(inputs).difference(memory_variables + ['stop'])) if len(prompt_input_keys) != 1: raise ValueError(f'One input key expected go...
def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str] ) ->str: """ Get the prompt input key. Args: inputs: Dict[str, Any] memory_variables: List[str] Returns: A prompt input key. """ prompt_input_keys = list(set(inputs).difference(memory_varia...
Get the prompt input key. Args: inputs: Dict[str, Any] memory_variables: List[str] Returns: A prompt input key.
test_prompt_jinja2_missing_input_variables
"""Test error is raised when input variables are not provided.""" prefix = 'Starting with {{ foo }}' suffix = 'Ending with {{ bar }}' with pytest.warns(UserWarning): FewShotPromptTemplate(input_variables=[], suffix=suffix, examples= example_jinja2_prompt[1], example_prompt=example_jinja2_prompt[0], ...
@pytest.mark.requires('jinja2') def test_prompt_jinja2_missing_input_variables(example_jinja2_prompt: Tuple [PromptTemplate, List[Dict[str, str]]]) ->None: """Test error is raised when input variables are not provided.""" prefix = 'Starting with {{ foo }}' suffix = 'Ending with {{ bar }}' with pytes...
Test error is raised when input variables are not provided.
_stream
params: Dict[str, Any] = self._invocation_params(stop=stop, stream=True, ** kwargs) for stream_resp in stream_generate_with_retry(self, prompt=prompt, **params): chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp)) yield chunk if run_manager: run_manager.on_llm_new_token(chunk...
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: params: Dict[str, Any] = self._invocation_params(stop=stop, stream=True, **kwargs) for stream_resp in stream_generate_with_retry(self,...
null
_test_parse_value
parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})')) actual = parsed.value assert actual == x
def _test_parse_value(x: Any) ->None: parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})')) actual = parsed.value assert actual == x
null
test_load_tools_with_callback_manager_raises_deprecation_warning
"""Test load_tools raises a deprecation for old callback manager kwarg.""" callback_manager = MagicMock() with pytest.warns(DeprecationWarning, match='callback_manager is deprecated'): tools = load_tools(['requests_get'], callback_manager=callback_manager) assert len(tools) == 1 assert tools[0].callbacks == callbac...
def test_load_tools_with_callback_manager_raises_deprecation_warning() ->None: """Test load_tools raises a deprecation for old callback manager kwarg.""" callback_manager = MagicMock() with pytest.warns(DeprecationWarning, match= 'callback_manager is deprecated'): tools = load_tools(['reques...
Test load_tools raises a deprecation for old callback manager kwarg.
test_undefined_deprecation_schedule
"""This test is expected to fail until we defined a deprecation schedule.""" with pytest.raises(NotImplementedError): warn_deprecated('1.0.0', pending=False)
def test_undefined_deprecation_schedule() ->None: """This test is expected to fail until we defined a deprecation schedule.""" with pytest.raises(NotImplementedError): warn_deprecated('1.0.0', pending=False)
This test is expected to fail until we defined a deprecation schedule.
_parse_kv_pairs
result = [] for kv_pair in kv_pairs: key = kv_pair.key.content if kv_pair.key else '' value = kv_pair.value.content if kv_pair.value else '' result.append((key, value)) return result
def _parse_kv_pairs(self, kv_pairs: List[Any]) ->List[Any]: result = [] for kv_pair in kv_pairs: key = kv_pair.key.content if kv_pair.key else '' value = kv_pair.value.content if kv_pair.value else '' result.append((key, value)) return result
null
from_llm
"""Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) ngql_generation_chain = LLMChain(llm=llm, prompt=ngql_prompt) return cls(qa_chain=qa_chain, ngql_generation_chain=ngql_generation_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate= CYPHER_QA_PROMPT, ngql_prompt: BasePromptTemplate= NGQL_GENERATION_PROMPT, **kwargs: Any) ->NebulaGraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) ngql_generation_chain = L...
Initialize from LLM.
seq_naive_rag_alt
context = ['Hi there!', 'How are you?', "What's your name?"] retriever = RunnableLambda(lambda x: context) prompt = PromptTemplate.from_template('{context} {question}') llm = FakeListLLM(responses=['hello']) return Context.setter('input') | {'context': retriever | Context.setter( 'context'), 'question': RunnablePas...
def seq_naive_rag_alt() ->Runnable: context = ['Hi there!', 'How are you?', "What's your name?"] retriever = RunnableLambda(lambda x: context) prompt = PromptTemplate.from_template('{context} {question}') llm = FakeListLLM(responses=['hello']) return Context.setter('input') | {'context': retriever |...
null
_run
"""Run the tool.""" return self.requests_wrapper.get(_clean_url(url))
def _run(self, url: str, run_manager: Optional[CallbackManagerForToolRun]=None ) ->str: """Run the tool.""" return self.requests_wrapper.get(_clean_url(url))
Run the tool.
similarity_search
"""Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fet...
def similarity_search(self, query: str, k: int=4, filter: Optional[Dict[str, Any]]=None, fetch_k: int=20, **kwargs: Any) ->List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. ...
Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults...
_create_chat_result
generations = [] for choice in response['choices']: message = ChatMlflow._convert_dict_to_message(choice['message']) usage = choice.get('usage', {}) gen = ChatGeneration(message=message, generation_info=usage) generations.append(gen) usage = response.get('usage', {}) return ChatResult(generations=genera...
@staticmethod def _create_chat_result(response: Mapping[str, Any]) ->ChatResult: generations = [] for choice in response['choices']: message = ChatMlflow._convert_dict_to_message(choice['message']) usage = choice.get('usage', {}) gen = ChatGeneration(message=message, generation_info=usag...
null
similarity_search_by_vector_with_score
"""Return pinecone documents most similar to embedding, along with scores.""" if namespace is None: namespace = self._namespace docs = [] results = self._index.query([embedding], top_k=k, include_metadata=True, namespace=namespace, filter=filter) for res in results['matches']: metadata = res['metadata'] ...
def similarity_search_by_vector_with_score(self, embedding: List[float], *, k: int=4, filter: Optional[dict]=None, namespace: Optional[str]=None ) ->List[Tuple[Document, float]]: """Return pinecone documents most similar to embedding, along with scores.""" if namespace is None: namespace = self....
Return pinecone documents most similar to embedding, along with scores.
_identifying_params
"""Get the identifying parameters.""" return {**{'model_key': self.model_key}, **{'model_url_slug': self. model_url_slug}, **{'model_kwargs': self.model_kwargs}}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{'model_key': self.model_key}, **{'model_url_slug': self. model_url_slug}, **{'model_kwargs': self.model_kwargs}}
Get the identifying parameters.
mock_collection_config
return CollectionConfig(name='test_collection', description= 'Test Collection', metadata={'key': 'value'}, embedding_dimensions= VECTOR_DIMS, is_auto_embedded=True)
@pytest.fixture
def mock_collection_config() -> CollectionConfig:
    """Fixture: a CollectionConfig describing an auto-embedded test collection."""
    config_kwargs = {
        'name': 'test_collection',
        'description': 'Test Collection',
        'metadata': {'key': 'value'},
        'embedding_dimensions': VECTOR_DIMS,
        'is_auto_embedded': True,
    }
    return CollectionConfig(**config_kwargs)
null
validate_environment
"""Validate that api key and python package exists in environment.""" gooseai_api_key = convert_to_secret_str(get_from_dict_or_env(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')) values['gooseai_api_key'] = gooseai_api_key try: import openai openai.api_key = gooseai_api_key.get_secret_value() openai.api_...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = convert_to_secret_str(get_from_dict_or_env(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')) values['gooseai_api_key'] = gooseai_api_key try:...
Validate that api key and python package exists in environment.
from_univariate_prompt
"""instantiation depends on component chains *Security note*: The building blocks of this class include the implementation of an AI technique that generates SQL code. If those SQL commands are executed, it's critical to ensure they use credentials that are narrowly-scoped to...
@classmethod def from_univariate_prompt(cls, llm: BaseLanguageModel, **kwargs: Any ) ->CPALChain: """instantiation depends on component chains *Security note*: The building blocks of this class include the implementation of an AI technique that generates SQL code. If those SQL commands ...
instantiation depends on component chains *Security note*: The building blocks of this class include the implementation of an AI technique that generates SQL code. If those SQL commands are executed, it's critical to ensure they use credentials that are narrowly-scoped to only include the permissions this ...
input_keys
"""Input keys.""" return self.the_input_keys
@property
def input_keys(self) -> List[str]:
    """Names of the inputs this component expects."""
    keys = self.the_input_keys
    return keys
Input keys.
get_output_schema
return create_model('ChainOutput', **{k: (Any, None) for k in self.output_keys} )
def get_output_schema(self, config: Optional[RunnableConfig] = None) -> Type[BaseModel]:
    """Build a pydantic model whose fields mirror this chain's output keys.

    Every field is typed ``Any`` with a default of ``None``.
    """
    field_definitions = {key: (Any, None) for key in self.output_keys}
    return create_model('ChainOutput', **field_definitions)
null
get_parser
"""Get a parser by parser name.""" if parser_name not in _REGISTRY: raise ValueError(f'Unknown parser combination: {parser_name}') return _REGISTRY[parser_name]()
def get_parser(parser_name: str) -> BaseBlobParser:
    """Get a parser by parser name.

    Raises:
        ValueError: if ``parser_name`` is not registered.
    """
    if parser_name in _REGISTRY:
        return _REGISTRY[parser_name]()
    raise ValueError(f'Unknown parser combination: {parser_name}')
Get a parser by parser name.
_generate_helper
try: output = self._generate(prompts, stop=stop, run_manager=run_managers[0] if run_managers else None, **kwargs ) if new_arg_supported else self._generate(prompts, stop=stop) except BaseException as e: for run_manager in run_managers: run_manager.on_llm_error(e, response=LLMResult(gener...
def _generate_helper(self, prompts: List[str], stop: Optional[List[str]], run_managers: List[CallbackManagerForLLMRun], new_arg_supported: bool, **kwargs: Any) ->LLMResult: try: output = self._generate(prompts, stop=stop, run_manager= run_managers[0] if run_managers else None, **kwargs ...
null
lc_secrets
return {'konko_api_key': 'KONKO_API_KEY', 'openai_api_key': 'OPENAI_API_KEY'}
@property
def lc_secrets(self) -> Dict[str, str]:
    """Map secret constructor fields to their environment-variable names."""
    secret_env_map = {
        'konko_api_key': 'KONKO_API_KEY',
        'openai_api_key': 'OPENAI_API_KEY',
    }
    return secret_env_map
null
color_mapping
return get_color_mapping([tool.name for tool in self.agent_executor.tools], excluded_colors=['green', 'red'])
@property
def color_mapping(self) -> Dict[str, str]:
    """Assign a display color to each tool, excluding green and red."""
    tool_names = [tool.name for tool in self.agent_executor.tools]
    return get_color_mapping(tool_names, excluded_colors=['green', 'red'])
null
test_pairwise_embedding_distance_eval_chain_cosine_similarity
"""Test the cosine similarity.""" pairwise_embedding_distance_eval_chain.distance_metric = (EmbeddingDistance .COSINE) result = pairwise_embedding_distance_eval_chain._compute_score(np.array( vectors)) expected = 1.0 - np.dot(vectors[0], vectors[1]) / (np.linalg.norm(vectors[0 ]) * np.linalg.norm(vectors[1]...
@pytest.mark.requires('scipy') def test_pairwise_embedding_distance_eval_chain_cosine_similarity( pairwise_embedding_distance_eval_chain: PairwiseEmbeddingDistanceEvalChain, vectors: Tuple[np.ndarray, np.ndarray] ) ->None: """Test the cosine similarity.""" pairwise_embedding_distance_eval_chain.dist...
Test the cosine similarity.
_warn_on_import
"""Warn on import of deprecated module.""" if _is_interactive_env(): return if replacement: warnings.warn( f'Importing {name} from langchain root module is no longer supported. Please use {replacement} instead.' ) else: warnings.warn( f'Importing {name} from langchain root module is ...
def _warn_on_import(name: str, replacement: Optional[str]=None) ->None: """Warn on import of deprecated module.""" if _is_interactive_env(): return if replacement: warnings.warn( f'Importing {name} from langchain root module is no longer supported. Please use {replacement} instea...
Warn on import of deprecated module.
__or__
if isinstance(other, RunnableSequence): return RunnableSequence(self.first, *self.middle, self.last, other. first, *other.middle, other.last, name=self.name or other.name) else: return RunnableSequence(self.first, *self.middle, self.last, coerce_to_runnable(other), name=self.name)
def __or__(self, other: Union[Runnable[Any, Other], Callable[[Any], Other], Callable[[Iterator[Any]], Iterator[Other]], Mapping[str, Union[Runnable [Any, Other], Callable[[Any], Other], Any]]]) ->RunnableSerializable[ Input, Other]: if isinstance(other, RunnableSequence): return RunnableSequence...
null
transform
yield from self._transform_stream_with_config(input, self._transform, config, **kwargs)
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig
    ]=None, **kwargs: Any) ->Iterator[Dict[str, Any]]:
    """Transform a stream of inputs by delegating each chunk to
    ``self._transform``, wrapped with config/callback management.

    Args:
        input: Iterator of input chunks to transform.
        config: Optional runnable configuration (callbacks, tags, etc.).
        **kwargs: Extra arguments forwarded to the transform helper.

    Yields:
        Transformed output dictionaries, one per produced chunk.
    """
    yield from self._transform_stream_with_config(input, self._transform,
        config, **kwargs)
null
test_prompt_jinja2_functionality
prefix = 'Starting with {{ foo }}' suffix = 'Ending with {{ bar }}' prompt = FewShotPromptTemplate(input_variables=['foo', 'bar'], suffix= suffix, prefix=prefix, examples=example_jinja2_prompt[1], example_prompt=example_jinja2_prompt[0], template_format='jinja2') output = prompt.format(foo='hello', bar='bye') e...
@pytest.mark.requires('jinja2') def test_prompt_jinja2_functionality(example_jinja2_prompt: Tuple[ PromptTemplate, List[Dict[str, str]]]) ->None: prefix = 'Starting with {{ foo }}' suffix = 'Ending with {{ bar }}' prompt = FewShotPromptTemplate(input_variables=['foo', 'bar'], suffix= suffix, pre...
null
load_local
"""Load the local specified table of standalone vearch. Returns: Success or failure of loading the local specified table """ if not path_or_url: raise ValueError('No metadata path!!!') if not table_name: raise ValueError('No table name!!!') table_path = os.path.join(path_or_url, tabl...
@classmethod def load_local(cls, embedding: Embeddings, path_or_url: Optional[str]=None, table_name: str=_DEFAULT_TABLE_NAME, db_name: str= _DEFAULT_CLUSTER_DB_NAME, flag: int=_DEFAULT_VERSION, **kwargs: Any ) ->Vearch: """Load the local specified table of standalone vearch. Returns: ...
Load the local specified table of standalone vearch. Returns: Success or failure of loading the local specified table
_default_params
params: Dict[str, Any] = {'gateway_uri': self.gateway_uri, 'route': self. route, **self.params.dict() if self.params else {}} return params
@property
def _default_params(self) -> Dict[str, Any]:
    """Default request parameters: gateway URI, route, plus any extra params."""
    extra = self.params.dict() if self.params else {}
    defaults: Dict[str, Any] = {'gateway_uri': self.gateway_uri, 'route': self.route}
    defaults.update(extra)
    return defaults
null
__init__
""" Initialize with a PyVespa client. """ try: from vespa.application import Vespa except ImportError: raise ImportError( 'Could not import Vespa python package. Please install it with `pip install pyvespa`.' ) if not isinstance(app, Vespa): raise ValueError( f'app sh...
def __init__(self, app: Any, embedding_function: Optional[Embeddings]=None, page_content_field: Optional[str]=None, embedding_field: Optional[str]= None, input_field: Optional[str]=None, metadata_fields: Optional[List[ str]]=None) ->None: """ Initialize with a PyVespa client. """ try...
Initialize with a PyVespa client.
test_astradb_vectorstore_create_delete
"""Create and delete.""" emb = SomeEmbeddings(dimension=2) v_store = AstraDB(embedding=emb, collection_name='lc_test_1', token=os. environ['ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[ 'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE')) v_store.delete_collection() from astrapy.db ...
def test_astradb_vectorstore_create_delete(self) ->None: """Create and delete.""" emb = SomeEmbeddings(dimension=2) v_store = AstraDB(embedding=emb, collection_name='lc_test_1', token=os. environ['ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[ 'ASTRA_DB_API_ENDPOINT'], namespace=os.e...
Create and delete.
add_texts
create_index(texts, self.index, self.embeddings, self.sparse_encoder, ids= ids, metadatas=metadatas, namespace=namespace)
def add_texts(self, texts: List[str], ids: Optional[List[str]] = None,
        metadatas: Optional[List[dict]] = None, namespace: Optional[str] = None
        ) -> None:
    """Embed and upsert the given texts into the underlying index.

    Delegates to ``create_index`` with this store's index, dense embeddings,
    and sparse encoder.
    """
    optional_args = {'ids': ids, 'metadatas': metadatas, 'namespace': namespace}
    create_index(texts, self.index, self.embeddings, self.sparse_encoder,
        **optional_args)
null
load_memory_variables
"""Return history buffer.""" if self.return_messages: final_buffer: Any = self.buffer else: final_buffer = get_buffer_string(self.buffer, human_prefix=self. human_prefix, ai_prefix=self.ai_prefix) return {self.memory_key: final_buffer}
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
    """Return history buffer.

    When ``return_messages`` is set, the raw buffer is returned; otherwise it
    is rendered to a single string with the configured prefixes.
    """
    if self.return_messages:
        history: Any = self.buffer
    else:
        history = get_buffer_string(self.buffer, human_prefix=self.
            human_prefix, ai_prefix=self.ai_prefix)
    return {self.memory_key: history}
Return history buffer.
_call
"""Call out to Writer's completions endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python ...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call out to Writer's completions endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when g...
Call out to Writer's completions endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = Writer("Tell me a joke.")
test_document_found
"""Test document found.""" _dict = {'foo': Document(page_content='bar')} docstore = InMemoryDocstore(_dict) output = docstore.search('foo') assert isinstance(output, Document) assert output.page_content == 'bar'
def test_document_found() -> None:
    """A stored Document is retrievable by key via search()."""
    store = InMemoryDocstore({'foo': Document(page_content='bar')})
    found = store.search('foo')
    assert isinstance(found, Document)
    assert found.page_content == 'bar'
Test document found.
__init__
self.openai_api_key = openai_api_key or get_from_env('openai_api_key', 'OPENAI_API_KEY') self.openai_api_model = openai_api_model or get_from_env('openai_api_model', 'OPENAI_API_MODEL')
def __init__(self, openai_api_key: Optional[str] = None, openai_api_model:
        Optional[str] = None) -> None:
    """Store the OpenAI key and model name.

    Missing (falsy) values fall back to the OPENAI_API_KEY /
    OPENAI_API_MODEL environment variables via ``get_from_env``.
    """
    if not openai_api_key:
        openai_api_key = get_from_env('openai_api_key', 'OPENAI_API_KEY')
    if not openai_api_model:
        openai_api_model = get_from_env('openai_api_model', 'OPENAI_API_MODEL')
    self.openai_api_key = openai_api_key
    self.openai_api_model = openai_api_model
null
embed_documents
"""Return simple embeddings. Embeddings encode each text as its index.""" return [([float(1.0)] * 9 + [float(i)]) for i in range(len(texts))]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
    """Return simple embeddings. Embeddings encode each text as its index."""
    embeddings = []
    for position in range(len(texts)):
        embeddings.append([1.0] * 9 + [float(position)])
    return embeddings
Return simple embeddings. Embeddings encode each text as its index.
_get_figma_file
"""Get Figma file from Figma REST API.""" headers = {'X-Figma-Token': self.access_token} request = urllib.request.Request(self._construct_figma_api_url(), headers= headers) with urllib.request.urlopen(request) as response: json_data = json.loads(response.read().decode()) return json_data
def _get_figma_file(self) -> Any:
    """Get Figma file from Figma REST API.

    Authenticates with the instance's access token and returns the decoded
    JSON payload.
    """
    auth_headers = {'X-Figma-Token': self.access_token}
    api_request = urllib.request.Request(self._construct_figma_api_url(),
        headers=auth_headers)
    with urllib.request.urlopen(api_request) as api_response:
        raw_body = api_response.read().decode()
    return json.loads(raw_body)
Get Figma file from Figma REST API.
_get_google_cloud_texttospeech
return GoogleCloudTextToSpeechTool(**kwargs)
def _get_google_cloud_texttospeech(**kwargs: Any) ->BaseTool:
    """Factory: construct a GoogleCloudTextToSpeechTool, forwarding all kwargs."""
    return GoogleCloudTextToSpeechTool(**kwargs)
null
on_chain_start
"""Run when chain starts running.""" aim = import_aim() self.step += 1 self.chain_starts += 1 self.starts += 1 resp = {'action': 'on_chain_start'} resp.update(self.get_custom_callback_meta()) inputs_res = deepcopy(inputs) self._run.track(aim.Text(inputs_res['input']), name='on_chain_start', context=resp)
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) ->None: """Run when chain starts running.""" aim = import_aim() self.step += 1 self.chain_starts += 1 self.starts += 1 resp = {'action': 'on_chain_start'} resp.update(self.get_custom_callback_meta...
Run when chain starts running.
elasticsearch_url
"""Return the elasticsearch url.""" from elasticsearch import Elasticsearch url = 'http://localhost:9200' yield url es = Elasticsearch(hosts=url) index_names = es.indices.get(index='_all').keys() for index_name in index_names: es.indices.delete(index=index_name)
@pytest.fixture(scope='class', autouse=True) def elasticsearch_url(self) ->Union[str, Generator[str, None, None]]: """Return the elasticsearch url.""" from elasticsearch import Elasticsearch url = 'http://localhost:9200' yield url es = Elasticsearch(hosts=url) index_names = es.indices.get(index=...
Return the elasticsearch url.
tool_run_logging_kwargs
return {'llm_prefix': '', 'observation_prefix': '' if len(self.stop) == 0 else self.stop[0]}
def tool_run_logging_kwargs(self) -> Dict:
    """Logging kwargs for a tool run: an empty LLM prefix, and the first
    stop sequence (if any) as the observation prefix."""
    observation_prefix = self.stop[0] if len(self.stop) > 0 else ''
    return {'llm_prefix': '', 'observation_prefix': observation_prefix}
null
_llm_type
"""Return type of llm.""" return 'fake-list'
@property
def _llm_type(self) ->str:
    """Return type of llm (the constant identifier 'fake-list')."""
    return 'fake-list'
Return type of llm.
on_llm_end
if parent_run_id is None: self.increment()
def on_llm_end(self, response: LLMResult, *, run_id: UUID, parent_run_id:
        Optional[UUID] = None, **kwargs: Any) -> Any:
    """On LLM completion, bump the counter — but only for top-level runs
    (those with no parent run)."""
    if parent_run_id is not None:
        return
    self.increment()
null
test_valid_call
"""Test valid call of LLM chain.""" output = fake_llm_chain({'bar': 'baz'}) assert output == {'bar': 'baz', 'text1': 'foo'} output = fake_llm_chain({'bar': 'baz', 'stop': ['foo']}) assert output == {'bar': 'baz', 'stop': ['foo'], 'text1': 'bar'}
def test_valid_call(fake_llm_chain: LLMChain) -> None:
    """Test valid call of LLM chain."""
    result = fake_llm_chain({'bar': 'baz'})
    assert result == {'bar': 'baz', 'text1': 'foo'}
    result = fake_llm_chain({'bar': 'baz', 'stop': ['foo']})
    assert result == {'bar': 'baz', 'stop': ['foo'], 'text1': 'bar'}
Test valid call of LLM chain.
_stream
params = self._prepare_params(stop=stop, stream=True, **kwargs) for stream_resp in completion_with_retry(self, [prompt], stream=True, is_gemini=self._is_gemini_model, run_manager=run_manager, **params): chunk = self._response_to_generation(stream_resp) yield chunk if run_manager: run_manager.on_...
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: params = self._prepare_params(stop=stop, stream=True, **kwargs) for stream_resp in completion_with_retry(self, [prompt], stream=True, ...
null
_type
return 'agent_trajectory'
@property
def _type(self) ->str:
    """Identifier for this evaluator type."""
    return 'agent_trajectory'
null
test_get_all_10sec_timeout
start_time = time.time() contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c' with pytest.raises(RuntimeError): BlockchainDocumentLoader(contract_address=contract_address, blockchainType=BlockchainType.ETH_MAINNET, api_key=os.environ[ 'ALCHEMY_API_KEY'], get_all_tokens=True, max_execution...
@pytest.mark.skipif(not alchemyKeySet, reason='Alchemy API key not provided.') def test_get_all_10sec_timeout() ->None: start_time = time.time() contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c' with pytest.raises(RuntimeError): BlockchainDocumentLoader(contract_address=contract_addres...
null
test_math_question_infinite_loop
"""Test simple question.""" question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?""" prompt = MATH_PROMPT.format(question=question) queries = {prompt: _MATH_SOLUTION_INFINITE_LO...
def test_math_question_infinite_loop() ->None: """Test simple question.""" question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?""" prompt = MATH_PROMPT.format(quest...
Test simple question.
test_pgvector_retriever_search_threshold_custom_normalization_fn
"""Test searching with threshold and custom normalization function""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, connecti...
def test_pgvector_retriever_search_threshold_custom_normalization_fn() ->None: """Test searching with threshold and custom normalization function""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = PGVector.from_texts(texts=texts, collection_name= ...
Test searching with threshold and custom normalization function
mget
"""Get the values associated with the given keys.""" return [(value.encode('utf-8') if value is not None else None) for value in self.underlying_store.mget(keys)]
def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]:
    """Get the values associated with the given keys.

    Strings from the underlying store are UTF-8 encoded; missing entries
    stay ``None``.
    """
    raw_values = self.underlying_store.mget(keys)
    return [None if raw is None else raw.encode('utf-8') for raw in raw_values]
Get the values associated with the given keys.
load
""" :param space_key: Space key retrieved from a confluence URL, defaults to None :type space_key: Optional[str], optional :param page_ids: List of specific page IDs to load, defaults to None :type page_ids: Optional[List[str]], optional :param label: Get all pages with this labe...
def load(self, space_key: Optional[str]=None, page_ids: Optional[List[str]] =None, label: Optional[str]=None, cql: Optional[str]=None, include_restricted_content: bool=False, include_archived_content: bool= False, include_attachments: bool=False, include_comments: bool=False, content_format: ContentForm...
:param space_key: Space key retrieved from a confluence URL, defaults to None :type space_key: Optional[str], optional :param page_ids: List of specific page IDs to load, defaults to None :type page_ids: Optional[List[str]], optional :param label: Get all pages with this label, defaults to None :type label: Optional[st...
load
"""Download a selected dataset. Returns: a list of Documents. """ return list(self.lazy_load())
def load(self) -> List[Document]:
    """Download a selected dataset.

    Returns:
        a list of Documents.
    """
    documents = list(self.lazy_load())
    return documents
Download a selected dataset. Returns: a list of Documents.
_clean_response
return re.sub(f'^{self.name} ', '', text.strip()).strip()
def _clean_response(self, text: str) -> str:
    """Strip whitespace and a leading '<name> ' prefix from the response.

    NOTE(review): self.name is interpolated into the regex unescaped —
    presumably it never contains regex metacharacters; confirm upstream.
    """
    trimmed = text.strip()
    without_prefix = re.sub(f'^{self.name} ', '', trimmed)
    return without_prefix.strip()
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None:
    """The module's __all__ must exactly match the expected export list."""
    assert set(__all__) == set(EXPECTED_ALL)
null
test_mosaicml_embedding_documents
"""Test MosaicML embeddings.""" documents = ['foo bar'] embedding = MosaicMLInstructorEmbeddings() output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 768
def test_mosaicml_embedding_documents() -> None:
    """Test MosaicML embeddings."""
    docs = ['foo bar']
    embedder = MosaicMLInstructorEmbeddings()
    vectors = embedder.embed_documents(docs)
    assert len(vectors) == 1
    assert len(vectors[0]) == 768
Test MosaicML embeddings.
test_from_documents_inner_product
"""Test end to end construction and search.""" documents = [Document(page_content='Dogs are tough.', metadata={'a': 1}), Document(page_content='Cats have fluff.', metadata={'b': 1}), Document( page_content='What is a sandwich?', metadata={'c': 1}), Document( page_content='That fence is purple.', metadata={'...
def test_from_documents_inner_product(self, azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None: """Test end to end construction and search.""" documents = [Document(page_content='Dogs are tough.', metadata={'a': 1} ), Document(page_content='Cats have fluff.', metadata={'b': 1}), ...
Test end to end construction and search.