method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_import_openweathermap
|
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
return OpenWeatherMapAPIWrapper
|
def _import_openweathermap() ->Any:
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
return OpenWeatherMapAPIWrapper
| null |
lazy_parse
|
"""Extract the first character of a blob."""
yield Document(page_content=blob.as_string()[0])
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the first character of a blob."""
yield Document(page_content=blob.as_string()[0])
|
Extract the first character of a blob.
|
_get_examples
|
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError
|
def _get_examples(self, **kwargs: Any) ->List[dict]:
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError
| null |
results
|
"""Run query through GoogleSearch."""
return self._google_serper_api_results(query, gl=self.gl, hl=self.hl, num=
self.k, tbs=self.tbs, search_type=self.type, **kwargs)
|
def results(self, query: str, **kwargs: Any) ->Dict:
"""Run query through GoogleSearch."""
return self._google_serper_api_results(query, gl=self.gl, hl=self.hl,
num=self.k, tbs=self.tbs, search_type=self.type, **kwargs)
|
Run query through GoogleSearch.
|
input_keys
|
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
return [self.input_key]
| null |
test_simple_pipeline
|
prompt_a = PromptTemplate.from_template('{foo}')
prompt_b = PromptTemplate.from_template('{bar}')
pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_b,
pipeline_prompts=[('bar', prompt_a)])
output = pipeline_prompt.format(foo='jim')
assert output == 'jim'
|
def test_simple_pipeline() ->None:
prompt_a = PromptTemplate.from_template('{foo}')
prompt_b = PromptTemplate.from_template('{bar}')
pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_b,
pipeline_prompts=[('bar', prompt_a)])
output = pipeline_prompt.format(foo='jim')
assert output == 'jim'
| null |
random_index_name
|
return f'langchain-test-index-{random_string()}'
|
@pytest.fixture(scope='function')
def random_index_name() ->str:
return f'langchain-test-index-{random_string()}'
| null |
__init__
|
"""Create a new graph."""
try:
import networkx as nx
except ImportError:
raise ImportError(
'Could not import networkx python package. Please install it with `pip install networkx`.'
)
if graph is not None:
if not isinstance(graph, nx.DiGraph):
raise ValueError('Passed in graph is not of correct shape')
self._graph = graph
else:
self._graph = nx.DiGraph()
|
def __init__(self, graph: Optional[Any]=None) ->None:
"""Create a new graph."""
try:
import networkx as nx
except ImportError:
raise ImportError(
'Could not import networkx python package. Please install it with `pip install networkx`.'
)
if graph is not None:
if not isinstance(graph, nx.DiGraph):
raise ValueError('Passed in graph is not of correct shape')
self._graph = graph
else:
self._graph = nx.DiGraph()
|
Create a new graph.
|
completion_with_retry
|
"""Use tenacity to retry the completion call."""
if is_openai_v1():
return llm.client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
return llm.client.create(**kwargs)
return _completion_with_retry(**kwargs)
|
def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
if is_openai_v1():
return llm.client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
return llm.client.create(**kwargs)
return _completion_with_retry(**kwargs)
|
Use tenacity to retry the completion call.
|
transform
|
runnable, config = self._prepare(config)
return runnable.transform(input, config, **kwargs)
|
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig
]=None, **kwargs: Optional[Any]) ->Iterator[Output]:
runnable, config = self._prepare(config)
return runnable.transform(input, config, **kwargs)
| null |
embed_documents
|
"""Embed documents using a Deep Infra deployed embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [f'{self.embed_instruction}{text}' for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Embed documents using a Deep Infra deployed embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [f'{self.embed_instruction}{text}' for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
|
Embed documents using a Deep Infra deployed embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
test_api_key_masked_when_passed_via_constructor
|
llm = AlephAlpha(aleph_alpha_api_key='secret-api-key')
print(llm.aleph_alpha_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
@pytest.mark.requires('aleph_alpha_client')
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
) ->None:
llm = AlephAlpha(aleph_alpha_api_key='secret-api-key')
print(llm.aleph_alpha_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
| null |
_run
|
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
warn_deprecated(since='0.0.319', message=
'This tool will be deprecated on 2023-11-17. See https://nla.zapier.com/sunset/ for details'
)
return self.api_wrapper.list_as_str()
|
def _run(self, _: str='', run_manager: Optional[CallbackManagerForToolRun]=None
) ->str:
"""Use the Zapier NLA tool to return a list of all exposed user actions."""
warn_deprecated(since='0.0.319', message=
'This tool will be deprecated on 2023-11-17. See https://nla.zapier.com/sunset/ for details'
)
return self.api_wrapper.list_as_str()
|
Use the Zapier NLA tool to return a list of all exposed user actions.
|
__deepcopy__
|
return self
|
def __deepcopy__(self, memo: dict) ->'FakeCallbackHandler':
return self
| null |
test_chat_ai_endpoints_model
|
"""Test wrapper handles model."""
chat = ChatNVIDIA(model='mistral')
assert chat.model == 'mistral'
|
def test_chat_ai_endpoints_model() ->None:
"""Test wrapper handles model."""
chat = ChatNVIDIA(model='mistral')
assert chat.model == 'mistral'
|
Test wrapper handles model.
|
test_get_code_lines_mixed_blocks
|
text = """
Unrelated text
```bash
echo hello
ls && pwd && ls
```
```python
print("hello")
```
```bash
echo goodbye
```
"""
code_lines = output_parser.parse_folder(text)
assert code_lines == ['echo hello', 'ls && pwd && ls', 'echo goodbye']
|
def test_get_code_lines_mixed_blocks(output_parser: BashOutputParser) ->None:
text = """
Unrelated text
```bash
echo hello
ls && pwd && ls
```
```python
print("hello")
```
```bash
echo goodbye
```
"""
code_lines = output_parser.parse_folder(text)
assert code_lines == ['echo hello', 'ls && pwd && ls', 'echo goodbye']
| null |
test_find_all_links_ignore_prefix
|
html = 'href="{prefix}foobar"'
for prefix in PREFIXES_TO_IGNORE:
actual = find_all_links(html.format(prefix=prefix))
assert actual == []
html = 'href="foobar{prefix}more"'
for prefix in PREFIXES_TO_IGNORE:
if prefix == '#':
continue
actual = find_all_links(html.format(prefix=prefix))
assert actual == [f'foobar{prefix}more']
|
def test_find_all_links_ignore_prefix() ->None:
html = 'href="{prefix}foobar"'
for prefix in PREFIXES_TO_IGNORE:
actual = find_all_links(html.format(prefix=prefix))
assert actual == []
html = 'href="foobar{prefix}more"'
for prefix in PREFIXES_TO_IGNORE:
if prefix == '#':
continue
actual = find_all_links(html.format(prefix=prefix))
assert actual == [f'foobar{prefix}more']
| null |
on_tool_end
|
self._container.markdown(f'**{output}**')
|
def on_tool_end(self, output: str, color: Optional[str]=None,
observation_prefix: Optional[str]=None, llm_prefix: Optional[str]=None,
**kwargs: Any) ->None:
self._container.markdown(f'**{output}**')
| null |
test_initialization_with_string_layer
|
layer_url = 'https://example.com/layer_url'
with patch('arcgis.features.FeatureLayer', return_value=mock_feature_layer):
loader = ArcGISLoader(layer=layer_url, gis=mock_gis)
assert loader.url == layer_url
|
def test_initialization_with_string_layer(arcgis_mocks, mock_feature_layer,
mock_gis):
layer_url = 'https://example.com/layer_url'
with patch('arcgis.features.FeatureLayer', return_value=mock_feature_layer
):
loader = ArcGISLoader(layer=layer_url, gis=mock_gis)
assert loader.url == layer_url
| null |
test_initialization
|
"""Test chat model initialization."""
ChatAnthropicMessages(model_name='claude-instant-1.2', anthropic_api_key='xyz')
|
def test_initialization() ->None:
"""Test chat model initialization."""
ChatAnthropicMessages(model_name='claude-instant-1.2',
anthropic_api_key='xyz')
|
Test chat model initialization.
|
label
|
"""The expander's label string."""
return self._label
|
@property
def label(self) ->str:
"""The expander's label string."""
return self._label
|
The expander's label string.
|
save_context
|
"""Save the context of this chain run to memory."""
|
@abstractmethod
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
"""Save the context of this chain run to memory."""
|
Save the context of this chain run to memory.
|
load
|
return list(self._client.lazy_load_docs(self.query))
|
def load(self) ->List[Document]:
return list(self._client.lazy_load_docs(self.query))
| null |
_replace_new_line
|
value = match.group(2)
value = re.sub('\\n', '\\\\n', value)
value = re.sub('\\r', '\\\\r', value)
value = re.sub('\\t', '\\\\t', value)
value = re.sub('(?<!\\\\)"', '\\"', value)
return match.group(1) + value + match.group(3)
|
def _replace_new_line(match: re.Match[str]) ->str:
value = match.group(2)
value = re.sub('\\n', '\\\\n', value)
value = re.sub('\\r', '\\\\r', value)
value = re.sub('\\t', '\\\\t', value)
value = re.sub('(?<!\\\\)"', '\\"', value)
return match.group(1) + value + match.group(3)
| null |
_parse_response
|
return response['public_id']
|
def _parse_response(self, response: dict) ->str:
return response['public_id']
| null |
test_marqo_with_scores
|
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
marqo_search = Marqo.from_texts(texts=texts, metadatas=metadatas,
index_name=INDEX_NAME, url=DEFAULT_MARQO_URL, api_key=
DEFAULT_MARQO_API_KEY, verbose=False)
results = marqo_search.similarity_search_with_score('foo', k=3)
docs = [r[0] for r in results]
scores = [r[1] for r in results]
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
assert scores[0] > scores[1] > scores[2]
|
def test_marqo_with_scores(client: Marqo) ->None:
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
marqo_search = Marqo.from_texts(texts=texts, metadatas=metadatas,
index_name=INDEX_NAME, url=DEFAULT_MARQO_URL, api_key=
DEFAULT_MARQO_API_KEY, verbose=False)
results = marqo_search.similarity_search_with_score('foo', k=3)
docs = [r[0] for r in results]
scores = [r[1] for r in results]
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
assert scores[0] > scores[1] > scores[2]
|
Test end to end construction and search with scores and IDs.
|
log
|
...
|
@abstractmethod
def log(self, event: TEvent) ->None:
...
| null |
img_prompt_func
|
"""
Ollama prompt for image analysis.
:param data_dict: A dict with images and a user-provided question.
:param num_images: Number of images to include in the prompt.
:return: A list containing message objects for each image and the text prompt.
"""
messages = []
if data_dict['context']['images']:
for image in data_dict['context']['images'][:num_images]:
image_message = {'type': 'image_url', 'image_url':
f'data:image/jpeg;base64,{image}'}
messages.append(image_message)
text_message = {'type': 'text', 'text':
"""You are a helpful assistant that gives a description of food pictures.
Give a detailed summary of the image.
"""
}
messages.append(text_message)
return [HumanMessage(content=messages)]
|
def img_prompt_func(data_dict, num_images=1):
"""
Ollama prompt for image analysis.
:param data_dict: A dict with images and a user-provided question.
:param num_images: Number of images to include in the prompt.
:return: A list containing message objects for each image and the text prompt.
"""
messages = []
if data_dict['context']['images']:
for image in data_dict['context']['images'][:num_images]:
image_message = {'type': 'image_url', 'image_url':
f'data:image/jpeg;base64,{image}'}
messages.append(image_message)
text_message = {'type': 'text', 'text':
"""You are a helpful assistant that gives a description of food pictures.
Give a detailed summary of the image.
"""
}
messages.append(text_message)
return [HumanMessage(content=messages)]
|
Ollama prompt for image analysis.
:param data_dict: A dict with images and a user-provided question.
:param num_images: Number of images to include in the prompt.
:return: A list containing message objects for each image and the text prompt.
|
__init__
|
self.queue = asyncio.Queue()
self.done = asyncio.Event()
|
def __init__(self) ->None:
self.queue = asyncio.Queue()
self.done = asyncio.Event()
| null |
validate_environment
|
"""Validate that the python package exists in environment."""
is_gemini = is_gemini_model(values['model_name'])
cls._try_init_vertexai(values)
try:
from vertexai.language_models import ChatModel, CodeChatModel
if is_gemini:
from vertexai.preview.generative_models import GenerativeModel
except ImportError:
raise_vertex_import_error()
if is_gemini:
values['client'] = GenerativeModel(model_name=values['model_name'])
else:
if is_codey_model(values['model_name']):
model_cls = CodeChatModel
else:
model_cls = ChatModel
values['client'] = model_cls.from_pretrained(values['model_name'])
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that the python package exists in environment."""
is_gemini = is_gemini_model(values['model_name'])
cls._try_init_vertexai(values)
try:
from vertexai.language_models import ChatModel, CodeChatModel
if is_gemini:
from vertexai.preview.generative_models import GenerativeModel
except ImportError:
raise_vertex_import_error()
if is_gemini:
values['client'] = GenerativeModel(model_name=values['model_name'])
else:
if is_codey_model(values['model_name']):
model_cls = CodeChatModel
else:
model_cls = ChatModel
values['client'] = model_cls.from_pretrained(values['model_name'])
return values
|
Validate that the python package exists in environment.
|
test_cosine_similarity_zero
|
X = np.zeros((3, 3))
Y = np.random.random((3, 3))
expected = np.zeros((3, 3))
actual = cosine_similarity(X, Y)
assert np.allclose(expected, actual)
|
def test_cosine_similarity_zero() ->None:
X = np.zeros((3, 3))
Y = np.random.random((3, 3))
expected = np.zeros((3, 3))
actual = cosine_similarity(X, Y)
assert np.allclose(expected, actual)
| null |
on_tool_error_common
|
self.errors += 1
|
def on_tool_error_common(self) ->None:
self.errors += 1
| null |
from_texts
|
"""Construct Pinecone wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
The `pool_threads` affects the speed of the upsert operations.
Example:
.. code-block:: python
from langchain_community.vectorstores import Pinecone
from langchain_community.embeddings import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
embeddings = OpenAIEmbeddings()
pinecone = Pinecone.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
pinecone_index = cls.get_pinecone_index(index_name, pool_threads)
pinecone = cls(pinecone_index, embedding, text_key, namespace, **kwargs)
pinecone.add_texts(texts, metadatas=metadatas, ids=ids, namespace=namespace,
batch_size=batch_size, embedding_chunk_size=embeddings_chunk_size, **
upsert_kwargs or {})
return pinecone
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[dict]]=None, ids: Optional[List[str]]=None, batch_size:
int=32, text_key: str='text', namespace: Optional[str]=None, index_name:
Optional[str]=None, upsert_kwargs: Optional[dict]=None, pool_threads:
int=4, embeddings_chunk_size: int=1000, **kwargs: Any) ->Pinecone:
"""Construct Pinecone wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
The `pool_threads` affects the speed of the upsert operations.
Example:
.. code-block:: python
from langchain_community.vectorstores import Pinecone
from langchain_community.embeddings import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
embeddings = OpenAIEmbeddings()
pinecone = Pinecone.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
pinecone_index = cls.get_pinecone_index(index_name, pool_threads)
pinecone = cls(pinecone_index, embedding, text_key, namespace, **kwargs)
pinecone.add_texts(texts, metadatas=metadatas, ids=ids, namespace=
namespace, batch_size=batch_size, embedding_chunk_size=
embeddings_chunk_size, **upsert_kwargs or {})
return pinecone
|
Construct Pinecone wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
The `pool_threads` affects the speed of the upsert operations.
Example:
.. code-block:: python
from langchain_community.vectorstores import Pinecone
from langchain_community.embeddings import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
embeddings = OpenAIEmbeddings()
pinecone = Pinecone.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
|
from_files
|
"""Construct Vectara wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Vectara
vectara = Vectara.from_files(
files_list,
vectara_customer_id=customer_id,
vectara_corpus_id=corpus_id,
vectara_api_key=api_key,
)
"""
vectara = cls(**kwargs)
vectara.add_files(files, metadatas)
return vectara
|
@classmethod
def from_files(cls: Type[Vectara], files: List[str], embedding: Optional[
Embeddings]=None, metadatas: Optional[List[dict]]=None, **kwargs: Any
) ->Vectara:
"""Construct Vectara wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Vectara
vectara = Vectara.from_files(
files_list,
vectara_customer_id=customer_id,
vectara_corpus_id=corpus_id,
vectara_api_key=api_key,
)
"""
vectara = cls(**kwargs)
vectara.add_files(files, metadatas)
return vectara
|
Construct Vectara wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Vectara
vectara = Vectara.from_files(
files_list,
vectara_customer_id=customer_id,
vectara_corpus_id=corpus_id,
vectara_api_key=api_key,
)
|
_ensure_cache_exists
|
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(
f'Unexpected response cache creation: {create_cache_response}')
|
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str
) ->None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(
f'Unexpected response cache creation: {create_cache_response}')
|
Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
|
format_place_details
|
try:
name = place_details.get('result', {}).get('name', 'Unknown')
address = place_details.get('result', {}).get('formatted_address',
'Unknown')
phone_number = place_details.get('result', {}).get('formatted_phone_number'
, 'Unknown')
website = place_details.get('result', {}).get('website', 'Unknown')
place_id = place_details.get('result', {}).get('place_id', 'Unknown')
formatted_details = f"""{name}
Address: {address}
Google place ID: {place_id}
Phone: {phone_number}
Website: {website}
"""
return formatted_details
except Exception as e:
logging.error(f'An error occurred while formatting place details: {e}')
return None
|
def format_place_details(self, place_details: Dict[str, Any]) ->Optional[str]:
try:
name = place_details.get('result', {}).get('name', 'Unknown')
address = place_details.get('result', {}).get('formatted_address',
'Unknown')
phone_number = place_details.get('result', {}).get(
'formatted_phone_number', 'Unknown')
website = place_details.get('result', {}).get('website', 'Unknown')
place_id = place_details.get('result', {}).get('place_id', 'Unknown')
formatted_details = f"""{name}
Address: {address}
Google place ID: {place_id}
Phone: {phone_number}
Website: {website}
"""
return formatted_details
except Exception as e:
logging.error(f'An error occurred while formatting place details: {e}')
return None
| null |
is_async
|
"""Whether the callback manager is async."""
return False
|
@property
def is_async(self) ->bool:
"""Whether the callback manager is async."""
return False
|
Whether the callback manager is async.
|
__init__
|
"""
Args:
query: The query to execute.
database: The database to connect to. Defaults to ":memory:".
read_only: Whether to open the database in read-only mode.
Defaults to False.
config: A dictionary of configuration options to pass to the database.
Optional.
page_content_columns: The columns to write into the `page_content`
of the document. Optional.
metadata_columns: The columns to write into the `metadata` of the document.
Optional.
"""
self.query = query
self.database = database
self.read_only = read_only
self.config = config or {}
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
|
def __init__(self, query: str, database: str=':memory:', read_only: bool=
False, config: Optional[Dict[str, str]]=None, page_content_columns:
Optional[List[str]]=None, metadata_columns: Optional[List[str]]=None):
"""
Args:
query: The query to execute.
database: The database to connect to. Defaults to ":memory:".
read_only: Whether to open the database in read-only mode.
Defaults to False.
config: A dictionary of configuration options to pass to the database.
Optional.
page_content_columns: The columns to write into the `page_content`
of the document. Optional.
metadata_columns: The columns to write into the `metadata` of the document.
Optional.
"""
self.query = query
self.database = database
self.read_only = read_only
self.config = config or {}
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
|
Args:
query: The query to execute.
database: The database to connect to. Defaults to ":memory:".
read_only: Whether to open the database in read-only mode.
Defaults to False.
config: A dictionary of configuration options to pass to the database.
Optional.
page_content_columns: The columns to write into the `page_content`
of the document. Optional.
metadata_columns: The columns to write into the `metadata` of the document.
Optional.
|
load_memory_variables
|
"""Return history buffer."""
buffer = self.buffer
if self.moving_summary_buffer != '':
first_messages: List[BaseMessage] = [self.summary_message_cls(content=
self.moving_summary_buffer)]
buffer = first_messages + buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(buffer, human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix)
return {self.memory_key: final_buffer}
|
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]:
"""Return history buffer."""
buffer = self.buffer
if self.moving_summary_buffer != '':
first_messages: List[BaseMessage] = [self.summary_message_cls(
content=self.moving_summary_buffer)]
buffer = first_messages + buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(buffer, human_prefix=self.
human_prefix, ai_prefix=self.ai_prefix)
return {self.memory_key: final_buffer}
|
Return history buffer.
|
test_pipelineai_call
|
"""Test valid call to Pipeline Cloud."""
llm = PipelineAI()
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_pipelineai_call() ->None:
"""Test valid call to Pipeline Cloud."""
llm = PipelineAI()
output = llm('Say foo:')
assert isinstance(output, str)
|
Test valid call to Pipeline Cloud.
|
_format_chat_history
|
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
|
def _format_chat_history(chat_history: List[Tuple[str, str]]):
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
| null |
test_llm_chain_filter
|
texts = ['What happened to all of my cookies?',
'I wish there were better Italian restaurants in my neighborhood.',
'My favorite color is green']
docs = [Document(page_content=t) for t in texts]
relevant_filter = LLMChainFilter.from_llm(llm=ChatOpenAI())
actual = relevant_filter.compress_documents(docs,
'Things I said related to food')
assert len(actual) == 2
assert len(set(texts[:2]).intersection([d.page_content for d in actual])) == 2
|
def test_llm_chain_filter() ->None:
texts = ['What happened to all of my cookies?',
'I wish there were better Italian restaurants in my neighborhood.',
'My favorite color is green']
docs = [Document(page_content=t) for t in texts]
relevant_filter = LLMChainFilter.from_llm(llm=ChatOpenAI())
actual = relevant_filter.compress_documents(docs,
'Things I said related to food')
assert len(actual) == 2
assert len(set(texts[:2]).intersection([d.page_content for d in actual])
) == 2
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
_import_huggingface_endpoint
|
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
|
def _import_huggingface_endpoint() ->Any:
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
| null |
on_chain_end
|
"""Run when chain ends running."""
|
def on_chain_end(self, outputs: Dict[str, Any], *, run_id: UUID,
parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
"""Run when chain ends running."""
|
Run when chain ends running.
|
check_package_version
|
"""Check the version of a package."""
imported_version = parse(version(package))
if lt_version is not None and imported_version >= parse(lt_version):
raise ValueError(
f'Expected {package} version to be < {lt_version}. Received {imported_version}.'
)
if lte_version is not None and imported_version > parse(lte_version):
raise ValueError(
f'Expected {package} version to be <= {lte_version}. Received {imported_version}.'
)
if gt_version is not None and imported_version <= parse(gt_version):
raise ValueError(
f'Expected {package} version to be > {gt_version}. Received {imported_version}.'
)
if gte_version is not None and imported_version < parse(gte_version):
raise ValueError(
f'Expected {package} version to be >= {gte_version}. Received {imported_version}.'
)
|
def check_package_version(package: str, lt_version: Optional[str]=None,
lte_version: Optional[str]=None, gt_version: Optional[str]=None,
gte_version: Optional[str]=None) ->None:
"""Check the version of a package."""
imported_version = parse(version(package))
if lt_version is not None and imported_version >= parse(lt_version):
raise ValueError(
f'Expected {package} version to be < {lt_version}. Received {imported_version}.'
)
if lte_version is not None and imported_version > parse(lte_version):
raise ValueError(
f'Expected {package} version to be <= {lte_version}. Received {imported_version}.'
)
if gt_version is not None and imported_version <= parse(gt_version):
raise ValueError(
f'Expected {package} version to be > {gt_version}. Received {imported_version}.'
)
if gte_version is not None and imported_version < parse(gte_version):
raise ValueError(
f'Expected {package} version to be >= {gte_version}. Received {imported_version}.'
)
|
Check the version of a package.
|
_get_functions_single_prompt
|
if suffix is not None:
suffix_to_use = suffix
if include_df_in_prompt:
suffix_to_use = suffix_to_use.format(df_head=str(df.head(
number_of_head_rows).to_markdown()))
elif include_df_in_prompt:
suffix_to_use = FUNCTIONS_WITH_DF.format(df_head=str(df.head(
number_of_head_rows).to_markdown()))
else:
suffix_to_use = ''
if prefix is None:
prefix = PREFIX_FUNCTIONS
tools = [PythonAstREPLTool(locals={'df': df})]
system_message = SystemMessage(content=prefix + suffix_to_use)
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
return prompt, tools
|
def _get_functions_single_prompt(df: Any, prefix: Optional[str]=None,
suffix: Optional[str]=None, include_df_in_prompt: Optional[bool]=True,
number_of_head_rows: int=5) ->Tuple[BasePromptTemplate, List[
PythonAstREPLTool]]:
if suffix is not None:
suffix_to_use = suffix
if include_df_in_prompt:
suffix_to_use = suffix_to_use.format(df_head=str(df.head(
number_of_head_rows).to_markdown()))
elif include_df_in_prompt:
suffix_to_use = FUNCTIONS_WITH_DF.format(df_head=str(df.head(
number_of_head_rows).to_markdown()))
else:
suffix_to_use = ''
if prefix is None:
prefix = PREFIX_FUNCTIONS
tools = [PythonAstREPLTool(locals={'df': df})]
system_message = SystemMessage(content=prefix + suffix_to_use)
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
return prompt, tools
| null |
_build_search_url
|
url_suffix = get_from_env('', 'AZURE_COGNITIVE_SEARCH_URL_SUFFIX',
DEFAULT_URL_SUFFIX)
base_url = f'https://{self.service_name}.{url_suffix}/'
endpoint_path = (
f'indexes/{self.index_name}/docs?api-version={self.api_version}')
top_param = f'&$top={self.top_k}' if self.top_k else ''
return base_url + endpoint_path + f'&search={query}' + top_param
|
def _build_search_url(self, query: str) ->str:
    """Compose the full Azure Cognitive Search request URL for ``query``."""
    suffix = get_from_env('', 'AZURE_COGNITIVE_SEARCH_URL_SUFFIX',
        DEFAULT_URL_SUFFIX)
    # Only constrain the result count when top_k is truthy.
    top = f'&$top={self.top_k}' if self.top_k else ''
    return (f'https://{self.service_name}.{suffix}/'
        f'indexes/{self.index_name}/docs?api-version={self.api_version}'
        f'&search={query}' + top)
| null |
_identifying_params
|
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'endpoint_name': self.endpoint_name}, **{'model_kwargs':
_model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    params = {'endpoint_name': self.endpoint_name}
    # Fall back to an empty dict when no model kwargs were configured.
    params['model_kwargs'] = self.model_kwargs or {}
    return params
|
Get the identifying parameters.
|
on_chain_end
|
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
handle_event(self.handlers, 'on_chain_end', 'ignore_chain', outputs, run_id
=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs)
|
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
    ) ->None:
    """Run when chain ends running.

    Args:
        outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
    """
    # Forward the event to every registered handler with this run's
    # identity (run_id, parent_run_id, tags) attached.
    handle_event(self.handlers, 'on_chain_end', 'ignore_chain', outputs,
        run_id=self.run_id, parent_run_id=self.parent_run_id,
        tags=self.tags, **kwargs)
|
Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
|
__init__
|
"""Initialize with bucket and key name.
:param bucket: The name of the S3 bucket.
:param prefix: The prefix of the S3 key. Defaults to "".
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:param use_ssl: Whether to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:param verify: Whether to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
uses. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type boto_config: botocore.client.Config
:param boto_config: Advanced boto3 client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
"""
self.bucket = bucket
self.prefix = prefix
self.region_name = region_name
self.api_version = api_version
self.use_ssl = use_ssl
self.verify = verify
self.endpoint_url = endpoint_url
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
self.boto_config = boto_config
|
def __init__(self, bucket: str, prefix: str='', *, region_name: Optional[
    str]=None, api_version: Optional[str]=None, use_ssl: Optional[bool]=
    True, verify: Union[str, bool, None]=None, endpoint_url: Optional[str]=
    None, aws_access_key_id: Optional[str]=None, aws_secret_access_key:
    Optional[str]=None, aws_session_token: Optional[str]=None, boto_config:
    Optional[botocore.client.Config]=None):
    """Initialize with bucket and key name.
    :param bucket: The name of the S3 bucket.
    :param prefix: The prefix of the S3 key. Defaults to "".
    :param region_name: The name of the region associated with the client.
        A client is associated with a single region.
    :param api_version: The API version to use.  By default, botocore will
        use the latest API version when creating a client.  You only need
        to specify this parameter if you want to use a previous API version
        of the client.
    :param use_ssl: Whether to use SSL.  By default, SSL is used.
        Note that not all services support non-ssl connections.
    :param verify: Whether to verify SSL certificates.
        By default SSL certificates are verified.  You can provide the
        following values:
        * False - do not validate SSL certificates.  SSL will still be
            used (unless use_ssl is False), but SSL certificates
            will not be verified.
        * path/to/cert/bundle.pem - A filename of the CA cert bundle to
            use.  You can specify this argument if you want to use a
            different CA cert bundle than the one used by botocore.
    :param endpoint_url: The complete URL to use for the constructed
        client. Normally, botocore will automatically construct the
        appropriate URL to use when communicating with a service.  You can
        specify a complete URL (including the "http/https" scheme) to
        override this behavior.  If this value is provided, then
        ``use_ssl`` is ignored.
    :param aws_access_key_id: The access key to use when creating
        the client.  This is entirely optional, and if not provided,
        the credentials configured for the session will automatically
        be used.  You only need to provide this argument if you want
        to override the credentials used for this specific client.
    :param aws_secret_access_key: The secret key to use when creating
        the client.  Same semantics as aws_access_key_id above.
    :param aws_session_token: The session token to use when creating
        the client.  Same semantics as aws_access_key_id above.
    :type boto_config: botocore.client.Config
    :param boto_config: Advanced boto3 client configuration options. If a value
        is specified in the client config, its value will take precedence
        over environment variables and configuration values, but not over
        a value passed explicitly to the method. If a default config
        object is set on the session, the config object used when creating
        the client will be the result of calling ``merge()`` on the
        default config with the config provided to this call.
    """
    # Store the connection/configuration options verbatim; presumably they
    # are consumed later when the boto3 client is built — confirm against
    # the loader's load method.
    self.bucket = bucket
    self.prefix = prefix
    self.region_name = region_name
    self.api_version = api_version
    self.use_ssl = use_ssl
    self.verify = verify
    self.endpoint_url = endpoint_url
    self.aws_access_key_id = aws_access_key_id
    self.aws_secret_access_key = aws_secret_access_key
    self.aws_session_token = aws_session_token
    self.boto_config = boto_config
|
Initialize with bucket and key name.
:param bucket: The name of the S3 bucket.
:param prefix: The prefix of the S3 key. Defaults to "".
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:param use_ssl: Whether to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:param verify: Whether to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
        * path/to/cert/bundle.pem - A filename of the CA cert bundle to
            use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type boto_config: botocore.client.Config
:param boto_config: Advanced boto3 client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
|
max_marginal_relevance_search_by_vector
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter,
**kwargs)
return [doc for doc, _ in docs_and_scores]
|
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
    int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[Dict[
    str, Any]]=None, **kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    MMR balances similarity to the query against diversity among the
    selected documents.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents fetched before MMR re-ranking.
        lambda_mult: Diversity weight in [0, 1]; 0 means maximum
            diversity, 1 means minimum diversity. Defaults to 0.5.
        filter: Optional metadata filter forwarded to the scored search.

    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    scored_docs = self.max_marginal_relevance_search_with_score_by_vector(
        embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult,
        filter=filter, **kwargs)
    # Drop the scores; callers of this variant only want the documents.
    return [doc for doc, _score in scored_docs]
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
|
test_marqo_search
|
marqo_search = Marqo(client=client, index_name=INDEX_NAME)
input_documents = ['This is document 1', '2', '3']
ids = marqo_search.add_texts(input_documents)
results = marqo_search.marqo_similarity_search('What is the first document?',
k=3)
assert len(ids) == len(input_documents)
assert ids[0] == results['hits'][0]['_id']
|
def test_marqo_search(client: Marqo) ->None:
    """Added ids line up with the texts, and the top hit is the first doc."""
    store = Marqo(client=client, index_name=INDEX_NAME)
    docs = ['This is document 1', '2', '3']
    added_ids = store.add_texts(docs)
    hits = store.marqo_similarity_search('What is the first document?', k=3)
    assert len(added_ids) == len(docs)
    assert added_ids[0] == hits['hits'][0]['_id']
| null |
test_visit_comparison
|
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1', '2'])
expected = 'and(metadata->>foo.lt.1,metadata->>foo.lt.2)'
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison() ->None:
    """A list-valued LT comparison expands into an and() of per-item tests."""
    comparison = Comparison(comparator=Comparator.LT, attribute='foo',
        value=['1', '2'])
    result = DEFAULT_TRANSLATOR.visit_comparison(comparison)
    assert result == 'and(metadata->>foo.lt.1,metadata->>foo.lt.2)'
| null |
_convert_prompt_msg_params
|
if 'streaming' in kwargs:
kwargs['stream'] = kwargs.pop('streaming')
return {**{'prompt': prompt, 'model': self.model}, **self._default_params,
**kwargs}
|
def _convert_prompt_msg_params(self, prompt: str, **kwargs: Any) ->dict:
    """Assemble the request payload, normalizing ``streaming`` -> ``stream``."""
    if 'streaming' in kwargs:
        kwargs['stream'] = kwargs.pop('streaming')
    # Later updates win: defaults override the base pair, kwargs override all.
    params = {'prompt': prompt, 'model': self.model}
    params.update(self._default_params)
    params.update(kwargs)
    return params
| null |
similarity_search_with_score_by_vector
|
results: List[dict[str, Any]] = self.storage.search(embedding, k=k,
select_columns=['document'], metadata_filters=filter)
docs = [(Document(page_content=result['document'], metadata=result[
'metadata']), result['distance']) for result in results]
return docs
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, filter: Optional[dict]=None) ->List[Tuple[Document, float]]:
    """Search the backing storage by vector; return (Document, distance) pairs."""
    hits: List[dict[str, Any]] = self.storage.search(embedding, k=k,
        select_columns=['document'], metadata_filters=filter)
    return [(Document(page_content=hit['document'], metadata=hit[
        'metadata']), hit['distance']) for hit in hits]
| null |
test_sim_search_by_vector
|
"""Test end to end construction and similarity search by vector."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(),
work_dir=str(tmp_path), n_dim=10, dist_metric=metric)
embedding = [1.0] * 10
output = hnsw_vec_store.similarity_search_by_vector(embedding, k=1)
assert output == [Document(page_content='bar')]
|
@pytest.mark.parametrize('metric', ['cosine', 'l2'])
def test_sim_search_by_vector(metric: str, texts: List[str], tmp_path: Path
    ) ->None:
    """Test end to end construction and similarity search by vector."""
    store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(),
        work_dir=str(tmp_path), n_dim=10, dist_metric=metric)
    query_vector = [1.0] * 10
    docs = store.similarity_search_by_vector(query_vector, k=1)
    assert docs == [Document(page_content='bar')]
|
Test end to end construction and similarity search by vector.
|
_run
|
"""Use the tool."""
try:
speech_file = self._text2speech(query, self.speech_language)
return speech_file
except Exception as e:
raise RuntimeError(f'Error while running AzureCogsText2SpeechTool: {e}')
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the tool."""
    # Any synthesis failure is surfaced as a RuntimeError with context.
    try:
        return self._text2speech(query, self.speech_language)
    except Exception as e:
        raise RuntimeError(f'Error while running AzureCogsText2SpeechTool: {e}'
            )
|
Use the tool.
|
ignore_retry
|
"""Whether to ignore retry callbacks."""
return False
|
@property
def ignore_retry(self) ->bool:
    """Whether to ignore retry callbacks."""
    # Retry callbacks are always delivered for this handler.
    return False
|
Whether to ignore retry callbacks.
|
validate_model_training_status
|
if self.model_training_status != 'training_complete':
raise Exception(
f'Model {self.model_id} is not ready. Please wait for training to complete.'
)
|
def validate_model_training_status(self) ->None:
    """Raise when the remote model has not finished training yet."""
    if self.model_training_status == 'training_complete':
        return
    raise Exception(
        f'Model {self.model_id} is not ready. Please wait for training to complete.'
        )
| null |
_select_relevance_score_fn
|
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn:
return self.override_relevance_score_fn
distance = 'l2'
distance_key = 'hnsw:space'
metadata = self._collection.metadata
if metadata and distance_key in metadata:
distance = metadata[distance_key]
if distance == 'cosine':
return self._cosine_relevance_score_fn
elif distance == 'l2':
return self._euclidean_relevance_score_fn
elif distance == 'ip':
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
f'No supported normalization function for distance metric of type: {distance}.Consider providing relevance_score_fn to Chroma constructor.'
)
|
def _select_relevance_score_fn(self) ->Callable[[float], float]:
    """Pick the relevance-normalization function for this collection.

    The 'correct' relevance function depends on the distance metric used by
    the vector store, the scale of the embeddings, their dimensionality, etc.
    An explicit override always wins; otherwise the metric stored in the
    collection metadata under ``hnsw:space`` decides (defaulting to ``l2``).
    """
    if self.override_relevance_score_fn:
        return self.override_relevance_score_fn
    metadata = self._collection.metadata
    distance = 'l2'
    if metadata and 'hnsw:space' in metadata:
        distance = metadata['hnsw:space']
    if distance == 'cosine':
        return self._cosine_relevance_score_fn
    if distance == 'l2':
        return self._euclidean_relevance_score_fn
    if distance == 'ip':
        return self._max_inner_product_relevance_score_fn
    raise ValueError(
        f'No supported normalization function for distance metric of type: {distance}.Consider providing relevance_score_fn to Chroma constructor.'
        )
|
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
|
test_func_call_invalid
|
parser = OpenAIFunctionsAgentOutputParser()
msg = AIMessage(content='LLM thoughts.', additional_kwargs={'function_call':
{'name': 'foo', 'arguments': '{42]'}})
err = (
"Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} because the `arguments` is not valid JSON."
)
with pytest.raises(OutputParserException, match=err):
parser.invoke(msg)
|
def test_func_call_invalid() ->None:
    """Malformed `arguments` JSON surfaces as OutputParserException."""
    message = AIMessage(content='LLM thoughts.', additional_kwargs={
        'function_call': {'name': 'foo', 'arguments': '{42]'}})
    expected_err = (
        "Could not parse tool input: {'name': 'foo', 'arguments': '{42]'} because the `arguments` is not valid JSON."
        )
    with pytest.raises(OutputParserException, match=expected_err):
        OpenAIFunctionsAgentOutputParser().invoke(message)
| null |
on_retry_common
|
self.retries += 1
|
def on_retry_common(self) ->None:
    """Count one retry event."""
    self.retries = self.retries + 1
| null |
get_description
|
template: str = (
'Useful for when you need to answer questions about {name}. Whenever you need information about {description} you should ALWAYS use this. Input should be a fully formed question.'
)
return template.format(name=name, description=description)
|
@staticmethod
def get_description(name: str, description: str) ->str:
    """Render the standard tool description for ``name``/``description``."""
    return ('Useful for when you need to answer questions about '
        f'{name}. Whenever you need information about {description}'
        ' you should ALWAYS use this. Input should be a fully formed'
        ' question.')
| null |
tfds_client
|
return TensorflowDatasets(dataset_name='mlqa/en', split_name='test',
load_max_docs=MAX_DOCS, sample_to_document_function=
mlqaen_example_to_document)
|
@pytest.fixture
def tfds_client() ->TensorflowDatasets:
    """Loader fixture over the English MLQA test split, capped at MAX_DOCS."""
    return TensorflowDatasets(dataset_name='mlqa/en', split_name='test',
        load_max_docs=MAX_DOCS,
        sample_to_document_function=mlqaen_example_to_document)
| null |
_get_metadata
|
return {'source': self.file_path}
|
def _get_metadata(self) ->dict:
    """Metadata attached to emitted documents: the source file path."""
    return dict(source=self.file_path)
| null |
test_from_filesystem_classmethod_with_glob
|
"""Test that glob parameter is taken into account."""
loader = GenericLoader.from_filesystem(toy_dir, glob='*.txt', parser=
AsIsParser())
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == 'This is a test.txt file.'
|
def test_from_filesystem_classmethod_with_glob(toy_dir: str) ->None:
    """Test that glob parameter is taken into account."""
    docs = GenericLoader.from_filesystem(toy_dir, glob='*.txt',
        parser=AsIsParser()).load()
    assert len(docs) == 1
    assert docs[0].page_content == 'This is a test.txt file.'
|
Test that glob parameter is taken into account.
|
__hash__
|
return hash((self.id, tuple(self.options.keys()), tuple(self.default)))
|
def __hash__(self) ->int:
    """Hash on the id plus immutable views of the options and defaults."""
    key = (self.id, tuple(self.options.keys()), tuple(self.default))
    return hash(key)
| null |
_llm_type
|
"""Return type of llm."""
return 'baidu-qianfan-endpoint'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    # Constant identifier for this provider; presumably used by LangChain
    # serialization/telemetry — confirm against BaseLLM usage.
    return 'baidu-qianfan-endpoint'
|
Return type of llm.
|
_get_relevant_documents
|
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
structured_query = self.query_constructor.invoke({'query': query}, config={
'callbacks': run_manager.get_child()})
if self.verbose:
logger.info(f'Generated Query: {structured_query}')
new_query, search_kwargs = self._prepare_query(query, structured_query)
docs = self._get_docs_with_query(new_query, search_kwargs)
return docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Get documents relevant for a query.

    Args:
        query: string to find relevant documents for

    Returns:
        List of relevant documents
    """
    # Let the query constructor turn the free-text query into a
    # structured query, threading callbacks through the run manager.
    structured_query = self.query_constructor.invoke({'query': query},
        config={'callbacks': run_manager.get_child()})
    if self.verbose:
        logger.info(f'Generated Query: {structured_query}')
    new_query, search_kwargs = self._prepare_query(query, structured_query)
    return self._get_docs_with_query(new_query, search_kwargs)
|
Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
|
from_texts
|
"""Create a vector store from a list of texts."""
raise NotImplementedError()
|
@classmethod
def from_texts(cls: Type[VST], texts: List[str], embedding: Embeddings,
    metadatas: Optional[List[Dict[Any, Any]]]=None, **kwargs: Any) ->VST:
    """Create a vector store from a list of texts."""
    # Deliberately unsupported for this store.
    raise NotImplementedError()
|
Create a vector store from a list of texts.
|
check_code
|
try:
code_obj = PythonCode(code=code)
return f"""# LGTM
# use the `submit` tool to submit this code:
```python
{code_obj.code}
```"""
except ValidationError as e:
return e.errors()[0]['msg']
|
def check_code(code: str) ->str:
    """Validate ``code`` via the PythonCode model.

    Returns an LGTM snippet on success, or the first validation error message.
    """
    try:
        validated = PythonCode(code=code)
    except ValidationError as e:
        return e.errors()[0]['msg']
    return f"""# LGTM
# use the `submit` tool to submit this code:
```python
{validated.code}
```"""
| null |
test_hologres_embeddings
|
"""Test end to end construction with embeddings and search."""
texts = ['foo', 'bar', 'baz']
text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Hologres.from_embeddings(text_embeddings=text_embedding_pairs,
table_name='test_table', embedding=FakeEmbeddingsWithAdaDimension(),
connection_string=CONNECTION_STRING, pre_delete_table=True)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_hologres_embeddings() ->None:
    """Test end to end construction with embeddings and search."""
    texts = ['foo', 'bar', 'baz']
    vectors = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
    pairs = list(zip(texts, vectors))
    store = Hologres.from_embeddings(text_embeddings=pairs,
        table_name='test_table',
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING, pre_delete_table=True)
    assert store.similarity_search('foo', k=1) == [Document(page_content='foo')
        ]
|
Test end to end construction with embeddings and search.
|
test_prompt_jinja2_missing_input_variables
|
"""Test error is raised when input variables are not provided."""
template = 'This is a {{ foo }} test.'
input_variables: list = []
with pytest.warns(UserWarning):
PromptTemplate(input_variables=input_variables, template=template,
template_format='jinja2', validate_template=True)
assert PromptTemplate(input_variables=input_variables, template=template,
template_format='jinja2').input_variables == ['foo']
|
@pytest.mark.requires('jinja2')
def test_prompt_jinja2_missing_input_variables() ->None:
    """Test error is raised when input variables are not provided."""
    template = 'This is a {{ foo }} test.'
    input_variables: list = []
    # Validation should warn about the undeclared 'foo' variable...
    with pytest.warns(UserWarning):
        PromptTemplate(input_variables=input_variables, template=template,
            template_format='jinja2', validate_template=True)
    # ...and without validation the variable is inferred from the template.
    prompt = PromptTemplate(input_variables=input_variables,
        template=template, template_format='jinja2')
    assert prompt.input_variables == ['foo']
|
Test error is raised when input variables are not provided.
|
vector_store
|
"""Vector store fixture."""
return InMemoryVectorStore()
|
@pytest.fixture
def vector_store() ->InMemoryVectorStore:
    """Provide a fresh in-memory vector store for each test."""
    store = InMemoryVectorStore()
    return store
|
Vector store fixture.
|
from_connection_string
|
"""Construct a `MongoDB Atlas Vector Search` vector store
from a MongoDB connection URI.
Args:
connection_string: A valid MongoDB connection URI.
namespace: A valid MongoDB namespace (database and collection).
embedding: The text embedding model to use for the vector store.
Returns:
A new MongoDBAtlasVectorSearch instance.
"""
try:
from importlib.metadata import version
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo
except ImportError:
raise ImportError(
'Could not import pymongo, please install it with `pip install pymongo`.'
)
client: MongoClient = MongoClient(connection_string, driver=DriverInfo(name
='Langchain', version=version('langchain')))
db_name, collection_name = namespace.split('.')
collection = client[db_name][collection_name]
return cls(collection, embedding, **kwargs)
|
@classmethod
def from_connection_string(cls, connection_string: str, namespace: str,
    embedding: Embeddings, **kwargs: Any) ->MongoDBAtlasVectorSearch:
    """Construct a `MongoDB Atlas Vector Search` vector store
    from a MongoDB connection URI.

    Args:
        connection_string: A valid MongoDB connection URI.
        namespace: A valid MongoDB namespace (database and collection).
        embedding: The text embedding model to use for the vector store.

    Returns:
        A new MongoDBAtlasVectorSearch instance.
    """
    try:
        from importlib.metadata import version
        from pymongo import MongoClient
        from pymongo.driver_info import DriverInfo
    except ImportError:
        raise ImportError(
            'Could not import pymongo, please install it with `pip install pymongo`.'
            )
    # Tag the connection so MongoDB can attribute traffic to LangChain.
    driver = DriverInfo(name='Langchain', version=version('langchain'))
    client: MongoClient = MongoClient(connection_string, driver=driver)
    database_name, collection_name = namespace.split('.')
    collection = client[database_name][collection_name]
    return cls(collection, embedding, **kwargs)
|
Construct a `MongoDB Atlas Vector Search` vector store
from a MongoDB connection URI.
Args:
connection_string: A valid MongoDB connection URI.
namespace: A valid MongoDB namespace (database and collection).
embedding: The text embedding model to use for the vector store.
Returns:
A new MongoDBAtlasVectorSearch instance.
|
get_random_news_url
|
from bs4 import BeautifulSoup
response = requests.get('https://news.google.com')
soup = BeautifulSoup(response.text, 'html.parser')
article_links = [a['href'] for a in soup.find_all('a', href=True) if
'/articles/' in a['href']]
random_article_link = random.choice(article_links)
return 'https://news.google.com' + random_article_link
|
def get_random_news_url() ->str:
    """Return the absolute URL of a randomly chosen Google News article.

    Scrapes the Google News homepage, collects every anchor whose href
    contains ``/articles/``, and picks one at random.

    Returns:
        An absolute URL rooted at ``https://news.google.com``.

    Raises:
        ValueError: If no article links are found on the page.
        requests.RequestException: On network failure or timeout.
    """
    from bs4 import BeautifulSoup
    # Bound the request so a hung connection cannot block the caller forever.
    response = requests.get('https://news.google.com', timeout=10)
    soup = BeautifulSoup(response.text, 'html.parser')
    article_links = [a['href'] for a in soup.find_all('a', href=True) if
        '/articles/' in a['href']]
    # Fail loudly instead of letting random.choice raise a bare IndexError.
    if not article_links:
        raise ValueError('No article links found on the Google News homepage.')
    random_article_link = random.choice(article_links)
    return 'https://news.google.com' + random_article_link
| null |
test__get_relevant_documents
|
relevant_documents = fake_self_query_retriever._get_relevant_documents('foo',
run_manager=CallbackManagerForRetrieverRun.get_noop_manager())
assert len(relevant_documents) == 1
assert relevant_documents[0].metadata['foo'] == 'bar'
|
def test__get_relevant_documents(fake_self_query_retriever: SelfQueryRetriever
    ) ->None:
    """The fake retriever yields exactly one doc with the expected metadata."""
    docs = fake_self_query_retriever._get_relevant_documents('foo',
        run_manager=CallbackManagerForRetrieverRun.get_noop_manager())
    assert len(docs) == 1
    assert docs[0].metadata['foo'] == 'bar'
| null |
_parse_threads
|
results = []
for thread in threads:
thread_id = thread['id']
thread_data = self.api_resource.users().threads().get(userId='me', id=
thread_id).execute()
messages = thread_data['messages']
thread['messages'] = []
for message in messages:
snippet = message['snippet']
thread['messages'].append({'snippet': snippet, 'id': message['id']})
results.append(thread)
return results
|
def _parse_threads(self, threads: List[Dict[str, Any]]) ->List[Dict[str, Any]]:
    """Attach message snippets (fetched per thread from the API) to each thread."""
    parsed = []
    for thread in threads:
        data = self.api_resource.users().threads().get(userId='me',
            id=thread['id']).execute()
        # Each thread dict is updated in place with its message snippets.
        thread['messages'] = [{'snippet': message['snippet'], 'id':
            message['id']} for message in data['messages']]
        parsed.append(thread)
    return parsed
| null |
similarity_search_with_score
|
query_vector = self._embedding.embed_query(query)
return self.similarity_search_with_score_by_vector(query_vector, k,
distance_func, **kwargs)
|
def similarity_search_with_score(self, query: str, k: int=4, distance_func:
    Literal['sqrt_euclid', 'neg_dot_prod', 'ned_cos']='sqrt_euclid', **
    kwargs: Any) ->List[Tuple[Document, float]]:
    """Embed ``query`` and delegate to the vector-based scored search."""
    embedded_query = self._embedding.embed_query(query)
    return self.similarity_search_with_score_by_vector(embedded_query, k,
        distance_func, **kwargs)
| null |
__init__
|
self.task = task
self.id = id
self.dep = dep
self.args = args
self.tool = tool
|
def __init__(self, task: str, id: int, dep: List[int], args: Dict[str, str],
    tool: BaseTool):
    """Store one plan step: description, id, dependency ids, tool args, tool."""
    self.id = id
    self.task = task
    self.tool = tool
    self.dep = dep
    self.args = args
| null |
add_documents
|
"""Add documents to vectorstore."""
return self.vectorstore.add_documents(documents, **kwargs)
|
def add_documents(self, documents: List[Document], **kwargs: Any) ->List[str]:
    """Add documents to vectorstore."""
    # Pure delegation; the wrapped store assigns and returns the ids.
    store = self.vectorstore
    return store.add_documents(documents, **kwargs)
|
Add documents to vectorstore.
|
test_chat_google_genai_batch
|
"""Test batch tokens from ChatGoogleGenerativeAI."""
llm = ChatGoogleGenerativeAI(model=_MODEL)
result = llm.batch(["This is a test. Say 'foo'", "This is a test, say 'bar'"])
for token in result:
assert isinstance(token.content, str)
|
def test_chat_google_genai_batch() ->None:
    """Test batch tokens from ChatGoogleGenerativeAI."""
    llm = ChatGoogleGenerativeAI(model=_MODEL)
    responses = llm.batch(["This is a test. Say 'foo'",
        "This is a test, say 'bar'"])
    for response in responses:
        assert isinstance(response.content, str)
|
Test batch tokens from ChatGoogleGenerativeAI.
|
check_spacy_model
|
import spacy
if not spacy.util.is_package('en_core_web_lg'):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
|
@pytest.fixture(scope='module', autouse=True)
def check_spacy_model() ->Iterator[None]:
    """Skip every test in this module unless the spaCy model is available."""
    # Imported lazily so collection works even without spaCy installed.
    import spacy
    if not spacy.util.is_package('en_core_web_lg'):
        pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
    yield
| null |
test_pymupdf_loader
|
"""Test PyMuPDF loader."""
_assert_with_parser(PyMuPDFParser())
|
def test_pymupdf_loader() ->None:
    """Test PyMuPDF loader."""
    parser = PyMuPDFParser()
    _assert_with_parser(parser)
|
Test PyMuPDF loader.
|
test_given_a_connection_is_provided_then_no_engine_should_be_created
|
"""When a connection is provided then no engine should be created."""
pgvector.PGVector(connection_string=_CONNECTION_STRING, embedding_function=
_EMBEDDING_FUNCTION, connection=mock.MagicMock())
create_engine.assert_not_called()
|
@pytest.mark.requires('pgvector')
@mock.patch('sqlalchemy.create_engine')
def test_given_a_connection_is_provided_then_no_engine_should_be_created(
    create_engine: Mock) ->None:
    """When a connection is provided then no engine should be created."""
    connection = mock.MagicMock()
    pgvector.PGVector(connection_string=_CONNECTION_STRING,
        embedding_function=_EMBEDDING_FUNCTION, connection=connection)
    create_engine.assert_not_called()
|
When a connection is provided then no engine should be created.
|
check_examples_and_selector
|
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get('examples', None)
example_selector = values.get('example_selector', None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided")
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided")
return values
|
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) ->Dict:
    """Check that one and only one of examples/example_selector are provided."""
    examples = values.get('examples')
    example_selector = values.get('example_selector')
    # Both truthy -> ambiguous configuration.
    if examples and example_selector:
        raise ValueError(
            "Only one of 'examples' and 'example_selector' should be provided")
    # Both absent -> nothing to build the prompt from.
    if examples is None and example_selector is None:
        raise ValueError(
            "One of 'examples' and 'example_selector' should be provided")
    return values
|
Check that one and only one of examples/example_selector are provided.
|
on_tool_start
|
"""Run when tool starts running."""
self.metrics['step'] += 1
self.metrics['tool_starts'] += 1
self.metrics['starts'] += 1
tool_starts = self.metrics['tool_starts']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_tool_start', 'input_str': input_str})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.jsonf(resp, self.temp_dir, f'tool_start_{tool_starts}')
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
    kwargs: Any) ->None:
    """Run when tool starts running."""
    self.metrics['step'] += 1
    self.metrics['tool_starts'] += 1
    self.metrics['starts'] += 1
    # Read the counter after incrementing so the file name matches this call.
    count = self.metrics['tool_starts']
    resp: Dict[str, Any] = {'action': 'on_tool_start', 'input_str': input_str}
    resp.update(flatten_dict(serialized))
    resp.update(self.metrics)
    self.jsonf(resp, self.temp_dir, f'tool_start_{count}')
|
Run when tool starts running.
|
__init__
|
self._schema = schema
|
def __init__(self, schema: RedisModel) ->None:
    """Keep a private reference to the Redis schema."""
    self._schema = schema
| null |
on_llm_error
|
"""Do nothing when LLM outputs an error."""
pass
|
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """Do nothing when LLM outputs an error."""
|
Do nothing when LLM outputs an error.
|
parse_with_prompt
|
"""Parse the output of an LLM call with the input prompt for context.
The prompt is largely provided in the event the OutputParser wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Args:
completion: String output of a language model.
prompt: Input PromptValue.
Returns:
Structured output
"""
return self.parse(completion)
|
def parse_with_prompt(self, completion: str, prompt: PromptValue) ->Any:
    """Parse the output of an LLM call with the input prompt for context.
    The prompt is largely provided in the event the OutputParser wants
    to retry or fix the output in some way, and needs information from
    the prompt to do so.
    Args:
        completion: String output of a language model.
        prompt: Input PromptValue.
    Returns:
        Structured output
    """
    # Base implementation intentionally ignores the prompt and defers to
    # ``parse``; subclasses that need prompt context override this method.
    return self.parse(completion)
|
Parse the output of an LLM call with the input prompt for context.
The prompt is largely provided in the event the OutputParser wants
to retry or fix the output in some way, and needs information from
the prompt to do so.
Args:
completion: String output of a language model.
prompt: Input PromptValue.
Returns:
Structured output
|
test_vald_search_with_score
|
"""Test end to end construction and search with scores."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [Document(page_content='foo'), Document(page_content='bar'),
Document(page_content='baz')]
assert scores[0] < scores[1] < scores[2]
|
def test_vald_search_with_score() ->None:
    """Test end to end construction and search with scores."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = _vald_from_texts(metadatas=metadatas)
    # Give the index time to finish building before querying.
    time.sleep(WAIT_TIME)
    results = docsearch.similarity_search_with_score('foo', k=3)
    docs = [pair[0] for pair in results]
    scores = [pair[1] for pair in results]
    assert docs == [Document(page_content='foo'), Document(page_content=
        'bar'), Document(page_content='baz')]
    assert scores[0] < scores[1] < scores[2]
|
Test end to end construction and search with scores.
|
__init__
|
self.nua = nua
|
def __init__(self, nua: NucliaUnderstandingAPI):
    """Store the Nuclia Understanding API client for later calls."""
    self.nua = nua
| null |
test_partial_init_func
|
"""Test prompt can be initialized with partial variables."""
template = 'This is a {foo} test.'
prompt = PromptTemplate(input_variables=[], template=template,
partial_variables={'foo': lambda : 2})
assert prompt.template == template
assert prompt.input_variables == []
result = prompt.format()
assert result == 'This is a 2 test.'
|
def test_partial_init_func() ->None:
    """Test prompt can be initialized with partial variables."""
    template = 'This is a {foo} test.'
    prompt = PromptTemplate(input_variables=[], template=template,
        partial_variables={'foo': lambda : 2})
    assert prompt.template == template
    assert prompt.input_variables == []
    # The callable partial is invoked at format time.
    assert prompt.format() == 'This is a 2 test.'
|
Test prompt can be initialized with partial variables.
|
test_list_raises_other_error
|
"""Test that a valid error is raised when an unknown HTTP Error occurs."""
mock_response = MagicMock()
mock_response.status_code = 404
mock_response.raise_for_status.side_effect = requests.HTTPError(
'404 Client Error: Not found for url', response=mock_response)
mock_session = MagicMock()
mock_session.get.return_value = mock_response
with patch('requests.Session', return_value=mock_session):
wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token='test')
with pytest.raises(requests.HTTPError) as err:
wrapper.list()
assert str(err.value) == '404 Client Error: Not found for url'
|
def test_list_raises_other_error() ->None:
    """Test that a valid error is raised when an unknown HTTP Error occurs."""
    resp_mock = MagicMock()
    resp_mock.status_code = 404
    resp_mock.raise_for_status.side_effect = requests.HTTPError(
        '404 Client Error: Not found for url', response=resp_mock)
    session_mock = MagicMock()
    session_mock.get.return_value = resp_mock
    with patch('requests.Session', return_value=session_mock):
        wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token='test')
        with pytest.raises(requests.HTTPError) as excinfo:
            wrapper.list()
        assert str(excinfo.value) == '404 Client Error: Not found for url'
|
Test that a valid error is raised when an unknown HTTP Error occurs.
|
_get_memorize
|
return Memorize(llm=llm)
|
def _get_memorize(llm: BaseLanguageModel, **kwargs: Any) ->BaseTool:
    """Build a Memorize tool backed by the given language model."""
    return Memorize(llm=llm)
| null |
input_keys
|
"""Return the singular input key.
:meta private:
"""
return self.llm_chain.prompt.input_variables
|
@property
def input_keys(self) ->List[str]:
    """Return the singular input key.
    :meta private:
    """
    prompt = self.llm_chain.prompt
    return prompt.input_variables
|
Return the singular input key.
:meta private:
|
test_annoy_local_save_load
|
"""Test end to end serialization."""
texts = ['foo', 'bar', 'baz']
docsearch = Annoy.from_texts(texts, FakeEmbeddings())
temp_dir = tempfile.TemporaryDirectory()
docsearch.save_local(temp_dir.name)
loaded_docsearch = Annoy.load_local(temp_dir.name, FakeEmbeddings())
assert docsearch.index_to_docstore_id == loaded_docsearch.index_to_docstore_id
assert docsearch.docstore.__dict__ == loaded_docsearch.docstore.__dict__
assert loaded_docsearch.index is not None
|
def test_annoy_local_save_load() ->None:
    """Test end to end serialization.

    Saves an Annoy index to a temporary directory, reloads it, and checks
    the docstore mapping and index survive the round trip.
    """
    texts = ['foo', 'bar', 'baz']
    docsearch = Annoy.from_texts(texts, FakeEmbeddings())
    # Clean up deterministically: the original relied on GC finalizing the
    # TemporaryDirectory object, which can leave the directory behind.
    temp_dir = tempfile.TemporaryDirectory()
    try:
        docsearch.save_local(temp_dir.name)
        loaded_docsearch = Annoy.load_local(temp_dir.name, FakeEmbeddings())
        assert docsearch.index_to_docstore_id == loaded_docsearch.index_to_docstore_id
        assert docsearch.docstore.__dict__ == loaded_docsearch.docstore.__dict__
        assert loaded_docsearch.index is not None
    finally:
        temp_dir.cleanup()
|
Test end to end serialization.
|
_get_relevant_documents
|
try:
from sklearn import svm
except ImportError:
raise ImportError(
'Could not import scikit-learn, please install with `pip install scikit-learn`.'
)
query_embeds = np.array(self.embeddings.embed_query(query))
x = np.concatenate([query_embeds[None, ...], self.index])
y = np.zeros(x.shape[0])
y[0] = 1
clf = svm.LinearSVC(class_weight='balanced', verbose=False, max_iter=10000,
tol=1e-06, C=0.1)
clf.fit(x, y)
similarities = clf.decision_function(x)
sorted_ix = np.argsort(-similarities)
zero_index = np.where(sorted_ix == 0)[0][0]
if zero_index != 0:
sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]
denominator = np.max(similarities) - np.min(similarities) + 1e-06
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = []
for row in sorted_ix[1:self.k + 1]:
if self.relevancy_threshold is None or normalized_similarities[row
] >= self.relevancy_threshold:
metadata = self.metadatas[row - 1] if self.metadatas else {}
doc = Document(page_content=self.texts[row - 1], metadata=metadata)
top_k_results.append(doc)
return top_k_results
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Rank the stored texts against ``query`` with a linear SVM.

    The query embedding (label 1) is stacked on top of the pre-computed
    index embeddings (label 0) and a LinearSVC is fit; its decision-function
    margins then serve as relevance scores.

    Args:
        query: Text to embed and rank the stored documents against.
        run_manager: Callback manager for this retriever run (unused here).

    Returns:
        Up to ``self.k`` documents; when ``self.relevancy_threshold`` is set,
        only those whose min-max-normalized score meets it.
    """
    try:
        from sklearn import svm
    except ImportError:
        raise ImportError(
            'Could not import scikit-learn, please install with `pip install scikit-learn`.'
            )
    query_embeds = np.array(self.embeddings.embed_query(query))
    # Row 0 is the query itself; rows 1.. are the stored index embeddings.
    x = np.concatenate([query_embeds[None, ...], self.index])
    y = np.zeros(x.shape[0])
    y[0] = 1
    clf = svm.LinearSVC(class_weight='balanced', verbose=False, max_iter=
        10000, tol=1e-06, C=0.1)
    clf.fit(x, y)
    # Signed distance to the separating hyperplane is the relevance score.
    similarities = clf.decision_function(x)
    sorted_ix = np.argsort(-similarities)
    # Force the query row to sort first even if another row tied or beat it.
    zero_index = np.where(sorted_ix == 0)[0][0]
    if zero_index != 0:
        sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[
            0]
    # Min-max normalize scores into [0, 1]; epsilon guards division by zero.
    denominator = np.max(similarities) - np.min(similarities) + 1e-06
    normalized_similarities = (similarities - np.min(similarities)
        ) / denominator
    top_k_results = []
    # Skip position 0 (the query row); ``row - 1`` maps back into self.texts.
    for row in sorted_ix[1:self.k + 1]:
        if self.relevancy_threshold is None or normalized_similarities[row
            ] >= self.relevancy_threshold:
            metadata = self.metadatas[row - 1] if self.metadatas else {}
            doc = Document(page_content=self.texts[row - 1], metadata=metadata)
            top_k_results.append(doc)
    return top_k_results
| null |
output_keys
|
"""Keys expected to be in the chain output."""
|
@property
@abstractmethod
def output_keys(self) ->List[str]:
    """Keys expected to be in the chain output.

    Abstract: concrete subclasses must override this property.
    """
|
Keys expected to be in the chain output.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.