method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
max_marginal_relevance_search_by_vector
|
"""Return docs selected using the maximal marginal relevance
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
    embedding (List[float]): Embedding vector to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter,
**kwargs)
return _results_to_docs(docs_and_scores)
|
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
    int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[Dict[
    str, str]]=None, **kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance
    to embedding vector.
    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.
    Args:
        embedding (List[float]): Embedding vector to look up documents
            similar to.
        k (int): Number of Documents to return. Defaults to 4.
        fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
            Defaults to 20.
        lambda_mult (float): Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
    Returns:
        List[Document]: List of Documents selected by maximal marginal relevance.
    """
    docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
        embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=
        filter, **kwargs)
    return _results_to_docs(docs_and_scores)
|
Return docs selected using the maximal marginal relevance
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding (List[float]): Embedding vector to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
|
_make_request
|
try:
import grpc
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import TextEmbeddingRequest
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import EmbeddingsServiceStub
except ImportError as e:
raise ImportError(
'Please install YandexCloud SDK with `pip install yandexcloud`.'
) from e
result = []
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(self.url, channel_credentials)
for text in texts:
request = TextEmbeddingRequest(model_uri=self.model_uri, text=text)
stub = EmbeddingsServiceStub(channel)
res = stub.TextEmbedding(request, metadata=self._grpc_metadata)
result.append(res.embedding)
return result
|
def _make_request(self: YandexGPTEmbeddings, texts: List[str]):
    """Embed each text via the YandexGPT gRPC embeddings service.

    Args:
        texts: Texts to embed; one gRPC request is issued per text.

    Returns:
        A list with one embedding per input text, in input order.

    Raises:
        ImportError: If the ``yandexcloud`` SDK is not installed.
    """
    try:
        import grpc
        from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import TextEmbeddingRequest
        from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import EmbeddingsServiceStub
    except ImportError as e:
        raise ImportError(
            'Please install YandexCloud SDK with `pip install yandexcloud`.'
        ) from e
    channel_credentials = grpc.ssl_channel_credentials()
    channel = grpc.secure_channel(self.url, channel_credentials)
    # The stub only wraps the channel, so create it once instead of
    # rebuilding it on every loop iteration (loop-invariant hoist).
    stub = EmbeddingsServiceStub(channel)
    result = []
    for text in texts:
        request = TextEmbeddingRequest(model_uri=self.model_uri, text=text)
        res = stub.TextEmbedding(request, metadata=self._grpc_metadata)
        result.append(res.embedding)
    return result
| null |
test_filter_list_metadata
|
documents = [Document(page_content='', metadata={'key1':
'this is a string!', 'key2': ['a', 'list', 'of', 'strings']}), Document
(page_content='', metadata={'key1': 'this is another string!', 'key2':
{'foo'}}), Document(page_content='', metadata={'key1':
'this is another string!', 'key2': {'foo': 'bar'}}), Document(
page_content='', metadata={'key1': 'this is another string!', 'key2':
True}), Document(page_content='', metadata={'key1':
'this is another string!', 'key2': 1}), Document(page_content='',
metadata={'key1': 'this is another string!', 'key2': 1.0}), Document(
page_content='', metadata={'key1': 'this is another string!', 'key2':
'foo'})]
updated_documents = filter_complex_metadata(documents)
filtered_metadata = [doc.metadata for doc in updated_documents]
assert filtered_metadata == [{'key1': 'this is a string!'}, {'key1':
'this is another string!'}, {'key1': 'this is another string!'}, {
'key1': 'this is another string!', 'key2': True}, {'key1':
'this is another string!', 'key2': 1}, {'key1':
'this is another string!', 'key2': 1.0}, {'key1':
'this is another string!', 'key2': 'foo'}]
|
def test_filter_list_metadata() ->None:
    """Container-valued metadata entries (list/set/dict) are stripped while
    scalar values (bool/int/float/str) survive filtering."""
    complex_values = [['a', 'list', 'of', 'strings'], {'foo'}, {'foo': 'bar'}]
    scalar_values = [True, 1, 1.0, 'foo']
    documents = [Document(page_content='', metadata={'key1':
        'this is a string!', 'key2': complex_values[0]})]
    documents += [Document(page_content='', metadata={'key1':
        'this is another string!', 'key2': value}) for value in
        complex_values[1:] + scalar_values]
    updated_documents = filter_complex_metadata(documents)
    filtered_metadata = [doc.metadata for doc in updated_documents]
    expected = [{'key1': 'this is a string!'}, {'key1':
        'this is another string!'}, {'key1': 'this is another string!'}]
    expected += [{'key1': 'this is another string!', 'key2': value} for
        value in scalar_values]
    assert filtered_metadata == expected
| null |
test_load_converts_dataframe_columns_to_document_metadata
|
loader = GeoDataFrameLoader(sample_gdf)
docs = loader.load()
for i, doc in enumerate(docs):
assert doc.metadata['area'] == sample_gdf.loc[i, 'area']
assert doc.metadata['crs'] == sample_gdf.loc[i, 'crs']
|
@pytest.mark.requires('geopandas')
def test_load_converts_dataframe_columns_to_document_metadata(sample_gdf:
    GeoDataFrame) ->None:
    """Each loaded document carries the frame's ``area``/``crs`` column
    values for its row as metadata."""
    documents = GeoDataFrameLoader(sample_gdf).load()
    for row_idx, document in enumerate(documents):
        assert document.metadata['area'] == sample_gdf.loc[row_idx, 'area']
        assert document.metadata['crs'] == sample_gdf.loc[row_idx, 'crs']
| null |
on_agent_finish
|
self.on_agent_finish_common()
|
def on_agent_finish(self, *args: Any, **kwargs: Any) ->Any:
    """Callback for agent-finish events; ignores the event payload and
    delegates to the shared counter update."""
    self.on_agent_finish_common()
| null |
get_tools
|
"""Get the tools in the toolkit."""
allowed_tools = self.selected_tools or _FILE_TOOLS.keys()
tools: List[BaseTool] = []
for tool in allowed_tools:
tool_cls = _FILE_TOOLS[tool]
tools.append(tool_cls(root_dir=self.root_dir))
return tools
|
def get_tools(self) ->List[BaseTool]:
    """Instantiate every selected file tool, each rooted at ``self.root_dir``.

    When ``self.selected_tools`` is falsy, all tools in ``_FILE_TOOLS``
    are instantiated.
    """
    selected = self.selected_tools or _FILE_TOOLS.keys()
    return [_FILE_TOOLS[name](root_dir=self.root_dir) for name in selected]
|
Get the tools in the toolkit.
|
from_texts
|
"""Create a Clarifai vectorstore from a list of texts.
Args:
user_id (str): User ID.
app_id (str): App ID.
texts (List[str]): List of texts to add.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
metadatas (Optional[List[dict]]): Optional list of metadatas.
Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
"""
clarifai_vector_db = cls(user_id=user_id, app_id=app_id, number_of_docs=
number_of_docs, pat=pat)
clarifai_vector_db.add_texts(texts=texts, metadatas=metadatas)
return clarifai_vector_db
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Optional[Embeddings]=None,
    metadatas: Optional[List[dict]]=None, user_id: Optional[str]=None,
    app_id: Optional[str]=None, number_of_docs: Optional[int]=None, pat:
    Optional[str]=None, **kwargs: Any) ->Clarifai:
    """Create a Clarifai vectorstore from a list of texts.

    Note: ``embedding`` and ``**kwargs`` are accepted for interface
    compatibility but are not used by this constructor.

    Args:
        user_id (str): User ID.
        app_id (str): App ID.
        texts (List[str]): List of texts to add.
        number_of_docs (Optional[int]): Number of documents to return
            during vector search. Defaults to None.
        metadatas (Optional[List[dict]]): Optional list of metadatas.
            Defaults to None.
        pat (Optional[str]): Access token passed through to the store
            constructor. Defaults to None.

    Returns:
        Clarifai: Clarifai vectorstore.
    """
    store = cls(user_id=user_id, app_id=app_id, number_of_docs=
        number_of_docs, pat=pat)
    store.add_texts(texts=texts, metadatas=metadatas)
    return store
|
Create a Clarifai vectorstore from a list of texts.
Args:
user_id (str): User ID.
app_id (str): App ID.
texts (List[str]): List of texts to add.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
metadatas (Optional[List[dict]]): Optional list of metadatas.
Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
|
validate_params
|
"""Validate similarity parameters."""
if values['k'] is None and values['similarity_threshold'] is None:
raise ValueError('Must specify one of `k` or `similarity_threshold`.')
return values
|
@root_validator()
def validate_params(cls, values: Dict) ->Dict:
    """Ensure at least one of ``k`` / ``similarity_threshold`` is provided."""
    k = values['k']
    threshold = values['similarity_threshold']
    if k is None and threshold is None:
        raise ValueError('Must specify one of `k` or `similarity_threshold`.')
    return values
|
Validate similarity parameters.
|
embeddings
|
return self.embedding
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """Return the embedding object stored on ``self.embedding``."""
    return self.embedding
| null |
write_key_value_pair
|
self.dispatch(k)
self.write(': ')
self.dispatch(v)
|
def write_key_value_pair(k, v):
    # Emit `key: value` via the enclosing serializer: dispatch renders
    # each side, write inserts the literal separator.
    self.dispatch(k)
    self.write(': ')
    self.dispatch(v)
| null |
similarity_search
|
"""Perform a similarity search with Yellowbrick
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of Documents
"""
embedding = self._embedding.embed_query(query)
documents = self.similarity_search_with_score_by_vector(embedding=embedding,
k=k)
return [doc for doc, _ in documents]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Run a similarity search against Yellowbrick for ``query``.

    Args:
        query (str): query string
        k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            NOTE: do not let end users supply this value directly; be
            mindful of SQL injection.

    Returns:
        List[Document]: the matching documents (scores discarded).
    """
    query_embedding = self._embedding.embed_query(query)
    scored = self.similarity_search_with_score_by_vector(embedding=
        query_embedding, k=k)
    return [document for document, _score in scored]
|
Perform a similarity search with Yellowbrick
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of Documents
|
test_task_related
|
time_str = datetime.now().strftime('%d/%m/%Y-%H:%M:%S')
task_name = f'Test Task - {time_str}'
create_response = json.loads(clickup_wrapper.run(mode='create_task', query=
json.dumps({'name': task_name, 'description': 'This is a Test'})))
assert create_response['name'] == task_name
task_id = create_response['id']
get_response = json.loads(clickup_wrapper.run(mode='get_task', query=json.
dumps({'task_id': task_id})))
assert get_response['name'] == task_name
new_name = f'{task_name} - New'
clickup_wrapper.run(mode='update_task', query=json.dumps({'task_id':
task_id, 'attribute_name': 'name', 'value': new_name}))
get_response_2 = json.loads(clickup_wrapper.run(mode='get_task', query=json
.dumps({'task_id': task_id})))
assert get_response_2['name'] == new_name
|
def test_task_related(clickup_wrapper: ClickupAPIWrapper) ->None:
    """Round-trip a ClickUp task: create, fetch, rename, fetch again."""
    stamp = datetime.now().strftime('%d/%m/%Y-%H:%M:%S')
    original_name = f'Test Task - {stamp}'
    created = json.loads(clickup_wrapper.run(mode='create_task', query=
        json.dumps({'name': original_name, 'description':
        'This is a Test'})))
    assert created['name'] == original_name
    task_id = created['id']
    fetched = json.loads(clickup_wrapper.run(mode='get_task', query=json.
        dumps({'task_id': task_id})))
    assert fetched['name'] == original_name
    renamed = f'{original_name} - New'
    # Update response is intentionally not JSON-parsed.
    clickup_wrapper.run(mode='update_task', query=json.dumps({'task_id':
        task_id, 'attribute_name': 'name', 'value': renamed}))
    refetched = json.loads(clickup_wrapper.run(mode='get_task', query=
        json.dumps({'task_id': task_id})))
    assert refetched['name'] == renamed
| null |
__init__
|
if escape_chars_re:
self.escaped_chars_re = escape_chars_re
else:
self.escaped_chars_re = re.compile(self.DEFAULT_ESCAPED_CHARS)
|
def __init__(self, escape_chars_re: Optional[Pattern]=None):
    """Store the escape-character pattern, falling back to a compiled
    ``DEFAULT_ESCAPED_CHARS`` class default when none is supplied."""
    self.escaped_chars_re = escape_chars_re or re.compile(self.
        DEFAULT_ESCAPED_CHARS)
| null |
test_move_file
|
"""Test the FileMove tool."""
with TemporaryDirectory() as temp_dir:
tool = MoveFileTool()
source_file = Path(temp_dir) / 'source.txt'
destination_file = Path(temp_dir) / 'destination.txt'
source_file.write_text('Hello, world!')
tool.run({'source_path': str(source_file), 'destination_path': str(
destination_file)})
assert not source_file.exists()
assert destination_file.exists()
assert destination_file.read_text() == 'Hello, world!'
|
def test_move_file() ->None:
    """MoveFileTool relocates a file: source vanishes, destination has the
    original contents."""
    with TemporaryDirectory() as temp_dir:
        workdir = Path(temp_dir)
        src = workdir / 'source.txt'
        dst = workdir / 'destination.txt'
        src.write_text('Hello, world!')
        MoveFileTool().run({'source_path': str(src), 'destination_path':
            str(dst)})
        assert not src.exists()
        assert dst.exists()
        assert dst.read_text() == 'Hello, world!'
|
Test the FileMove tool.
|
test_deepinfra_call
|
"""Test valid call to DeepInfra."""
deepinfra_emb = DeepInfraEmbeddings(model_id=
'sentence-transformers/clip-ViT-B-32')
r1 = deepinfra_emb.embed_documents([
'Alpha is the first letter of Greek alphabet',
'Beta is the second letter of Greek alphabet'])
assert len(r1) == 2
assert len(r1[0]) == 512
assert len(r1[1]) == 512
r2 = deepinfra_emb.embed_query('What is the third letter of Greek alphabet')
assert len(r2) == 512
|
def test_deepinfra_call() ->None:
    """Test valid call to DeepInfra."""
    embedder = DeepInfraEmbeddings(model_id=
        'sentence-transformers/clip-ViT-B-32')
    doc_vectors = embedder.embed_documents([
        'Alpha is the first letter of Greek alphabet',
        'Beta is the second letter of Greek alphabet'])
    assert len(doc_vectors) == 2
    assert all(len(vector) == 512 for vector in doc_vectors)
    query_vector = embedder.embed_query(
        'What is the third letter of Greek alphabet')
    assert len(query_vector) == 512
|
Test valid call to DeepInfra.
|
_get_relevant_documents
|
"""Return documents that are relevant to the query."""
current_time = datetime.datetime.now()
docs_and_scores = {doc.metadata['buffer_idx']: (doc, self.default_salience) for
doc in self.memory_stream[-self.k:]}
docs_and_scores.update(self.get_salient_docs(query))
rescored_docs = [(doc, self._get_combined_score(doc, relevance,
current_time)) for doc, relevance in docs_and_scores.values()]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
for doc, _ in rescored_docs[:self.k]:
buffered_doc = self.memory_stream[doc.metadata['buffer_idx']]
buffered_doc.metadata['last_accessed_at'] = current_time
result.append(buffered_doc)
return result
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Return documents that are relevant to the query.

    Combines the ``self.k`` most recent docs from ``memory_stream`` (seeded
    with ``default_salience``) with the salience scores from
    ``get_salient_docs(query)``, rescored via ``_get_combined_score``, and
    returns the top ``self.k``. Side effect: stamps ``last_accessed_at`` on
    each returned document's buffered copy.
    """
    current_time = datetime.datetime.now()
    # Keyed by buffer_idx so the update() below overrides the default
    # salience for any recent doc that also matched the query.
    docs_and_scores = {doc.metadata['buffer_idx']: (doc, self.
        default_salience) for doc in self.memory_stream[-self.k:]}
    docs_and_scores.update(self.get_salient_docs(query))
    rescored_docs = [(doc, self._get_combined_score(doc, relevance,
        current_time)) for doc, relevance in docs_and_scores.values()]
    # Highest combined score first.
    rescored_docs.sort(key=lambda x: x[1], reverse=True)
    result = []
    for doc, _ in rescored_docs[:self.k]:
        # Return the buffered original (not the scored copy) and mark it
        # as freshly accessed.
        buffered_doc = self.memory_stream[doc.metadata['buffer_idx']]
        buffered_doc.metadata['last_accessed_at'] = current_time
        result.append(buffered_doc)
    return result
|
Return documents that are relevant to the query.
|
on_agent_finish_common
|
self.agent_ends += 1
self.ends += 1
|
def on_agent_finish_common(self) ->None:
    """Record that an agent run ended by bumping both counters."""
    self.agent_ends = self.agent_ends + 1
    self.ends = self.ends + 1
| null |
test_faiss_search_not_found
|
"""Test what happens when document is not found."""
texts = ['foo', 'bar', 'baz']
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
docsearch.docstore = InMemoryDocstore({})
with pytest.raises(ValueError):
docsearch.similarity_search('foo')
|
@pytest.mark.requires('faiss')
def test_faiss_search_not_found() ->None:
    """Searching after emptying the docstore raises ValueError."""
    store = FAISS.from_texts(['foo', 'bar', 'baz'], FakeEmbeddings())
    store.docstore = InMemoryDocstore({})
    with pytest.raises(ValueError):
        store.similarity_search('foo')
|
Test what happens when document is not found.
|
parse
|
try:
match = re.search(self.pattern, text.strip())
yaml_str = ''
if match:
yaml_str = match.group('yaml')
else:
yaml_str = text
json_object = yaml.safe_load(yaml_str)
return self.pydantic_object.parse_obj(json_object)
except (yaml.YAMLError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f'Failed to parse {name} from completion {text}. Got: {e}'
raise OutputParserException(msg, llm_output=text) from e
|
def parse(self, text: str) ->T:
    """Extract YAML from ``text`` and parse it into the pydantic model.

    Falls back to treating the whole (unstripped) text as YAML when
    ``self.pattern`` does not match; wraps YAML/validation failures in
    OutputParserException.
    """
    try:
        match = re.search(self.pattern, text.strip())
        yaml_str = match.group('yaml') if match else text
        json_object = yaml.safe_load(yaml_str)
        return self.pydantic_object.parse_obj(json_object)
    except (yaml.YAMLError, ValidationError) as e:
        name = self.pydantic_object.__name__
        msg = f'Failed to parse {name} from completion {text}. Got: {e}'
        raise OutputParserException(msg, llm_output=text) from e
| null |
test_cypher_return_direct
|
"""Test that chain returns direct results."""
url = os.environ.get('NEO4J_URI')
username = os.environ.get('NEO4J_USERNAME')
password = os.environ.get('NEO4J_PASSWORD')
assert url is not None
assert username is not None
assert password is not None
graph = Neo4jGraph(url=url, username=username, password=password)
graph.query('MATCH (n) DETACH DELETE n')
graph.query(
"CREATE (a:Actor {name:'Bruce Willis'})-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})"
)
graph.refresh_schema()
chain = GraphCypherQAChain.from_llm(OpenAI(temperature=0), graph=graph,
return_direct=True)
output = chain.run('Who played in Pulp Fiction?')
expected_output = [{'a.name': 'Bruce Willis'}]
assert output == expected_output
|
def test_cypher_return_direct() ->None:
    """With return_direct=True the chain yields raw query rows."""
    url = os.environ.get('NEO4J_URI')
    username = os.environ.get('NEO4J_USERNAME')
    password = os.environ.get('NEO4J_PASSWORD')
    assert url is not None
    assert username is not None
    assert password is not None
    graph = Neo4jGraph(url=url, username=username, password=password)
    # Start from an empty graph, then seed a single known relationship.
    graph.query('MATCH (n) DETACH DELETE n')
    graph.query(
        "CREATE (a:Actor {name:'Bruce Willis'})-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})"
        )
    graph.refresh_schema()
    chain = GraphCypherQAChain.from_llm(OpenAI(temperature=0), graph=
        graph, return_direct=True)
    result = chain.run('Who played in Pulp Fiction?')
    assert result == [{'a.name': 'Bruce Willis'}]
|
Test that chain returns direct results.
|
create_openai_fn_chain
|
"""[Legacy] Create an LLM chain that uses OpenAI functions.
Args:
functions: A sequence of either dictionaries, pydantic.BaseModels classes, or
Python functions. If dictionaries are passed in, they are assumed to
already be a valid OpenAI functions. If only a single
function is passed in, then it will be enforced that the model use that
function. pydantic.BaseModels and Python functions should have docstrings
describing what the function does. For best results, pydantic.BaseModels
should have descriptions of the parameters and Python functions should have
Google Python style args descriptions in the docstring. Additionally,
Python functions should only use primitive types (str, int, float, bool) or
pydantic.BaseModels for arguments.
llm: Language model to use, assumed to support the OpenAI function-calling API.
prompt: BasePromptTemplate to pass to the model.
enforce_single_function_usage: only used if a single function is passed in. If
True, then the model will be forced to use the given function. If False,
then the model will be given the option to use the given function or not.
output_key: The key to use when returning the output in LLMChain.__call__.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON. If multiple functions are
passed in and they are not pydantic.BaseModels, the chain output will
include both the name of the function that was returned and the arguments
to pass to the function.
Returns:
An LLMChain that will pass in the given functions to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.openai_functions import create_openai_fn_chain
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
class RecordPerson(BaseModel):
""\"Record some identifying information about a person.""\"
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
class RecordDog(BaseModel):
""\"Record some identifying information about a dog.""\"
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ChatOpenAI(model="gpt-4", temperature=0)
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a world class algorithm for recording entities."),
("human", "Make calls to the relevant function to record the entities in the following input: {input}"),
("human", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
"""
if not functions:
raise ValueError('Need to pass in at least one function. Received zero.')
openai_functions = [convert_to_openai_function(f) for f in functions]
output_parser = output_parser or get_openai_output_parser(functions)
llm_kwargs: Dict[str, Any] = {'functions': openai_functions}
if len(openai_functions) == 1 and enforce_single_function_usage:
llm_kwargs['function_call'] = {'name': openai_functions[0]['name']}
llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser,
llm_kwargs=llm_kwargs, output_key=output_key, **kwargs)
return llm_chain
|
def create_openai_fn_chain(functions: Sequence[Union[Dict[str, Any], Type[
    BaseModel], Callable]], llm: BaseLanguageModel, prompt:
    BasePromptTemplate, *, enforce_single_function_usage: bool=True,
    output_key: str='function', output_parser: Optional[BaseLLMOutputParser
    ]=None, **kwargs: Any) ->LLMChain:
    """[Legacy] Create an LLM chain that uses OpenAI functions.

    Args:
        functions: Dictionaries (assumed to already be valid OpenAI function
            schemas), pydantic.BaseModel subclasses, or Python functions; the
            latter two are converted to schemas via
            ``convert_to_openai_function``. pydantic models and functions
            should carry docstrings describing what they do; Python function
            arguments should be primitives or pydantic models.
        llm: Language model assumed to support the OpenAI function-calling
            API.
        prompt: BasePromptTemplate to pass to the model.
        enforce_single_function_usage: Only relevant when exactly one
            function is passed in; if True the model is forced to call it.
        output_key: Key used when returning the output in
            ``LLMChain.__call__``.
        output_parser: Parser for model output; defaults to one inferred
            from the function types via ``get_openai_output_parser``.

    Returns:
        An LLMChain that passes the given functions to the model when run.

    Raises:
        ValueError: If ``functions`` is empty.

    Example:
        .. code-block:: python

            chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt)
            chain.run("Harry was a chubby brown beagle who loved chicken")
            # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
    """
    if not functions:
        raise ValueError(
            'Need to pass in at least one function. Received zero.')
    openai_functions = [convert_to_openai_function(fn) for fn in functions]
    parser = output_parser or get_openai_output_parser(functions)
    llm_kwargs: Dict[str, Any] = {'functions': openai_functions}
    # With a single function the model can be forced to call it.
    if enforce_single_function_usage and len(openai_functions) == 1:
        llm_kwargs['function_call'] = {'name': openai_functions[0]['name']}
    return LLMChain(llm=llm, prompt=prompt, output_parser=parser,
        llm_kwargs=llm_kwargs, output_key=output_key, **kwargs)
|
[Legacy] Create an LLM chain that uses OpenAI functions.
Args:
functions: A sequence of either dictionaries, pydantic.BaseModels classes, or
Python functions. If dictionaries are passed in, they are assumed to
already be a valid OpenAI functions. If only a single
function is passed in, then it will be enforced that the model use that
function. pydantic.BaseModels and Python functions should have docstrings
describing what the function does. For best results, pydantic.BaseModels
should have descriptions of the parameters and Python functions should have
Google Python style args descriptions in the docstring. Additionally,
Python functions should only use primitive types (str, int, float, bool) or
pydantic.BaseModels for arguments.
llm: Language model to use, assumed to support the OpenAI function-calling API.
prompt: BasePromptTemplate to pass to the model.
enforce_single_function_usage: only used if a single function is passed in. If
True, then the model will be forced to use the given function. If False,
then the model will be given the option to use the given function or not.
output_key: The key to use when returning the output in LLMChain.__call__.
output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
will be inferred from the function types. If pydantic.BaseModels are passed
in, then the OutputParser will try to parse outputs using those. Otherwise
model outputs will simply be parsed as JSON. If multiple functions are
passed in and they are not pydantic.BaseModels, the chain output will
include both the name of the function that was returned and the arguments
to pass to the function.
Returns:
An LLMChain that will pass in the given functions to the model when run.
Example:
.. code-block:: python
from typing import Optional
from langchain.chains.openai_functions import create_openai_fn_chain
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
class RecordPerson(BaseModel):
"""Record some identifying information about a person."""
name: str = Field(..., description="The person's name")
age: int = Field(..., description="The person's age")
fav_food: Optional[str] = Field(None, description="The person's favorite food")
class RecordDog(BaseModel):
"""Record some identifying information about a dog."""
name: str = Field(..., description="The dog's name")
color: str = Field(..., description="The dog's color")
fav_food: Optional[str] = Field(None, description="The dog's favorite food")
llm = ChatOpenAI(model="gpt-4", temperature=0)
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a world class algorithm for recording entities."),
("human", "Make calls to the relevant function to record the entities in the following input: {input}"),
("human", "Tip: Make sure to answer in the correct format"),
]
)
chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
|
memory_variables
|
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
|
@property
def memory_variables(self) ->List[str]:
    """Expose the single variable name this memory reads/writes.

    :meta private:
    """
    return [self.memory_key]
|
Will always return list of memory variables.
:meta private:
|
_import_yahoo_finance_news
|
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
return YahooFinanceNewsTool
|
def _import_yahoo_finance_news() ->Any:
    """Lazily import and return the ``YahooFinanceNewsTool`` class.

    The import is deferred to call time so the dependency is only needed
    when the tool is actually requested.
    """
    from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
    return YahooFinanceNewsTool
| null |
get_graph
|
graph = self.mapper.get_graph(config)
input_node = graph.first_node()
output_node = graph.last_node()
if input_node is not None and output_node is not None:
passthrough_node = graph.add_node(_graph_passthrough)
graph.add_edge(input_node, passthrough_node)
graph.add_edge(passthrough_node, output_node)
return graph
|
def get_graph(self, config: (RunnableConfig | None)=None) ->Graph:
    """Return the mapper's graph with a passthrough node bridging its endpoints."""
    g = self.mapper.get_graph(config)
    start, end = g.first_node(), g.last_node()
    if start is not None and end is not None:
        bridge = g.add_node(_graph_passthrough)
        g.add_edge(start, bridge)
        g.add_edge(bridge, end)
    return g
| null |
add_texts
|
"""Insert text data into Milvus.
Inserting data when the collection has not been made yet will result
in creating a new Collection. The data of the first entity decides
the schema of the new collection, the dim is extracted from the first
embedding and the columns are decided by the first metadata dict.
Metadata keys will need to be present for all inserted values. At
the moment there is no None equivalent in Milvus.
Args:
texts (Iterable[str]): The texts to embed, it is assumed
that they all fit in memory.
metadatas (Optional[List[dict]]): Metadata dicts attached to each of
the texts. Defaults to None.
timeout (Optional[int]): Timeout for each batch insert. Defaults
to None.
batch_size (int, optional): Batch size to use for insertion.
Defaults to 1000.
Raises:
MilvusException: Failure to add texts
Returns:
List[str]: The resulting keys for each inserted element.
"""
from pymilvus import Collection, MilvusException
texts = list(texts)
try:
embeddings = self.embedding_func.embed_documents(texts)
except NotImplementedError:
embeddings = [self.embedding_func.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug('Nothing to insert, skipping.')
return []
if not isinstance(self.col, Collection):
self._init(embeddings, metadatas)
insert_dict: dict[str, list] = {self._text_field: texts, self._vector_field:
embeddings}
if self._metadata_field is not None:
for d in metadatas:
insert_dict.setdefault(self._metadata_field, []).append(d)
elif metadatas is not None:
for d in metadatas:
for key, value in d.items():
if key in self.fields:
insert_dict.setdefault(key, []).append(value)
vectors: list = insert_dict[self._vector_field]
total_count = len(vectors)
pks: list[str] = []
assert isinstance(self.col, Collection)
for i in range(0, total_count, batch_size):
end = min(i + batch_size, total_count)
insert_list = [insert_dict[x][i:end] for x in self.fields]
try:
res: Collection
res = self.col.insert(insert_list, timeout=timeout, **kwargs)
pks.extend(res.primary_keys)
except MilvusException as e:
logger.error('Failed to insert batch starting at entity: %s/%s', i,
total_count)
raise e
return pks
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, timeout: Optional[int]=None, batch_size: int=1000, **kwargs: Any
    ) ->List[str]:
    """Insert text data into Milvus.
    Inserting data when the collection has not been made yet will result
    in creating a new Collection. The data of the first entity decides
    the schema of the new collection, the dim is extracted from the first
    embedding and the columns are decided by the first metadata dict.
    Metadata keys will need to be present for all inserted values. At
    the moment there is no None equivalent in Milvus.
    Args:
        texts (Iterable[str]): The texts to embed, it is assumed
            that they all fit in memory.
        metadatas (Optional[List[dict]]): Metadata dicts attached to each of
            the texts. Defaults to None.
        timeout (Optional[int]): Timeout for each batch insert. Defaults
            to None.
        batch_size (int, optional): Batch size to use for insertion.
            Defaults to 1000.
    Raises:
        MilvusException: Failure to add texts
    Returns:
        List[str]: The resulting keys for each inserted element.
    """
    from pymilvus import Collection, MilvusException
    texts = list(texts)
    try:
        embeddings = self.embedding_func.embed_documents(texts)
    except NotImplementedError:
        # Some embedding functions only support per-query embedding.
        embeddings = [self.embedding_func.embed_query(x) for x in texts]
    if len(embeddings) == 0:
        logger.debug('Nothing to insert, skipping.')
        return []
    if not isinstance(self.col, Collection):
        # First insert: create the collection from the observed schema.
        self._init(embeddings, metadatas)
    insert_dict: dict[str, list] = {self._text_field: texts, self.
        _vector_field: embeddings}
    # Bug fix: the original iterated `metadatas` whenever a dedicated
    # metadata field was configured, raising TypeError when metadatas=None.
    if self._metadata_field is not None and metadatas is not None:
        for d in metadatas:
            insert_dict.setdefault(self._metadata_field, []).append(d)
    elif metadatas is not None:
        # Flat schema: spread each metadata dict across matching columns.
        for d in metadatas:
            for key, value in d.items():
                if key in self.fields:
                    insert_dict.setdefault(key, []).append(value)
    vectors: list = insert_dict[self._vector_field]
    total_count = len(vectors)
    pks: list[str] = []
    assert isinstance(self.col, Collection)
    for i in range(0, total_count, batch_size):
        end = min(i + batch_size, total_count)
        # Slice every configured field into a column-aligned batch.
        insert_list = [insert_dict[x][i:end] for x in self.fields]
        try:
            # insert() returns a MutationResult; the previous `res: Collection`
            # annotation was incorrect and has been removed.
            res = self.col.insert(insert_list, timeout=timeout, **kwargs)
            pks.extend(res.primary_keys)
        except MilvusException as e:
            logger.error('Failed to insert batch starting at entity: %s/%s',
                i, total_count)
            raise e
    return pks
|
Insert text data into Milvus.
Inserting data when the collection has not been made yet will result
in creating a new Collection. The data of the first entity decides
the schema of the new collection, the dim is extracted from the first
embedding and the columns are decided by the first metadata dict.
Metadata keys will need to be present for all inserted values. At
the moment there is no None equivalent in Milvus.
Args:
texts (Iterable[str]): The texts to embed, it is assumed
that they all fit in memory.
metadatas (Optional[List[dict]]): Metadata dicts attached to each of
the texts. Defaults to None.
timeout (Optional[int]): Timeout for each batch insert. Defaults
to None.
batch_size (int, optional): Batch size to use for insertion.
Defaults to 1000.
Raises:
MilvusException: Failure to add texts
Returns:
List[str]: The resulting keys for each inserted element.
|
_convert_to_prompt
|
if isinstance(part, str):
return Part.from_text(part)
if not isinstance(part, Dict):
raise ValueError(
f"Message's content is expected to be a dict, got {type(part)}!")
if part['type'] == 'text':
return Part.from_text(part['text'])
elif part['type'] == 'image_url':
path = part['image_url']['url']
if path.startswith('gs://'):
image = load_image_from_gcs(path=path, project=project)
elif path.startswith('data:image/'):
try:
encoded = re.search('data:image/\\w{2,4};base64,(.*)', path).group(
1)
except AttributeError:
raise ValueError(
'Invalid image uri. It should be in the format data:image/<image_type>;base64,<base64_encoded_image>.'
)
image = Image.from_bytes(base64.b64decode(encoded))
elif _is_url(path):
response = requests.get(path)
response.raise_for_status()
image = Image.from_bytes(response.content)
else:
image = Image.load_from_file(path)
else:
raise ValueError('Only text and image_url types are supported!')
return Part.from_image(image)
|
def _convert_to_prompt(part: Union[str, Dict]) ->Part:
    """Convert one message-content part (plain string or typed dict) to a Part.

    Supported dict types are ``text`` and ``image_url``; image URLs may be
    GCS paths, base64 data URIs, HTTP(S) URLs, or local file paths.

    Raises:
        ValueError: If ``part`` is neither str nor dict, the type is
            unsupported, or a data URI is malformed.
    """
    if isinstance(part, str):
        return Part.from_text(part)
    if not isinstance(part, Dict):
        raise ValueError(
            f"Message's content is expected to be a dict, got {type(part)}!")
    if part['type'] == 'text':
        return Part.from_text(part['text'])
    elif part['type'] == 'image_url':
        path = part['image_url']['url']
        if path.startswith('gs://'):
            # `project` comes from the enclosing scope — presumably the GCP
            # project used for GCS access; confirm against the outer function.
            image = load_image_from_gcs(path=path, project=project)
        elif path.startswith('data:image/'):
            # Inline base64 image; a failed match yields None and AttributeError.
            try:
                encoded = re.search('data:image/\\w{2,4};base64,(.*)', path
                    ).group(1)
            except AttributeError:
                raise ValueError(
                    'Invalid image uri. It should be in the format data:image/<image_type>;base64,<base64_encoded_image>.'
                    )
            image = Image.from_bytes(base64.b64decode(encoded))
        elif _is_url(path):
            # Remote image: fetch the raw bytes over HTTP(S).
            response = requests.get(path)
            response.raise_for_status()
            image = Image.from_bytes(response.content)
        else:
            # Fallback: treat the value as a local file path.
            image = Image.load_from_file(path)
    else:
        raise ValueError('Only text and image_url types are supported!')
    return Part.from_image(image)
| null |
_create_table_if_not_exists
|
self.sql_model_class.metadata.create_all(self.engine)
|
def _create_table_if_not_exists(self) ->None:
    """Create all tables declared on ``self.sql_model_class``'s metadata.

    SQLAlchemy's ``create_all`` skips tables that already exist, so this is
    safe to call repeatedly.
    """
    self.sql_model_class.metadata.create_all(self.engine)
| null |
set_llm_cache
|
"""Set a new LLM cache, overwriting the previous value, if any."""
try:
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing llm_cache from langchain root module is no longer supported'
)
langchain.llm_cache = value
except ImportError:
pass
global _llm_cache
_llm_cache = value
|
def set_llm_cache(value: Optional['BaseCache']) ->None:
    """Set a new LLM cache, overwriting the previous value, if any."""
    global _llm_cache
    # Mirror the value onto the legacy `langchain.llm_cache` attribute when
    # the package is importable, silencing its deprecation warning.
    try:
        import langchain
    except ImportError:
        pass
    else:
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message=
                'Importing llm_cache from langchain root module is no longer supported'
                )
            langchain.llm_cache = value
    _llm_cache = value
|
Set a new LLM cache, overwriting the previous value, if any.
|
test_PairwiseStringResultOutputParser_parse
|
output_parser = PairwiseStringResultOutputParser()
text = """I like pie better than cake.
[[A]]"""
got = output_parser.parse_folder(text)
want = {'reasoning': text, 'value': 'A', 'score': 1}
assert got.get('reasoning') == want['reasoning']
assert got.get('value') == want['value']
assert got.get('score') == want['score']
text = """I like cake better than pie.
[[B]]"""
got = output_parser.parse_folder(text)
want = {'reasoning': text, 'value': 'B', 'score': 0}
assert got.get('reasoning') == want['reasoning']
assert got.get('value') == want['value']
assert got.get('score') == want['score']
text = """I like cake and pie.
[[C]]"""
got = output_parser.parse_folder(text)
want = {'reasoning': text, 'value': None, 'score': 0.5}
assert got.get('reasoning') == want['reasoning']
assert got.get('value') == want['value']
assert got.get('score') == want['score']
|
def test_PairwiseStringResultOutputParser_parse() ->None:
    """Verdicts [[A]]/[[B]]/[[C]] map to values A/B/None and scores 1/0/0.5."""
    parser = PairwiseStringResultOutputParser()
    cases = [
        ('I like pie better than cake.\n[[A]]', 'A', 1),
        ('I like cake better than pie.\n[[B]]', 'B', 0),
        ('I like cake and pie.\n[[C]]', None, 0.5),
    ]
    for text, value, score in cases:
        got = parser.parse_folder(text)
        assert got.get('reasoning') == text
        assert got.get('value') == value
        assert got.get('score') == score
| null |
on_chain_error
|
"""Need to log the error."""
pass
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
    """Callback hook fired when a chain raises.

    Need to log the error.
    """
    # Intentionally a no-op today; the docstring's note suggests logging is
    # planned but not yet implemented — TODO confirm with the callback owner.
    pass
|
Need to log the error.
|
format_property_key
|
words = s.split()
if not words:
return s
first_word = words[0].lower()
capitalized_words = [word.capitalize() for word in words[1:]]
return ''.join([first_word] + capitalized_words)
|
def format_property_key(s: str) ->str:
    """Convert a whitespace-separated string to lowerCamelCase.

    The first word is lowercased, subsequent words are capitalized, and all
    are joined without separators. A string with no words is returned as-is.
    """
    parts = s.split()
    if not parts:
        return s
    head, *tail = parts
    return head.lower() + ''.join(word.capitalize() for word in tail)
| null |
test_azure_cognitive_search_get_relevant_documents
|
"""Test valid call to Azure Cognitive Search."""
retriever = AzureCognitiveSearchRetriever()
documents = retriever.get_relevant_documents('what is langchain')
for doc in documents:
assert isinstance(doc, Document)
assert doc.page_content
retriever = AzureCognitiveSearchRetriever(top_k=1)
documents = retriever.get_relevant_documents('what is langchain')
assert len(documents) <= 1
|
def test_azure_cognitive_search_get_relevant_documents() ->None:
    """Test valid call to Azure Cognitive Search."""
    query = 'what is langchain'
    for doc in AzureCognitiveSearchRetriever().get_relevant_documents(query):
        assert isinstance(doc, Document)
        assert doc.page_content
    capped = AzureCognitiveSearchRetriever(top_k=1).get_relevant_documents(query)
    assert len(capped) <= 1
|
Test valid call to Azure Cognitive Search.
|
clear
|
"""Clear memory contents."""
self.chat_memory.clear()
self.entity_cache.clear()
self.entity_store.clear()
|
def clear(self) ->None:
    """Wipe all stored state: chat history, cached entities, entity store."""
    for store in (self.chat_memory, self.entity_cache, self.entity_store):
        store.clear()
|
Clear memory contents.
|
_prepare_request_metadata
|
from google.cloud.contentwarehouse_v1 import RequestMetadata, UserInfo
user_info = UserInfo(id=f'user:{user_ldap}')
return RequestMetadata(user_info=user_info)
|
def _prepare_request_metadata(self, user_ldap: str) ->'RequestMetadata':
    """Build content-warehouse request metadata identifying the caller.

    Args:
        user_ldap: User identifier; sent to the API as ``user:<user_ldap>``.

    Returns:
        A ``RequestMetadata`` carrying the constructed ``UserInfo``.
    """
    from google.cloud.contentwarehouse_v1 import RequestMetadata, UserInfo
    user_info = UserInfo(id=f'user:{user_ldap}')
    return RequestMetadata(user_info=user_info)
| null |
get_elements_from_api
|
"""Retrieve a list of elements from the `Unstructured API`."""
if isinstance(file, collections.abc.Sequence) or isinstance(file_path, list):
from unstructured.partition.api import partition_multiple_via_api
_doc_elements = partition_multiple_via_api(filenames=file_path, files=
file, api_key=api_key, api_url=api_url, **unstructured_kwargs)
elements = []
for _elements in _doc_elements:
elements.extend(_elements)
return elements
else:
from unstructured.partition.api import partition_via_api
return partition_via_api(filename=file_path, file=file, api_key=api_key,
api_url=api_url, **unstructured_kwargs)
|
def get_elements_from_api(file_path: Union[str, List[str], None]=None, file:
    Union[IO, Sequence[IO], None]=None, api_url: str=
    'https://api.unstructured.io/general/v0/general', api_key: str='', **
    unstructured_kwargs: Any) ->List:
    """Retrieve a list of elements from the `Unstructured API`."""
    is_batch = isinstance(file, collections.abc.Sequence) or isinstance(
        file_path, list)
    if not is_batch:
        # Single document: delegate directly to the single-file endpoint.
        from unstructured.partition.api import partition_via_api
        return partition_via_api(filename=file_path, file=file, api_key=
            api_key, api_url=api_url, **unstructured_kwargs)
    # Multiple documents: partition them all, then flatten the per-document
    # element lists into one list.
    from unstructured.partition.api import partition_multiple_via_api
    per_doc_elements = partition_multiple_via_api(filenames=file_path,
        files=file, api_key=api_key, api_url=api_url, **unstructured_kwargs)
    flattened: List = []
    for doc_elements in per_doc_elements:
        flattened.extend(doc_elements)
    return flattened
|
Retrieve a list of elements from the `Unstructured API`.
|
similarity_search
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
if self._by_text:
return self.similarity_search_by_text(query, k, **kwargs)
else:
if self._embedding is None:
raise ValueError(
'_embedding cannot be None for similarity_search when _by_text=False'
)
embedding = self._embedding.embed_query(query)
return self.similarity_search_by_vector(embedding, k, **kwargs)
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Return docs most similar to query.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
    Returns:
        List of Documents most similar to the query.
    """
    # Text-based search path delegates straight to Weaviate's nearText.
    if self._by_text:
        return self.similarity_search_by_text(query, k, **kwargs)
    # Vector path requires a client-side embedding function.
    if self._embedding is None:
        raise ValueError(
            '_embedding cannot be None for similarity_search when _by_text=False'
            )
    query_vector = self._embedding.embed_query(query)
    return self.similarity_search_by_vector(query_vector, k, **kwargs)
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
|
test_default_call
|
"""Test valid chat call to volc engine."""
chat = VolcEngineMaasChat()
response = chat(messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_default_call() ->None:
    """Test valid chat call to volc engine."""
    model = VolcEngineMaasChat()
    reply = model(messages=[HumanMessage(content='Hello')])
    assert isinstance(reply, BaseMessage)
    assert isinstance(reply.content, str)
|
Test valid chat call to volc engine.
|
embed_documents
|
"""Call out to Voyage Embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._get_embeddings(texts, batch_size=self.batch_size, input_type=
'document')
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed a batch of search documents via the Voyage endpoint.
    Args:
        texts: The list of texts to embed.
    Returns:
        List of embeddings, one for each text.
    """
    return self._get_embeddings(texts, input_type='document', batch_size=
        self.batch_size)
|
Call out to Voyage Embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
test_success
|
"""Test that call that doesn't run."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run('zsh: command not found: python')
assert 'zsh: command not found: python' in output
|
def test_success() ->None:
    """Test that call that doesn't run."""
    wrapper = StackExchangeAPIWrapper()
    result = wrapper.run('zsh: command not found: python')
    assert 'zsh: command not found: python' in result
|
Test that call that doesn't run.
|
similarity_search_with_score_by_vector
|
sql_query = f"""
SELECT
text,
metadata,
distance
FROM {self._table} e
INNER JOIN vss_{self._table} v on v.rowid = e.rowid
WHERE vss_search(
v.text_embedding,
vss_search_params('{json.dumps(embedding)}', {k})
)
"""
cursor = self._connection.cursor()
cursor.execute(sql_query)
results = cursor.fetchall()
documents = []
for row in results:
metadata = json.loads(row['metadata']) or {}
doc = Document(page_content=row['text'], metadata=metadata)
documents.append((doc, row['distance']))
return documents
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Return the ``k`` nearest documents to ``embedding`` with distances.

    Args:
        embedding: Query vector to search with.
        k: Number of results to return. Defaults to 4.

    Returns:
        List of ``(Document, distance)`` tuples in the order produced by the
        sqlite-vss index.
    """
    # Security/robustness fix: bind the embedding payload and k as SQL
    # parameters instead of interpolating them into the query string. The
    # table name cannot be bound and is assumed to be a trusted,
    # internally-configured identifier.
    sql_query = f"""
        SELECT
            text,
            metadata,
            distance
        FROM {self._table} e
        INNER JOIN vss_{self._table} v on v.rowid = e.rowid
        WHERE vss_search(
            v.text_embedding,
            vss_search_params(?, ?)
        )
    """
    cursor = self._connection.cursor()
    cursor.execute(sql_query, (json.dumps(embedding), k))
    results = cursor.fetchall()
    documents = []
    for row in results:
        # A NULL/empty metadata column falls back to an empty dict.
        metadata = json.loads(row['metadata']) or {}
        doc = Document(page_content=row['text'], metadata=metadata)
        documents.append((doc, row['distance']))
    return documents
| null |
get_from_dict_or_env
|
"""Get a value from a dictionary or an environment variable."""
if key in data and data[key]:
return data[key]
else:
return get_from_env(key, env_key, default=default)
|
def get_from_dict_or_env(data: Dict[str, Any], key: str, env_key: str,
    default: Optional[str]=None) ->str:
    """Get a value from a dictionary or an environment variable.

    The dictionary wins only when it holds a truthy value for ``key``;
    otherwise the environment (then ``default``) is consulted.
    """
    value = data.get(key)
    if value:
        return value
    return get_from_env(key, env_key, default=default)
|
Get a value from a dictionary or an environment variable.
|
__init__
|
self.json_data = json_data
self.status_code = status_code
|
def __init__(self, json_data: Dict, status_code: int):
    """Store a canned JSON payload and HTTP status code for a mocked response."""
    self.json_data = json_data
    self.status_code = status_code
| null |
test_non_presigned_loading
|
mocker.register_uri(requests_mock.ANY, requests_mock.ANY, status_code=200)
loader = LakeFSLoader(lakefs_access_key='lakefs_access_key',
lakefs_secret_key='lakefs_secret_key', lakefs_endpoint=self.endpoint)
loader.set_repo(self.repo)
loader.set_ref(self.ref)
loader.set_path(self.path)
loader.load()
|
@requests_mock.Mocker()
@pytest.mark.usefixtures('mock_lakefs_client_no_presign_local',
    'mock_unstructured_local')
def test_non_presigned_loading(self, mocker: Mocker) ->None:
    """Exercise LakeFSLoader.load() when presigned URLs are disabled."""
    mocker.register_uri(requests_mock.ANY, requests_mock.ANY, status_code=200)
    loader = LakeFSLoader(lakefs_access_key='lakefs_access_key',
        lakefs_secret_key='lakefs_secret_key', lakefs_endpoint=self.endpoint)
    for setter, value in ((loader.set_repo, self.repo), (loader.set_ref,
        self.ref), (loader.set_path, self.path)):
        setter(value)
    loader.load()
| null |
test_load_evaluators
|
"""Test loading evaluators."""
fake_llm = FakeChatModel()
embeddings = FakeEmbeddings(size=32)
load_evaluators([evaluator_type], llm=fake_llm, embeddings=embeddings)
load_evaluators([evaluator_type.value], llm=fake_llm, embeddings=embeddings)
|
@pytest.mark.requires('rapidfuzz')
@pytest.mark.parametrize('evaluator_type', EvaluatorType)
def test_load_evaluators(evaluator_type: EvaluatorType) ->None:
    """Each evaluator loads both as an enum member and as its string value."""
    llm = FakeChatModel()
    embeddings = FakeEmbeddings(size=32)
    for spec in (evaluator_type, evaluator_type.value):
        load_evaluators([spec], llm=llm, embeddings=embeddings)
|
Test loading evaluators.
|
initialize_llm_chain
|
if 'llm_chain' not in values:
from langchain.chains.llm import LLMChain
values['llm_chain'] = LLMChain(llm=values.get('llm'), prompt=
PromptTemplate(template=QUERY_CHECKER, input_variables=['query']))
if values['llm_chain'].prompt.input_variables != ['query']:
raise ValueError(
"LLM chain for QueryCheckerTool need to use ['query'] as input_variables for the embedded prompt"
)
return values
|
@root_validator(pre=True)
def initialize_llm_chain(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Build a default query-checker LLMChain when none is supplied, then
    validate that the chain's prompt takes exactly ['query'] as input."""
    if 'llm_chain' not in values:
        from langchain.chains.llm import LLMChain
        checker_prompt = PromptTemplate(template=QUERY_CHECKER,
            input_variables=['query'])
        values['llm_chain'] = LLMChain(llm=values.get('llm'), prompt=
            checker_prompt)
    if values['llm_chain'].prompt.input_variables != ['query']:
        raise ValueError(
            "LLM chain for QueryCheckerTool need to use ['query'] as input_variables for the embedded prompt"
            )
    return values
| null |
lazy_parse
|
file_path = blob.source
if file_path is None:
raise ValueError('blob.source cannot be None.')
pdf = open(file_path, 'rb')
files = {'input': (file_path, pdf, 'application/pdf', {'Expires': '0'})}
try:
data: Dict[str, Union[str, List[str]]] = {}
for param in ['generateIDs', 'consolidateHeader', 'segmentSentences']:
data[param] = '1'
data['teiCoordinates'] = ['head', 's']
files = files or {}
r = requests.request('POST', self.grobid_server, headers=None, params=
None, files=files, data=data, timeout=60)
xml_data = r.text
except requests.exceptions.ReadTimeout:
logger.error('GROBID server timed out. Return None.')
xml_data = None
if xml_data is None:
return iter([])
else:
return self.process_xml(file_path, xml_data, self.segment_sentences)
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
    """Send a PDF blob to the GROBID server and yield parsed Documents.

    Args:
        blob: Blob whose ``source`` is the path of the PDF to process.

    Returns:
        Iterator of Documents built from the returned TEI XML, or an empty
        iterator if the server times out.

    Raises:
        ValueError: If ``blob.source`` is None.
    """
    file_path = blob.source
    if file_path is None:
        raise ValueError('blob.source cannot be None.')
    # Ask GROBID to generate IDs, consolidate the header and segment
    # sentences, returning TEI coordinates for heads and sentences.
    data: Dict[str, Union[str, List[str]]] = {}
    for param in ['generateIDs', 'consolidateHeader', 'segmentSentences']:
        data[param] = '1'
    data['teiCoordinates'] = ['head', 's']
    # Fix: open the PDF in a context manager so the handle is always closed
    # (it previously leaked); the dead `files = files or {}` line is removed.
    with open(file_path, 'rb') as pdf:
        files = {'input': (file_path, pdf, 'application/pdf', {'Expires':
            '0'})}
        try:
            r = requests.request('POST', self.grobid_server, headers=None,
                params=None, files=files, data=data, timeout=60)
            xml_data = r.text
        except requests.exceptions.ReadTimeout:
            logger.error('GROBID server timed out. Return None.')
            xml_data = None
    if xml_data is None:
        return iter([])
    return self.process_xml(file_path, xml_data, self.segment_sentences)
| null |
load_memory_variables
|
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.get_relevant_documents(query)
return {'chat_history': self.chat_memory.messages[-10:], 'relevant_context':
docs}
|
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]:
    """Return the last 10 chat messages plus documents relevant to the input."""
    query = inputs[self._get_prompt_input_key(inputs)]
    relevant_docs = self.retriever.get_relevant_documents(query)
    return {'chat_history': self.chat_memory.messages[-10:],
        'relevant_context': relevant_docs}
| null |
test_visit_comparison_range_gt
|
comp = Comparison(comparator=Comparator.GT, attribute='foo', value=1)
expected = {'range': {'metadata.foo': {'gt': 1}}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison_range_gt() ->None:
    """A GT comparison translates to an Elasticsearch range clause."""
    result = DEFAULT_TRANSLATOR.visit_comparison(Comparison(comparator=
        Comparator.GT, attribute='foo', value=1))
    assert result == {'range': {'metadata.foo': {'gt': 1}}}
| null |
_compute
|
self._block_back_door_paths()
self._set_initial_conditions()
self._make_graph()
self._sort_entities()
self._forward_propagate()
self._run_query()
|
def _compute(self) ->Any:
    """Run the computation pipeline end to end.

    Each step mutates internal state that the next step presumably consumes,
    so the sequence below looks order-dependent — confirm before reordering.
    """
    self._block_back_door_paths()
    self._set_initial_conditions()
    self._make_graph()
    self._sort_entities()
    self._forward_propagate()
    self._run_query()
| null |
mock_list_examples
|
return iter(examples)
|
def mock_list_examples(*args: Any, **kwargs: Any) ->Iterator[Example]:
    """Test double for the client's list_examples: ignores all arguments and
    yields the ``examples`` captured from the enclosing scope."""
    return iter(examples)
| null |
test_mistralai_model_param
|
llm = ChatMistralAI(model='foo')
assert llm.model == 'foo'
|
@pytest.mark.requires('mistralai')
def test_mistralai_model_param() ->None:
    """The `model` constructor argument is stored on the client."""
    client = ChatMistralAI(model='foo')
    assert client.model == 'foo'
| null |
test_graph_single_runnable
|
runnable = StrOutputParser()
graph = StrOutputParser().get_graph()
first_node = graph.first_node()
assert first_node is not None
assert first_node.data.schema() == runnable.input_schema.schema()
last_node = graph.last_node()
assert last_node is not None
assert last_node.data.schema() == runnable.output_schema.schema()
assert len(graph.nodes) == 3
assert len(graph.edges) == 2
assert graph.edges[0].source == first_node.id
assert graph.edges[1].target == last_node.id
assert graph.draw_ascii() == snapshot
|
def test_graph_single_runnable(snapshot: SnapshotAssertion) ->None:
    """A lone runnable's graph is input -> parser -> output, per snapshot."""
    parser = StrOutputParser()
    graph = StrOutputParser().get_graph()
    head = graph.first_node()
    assert head is not None
    assert head.data.schema() == parser.input_schema.schema()
    tail = graph.last_node()
    assert tail is not None
    assert tail.data.schema() == parser.output_schema.schema()
    assert (len(graph.nodes), len(graph.edges)) == (3, 2)
    assert graph.edges[0].source == head.id
    assert graph.edges[1].target == tail.id
    assert graph.draw_ascii() == snapshot
| null |
foo
|
"""Add one to the input."""
raise NotImplementedError()
|
def foo(x: int) ->None:
    """Add one to the input."""
    # Deliberate stub: the docstring advertises behavior that is intentionally
    # not implemented — presumably used to exercise NotImplementedError
    # handling in tests; confirm before "fixing".
    raise NotImplementedError()
|
Add one to the input.
|
_load_dump_file
|
try:
import mwxml
except ImportError as e:
raise ImportError(
"Unable to import 'mwxml'. Please install with `pip install mwxml`."
) from e
return mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))
|
def _load_dump_file(self):
    """Return an ``mwxml.Dump`` over the XML file at ``self.file_path``.

    Raises:
        ImportError: If the optional ``mwxml`` dependency is not installed.
    """
    try:
        import mwxml
    except ImportError as e:
        raise ImportError(
            "Unable to import 'mwxml'. Please install with `pip install mwxml`."
            ) from e
    # The file handle is left open on purpose: mwxml appears to read the dump
    # lazily, so closing it here would break iteration — TODO confirm.
    return mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))
| null |
_validate_mode
|
_valid_modes = {'single', 'elements'}
if mode not in _valid_modes:
raise ValueError(
f'Got {mode} for `mode`, but should be one of `{_valid_modes}`')
|
def _validate_mode(self, mode: str) ->None:
_valid_modes = {'single', 'elements'}
if mode not in _valid_modes:
raise ValueError(
f'Got {mode} for `mode`, but should be one of `{_valid_modes}`')
| null |
similarity_search
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(query, k, filter=filter,
search_params=search_params, offset=offset, score_threshold=
score_threshold, consistency=consistency, **kwargs)
return list(map(itemgetter(0), results))
|
def similarity_search(self, query: str, k: int=4, filter: Optional[
    MetadataFilter]=None, search_params: Optional[common_types.SearchParams
    ]=None, offset: int=0, score_threshold: Optional[float]=None,
    consistency: Optional[common_types.ReadConsistency]=None, **kwargs: Any
    ) ->List[Document]:
    """Return docs most similar to query.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter: Filter by metadata. Defaults to None.
        search_params: Additional search params
        offset:
            Offset of the first result to return.
            May be used to paginate results.
            Note: large offset values may cause performance issues.
        score_threshold:
            Define a minimal score threshold for the result.
            If defined, less similar results will not be returned.
            Score of the returned result might be higher or smaller than the
            threshold depending on the Distance function used.
            E.g. for cosine similarity only higher scores will be returned.
        consistency:
            Read consistency of the search. Defines how many replicas should be
            queried before returning the result.
            Values:
            - int - number of replicas to query, values should present in all
            queried replicas
            - 'majority' - query all replicas, but return values present in the
            majority of replicas
            - 'quorum' - query the majority of replicas, return values present in
            all of them
            - 'all' - query all replicas, and return values present in all replicas
        **kwargs:
            Any other named arguments to pass through to QdrantClient.search()
    Returns:
        List of Documents most similar to the query.
    """
    # Delegate to the scored search, then strip the scores.
    hits = self.similarity_search_with_score(query, k, filter=filter,
        search_params=search_params, offset=offset, score_threshold=
        score_threshold, consistency=consistency, **kwargs)
    return [doc for doc, _score in hits]
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents most similar to the query.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
mosaicml_api_token = get_from_dict_or_env(values, 'mosaicml_api_token',
'MOSAICML_API_TOKEN')
values['mosaicml_api_token'] = mosaicml_api_token
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    values['mosaicml_api_token'] = get_from_dict_or_env(values,
        'mosaicml_api_token', 'MOSAICML_API_TOKEN')
    return values
|
Validate that the API key and Python package exist in the environment.
|
test_visit_comparison_range_lt
|
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=1)
expected = {'range': {'metadata.foo': {'lt': 1}}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison_range_lt() ->None:
    """A LT comparison should translate into a range filter on the metadata field."""
    comparison = Comparison(comparator=Comparator.LT, attribute='foo', value=1)
    translated = DEFAULT_TRANSLATOR.visit_comparison(comparison)
    assert translated == {'range': {'metadata.foo': {'lt': 1}}}
| null |
custom_preprocess
|
return [self.preprocess_msg(m) for m in msg_list]
|
def custom_preprocess(self, msg_list: Sequence[BaseMessage]) ->List[Dict[
    str, str]]:
    """Run ``self.preprocess_msg`` over every message and collect the results."""
    processed: List[Dict[str, str]] = []
    for message in msg_list:
        processed.append(self.preprocess_msg(message))
    return processed
| null |
_create_tool_message
|
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return ToolMessage(tool_call_id=agent_action.tool_call_id, content=content,
additional_kwargs={'name': agent_action.tool})
|
def _create_tool_message(agent_action: OpenAIToolAgentAction, observation: str
    ) ->ToolMessage:
    """Convert agent action and observation into a tool message.
    Args:
        agent_action: the tool invocation request from the agent
        observation: the result of the tool invocation
    Returns:
        ToolMessage that corresponds to the original tool invocation
    """
    # Non-string observations are JSON-serialized when possible; anything
    # not JSON-serializable falls back to str().
    if not isinstance(observation, str):
        try:
            content = json.dumps(observation, ensure_ascii=False)
        except Exception:
            content = str(observation)
    else:
        content = observation
    return ToolMessage(tool_call_id=agent_action.tool_call_id, content=
        content, additional_kwargs={'name': agent_action.tool})
|
Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
|
test_redis_cache_multi
|
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN),
ttl=1)
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
langchain.llm_cache.update('foo', llm_string, [Generation(text='fizz'),
Generation(text='Buzz')])
output = llm.generate(['foo'])
expected_output = LLMResult(generations=[[Generation(text='fizz'),
Generation(text='Buzz')]], llm_output={})
assert output == expected_output
langchain.llm_cache.clear()
|
def test_redis_cache_multi() ->None:
    """Cached multi-output generations should be returned verbatim from the cache."""
    from upstash_redis import Redis
    langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=
        TOKEN), ttl=1)
    llm = FakeLLM()
    params = llm.dict()
    params['stop'] = None
    llm_string = str(sorted(params.items()))
    cached_generations = [Generation(text='fizz'), Generation(text='Buzz')]
    langchain.llm_cache.update('foo', llm_string, cached_generations)
    assert llm.generate(['foo']) == LLMResult(generations=[
        cached_generations], llm_output={})
    langchain.llm_cache.clear()
| null |
reset_deanonymizer_mapping
|
"""Reset the deanonymizer mapping"""
self._deanonymizer_mapping = DeanonymizerMapping()
|
def reset_deanonymizer_mapping(self) ->None:
    """Reset the deanonymizer mapping to a fresh, empty state."""
    self._deanonymizer_mapping = DeanonymizerMapping()
|
Reset the deanonymizer mapping
|
gen_mock_zep_document
|
from zep_python.document import Document as ZepDocument
embedding = [random() for _ in range(embedding_dimensions)
] if embedding_dimensions else None
return ZepDocument(uuid=str(uuid4()), collection_name=collection_name,
content='Test Document', embedding=embedding, metadata={'key': 'value'})
|
def gen_mock_zep_document(collection_name: str, embedding_dimensions:
    Optional[int]=None) ->'ZepDocument':
    """Build a mock ZepDocument, with a random embedding when dimensions are given."""
    from zep_python.document import Document as ZepDocument
    if embedding_dimensions:
        embedding = [random() for _ in range(embedding_dimensions)]
    else:
        embedding = None
    return ZepDocument(uuid=str(uuid4()), collection_name=collection_name,
        content='Test Document', embedding=embedding, metadata={'key':
        'value'})
| null |
_convert_message_to_dict
|
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
else:
raise TypeError(f'Got unknown type {message}')
return message_dict
|
def _convert_message_to_dict(message: BaseMessage) ->dict:
    """Map a LangChain message object to the role/content dict the API expects."""
    if isinstance(message, ChatMessage):
        role = message.role
    elif isinstance(message, HumanMessage):
        role = 'user'
    elif isinstance(message, AIMessage):
        role = 'assistant'
    else:
        raise TypeError(f'Got unknown type {message}')
    return {'role': role, 'content': message.content}
| null |
post
|
resp = self._post(self.api_url, request)
return transform_output_fn(resp) if transform_output_fn else resp
|
def post(self, request: Any, transform_output_fn: Optional[Callable[...,
    str]]=None) ->Any:
    """POST *request* to ``self.api_url``; optionally transform the raw response."""
    response = self._post(self.api_url, request)
    if transform_output_fn is None:
        return response
    return transform_output_fn(response)
| null |
_import_huggingface_hub
|
from langchain_community.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
|
def _import_huggingface_hub() ->Any:
    """Lazily import and return the ``HuggingFaceHub`` LLM class."""
    from langchain_community.llms import huggingface_hub
    return huggingface_hub.HuggingFaceHub
| null |
test_tokenization
|
assert _get_token_ids_default_method('This is a test') == [1212, 318, 257, 1332
]
|
def test_tokenization(self) ->None:
    """The default tokenizer should produce the expected GPT-2 token ids."""
    expected_ids = [1212, 318, 257, 1332]
    assert _get_token_ids_default_method('This is a test') == expected_ids
| null |
test_model_param
|
"""Test model params works."""
chat = QianfanChatEndpoint()
response = chat(model='BLOOMZ-7B', messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_model_param() ->None:
    """Test model params works."""
    chat = QianfanChatEndpoint()
    messages = [HumanMessage(content='Hello')]
    response = chat(model='BLOOMZ-7B', messages=messages)
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str)
|
Test model params works.
|
memory_variables
|
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
|
@property
def memory_variables(self) ->List[str]:
    """The single variable name this memory class exposes.
    :meta private:
    """
    return [self.memory_key]
|
Will always return list of memory variables.
:meta private:
|
assign_name
|
"""Assign name to the run."""
if values.get('name') is None:
if 'name' in values['serialized']:
values['name'] = values['serialized']['name']
elif 'id' in values['serialized']:
values['name'] = values['serialized']['id'][-1]
if values.get('events') is None:
values['events'] = []
return values
|
@root_validator(pre=True)
def assign_name(cls, values: dict) ->dict:
    """Populate default ``name`` and ``events`` entries on the raw values."""
    if values.get('name') is None:
        serialized = values['serialized']
        if 'name' in serialized:
            values['name'] = serialized['name']
        elif 'id' in serialized:
            values['name'] = serialized['id'][-1]
    if values.get('events') is None:
        values['events'] = []
    return values
|
Assign name to the run.
|
test_init_with_pipeline_fn
|
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
llm = SelfHostedPipeline(model_load_fn=load_pipeline, hardware=gpu,
model_reqs=model_reqs, inference_fn=inference_fn)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_init_with_pipeline_fn() ->None:
    """Test initialization with a self-hosted HF pipeline."""
    remote_gpu = get_remote_instance()
    llm = SelfHostedPipeline(model_load_fn=load_pipeline, hardware=
        remote_gpu, model_reqs=model_reqs, inference_fn=inference_fn)
    generated = llm('Say foo:')
    assert isinstance(generated, str)
|
Test initialization with a self-hosted HF pipeline.
|
_load_package_modules
|
"""Recursively load modules of a package based on the file system.
Traversal based on the file system makes it easy to determine which
of the modules/packages are part of the package vs. 3rd party or built-in.
Parameters:
package_directory: Path to the package directory.
submodule: Optional name of submodule to load.
Returns:
list: A list of loaded module objects.
"""
package_path = Path(package_directory) if isinstance(package_directory, str
) else package_directory
modules_by_namespace = {}
package_name = package_path.name
if submodule is not None:
package_path = package_path / submodule
for file_path in package_path.rglob('*.py'):
if file_path.name.startswith('_'):
continue
relative_module_name = file_path.relative_to(package_path)
if any(part.startswith('_') for part in relative_module_name.parts):
continue
namespace = str(relative_module_name).replace('.py', '').replace('/', '.')
top_namespace = namespace.split('.')[0]
try:
if submodule is not None:
module_members = _load_module_members(
f'{package_name}.{submodule}.{namespace}',
f'{submodule}.{namespace}')
else:
module_members = _load_module_members(f'{package_name}.{namespace}'
, namespace)
if top_namespace in modules_by_namespace:
existing_module_members = modules_by_namespace[top_namespace]
_module_members = _merge_module_members([
existing_module_members, module_members])
else:
_module_members = module_members
modules_by_namespace[top_namespace] = _module_members
except ImportError as e:
print(f"Error: Unable to import module '{namespace}' with error: {e}")
return modules_by_namespace
|
def _load_package_modules(package_directory: Union[str, Path], submodule:
    Optional[str]=None) ->Dict[str, ModuleMembers]:
    """Recursively load modules of a package based on the file system.
    Traversal based on the file system makes it easy to determine which
    of the modules/packages are part of the package vs. 3rd party or built-in.
    Parameters:
        package_directory: Path to the package directory.
        submodule: Optional name of submodule to load.
    Returns:
        Dict mapping each top-level namespace to its merged module members.
    """
    package_path = Path(package_directory) if isinstance(package_directory, str
        ) else package_directory
    modules_by_namespace = {}
    package_name = package_path.name
    if submodule is not None:
        package_path = package_path / submodule
    for file_path in package_path.rglob('*.py'):
        # Skip private modules and any module inside a private package.
        if file_path.name.startswith('_'):
            continue
        relative_module_name = file_path.relative_to(package_path)
        if any(part.startswith('_') for part in relative_module_name.parts):
            continue
        # Build the dotted namespace from the path parts. The previous
        # str(...).replace('.py', '').replace('/', '.') mangled any path
        # component containing ".py" and broke on Windows separators.
        namespace = '.'.join(relative_module_name.with_suffix('').parts)
        top_namespace = namespace.split('.')[0]
        try:
            if submodule is not None:
                module_members = _load_module_members(
                    f'{package_name}.{submodule}.{namespace}',
                    f'{submodule}.{namespace}')
            else:
                module_members = _load_module_members(
                    f'{package_name}.{namespace}', namespace)
            # Merge members of modules that share the same top-level namespace.
            if top_namespace in modules_by_namespace:
                existing_module_members = modules_by_namespace[top_namespace]
                _module_members = _merge_module_members([
                    existing_module_members, module_members])
            else:
                _module_members = module_members
            modules_by_namespace[top_namespace] = _module_members
        except ImportError as e:
            print(
                f"Error: Unable to import module '{namespace}' with error: {e}"
                )
    return modules_by_namespace
|
Recursively load modules of a package based on the file system.
Traversal based on the file system makes it easy to determine which
of the modules/packages are part of the package vs. 3rd party or built-in.
Parameters:
package_directory: Path to the package directory.
submodule: Optional name of submodule to load.
Returns:
list: A list of loaded module objects.
|
dependable_usearch_import
|
"""
Import usearch if available, otherwise raise error.
"""
try:
import usearch.index
except ImportError:
raise ImportError(
'Could not import usearch python package. Please install it with `pip install usearch` '
)
return usearch.index
|
def dependable_usearch_import() ->Any:
    """
    Import usearch if available, otherwise raise error.

    Returns:
        The ``usearch.index`` module.

    Raises:
        ImportError: If the ``usearch`` package is not installed.
    """
    try:
        import usearch.index
    except ImportError as e:
        # Chain the original exception so the real import failure stays visible.
        raise ImportError(
            'Could not import usearch python package. Please install it with `pip install usearch` '
            ) from e
    return usearch.index
|
Import usearch if available, otherwise raise error.
|
test_from_texts_with_metadatas_inner_product
|
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = AzureCosmosDBVectorSearch.from_texts(texts,
azure_openai_embeddings, metadatas=metadatas, collection=collection,
index_name=INDEX_NAME)
vectorstore.create_index(num_lists, dimensions, CosmosDBSimilarityType.IP)
sleep(2)
output = vectorstore.similarity_search('Sandwich', k=1)
assert output
assert output[0].page_content == 'What is a sandwich?'
assert output[0].metadata['c'] == 1
vectorstore.delete_index()
|
def test_from_texts_with_metadatas_inner_product(self,
    azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None:
    """End-to-end check of inner-product similarity search with metadata."""
    texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
        'The fence is purple.']
    metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
    store = AzureCosmosDBVectorSearch.from_texts(texts,
        azure_openai_embeddings, metadatas=metadatas, collection=collection,
        index_name=INDEX_NAME)
    store.create_index(num_lists, dimensions, CosmosDBSimilarityType.IP)
    sleep(2)
    results = store.similarity_search('Sandwich', k=1)
    assert results
    top_hit = results[0]
    assert top_hit.page_content == 'What is a sandwich?'
    assert top_hit.metadata['c'] == 1
    store.delete_index()
| null |
_generate_embeddings
|
"""Generate embeddings using the Embaas API."""
payload = self._generate_payload(texts)
try:
return self._handle_request(payload)
except requests.exceptions.RequestException as e:
if e.response is None or not e.response.text:
raise ValueError(f'Error raised by embaas embeddings API: {e}')
parsed_response = e.response.json()
if 'message' in parsed_response:
raise ValueError(
f"Validation Error raised by embaas embeddings API:{parsed_response['message']}"
)
raise
|
def _generate_embeddings(self, texts: List[str]) ->List[List[float]]:
    """Generate embeddings using the Embaas API."""
    payload = self._generate_payload(texts)
    try:
        return self._handle_request(payload)
    except requests.exceptions.RequestException as e:
        response = e.response
        if response is None or not response.text:
            raise ValueError(f'Error raised by embaas embeddings API: {e}')
        parsed = response.json()
        if 'message' in parsed:
            raise ValueError(
                f"Validation Error raised by embaas embeddings API:{parsed['message']}"
                )
        raise
|
Generate embeddings using the Embaas API.
|
embed_documents
|
"""Embed documents using a Bookend deployed embeddings model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
result = []
headers = self.auth_header
headers['Content-Type'] = 'application/json; charset=utf-8'
params = {'model_id': self.model_id, 'task': DEFAULT_TASK}
for text in texts:
data = json.dumps({'text': text, 'question': None, 'context': None,
'instruction': None})
r = requests.request('POST', API_URL + self.domain + PATH, headers=
headers, params=params, data=data)
result.append(r.json()[0]['data'])
return result
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed documents using a Bookend deployed embeddings model.
    Args:
        texts: The list of texts to embed.
    Returns:
        List of embeddings, one for each text.
    """
    result = []
    # Copy the auth headers instead of mutating them: the previous code wrote
    # 'Content-Type' directly into self.auth_header, silently changing shared
    # state on every call.
    headers = {**self.auth_header, 'Content-Type':
        'application/json; charset=utf-8'}
    params = {'model_id': self.model_id, 'task': DEFAULT_TASK}
    for text in texts:
        data = json.dumps({'text': text, 'question': None, 'context': None,
            'instruction': None})
        r = requests.request('POST', API_URL + self.domain + PATH, headers=
            headers, params=params, data=data)
        result.append(r.json()[0]['data'])
    return result
|
Embed documents using a Bookend deployed embeddings model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
update
|
"""Update the documents which have the specified ids.
Args:
ids: The id list of the updating embedding vector.
texts: The texts of the updating documents.
metadatas: The metadatas of the updating documents.
Returns:
the ids of the updated documents.
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
return self.awadb_client.UpdateTexts(ids=ids, text_field_name=
'embedding_text', texts=texts, metadatas=metadatas)
|
def update(self, ids: List[str], texts: Iterable[str], metadatas: Optional[
    List[dict]]=None, **kwargs: Any) ->List[str]:
    """Update the documents which have the specified ids.
    Args:
        ids: The id list of the updating embedding vector.
        texts: The texts of the updating documents.
        metadatas: The metadatas of the updating documents.
    Returns:
        the ids of the updated documents.
    """
    client = self.awadb_client
    if client is None:
        raise ValueError('AwaDB client is None!!!')
    return client.UpdateTexts(ids=ids, text_field_name='embedding_text',
        texts=texts, metadatas=metadatas)
|
Update the documents which have the specified ids.
Args:
ids: The id list of the updating embedding vector.
texts: The texts of the updating documents.
metadatas: The metadatas of the updating documents.
Returns:
the ids of the updated documents.
|
_run
|
"""Run the tool."""
query = self.api_resource.users().messages().get(userId='me', format='raw',
id=message_id)
message_data = query.execute()
raw_message = base64.urlsafe_b64decode(message_data['raw'])
email_msg = email.message_from_bytes(raw_message)
subject = email_msg['Subject']
sender = email_msg['From']
message_body = ''
if email_msg.is_multipart():
for part in email_msg.walk():
ctype = part.get_content_type()
cdispo = str(part.get('Content-Disposition'))
if ctype == 'text/plain' and 'attachment' not in cdispo:
message_body = part.get_payload(decode=True).decode('utf-8')
break
else:
message_body = email_msg.get_payload(decode=True).decode('utf-8')
body = clean_email_body(message_body)
return {'id': message_id, 'threadId': message_data['threadId'], 'snippet':
message_data['snippet'], 'body': body, 'subject': subject, 'sender': sender
}
|
def _run(self, message_id: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->Dict:
    """Fetch a Gmail message by id and return its parsed fields.

    Args:
        message_id: Gmail message id to fetch.
        run_manager: Optional callback manager (not used in the body).

    Returns:
        Dict with ``id``, ``threadId``, ``snippet``, ``body``, ``subject``
        and ``sender`` keys.
    """
    # format='raw' returns the full message, base64url-encoded.
    query = self.api_resource.users().messages().get(userId='me', format=
        'raw', id=message_id)
    message_data = query.execute()
    raw_message = base64.urlsafe_b64decode(message_data['raw'])
    email_msg = email.message_from_bytes(raw_message)
    subject = email_msg['Subject']
    sender = email_msg['From']
    message_body = ''
    if email_msg.is_multipart():
        # Use the first text/plain part that is not an attachment as the body.
        for part in email_msg.walk():
            ctype = part.get_content_type()
            cdispo = str(part.get('Content-Disposition'))
            if ctype == 'text/plain' and 'attachment' not in cdispo:
                message_body = part.get_payload(decode=True).decode('utf-8')
                break
    else:
        # NOTE(review): assumes the payload is UTF-8; a message in another
        # charset would raise UnicodeDecodeError — confirm upstream handling.
        message_body = email_msg.get_payload(decode=True).decode('utf-8')
    body = clean_email_body(message_body)
    return {'id': message_id, 'threadId': message_data['threadId'],
        'snippet': message_data['snippet'], 'body': body, 'subject':
        subject, 'sender': sender}
|
Run the tool.
|
plan
|
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with the observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Actions specifying what tool to use.
"""
|
@abstractmethod
def plan(self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks:
    Callbacks=None, **kwargs: Any) ->Union[List[AgentAction], AgentFinish]:
    """Given input, decide what to do.
    Args:
        intermediate_steps: Steps the LLM has taken to date,
            along with the observations.
        callbacks: Callbacks to run.
        **kwargs: User inputs.
    Returns:
        Actions specifying what tool to use, or an AgentFinish when done.
    """
|
Given input, decide what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with the observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Actions specifying what tool to use.
|
delete
|
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
delete_all: Delete all records in the table.
"""
if delete_all:
self._delete_all()
self.wait_for_indexing(ndocs=0)
elif ids is not None:
chunk_size = 500
for i in range(0, len(ids), chunk_size):
chunk = ids[i:i + chunk_size]
operations = [{'delete': {'table': self._table_name, 'id': id}} for
id in chunk]
self._client.records().transaction(payload={'operations': operations})
else:
raise ValueError('Either ids or delete_all must be set.')
|
def delete(self, ids: Optional[List[str]]=None, delete_all: Optional[bool]=
    None, **kwargs: Any) ->None:
    """Delete by vector IDs.
    Args:
        ids: List of ids to delete.
        delete_all: Delete all records in the table.
    """
    if delete_all:
        self._delete_all()
        self.wait_for_indexing(ndocs=0)
        return
    if ids is None:
        raise ValueError('Either ids or delete_all must be set.')
    batch_size = 500
    for start in range(0, len(ids), batch_size):
        batch = ids[start:start + batch_size]
        payload = {'operations': [{'delete': {'table': self._table_name,
            'id': record_id}} for record_id in batch]}
        self._client.records().transaction(payload=payload)
|
Delete by vector IDs.
Args:
ids: List of ids to delete.
delete_all: Delete all records in the table.
|
load
|
"""Transcribes the audio file and loads the transcript into documents.
It uses the AssemblyAI API to transcribe the audio file and blocks until
the transcription is finished.
"""
transcript = self.transcriber.transcribe(self.file_path)
if transcript.error:
raise ValueError(f'Could not transcribe file: {transcript.error}')
if self.transcript_format == TranscriptFormat.TEXT:
return [Document(page_content=transcript.text, metadata=transcript.
json_response)]
elif self.transcript_format == TranscriptFormat.SENTENCES:
sentences = transcript.get_sentences()
return [Document(page_content=s.text, metadata=s.dict(exclude={'text'})
) for s in sentences]
elif self.transcript_format == TranscriptFormat.PARAGRAPHS:
paragraphs = transcript.get_paragraphs()
return [Document(page_content=p.text, metadata=p.dict(exclude={'text'})
) for p in paragraphs]
elif self.transcript_format == TranscriptFormat.SUBTITLES_SRT:
return [Document(page_content=transcript.export_subtitles_srt())]
elif self.transcript_format == TranscriptFormat.SUBTITLES_VTT:
return [Document(page_content=transcript.export_subtitles_vtt())]
else:
raise ValueError('Unknown transcript format.')
|
def load(self) ->List[Document]:
    """Transcribes the audio file and loads the transcript into documents.
    It uses the AssemblyAI API to transcribe the audio file and blocks until
    the transcription is finished.
    """
    transcript = self.transcriber.transcribe(self.file_path)
    if transcript.error:
        raise ValueError(f'Could not transcribe file: {transcript.error}')
    fmt = self.transcript_format
    if fmt == TranscriptFormat.TEXT:
        return [Document(page_content=transcript.text, metadata=transcript
            .json_response)]
    if fmt == TranscriptFormat.SENTENCES:
        return [Document(page_content=s.text, metadata=s.dict(exclude={
            'text'})) for s in transcript.get_sentences()]
    if fmt == TranscriptFormat.PARAGRAPHS:
        return [Document(page_content=p.text, metadata=p.dict(exclude={
            'text'})) for p in transcript.get_paragraphs()]
    if fmt == TranscriptFormat.SUBTITLES_SRT:
        return [Document(page_content=transcript.export_subtitles_srt())]
    if fmt == TranscriptFormat.SUBTITLES_VTT:
        return [Document(page_content=transcript.export_subtitles_vtt())]
    raise ValueError('Unknown transcript format.')
|
Transcribes the audio file and loads the transcript into documents.
It uses the AssemblyAI API to transcribe the audio file and blocks until
the transcription is finished.
|
get_input_schema
|
if all(s.get_input_schema(config).schema().get('type', 'object') ==
'object' for s in self.steps.values()):
return create_model(self.get_name('Input'), **{k: (v.annotation, v.
default) for step in self.steps.values() for k, v in step.
get_input_schema(config).__fields__.items() if k != '__root__'},
__config__=_SchemaConfig)
return super().get_input_schema(config)
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Merge the input schemas of all parallel steps when each accepts a dict."""
    if all(s.get_input_schema(config).schema().get('type', 'object') ==
        'object' for s in self.steps.values()):
        # Build one model whose fields are the union of every step's fields,
        # skipping pydantic's synthetic '__root__' field.
        return create_model(self.get_name('Input'), **{k: (v.annotation, v.
            default) for step in self.steps.values() for k, v in step.
            get_input_schema(config).__fields__.items() if k != '__root__'},
            __config__=_SchemaConfig)
    return super().get_input_schema(config)
| null |
test_cassandra
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = _vectorstore_from_texts(texts)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_cassandra() ->None:
    """Test end to end construction and search."""
    store = _vectorstore_from_texts(['foo', 'bar', 'baz'])
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo')]
|
Test end to end construction and search.
|
load_qa_chain
|
"""Load question answering chain.
Args:
llm: Language Model to use in the chain.
chain_type: Type of document combining chain to use. Should be one of "stuff",
"map_reduce", "map_rerank", and "refine".
verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
callback_manager: Callback manager to use for the chain.
Returns:
A chain to use for question answering.
"""
loader_mapping: Mapping[str, LoadingCallable] = {'stuff': _load_stuff_chain,
'map_reduce': _load_map_reduce_chain, 'refine': _load_refine_chain,
'map_rerank': _load_map_rerank_chain}
if chain_type not in loader_mapping:
raise ValueError(
f'Got unsupported chain type: {chain_type}. Should be one of {loader_mapping.keys()}'
)
return loader_mapping[chain_type](llm, verbose=verbose, callback_manager=
callback_manager, **kwargs)
|
def load_qa_chain(llm: BaseLanguageModel, chain_type: str='stuff', verbose:
    Optional[bool]=None, callback_manager: Optional[BaseCallbackManager]=
    None, **kwargs: Any) ->BaseCombineDocumentsChain:
    """Load question answering chain.
    Args:
        llm: Language Model to use in the chain.
        chain_type: Type of document combining chain to use. Should be one of "stuff",
            "map_reduce", "map_rerank", and "refine".
        verbose: Whether chains should be run in verbose mode or not. Note that this
            applies to all chains that make up the final chain.
        callback_manager: Callback manager to use for the chain.
    Returns:
        A chain to use for question answering.
    """
    loader_mapping: Mapping[str, LoadingCallable] = {'stuff':
        _load_stuff_chain, 'map_reduce': _load_map_reduce_chain, 'refine':
        _load_refine_chain, 'map_rerank': _load_map_rerank_chain}
    if chain_type not in loader_mapping:
        raise ValueError(
            f'Got unsupported chain type: {chain_type}. Should be one of {loader_mapping.keys()}'
            )
    loader = loader_mapping[chain_type]
    return loader(llm, verbose=verbose, callback_manager=callback_manager,
        **kwargs)
|
Load question answering chain.
Args:
llm: Language Model to use in the chain.
chain_type: Type of document combining chain to use. Should be one of "stuff",
"map_reduce", "map_rerank", and "refine".
verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
callback_manager: Callback manager to use for the chain.
Returns:
A chain to use for question answering.
|
results
|
"""Silence mypy for accessing this field.
:meta private:
"""
return self.get('results')
|
@property
def results(self) ->Any:
    """Typed accessor for the ``results`` entry.
    :meta private:
    """
    return self.get('results')
|
Silence mypy for accessing this field.
:meta private:
|
get_input_schema
|
super_schema = super().get_input_schema(config)
if super_schema.__custom_root_type__ is not None:
from langchain_core.messages import BaseMessage
fields: Dict = {}
if self.input_messages_key and self.history_messages_key:
fields[self.input_messages_key] = Union[str, BaseMessage, Sequence[
BaseMessage]], ...
elif self.input_messages_key:
fields[self.input_messages_key] = Sequence[BaseMessage], ...
else:
fields['__root__'] = Sequence[BaseMessage], ...
return create_model('RunnableWithChatHistoryInput', **fields)
else:
return super_schema
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Derive the input schema, substituting a message-shaped one when the
    base schema uses a custom root type."""
    super_schema = super().get_input_schema(config)
    if super_schema.__custom_root_type__ is not None:
        from langchain_core.messages import BaseMessage
        fields: Dict = {}
        # Field shape depends on the configured message keys:
        # both keys -> the input field may be a string, one message, or a
        # sequence; only the input key -> a sequence of messages under that
        # key; neither -> the root value itself is a sequence of messages.
        if self.input_messages_key and self.history_messages_key:
            fields[self.input_messages_key] = Union[str, BaseMessage,
                Sequence[BaseMessage]], ...
        elif self.input_messages_key:
            fields[self.input_messages_key] = Sequence[BaseMessage], ...
        else:
            fields['__root__'] = Sequence[BaseMessage], ...
        return create_model('RunnableWithChatHistoryInput', **fields)
    else:
        return super_schema
| null |
embed_query
|
"""Generate a hypothetical document and embedded it."""
var_name = self.llm_chain.input_keys[0]
result = self.llm_chain.generate([{var_name: text}])
documents = [generation.text for generation in result.generations[0]]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
|
def embed_query(self, text: str) ->List[float]:
    """Generate a hypothetical document for *text* and embed it."""
    input_key = self.llm_chain.input_keys[0]
    llm_result = self.llm_chain.generate([{input_key: text}])
    hypothetical_docs = [gen.text for gen in llm_result.generations[0]]
    doc_embeddings = self.embed_documents(hypothetical_docs)
    return self.combine_embeddings(doc_embeddings)
|
Generate a hypothetical document and embed it.
|
test_singlestoredb_filter_metadata_3
|
"""Test filtering by two metadata fields"""
table_name = 'test_singlestoredb_filter_metadata_3'
drop(table_name)
docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget'
}) for i, t in enumerate(texts)]
docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('foo', k=1, filter={'category':
'budget', 'index': 1})
assert output == [Document(page_content='bar', metadata={'index': 1,
'category': 'budget'})]
drop(table_name)
|
@pytest.mark.skipif(not singlestoredb_installed, reason=
    'singlestoredb not installed')
def test_singlestoredb_filter_metadata_3(texts: List[str]) ->None:
    """Test filtering by two metadata fields"""
    table_name = 'test_singlestoredb_filter_metadata_3'
    drop(table_name)
    documents = [Document(page_content=text, metadata={'index': idx,
        'category': 'budget'}) for idx, text in enumerate(texts)]
    store = SingleStoreDB.from_documents(documents, FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
        table_name, host=TEST_SINGLESTOREDB_URL)
    hits = store.similarity_search('foo', k=1, filter={'category':
        'budget', 'index': 1})
    assert hits == [Document(page_content='bar', metadata={'index': 1,
        'category': 'budget'})]
    drop(table_name)
|
Test filtering by two metadata fields
|
test_embed_documents_quality
|
"""Smoke test embedding quality by comparing similar and dissimilar documents."""
model = GoogleGenerativeAIEmbeddings(model=_MODEL)
similar_docs = ['Document A', 'Similar Document A']
dissimilar_docs = ['Document A', 'Completely Different Zebra']
similar_embeddings = model.embed_documents(similar_docs)
dissimilar_embeddings = model.embed_documents(dissimilar_docs)
similar_distance = np.linalg.norm(np.array(similar_embeddings[0]) - np.
array(similar_embeddings[1]))
dissimilar_distance = np.linalg.norm(np.array(dissimilar_embeddings[0]) -
np.array(dissimilar_embeddings[1]))
assert similar_distance < dissimilar_distance
|
def test_embed_documents_quality() ->None:
    """Smoke test: embeddings of similar texts must lie closer than dissimilar ones."""
    model = GoogleGenerativeAIEmbeddings(model=_MODEL)

    def _pair_distance(pair):
        # Euclidean distance between the embeddings of a two-document pair.
        first, second = model.embed_documents(pair)
        return np.linalg.norm(np.array(first) - np.array(second))

    close = _pair_distance(['Document A', 'Similar Document A'])
    far = _pair_distance(['Document A', 'Completely Different Zebra'])
    assert close < far
|
Smoke test embedding quality by comparing similar and dissimilar documents.
|
_get_default_output_parser
|
return StructuredChatOutputParserWithRetries.from_llm(llm=llm)
|
@classmethod
def _get_default_output_parser(cls, llm: Optional[BaseLanguageModel]=None,
    **kwargs: Any) ->AgentOutputParser:
    """Build the default output parser for this agent.

    Delegates to ``StructuredChatOutputParserWithRetries.from_llm`` so that
    malformed outputs can be retried/fixed with *llm*. Extra keyword
    arguments are accepted for interface compatibility but ignored.
    """
    parser = StructuredChatOutputParserWithRetries.from_llm(llm=llm)
    return parser
| null |
__init__
|
if not isinstance(urls, list):
raise TypeError('urls must be a list')
self.urls = urls
self.save_dir = save_dir
|
def __init__(self, urls: List[str], save_dir: str):
    """Record the URLs to process and the destination directory.

    Args:
        urls: List of URLs; any non-list value raises ``TypeError``.
        save_dir: Destination directory path kept on the instance
            (presumably where downloads land — usage is outside this view).
    """
    if isinstance(urls, list):
        self.urls = urls
    else:
        raise TypeError('urls must be a list')
    self.save_dir = save_dir
| null |
test_get_relevant_documents_with_score
|
"""Test end to end construction and MRR search."""
from weaviate import Client
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
client = Client(weaviate_url)
retriever = WeaviateHybridSearchRetriever(client=client, index_name=
f'LangChain_{uuid4().hex}', text_key='text', attributes=['page'])
for i, text in enumerate(texts):
retriever.add_documents([Document(page_content=text, metadata=metadatas
[i])])
output = retriever.get_relevant_documents('foo', score=True)
for doc in output:
assert '_additional' in doc.metadata
|
@pytest.mark.vcr(ignore_localhost=True)
def test_get_relevant_documents_with_score(self, weaviate_url: str) ->None:
    """Test end to end construction and MRR search."""
    from weaviate import Client
    corpus = ['foo', 'bar', 'baz']
    page_metadata = [{'page': n} for n in range(len(corpus))]
    retriever = WeaviateHybridSearchRetriever(
        client=Client(weaviate_url),
        # Random suffix keeps each test run in its own Weaviate index.
        index_name=f'LangChain_{uuid4().hex}',
        text_key='text',
        attributes=['page'],
    )
    for n, text in enumerate(corpus):
        retriever.add_documents(
            [Document(page_content=text, metadata=page_metadata[n])]
        )
    results = retriever.get_relevant_documents('foo', score=True)
    # score=True should surface Weaviate's '_additional' block on every hit.
    assert all('_additional' in doc.metadata for doc in results)
|
Test end to end construction and MRR search.
|
__getattr__
|
"""Get attr name."""
if name == 'create_csv_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist')
|
def __getattr__(name: str) ->Any:
    """Module attribute hook: explain where 'create_csv_agent' moved to."""
    if name != 'create_csv_agent':
        raise AttributeError(f'{name} does not exist')
    # Resolve this module's dotted path relative to the package root so the
    # old and new import paths can be spelled out in the error message.
    package_root = Path(__file__).parents[3]
    relative = as_import_path(Path(__file__).parent, relative_to=package_root)
    old_path = f'langchain.{relative}.{name}'
    new_path = f'langchain_experimental.{relative}.{name}'
    raise ImportError(
        f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
    )
|
Get attr name.
|
delete_collection
|
"""
Just an alias for `clear`
(to better align with other VectorStore implementations).
"""
self.clear()
|
def delete_collection(self) ->None:
    """Remove every stored entry.

    Thin alias for :meth:`clear`, kept so this class matches the
    ``delete_collection`` naming used by other VectorStore implementations.
    """
    # Delegate wholesale; clear() owns the actual deletion logic.
    self.clear()
|
Just an alias for `clear`
(to better align with other VectorStore implementations).
|
_infer_embedding_dimension
|
"""Infer the embedding dimension from the embedding function."""
assert self.embeddings is not None, 'embedding model is required.'
return len(self.embeddings.embed_query('test'))
|
def _infer_embedding_dimension(self) ->int:
    """Determine the embedding width by embedding a short probe string."""
    assert self.embeddings is not None, 'embedding model is required.'
    # The length of one query embedding is the dimension for all of them.
    probe_vector = self.embeddings.embed_query('test')
    return len(probe_vector)
|
Infer the embedding dimension from the embedding function.
|
_import_astradb
|
from langchain_community.vectorstores.astradb import AstraDB
return AstraDB
|
def _import_astradb() ->Any:
    """Return the AstraDB vector store class via a lazy, function-local import.

    The import is deferred so langchain_community is only loaded when AstraDB
    is actually requested.
    """
    from langchain_community.vectorstores.astradb import AstraDB
    return AstraDB
| null |
_display_messages
|
dict_messages = messages_to_dict(messages)
for message in dict_messages:
yaml_string = yaml.dump(message, default_flow_style=False, sort_keys=
False, allow_unicode=True, width=10000, line_break=None)
print('\n', '======= start of message =======', '\n\n')
print(yaml_string)
print('======= end of message =======', '\n\n')
|
def _display_messages(messages: List[BaseMessage]) ->None:
    """Pretty-print each message as a YAML document framed by marker lines."""
    for serialized in messages_to_dict(messages):
        # Wide width + block style keeps each field on its own readable line.
        rendered = yaml.dump(
            serialized,
            default_flow_style=False,
            sort_keys=False,
            allow_unicode=True,
            width=10000,
            line_break=None,
        )
        print('\n', '======= start of message =======', '\n\n')
        print(rendered)
        print('======= end of message =======', '\n\n')
| null |
test_sim_search_with_score_for_ip_metric
|
"""
Test end to end construction and similarity search with score for ip
(inner-product) metric.
"""
hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(),
work_dir=str(tmp_path), n_dim=10, dist_metric='ip')
output = hnsw_vec_store.similarity_search_with_score('foo', k=3)
assert len(output) == 3
for result in output:
assert result[1] == -8.0
|
def test_sim_search_with_score_for_ip_metric(texts: List[str], tmp_path: Path
    ) ->None:
    """
    End-to-end check of DocArrayHnswSearch scored similarity search under the
    ip (inner-product) distance metric.
    """
    store = DocArrayHnswSearch.from_texts(
        texts,
        FakeEmbeddings(),
        work_dir=str(tmp_path),
        n_dim=10,
        dist_metric='ip',
    )
    scored = store.similarity_search_with_score('foo', k=3)
    assert len(scored) == 3
    # With the fake embeddings every hit scores exactly -8.0 under ip.
    assert all(score == -8.0 for _doc, score in scored)
|
Test end to end construction and similarity search with score for ip
(inner-product) metric.
|
clear
|
"""Clear memory contents."""
super().clear()
self.moving_summary_buffer = ''
|
def clear(self) ->None:
    """Clear memory contents.

    Delegates to the parent class to wipe the stored history, then resets the
    running summary kept in ``moving_summary_buffer`` to an empty string.
    """
    super().clear()
    self.moving_summary_buffer = ''
|
Clear memory contents.
|
generate_with_retry
|
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _generate_with_retry(**_kwargs: Any) ->Any:
resp = llm.client.call(**_kwargs)
return check_response(resp)
return _generate_with_retry(**kwargs)
|
def generate_with_retry(llm: Tongyi, **kwargs: Any) ->Any:
    """Call the Tongyi completion endpoint, retrying transient failures.

    Wraps a single ``llm.client.call`` in the tenacity retry policy produced
    by ``_create_retry_decorator`` and validates each response via
    ``check_response``.
    """
    def _call_once(**call_kwargs: Any) ->Any:
        # One raw completion attempt; check_response raises on bad payloads,
        # which is what triggers the retry policy.
        response = llm.client.call(**call_kwargs)
        return check_response(response)

    retrying_call = _create_retry_decorator(llm)(_call_once)
    return retrying_call(**kwargs)
|
Use tenacity to retry the completion call.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.