method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
test_max_chunks
|
documents = [f'text-{i}' for i in range(20)]
embedding = ErnieEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 20
|
def test_max_chunks() ->None:
documents = [f'text-{i}' for i in range(20)]
embedding = ErnieEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 20
| null |
login
|
"""
login to jaguardb server with a jaguar_api_key or let self._jag find a key
Args:
pod (str): name of a Pod
store (str): name of a vector store
optional jaguar_api_key (str): API key of user to jaguardb server
Returns:
True if successful; False if not successful
"""
if jaguar_api_key == '':
jaguar_api_key = self._jag.getApiKey()
self._jaguar_api_key = jaguar_api_key
self._token = self._jag.login(jaguar_api_key)
if self._token == '':
logger.error('E0001 error init(): invalid jaguar_api_key')
return False
return True
|
def login(self, jaguar_api_key: Optional[str]='') ->bool:
"""
login to jaguardb server with a jaguar_api_key or let self._jag find a key
Args:
pod (str): name of a Pod
store (str): name of a vector store
optional jaguar_api_key (str): API key of user to jaguardb server
Returns:
True if successful; False if not successful
"""
if jaguar_api_key == '':
jaguar_api_key = self._jag.getApiKey()
self._jaguar_api_key = jaguar_api_key
self._token = self._jag.login(jaguar_api_key)
if self._token == '':
logger.error('E0001 error init(): invalid jaguar_api_key')
return False
return True
|
login to jaguardb server with a jaguar_api_key or let self._jag find a key
Args:
pod (str): name of a Pod
store (str): name of a vector store
optional jaguar_api_key (str): API key of user to jaguardb server
Returns:
True if successful; False if not successful
|
lazy_load
|
"""Lazy load the chat sessions."""
|
@abstractmethod
def lazy_load(self) ->Iterator[ChatSession]:
"""Lazy load the chat sessions."""
|
Lazy load the chat sessions.
|
test_extract_html
|
html2text_transformer = Html2TextTransformer()
paragraphs_html = (
'<html>Begin of html tag<h1>Header</h1><p>First paragraph.</p>Middle of html tag<p>Second paragraph.</p>End of html tag</html>'
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == """Begin of html tag
# Header
First paragraph.
Middle of html tag
Second paragraph.
End of html tag
"""
|
@pytest.mark.requires('html2text')
def test_extract_html() ->None:
html2text_transformer = Html2TextTransformer()
paragraphs_html = (
'<html>Begin of html tag<h1>Header</h1><p>First paragraph.</p>Middle of html tag<p>Second paragraph.</p>End of html tag</html>'
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = html2text_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == """Begin of html tag
# Header
First paragraph.
Middle of html tag
Second paragraph.
End of html tag
"""
| null |
test_api_key_is_string
|
llm = ChatFireworks(fireworks_api_key='secret-api-key')
assert isinstance(llm.fireworks_api_key, SecretStr)
|
@pytest.mark.requires('fireworks')
def test_api_key_is_string() ->None:
llm = ChatFireworks(fireworks_api_key='secret-api-key')
assert isinstance(llm.fireworks_api_key, SecretStr)
| null |
_import_anyscale
|
from langchain_community.llms.anyscale import Anyscale
return Anyscale
|
def _import_anyscale() ->Any:
from langchain_community.llms.anyscale import Anyscale
return Anyscale
| null |
_scopes
|
"""Return required scopes."""
return ['Notes.Read']
|
@property
def _scopes(self) ->List[str]:
"""Return required scopes."""
return ['Notes.Read']
|
Return required scopes.
|
max_marginal_relevance_search
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k,
lambda_mult, filter, namespace)
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
20, lambda_mult: float=0.5, filter: Optional[dict]=None, namespace:
Optional[str]=None, **kwargs: Any) ->List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(embedding, k,
fetch_k, lambda_mult, filter, namespace)
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
|
_get_functions_from_llm_run
|
"""
Extract functions from a LangSmith LLM run if they exist.
:param llm_run: The LLM run object.
:return: Functions from the run or None.
"""
if llm_run.run_type != 'llm':
raise ValueError(f'Expected run of type llm. Got: {llm_run.run_type}')
return (llm_run.extra or {}).get('invocation_params', {}).get('functions')
|
@staticmethod
def _get_functions_from_llm_run(llm_run: 'Run') ->Optional[List[Dict]]:
"""
Extract functions from a LangSmith LLM run if they exist.
:param llm_run: The LLM run object.
:return: Functions from the run or None.
"""
if llm_run.run_type != 'llm':
raise ValueError(f'Expected run of type llm. Got: {llm_run.run_type}')
return (llm_run.extra or {}).get('invocation_params', {}).get('functions')
|
Extract functions from a LangSmith LLM run if they exist.
:param llm_run: The LLM run object.
:return: Functions from the run or None.
|
delete_texts
|
"""Delete a list of docs from the Rockset collection"""
try:
from rockset.models import DeleteDocumentsRequestData
except ImportError:
raise ImportError(
'Could not import rockset client python package. Please install it with `pip install rockset`.'
)
self._client.Documents.delete_documents(collection=self._collection_name,
data=[DeleteDocumentsRequestData(id=i) for i in ids], workspace=self.
_workspace)
|
def delete_texts(self, ids: List[str]) ->None:
"""Delete a list of docs from the Rockset collection"""
try:
from rockset.models import DeleteDocumentsRequestData
except ImportError:
raise ImportError(
'Could not import rockset client python package. Please install it with `pip install rockset`.'
)
self._client.Documents.delete_documents(collection=self.
_collection_name, data=[DeleteDocumentsRequestData(id=i) for i in
ids], workspace=self._workspace)
|
Delete a list of docs from the Rockset collection
|
_llm_type
|
"""Return type of chat model."""
return 'mlflow-chat'
|
@property
def _llm_type(self) ->str:
"""Return type of chat model."""
return 'mlflow-chat'
|
Return type of chat model.
|
validate_environment
|
"""Ensure that the API key and python package exist in environment."""
values['octoai_api_token'] = get_from_dict_or_env(values,
'octoai_api_token', 'OCTOAI_API_TOKEN')
values['endpoint_url'] = get_from_dict_or_env(values, 'endpoint_url',
'ENDPOINT_URL')
return values
|
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Ensure that the API key and python package exist in environment."""
values['octoai_api_token'] = get_from_dict_or_env(values,
'octoai_api_token', 'OCTOAI_API_TOKEN')
values['endpoint_url'] = get_from_dict_or_env(values, 'endpoint_url',
'ENDPOINT_URL')
return values
|
Ensure that the API key and python package exist in environment.
|
embed_query
|
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
|
def embed_query(self, text: str) ->List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
|
Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
_get_elements
|
from unstructured.partition.xml import partition_xml
return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
|
def _get_elements(self) ->List:
from unstructured.partition.xml import partition_xml
return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
| null |
semantic_hybrid_search_with_score
|
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank(query,
k=k, filters=kwargs.get('filters', None))
return [(doc, score) for doc, score, _ in docs_and_scores]
|
def semantic_hybrid_search_with_score(self, query: str, k: int=4, **kwargs: Any
) ->List[Tuple[Document, float]]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank(query,
k=k, filters=kwargs.get('filters', None))
return [(doc, score) for doc, score, _ in docs_and_scores]
|
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
|
messages
|
"""Retrieve the messages from Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
items = [json.loads(m.decode('utf-8')) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
|
@property
def messages(self) ->List[BaseMessage]:
"""Retrieve the messages from Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
items = [json.loads(m.decode('utf-8')) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
|
Retrieve the messages from Redis
|
test_json_distance_evaluator_evaluate_strings_list_diff
|
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"a": 1, "b": 2}, {"a": 2, "b": 4}]'
result = json_distance_evaluator._evaluate_strings(prediction=prediction,
reference=reference)
pytest.approx(1 / len(reference.replace(' ', '')), result['score'])
|
@pytest.mark.requires('rapidfuzz')
def test_json_distance_evaluator_evaluate_strings_list_diff(
json_distance_evaluator: JsonEditDistanceEvaluator) ->None:
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"a": 1, "b": 2}, {"a": 2, "b": 4}]'
result = json_distance_evaluator._evaluate_strings(prediction=
prediction, reference=reference)
pytest.approx(1 / len(reference.replace(' ', '')), result['score'])
| null |
generate
|
"""Generate text from Arcee DALM.
Args:
prompt: Prompt to generate text from.
size: The max number of context results to retrieve. Defaults to 3.
(Can be less if filters are provided).
filters: Filters to apply to the context dataset.
"""
response = self._make_request(method='post', route=ArceeRoute.generate.
value, body=self._make_request_body_for_models(prompt=prompt, **kwargs))
return response['text']
|
def generate(self, prompt: str, **kwargs: Any) ->str:
"""Generate text from Arcee DALM.
Args:
prompt: Prompt to generate text from.
size: The max number of context results to retrieve. Defaults to 3.
(Can be less if filters are provided).
filters: Filters to apply to the context dataset.
"""
response = self._make_request(method='post', route=ArceeRoute.generate.
value, body=self._make_request_body_for_models(prompt=prompt, **kwargs)
)
return response['text']
|
Generate text from Arcee DALM.
Args:
prompt: Prompt to generate text from.
size: The max number of context results to retrieve. Defaults to 3.
(Can be less if filters are provided).
filters: Filters to apply to the context dataset.
|
similarity_search_by_vector
|
"""Perform retrieval directly using vectors.
Args:
embedding: vectors.
k: top n.
search_filter: Additional filtering conditions.
Returns:
document_list: List of documents.
"""
return self.create_results(self.inner_embedding_query(embedding=embedding,
search_filter=search_filter, k=k))
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
search_filter: Optional[dict]=None, **kwargs: Any) ->List[Document]:
"""Perform retrieval directly using vectors.
Args:
embedding: vectors.
k: top n.
search_filter: Additional filtering conditions.
Returns:
document_list: List of documents.
"""
return self.create_results(self.inner_embedding_query(embedding=
embedding, search_filter=search_filter, k=k))
|
Perform retrieval directly using vectors.
Args:
embedding: vectors.
k: top n.
search_filter: Additional filtering conditions.
Returns:
document_list: List of documents.
|
_import_zilliz
|
from langchain_community.vectorstores.zilliz import Zilliz
return Zilliz
|
def _import_zilliz() ->Any:
from langchain_community.vectorstores.zilliz import Zilliz
return Zilliz
| null |
load
|
"""Load docs."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""Load docs."""
return list(self.lazy_load())
|
Load docs.
|
__radd__
|
chunk = AddableDict(other)
for key in self:
if key not in chunk or chunk[key] is None:
chunk[key] = self[key]
elif self[key] is not None:
try:
added = chunk[key] + self[key]
except TypeError:
added = self[key]
chunk[key] = added
return chunk
|
def __radd__(self, other: AddableDict) ->AddableDict:
chunk = AddableDict(other)
for key in self:
if key not in chunk or chunk[key] is None:
chunk[key] = self[key]
elif self[key] is not None:
try:
added = chunk[key] + self[key]
except TypeError:
added = self[key]
chunk[key] = added
return chunk
| null |
__init__
|
if atransform is not None:
self._atransform = atransform
func_for_name: Callable = atransform
if inspect.isasyncgenfunction(transform):
self._atransform = transform
func_for_name = transform
elif inspect.isgeneratorfunction(transform):
self._transform = transform
func_for_name = transform
else:
raise TypeError(
f'Expected a generator function type for `transform`.Instead got an unsupported type: {type(transform)}'
)
try:
self.name = func_for_name.__name__
except AttributeError:
pass
|
def __init__(self, transform: Union[Callable[[Iterator[Input]], Iterator[
Output]], Callable[[AsyncIterator[Input]], AsyncIterator[Output]]],
atransform: Optional[Callable[[AsyncIterator[Input]], AsyncIterator[
Output]]]=None) ->None:
if atransform is not None:
self._atransform = atransform
func_for_name: Callable = atransform
if inspect.isasyncgenfunction(transform):
self._atransform = transform
func_for_name = transform
elif inspect.isgeneratorfunction(transform):
self._transform = transform
func_for_name = transform
else:
raise TypeError(
f'Expected a generator function type for `transform`.Instead got an unsupported type: {type(transform)}'
)
try:
self.name = func_for_name.__name__
except AttributeError:
pass
| null |
teardown
|
bigquery.Client(location='US').delete_dataset(TestBigQueryVectorStore.
dataset_name, delete_contents=True, not_found_ok=True)
|
def teardown() ->None:
bigquery.Client(location='US').delete_dataset(TestBigQueryVectorStore.
dataset_name, delete_contents=True, not_found_ok=True)
| null |
check_libcublas
|
if not is_libcublas_available():
pytest.skip(reason='libcublas.so is not available')
yield
|
@pytest.fixture(scope='module', autouse=True)
def check_libcublas() ->Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason='libcublas.so is not available')
yield
| null |
config_specs
|
return self.mapper.config_specs
|
@property
def config_specs(self) ->List[ConfigurableFieldSpec]:
return self.mapper.config_specs
| null |
_default_params
|
"""Get the default parameters for calling Hunyuan API."""
normal_params = {'app_id': self.hunyuan_app_id, 'secret_id': self.
hunyuan_secret_id, 'temperature': self.temperature, 'top_p': self.top_p}
if self.query_id is not None:
normal_params['query_id'] = self.query_id
return {**normal_params, **self.model_kwargs}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling Hunyuan API."""
normal_params = {'app_id': self.hunyuan_app_id, 'secret_id': self.
hunyuan_secret_id, 'temperature': self.temperature, 'top_p': self.top_p
}
if self.query_id is not None:
normal_params['query_id'] = self.query_id
return {**normal_params, **self.model_kwargs}
|
Get the default parameters for calling Hunyuan API.
|
test_max_marginal_relevance_search_by_vector
|
"""Test end to end construction and MRR search by vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url)
foo_embedding = embedding_openai.embed_query('foo')
standard_ranking = docsearch.similarity_search('foo', k=2)
output = docsearch.max_marginal_relevance_search_by_vector(foo_embedding, k
=2, fetch_k=3, lambda_mult=1.0)
assert output == standard_ranking
output = docsearch.max_marginal_relevance_search_by_vector(foo_embedding, k
=2, fetch_k=3, lambda_mult=0.0)
assert output == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1})]
|
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search_by_vector(self, weaviate_url: str,
embedding_openai: OpenAIEmbeddings) ->None:
"""Test end to end construction and MRR search by vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url)
foo_embedding = embedding_openai.embed_query('foo')
standard_ranking = docsearch.similarity_search('foo', k=2)
output = docsearch.max_marginal_relevance_search_by_vector(foo_embedding,
k=2, fetch_k=3, lambda_mult=1.0)
assert output == standard_ranking
output = docsearch.max_marginal_relevance_search_by_vector(foo_embedding,
k=2, fetch_k=3, lambda_mult=0.0)
assert output == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1})]
|
Test end to end construction and MRR search by vector.
|
add_texts
|
"""Add text to the Vectara vectorstore.
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.vectorstore.add_texts(texts, metadatas, doc_metadata or {})
|
def add_texts(self, texts: List[str], metadatas: Optional[List[dict]]=None,
doc_metadata: Optional[dict]=None) ->None:
"""Add text to the Vectara vectorstore.
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.vectorstore.add_texts(texts, metadatas, doc_metadata or {})
|
Add text to the Vectara vectorstore.
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
|
load
|
"""Load documents."""
paths = list(Path(self.file_path).glob('**/*.md'))
docs = []
for path in paths:
with open(path, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
tags = self._parse_document_tags(text)
dataview_fields = self._parse_dataview_fields(text)
text = self._remove_front_matter(text)
metadata = {'source': str(path.name), 'path': str(path), 'created':
path.stat().st_ctime, 'last_modified': path.stat().st_mtime,
'last_accessed': path.stat().st_atime, **self.
_to_langchain_compatible_metadata(front_matter), **dataview_fields}
if tags or front_matter.get('tags'):
metadata['tags'] = ','.join(tags | set(front_matter.get('tags', []) or
[]))
docs.append(Document(page_content=text, metadata=metadata))
return docs
|
def load(self) ->List[Document]:
"""Load documents."""
paths = list(Path(self.file_path).glob('**/*.md'))
docs = []
for path in paths:
with open(path, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
tags = self._parse_document_tags(text)
dataview_fields = self._parse_dataview_fields(text)
text = self._remove_front_matter(text)
metadata = {'source': str(path.name), 'path': str(path), 'created':
path.stat().st_ctime, 'last_modified': path.stat().st_mtime,
'last_accessed': path.stat().st_atime, **self.
_to_langchain_compatible_metadata(front_matter), **dataview_fields}
if tags or front_matter.get('tags'):
metadata['tags'] = ','.join(tags | set(front_matter.get('tags',
[]) or []))
docs.append(Document(page_content=text, metadata=metadata))
return docs
|
Load documents.
|
test_redis_semantic_cache_hit
|
set_llm_cache(RedisSemanticCache(embedding=embedding, redis_url=REDIS_TEST_URL)
)
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_generations = [[Generation(text=generation, generation_info=params) for
generation in prompt_i_generations] for prompt_i_generations in generations
]
for prompt_i, llm_generations_i in zip(prompts, llm_generations):
print(prompt_i)
print(llm_generations_i)
get_llm_cache().update(prompt_i, llm_string, llm_generations_i)
llm.generate(prompts)
assert llm.generate(prompts) == LLMResult(generations=llm_generations,
llm_output={})
|
@pytest.mark.parametrize('embedding', [ConsistentFakeEmbeddings()])
@pytest.mark.parametrize('prompts, generations', [([random_string()], [[
random_string()]]), ([random_string()], [[random_string(),
random_string()]]), ([random_string()], [[random_string(),
random_string(), random_string()]]), ([random_string(), random_string()
], [[random_string()], [random_string(), random_string()]])], ids=[
'single_prompt_single_generation', 'single_prompt_multiple_generations',
'single_prompt_multiple_generations',
'multiple_prompts_multiple_generations'])
def test_redis_semantic_cache_hit(embedding: Embeddings, prompts: List[str],
generations: List[List[str]]) ->None:
set_llm_cache(RedisSemanticCache(embedding=embedding, redis_url=
REDIS_TEST_URL))
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_generations = [[Generation(text=generation, generation_info=params) for
generation in prompt_i_generations] for prompt_i_generations in
generations]
for prompt_i, llm_generations_i in zip(prompts, llm_generations):
print(prompt_i)
print(llm_generations_i)
get_llm_cache().update(prompt_i, llm_string, llm_generations_i)
llm.generate(prompts)
assert llm.generate(prompts) == LLMResult(generations=llm_generations,
llm_output={})
| null |
_prepare_chat
|
params = self._prepare_params(stop, **kwargs)
history = _parse_chat_history(messages, convert_system_message_to_human=
self.convert_system_message_to_human)
message = history.pop()
chat = self.client.start_chat(history=history)
return params, chat, message
|
def _prepare_chat(self, messages: List[BaseMessage], stop: Optional[List[
str]]=None, **kwargs: Any) ->Tuple[Dict[str, Any], genai.ChatSession,
genai.types.ContentDict]:
params = self._prepare_params(stop, **kwargs)
history = _parse_chat_history(messages, convert_system_message_to_human
=self.convert_system_message_to_human)
message = history.pop()
chat = self.client.start_chat(history=history)
return params, chat, message
| null |
_results_to_docs
|
return [doc for doc, _ in _results_to_docs_and_scores(results)]
|
def _results_to_docs(results: Any) ->List[Document]:
return [doc for doc, _ in _results_to_docs_and_scores(results)]
| null |
detect_file_src_type
|
"""Detect if the file is local or remote."""
if os.path.isfile(file_path):
return 'local'
parsed_url = urlparse(file_path)
if parsed_url.scheme and parsed_url.netloc:
return 'remote'
return 'invalid'
|
def detect_file_src_type(file_path: str) ->str:
"""Detect if the file is local or remote."""
if os.path.isfile(file_path):
return 'local'
parsed_url = urlparse(file_path)
if parsed_url.scheme and parsed_url.netloc:
return 'remote'
return 'invalid'
|
Detect if the file is local or remote.
|
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.'
)
values['model_kwargs'] = extra
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.'
)
values['model_kwargs'] = extra
return values
|
Build extra kwargs from additional params that were passed in.
|
_create_chat_result
|
generations = []
for candidate in response['llm_response']['choices']:
message = ChatJavelinAIGateway._convert_dict_to_message(candidate[
'message'])
message_metadata = candidate.get('metadata', {})
gen = ChatGeneration(message=message, generation_info=dict(
message_metadata))
generations.append(gen)
response_metadata = response.get('metadata', {})
return ChatResult(generations=generations, llm_output=response_metadata)
|
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) ->ChatResult:
generations = []
for candidate in response['llm_response']['choices']:
message = ChatJavelinAIGateway._convert_dict_to_message(candidate[
'message'])
message_metadata = candidate.get('metadata', {})
gen = ChatGeneration(message=message, generation_info=dict(
message_metadata))
generations.append(gen)
response_metadata = response.get('metadata', {})
return ChatResult(generations=generations, llm_output=response_metadata)
| null |
get_query_constructor_prompt
|
"""Create query construction prompt.
Args:
document_contents: The contents of the document to be queried.
attribute_info: A list of AttributeInfo objects describing
the attributes of the document.
examples: Optional list of examples to use for the chain.
allowed_comparators: Sequence of allowed comparators.
allowed_operators: Sequence of allowed operators.
enable_limit: Whether to enable the limit operator. Defaults to False.
schema_prompt: Prompt for describing query schema. Should have string input
variables allowed_comparators and allowed_operators.
**kwargs: Additional named params to pass to FewShotPromptTemplate init.
Returns:
A prompt template that can be used to construct queries.
"""
default_schema_prompt = (SCHEMA_WITH_LIMIT_PROMPT if enable_limit else
DEFAULT_SCHEMA_PROMPT)
schema_prompt = schema_prompt or default_schema_prompt
attribute_str = _format_attribute_info(attribute_info)
schema = schema_prompt.format(allowed_comparators=' | '.join(
allowed_comparators), allowed_operators=' | '.join(allowed_operators))
if examples and isinstance(examples[0], tuple):
examples = construct_examples(examples)
example_prompt = USER_SPECIFIED_EXAMPLE_PROMPT
prefix = PREFIX_WITH_DATA_SOURCE.format(schema=schema, content=
document_contents, attributes=attribute_str)
suffix = SUFFIX_WITHOUT_DATA_SOURCE.format(i=len(examples) + 1)
else:
examples = examples or (EXAMPLES_WITH_LIMIT if enable_limit else
DEFAULT_EXAMPLES)
example_prompt = EXAMPLE_PROMPT
prefix = DEFAULT_PREFIX.format(schema=schema)
suffix = DEFAULT_SUFFIX.format(i=len(examples) + 1, content=
document_contents, attributes=attribute_str)
return FewShotPromptTemplate(examples=list(examples), example_prompt=
example_prompt, input_variables=['query'], suffix=suffix, prefix=prefix,
**kwargs)
|
def get_query_constructor_prompt(document_contents: str, attribute_info:
Sequence[Union[AttributeInfo, dict]], *, examples: Optional[Sequence]=
None, allowed_comparators: Sequence[Comparator]=tuple(Comparator),
allowed_operators: Sequence[Operator]=tuple(Operator), enable_limit:
bool=False, schema_prompt: Optional[BasePromptTemplate]=None, **kwargs: Any
) ->BasePromptTemplate:
"""Create query construction prompt.
Args:
document_contents: The contents of the document to be queried.
attribute_info: A list of AttributeInfo objects describing
the attributes of the document.
examples: Optional list of examples to use for the chain.
allowed_comparators: Sequence of allowed comparators.
allowed_operators: Sequence of allowed operators.
enable_limit: Whether to enable the limit operator. Defaults to False.
schema_prompt: Prompt for describing query schema. Should have string input
variables allowed_comparators and allowed_operators.
**kwargs: Additional named params to pass to FewShotPromptTemplate init.
Returns:
A prompt template that can be used to construct queries.
"""
default_schema_prompt = (SCHEMA_WITH_LIMIT_PROMPT if enable_limit else
DEFAULT_SCHEMA_PROMPT)
schema_prompt = schema_prompt or default_schema_prompt
attribute_str = _format_attribute_info(attribute_info)
schema = schema_prompt.format(allowed_comparators=' | '.join(
allowed_comparators), allowed_operators=' | '.join(allowed_operators))
if examples and isinstance(examples[0], tuple):
examples = construct_examples(examples)
example_prompt = USER_SPECIFIED_EXAMPLE_PROMPT
prefix = PREFIX_WITH_DATA_SOURCE.format(schema=schema, content=
document_contents, attributes=attribute_str)
suffix = SUFFIX_WITHOUT_DATA_SOURCE.format(i=len(examples) + 1)
else:
examples = examples or (EXAMPLES_WITH_LIMIT if enable_limit else
DEFAULT_EXAMPLES)
example_prompt = EXAMPLE_PROMPT
prefix = DEFAULT_PREFIX.format(schema=schema)
suffix = DEFAULT_SUFFIX.format(i=len(examples) + 1, content=
document_contents, attributes=attribute_str)
return FewShotPromptTemplate(examples=list(examples), example_prompt=
example_prompt, input_variables=['query'], suffix=suffix, prefix=
prefix, **kwargs)
|
Create query construction prompt.
Args:
document_contents: The contents of the document to be queried.
attribute_info: A list of AttributeInfo objects describing
the attributes of the document.
examples: Optional list of examples to use for the chain.
allowed_comparators: Sequence of allowed comparators.
allowed_operators: Sequence of allowed operators.
enable_limit: Whether to enable the limit operator. Defaults to False.
schema_prompt: Prompt for describing query schema. Should have string input
variables allowed_comparators and allowed_operators.
**kwargs: Additional named params to pass to FewShotPromptTemplate init.
Returns:
A prompt template that can be used to construct queries.
|
setup_class
|
assert os.getenv('XATA_API_KEY'
), 'XATA_API_KEY environment variable is not set'
assert os.getenv('XATA_DB_URL'), 'XATA_DB_URL environment variable is not set'
|
@classmethod
def setup_class(cls) ->None:
    """Fail fast before any test runs if the Xata credentials are missing.

    Raises:
        AssertionError: If XATA_API_KEY or XATA_DB_URL is unset or empty.
    """
    assert os.getenv('XATA_API_KEY'
        ), 'XATA_API_KEY environment variable is not set'
    assert os.getenv('XATA_DB_URL'
        ), 'XATA_DB_URL environment variable is not set'
| null |
__le__
|
"""Create a Numeric less than or equal to filter expression.
Args:
other (Union[int, float]): The value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") <= 18
"""
self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LE)
return RedisFilterExpression(str(self))
|
def __le__(self, other: Union[int, float]) ->'RedisFilterExpression':
    """Build a numeric less-than-or-equal filter expression.

    Args:
        other (Union[int, float]): Upper bound to compare the field against.

    Example:
        >>> from langchain_community.vectorstores.redis import RedisNum
        >>> filter = RedisNum("age") <= 18
    """
    self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LE)
    rendered = str(self)
    return RedisFilterExpression(rendered)
|
Create a Numeric less than or equal to filter expression.
Args:
other (Union[int, float]): The value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") <= 18
|
test_call_no_result
|
"""Test that non-existent words return proper result."""
output = api_client.run('NO_RESULT_NO_RESULT_NO_RESULT')
assert 'No Merriam-Webster definition was found for query' in output
|
def test_call_no_result(api_client: MerriamWebsterAPIWrapper) ->None:
    """Test that non-existent words return proper result."""
    # A nonsense query should yield the wrapper's "no definition" message
    # rather than raising an exception.
    output = api_client.run('NO_RESULT_NO_RESULT_NO_RESULT')
    assert 'No Merriam-Webster definition was found for query' in output
|
Test that non-existent words return proper result.
|
embed_query
|
"""Return simple embeddings."""
return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
|
def embed_query(self, text: str) ->List[float]:
    """Return a deterministic fake embedding: all ones, then a trailing zero.

    The input text is ignored; the vector length is ADA_TOKEN_COUNT.
    """
    leading = [1.0 for _ in range(ADA_TOKEN_COUNT - 1)]
    return leading + [0.0]
|
Return simple embeddings.
|
similarity_search
|
results = self.similarity_search_with_score(query, k, **kwargs)
return [r[0] for r in results]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Return the top ``k`` documents for ``query``, discarding scores."""
    docs_and_scores = self.similarity_search_with_score(query, k, **kwargs)
    docs = []
    for item in docs_and_scores:
        docs.append(item[0])
    return docs
| null |
on_agent_finish
|
"""Run on agent end."""
print_text(finish.log, color=color or self.color, end='\n')
|
def on_agent_finish(self, finish: AgentFinish, color: Optional[str]=None,
    **kwargs: Any) ->None:
    """Print the agent's final log line when an agent run completes."""
    # Explicit per-call color wins; fall back to the handler's default.
    chosen_color = color or self.color
    print_text(finish.log, color=chosen_color, end='\n')
|
Run on agent end.
|
with_config
|
return RunnableEach(bound=self.bound.with_config(config, **kwargs))
|
def with_config(self, config: Optional[RunnableConfig]=None, **kwargs: Any
    ) ->RunnableEach[Input, Output]:
    """Return a new RunnableEach whose bound runnable carries the config."""
    rebound = self.bound.with_config(config, **kwargs)
    return RunnableEach(bound=rebound)
| null |
_get_google_serper_results_json
|
return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
|
def _get_google_serper_results_json(**kwargs: Any) ->BaseTool:
    """Build a GoogleSerperResults tool, forwarding kwargs to its API wrapper."""
    return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
| null |
test_vald_max_marginal_relevance_search_by_vector
|
"""Test end to end construction and MRR search by vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query('foo')
output = docsearch.max_marginal_relevance_search_by_vector(embedding, k=2,
fetch_k=3)
assert output == [Document(page_content='foo'), Document(page_content='bar')]
|
def test_vald_max_marginal_relevance_search_by_vector() ->None:
    """Test end to end construction and MRR search by vector."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = _vald_from_texts(metadatas=metadatas)
    # Give the Vald server time to finish indexing before querying.
    time.sleep(WAIT_TIME)
    embedding = FakeEmbeddings().embed_query('foo')
    # Fetch 3 candidates, return the 2 most diverse/relevant.
    output = docsearch.max_marginal_relevance_search_by_vector(embedding, k
        =2, fetch_k=3)
    assert output == [Document(page_content='foo'), Document(page_content=
        'bar')]
|
Test end to end construction and MRR search by vector.
|
_llm_type
|
"""Return type of llm."""
return 'huggingface_hub'
|
@property
def _llm_type(self) ->str:
    """Return type of llm: the fixed identifier for the HuggingFace Hub backend."""
    return 'huggingface_hub'
|
Return type of llm.
|
create_csv_agent
|
"""Create csv agent by loading to a dataframe and using pandas agent."""
try:
import pandas as pd
except ImportError:
raise ImportError(
'pandas package not found, please install with `pip install pandas`')
_kwargs = pandas_kwargs or {}
if isinstance(path, (str, IOBase)):
df = pd.read_csv(path, **_kwargs)
elif isinstance(path, list):
df = []
for item in path:
if not isinstance(item, (str, IOBase)):
raise ValueError(
f'Expected str or file-like object, got {type(path)}')
df.append(pd.read_csv(item, **_kwargs))
else:
raise ValueError(
f'Expected str, list, or file-like object, got {type(path)}')
return create_pandas_dataframe_agent(llm, df, **kwargs)
|
def create_csv_agent(llm: BaseLanguageModel, path: Union[str, IOBase, List[
    Union[str, IOBase]]], pandas_kwargs: Optional[dict]=None, **kwargs: Any
    ) ->AgentExecutor:
    """Create csv agent by loading to a dataframe and using pandas agent.

    Args:
        llm: Language model that drives the agent.
        path: A CSV path or file-like object, or a list of them (one
            dataframe is built per element).
        pandas_kwargs: Optional keyword arguments forwarded to
            ``pandas.read_csv``.
        **kwargs: Additional arguments passed through to
            ``create_pandas_dataframe_agent``.

    Returns:
        An ``AgentExecutor`` operating over the loaded dataframe(s).

    Raises:
        ImportError: If pandas is not installed.
        ValueError: If ``path`` (or any element of a list ``path``) is not a
            str or file-like object.
    """
    try:
        import pandas as pd
    except ImportError:
        raise ImportError(
            'pandas package not found, please install with `pip install pandas`'
            )
    _kwargs = pandas_kwargs or {}
    if isinstance(path, (str, IOBase)):
        df = pd.read_csv(path, **_kwargs)
    elif isinstance(path, list):
        df = []
        for item in path:
            if not isinstance(item, (str, IOBase)):
                # Bug fix: report the offending element, not the whole list.
                raise ValueError(
                    f'Expected str or file-like object, got {type(item)}')
            df.append(pd.read_csv(item, **_kwargs))
    else:
        raise ValueError(
            f'Expected str, list, or file-like object, got {type(path)}')
    return create_pandas_dataframe_agent(llm, df, **kwargs)
|
Create csv agent by loading to a dataframe and using pandas agent.
|
validate_environment
|
values['ernie_api_base'] = get_from_dict_or_env(values, 'ernie_api_base',
'ERNIE_API_BASE', 'https://aip.baidubce.com')
values['ernie_client_id'] = get_from_dict_or_env(values, 'ernie_client_id',
'ERNIE_CLIENT_ID')
values['ernie_client_secret'] = get_from_dict_or_env(values,
'ernie_client_secret', 'ERNIE_CLIENT_SECRET')
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve Ernie credentials from ``values`` or environment variables.

    Falls back to the ERNIE_API_BASE / ERNIE_CLIENT_ID / ERNIE_CLIENT_SECRET
    environment variables; the API base defaults to https://aip.baidubce.com.
    """
    values['ernie_api_base'] = get_from_dict_or_env(values,
        'ernie_api_base', 'ERNIE_API_BASE', 'https://aip.baidubce.com')
    values['ernie_client_id'] = get_from_dict_or_env(values,
        'ernie_client_id', 'ERNIE_CLIENT_ID')
    values['ernie_client_secret'] = get_from_dict_or_env(values,
        'ernie_client_secret', 'ERNIE_CLIENT_SECRET')
    return values
| null |
__getattr__
|
"""Get attr name."""
if name in DEPRECATED_CODE:
HERE = Path(__file__).parents[1]
relative_path = as_import_path(Path(__file__).parent, suffix=name,
relative_to=HERE)
old_path = 'langchain.' + relative_path
new_path = 'langchain_experimental.' + relative_path
raise ImportError(
f"""{name} has been moved to langchain experimental. See https://github.com/langchain-ai/langchain/discussions/11680for more information.
Please update your import statement from: `{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist')
|
def __getattr__(name: str) ->Any:
    """Get attr name.

    Module-level ``__getattr__``: attributes that were relocated to
    ``langchain_experimental`` raise an ImportError pointing at the new
    location; anything else raises AttributeError as usual.
    """
    if name in DEPRECATED_CODE:
        HERE = Path(__file__).parents[1]
        relative_path = as_import_path(Path(__file__).parent, suffix=name,
            relative_to=HERE)
        old_path = 'langchain.' + relative_path
        new_path = 'langchain_experimental.' + relative_path
        # Bug fix: a space was missing after the URL, producing
        # "11680for more information" in the rendered message.
        raise ImportError(
            f"""{name} has been moved to langchain experimental. See https://github.com/langchain-ai/langchain/discussions/11680 for more information.
Please update your import statement from: `{old_path}` to `{new_path}`."""
            )
    raise AttributeError(f'{name} does not exist')
|
Get attr name.
|
get_model
|
"""Download model.
From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
convert to new ggml format and return model path.
"""
model_url = (
'https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin'
)
tokenizer_url = (
'https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model'
)
conversion_script = (
'https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py'
)
local_filename = model_url.split('/')[-1]
if not os.path.exists('convert-unversioned-ggml-to-ggml.py'):
urlretrieve(conversion_script, 'convert-unversioned-ggml-to-ggml.py')
if not os.path.exists('tokenizer.model'):
urlretrieve(tokenizer_url, 'tokenizer.model')
if not os.path.exists(local_filename):
urlretrieve(model_url, local_filename)
os.system('python convert-unversioned-ggml-to-ggml.py . tokenizer.model')
return local_filename
|
def get_model() ->str:
    """Download model.
    From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
    convert to new ggml format and return model path.
    """
    # NOTE(review): downloads are not checksum-verified and the conversion
    # step shells out via os.system -- acceptable only as a test fixture.
    model_url = (
        'https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin'
        )
    tokenizer_url = (
        'https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model'
        )
    conversion_script = (
        'https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py'
        )
    # Local file name is the last path segment of the model URL.
    local_filename = model_url.split('/')[-1]
    # Fetch each artifact into the CWD only if it is not already present.
    if not os.path.exists('convert-unversioned-ggml-to-ggml.py'):
        urlretrieve(conversion_script, 'convert-unversioned-ggml-to-ggml.py')
    if not os.path.exists('tokenizer.model'):
        urlretrieve(tokenizer_url, 'tokenizer.model')
    if not os.path.exists(local_filename):
        urlretrieve(model_url, local_filename)
    # Convert the downloaded weights to the newer ggml format in place.
    os.system(
        'python convert-unversioned-ggml-to-ggml.py . tokenizer.model')
    return local_filename
|
Download model.
From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
convert to new ggml format and return model path.
|
on_chain_error
|
self.on_chain_error_common()
|
def on_chain_error(self, *args: Any, **kwargs: Any) ->Any:
    """Forward chain-error callbacks to the shared bookkeeping hook."""
    self.on_chain_error_common()
| null |
test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict
|
str1 = 'test1'
str2 = 'test2'
str3 = 'test3'
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
expected = [{'test_namespace': encoded_str1, 'test_namespace2': str1}, {
'test_namespace': encoded_str2, 'test_namespace2': str2}, {
'test_namespace': encoded_str3, 'test_namespace2': str3}]
assert base.embed([{'test_namespace': base.Embed(str1), 'test_namespace2':
str1}, {'test_namespace': base.Embed(str2), 'test_namespace2': str2}, {
'test_namespace': base.Embed(str3), 'test_namespace2': str3}],
MockEncoder()) == expected
expected_embed_and_keep = [{'test_namespace': str1 + ' ' + encoded_str1,
'test_namespace2': str1}, {'test_namespace': str2 + ' ' + encoded_str2,
'test_namespace2': str2}, {'test_namespace': str3 + ' ' + encoded_str3,
'test_namespace2': str3}]
assert base.embed([{'test_namespace': base.EmbedAndKeep(str1),
'test_namespace2': str1}, {'test_namespace': base.EmbedAndKeep(str2),
'test_namespace2': str2}, {'test_namespace': base.EmbedAndKeep(str3),
'test_namespace2': str3}], MockEncoder()) == expected_embed_and_keep
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict() ->None:
    """Multi-key action dicts: ``Embed`` replaces a value with its encoding,
    ``EmbedAndKeep`` appends the encoding after the original text, and
    non-wrapped keys pass through unchanged."""
    str1 = 'test1'
    str2 = 'test2'
    str3 = 'test3'
    encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
    encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
    encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
    expected = [{'test_namespace': encoded_str1, 'test_namespace2': str1},
        {'test_namespace': encoded_str2, 'test_namespace2': str2}, {
        'test_namespace': encoded_str3, 'test_namespace2': str3}]
    assert base.embed([{'test_namespace': base.Embed(str1),
        'test_namespace2': str1}, {'test_namespace': base.Embed(str2),
        'test_namespace2': str2}, {'test_namespace': base.Embed(str3),
        'test_namespace2': str3}], MockEncoder()) == expected
    expected_embed_and_keep = [{'test_namespace': str1 + ' ' + encoded_str1,
        'test_namespace2': str1}, {'test_namespace': str2 + ' ' +
        encoded_str2, 'test_namespace2': str2}, {'test_namespace': str3 +
        ' ' + encoded_str3, 'test_namespace2': str3}]
    assert base.embed([{'test_namespace': base.EmbedAndKeep(str1),
        'test_namespace2': str1}, {'test_namespace': base.EmbedAndKeep(str2
        ), 'test_namespace2': str2}, {'test_namespace': base.EmbedAndKeep(
        str3), 'test_namespace2': str3}], MockEncoder()
        ) == expected_embed_and_keep
| null |
_import_google_scholar
|
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
return GoogleScholarAPIWrapper
|
def _import_google_scholar() ->Any:
    """Lazily import and return the GoogleScholarAPIWrapper class."""
    from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
    return GoogleScholarAPIWrapper
| null |
on_chain_start
|
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
CallbackManagerForChainRun: The callback manager for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
handle_event(self.handlers, 'on_chain_start', 'ignore_chain', serialized,
inputs, run_id=run_id, parent_run_id=self.parent_run_id, tags=self.tags,
metadata=self.metadata, **kwargs)
return CallbackManagerForChainRun(run_id=run_id, handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers, parent_run_id=self.
parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags,
metadata=self.metadata, inheritable_metadata=self.inheritable_metadata)
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Union[Dict[str,
    Any], Any], run_id: Optional[UUID]=None, **kwargs: Any
    ) ->CallbackManagerForChainRun:
    """Run when chain starts running.
    Args:
        serialized (Dict[str, Any]): The serialized chain.
        inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
        run_id (UUID, optional): The ID of the run. Defaults to None.
    Returns:
        CallbackManagerForChainRun: The callback manager for the chain run.
    """
    # Mint a fresh run id if the caller did not supply one.
    if run_id is None:
        run_id = uuid.uuid4()
    # Dispatch the start event to every handler that does not ignore chains.
    handle_event(self.handlers, 'on_chain_start', 'ignore_chain',
        serialized, inputs, run_id=run_id, parent_run_id=self.parent_run_id,
        tags=self.tags, metadata=self.metadata, **kwargs)
    # Child manager inherits this manager's handlers/tags/metadata so nested
    # runs report into the same pipeline.
    return CallbackManagerForChainRun(run_id=run_id, handlers=self.handlers,
        inheritable_handlers=self.inheritable_handlers, parent_run_id=self.
        parent_run_id, tags=self.tags, inheritable_tags=self.
        inheritable_tags, metadata=self.metadata, inheritable_metadata=self
        .inheritable_metadata)
|
Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
CallbackManagerForChainRun: The callback manager for the chain run.
|
get_referenced_schema
|
"""Get a schema (or nested reference) or err."""
ref_name = ref.ref.split('/')[-1]
schemas = self._schemas_strict
if ref_name not in schemas:
raise ValueError(f'No schema found for {ref_name}')
return schemas[ref_name]
|
def get_referenced_schema(self, ref: Reference) ->Schema:
    """Resolve a ``$ref`` pointer to its schema, raising ValueError if absent."""
    # The schema name is the last segment of the reference path.
    ref_name = ref.ref.split('/')[-1]
    lookup = self._schemas_strict
    if ref_name in lookup:
        return lookup[ref_name]
    raise ValueError(f'No schema found for {ref_name}')
|
Get a schema (or nested reference) or err.
|
query
|
"""Query the vectorstore."""
llm = llm or OpenAI(temperature=0)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQA.from_chain_type(llm, retriever=self.vectorstore.
as_retriever(**retriever_kwargs), **kwargs)
return chain.run(question)
|
def query(self, question: str, llm: Optional[BaseLanguageModel]=None,
    retriever_kwargs: Optional[Dict[str, Any]]=None, **kwargs: Any) ->str:
    """Query the vectorstore.

    Args:
        question: Question to answer over the indexed documents.
        llm: Language model to use; defaults to ``OpenAI(temperature=0)``.
        retriever_kwargs: Keyword arguments for ``as_retriever``.
        **kwargs: Extra arguments forwarded to ``RetrievalQA.from_chain_type``.

    Returns:
        The chain's answer string.
    """
    llm = llm or OpenAI(temperature=0)
    retriever_kwargs = retriever_kwargs or {}
    chain = RetrievalQA.from_chain_type(llm, retriever=self.vectorstore.
        as_retriever(**retriever_kwargs), **kwargs)
    return chain.run(question)
|
Query the vectorstore.
|
on_llm_end
|
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
metadata = self._init_resp()
metadata.update({'action': 'on_llm_end'})
metadata.update(flatten_dict(response.llm_output or {}))
metadata.update(self.get_custom_callback_meta())
output_complexity_metrics = []
output_custom_metrics = []
for prompt_idx, generations in enumerate(response.generations):
for gen_idx, generation in enumerate(generations):
text = generation.text
generation_resp = deepcopy(metadata)
generation_resp.update(flatten_dict(generation.dict()))
complexity_metrics = self._get_complexity_metrics(text)
if complexity_metrics:
output_complexity_metrics.append(complexity_metrics)
generation_resp.update(complexity_metrics)
custom_metrics = self._get_custom_metrics(generation, prompt_idx,
gen_idx)
if custom_metrics:
output_custom_metrics.append(custom_metrics)
generation_resp.update(custom_metrics)
if self.stream_logs:
self._log_stream(text, metadata, self.step)
self.action_records.append(generation_resp)
self.on_llm_end_records.append(generation_resp)
self._log_text_metrics(output_complexity_metrics, step=self.step)
self._log_text_metrics(output_custom_metrics, step=self.step)
|
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
    """Run when LLM ends running."""
    # Track global progress counters for this callback handler.
    self.step += 1
    self.llm_ends += 1
    self.ends += 1
    # Base record shared by every generation in this response.
    metadata = self._init_resp()
    metadata.update({'action': 'on_llm_end'})
    metadata.update(flatten_dict(response.llm_output or {}))
    metadata.update(self.get_custom_callback_meta())
    output_complexity_metrics = []
    output_custom_metrics = []
    # One record per generated candidate, across all prompts.
    for prompt_idx, generations in enumerate(response.generations):
        for gen_idx, generation in enumerate(generations):
            text = generation.text
            # Copy so per-generation fields never leak between records.
            generation_resp = deepcopy(metadata)
            generation_resp.update(flatten_dict(generation.dict()))
            complexity_metrics = self._get_complexity_metrics(text)
            if complexity_metrics:
                output_complexity_metrics.append(complexity_metrics)
                generation_resp.update(complexity_metrics)
            custom_metrics = self._get_custom_metrics(generation,
                prompt_idx, gen_idx)
            if custom_metrics:
                output_custom_metrics.append(custom_metrics)
                generation_resp.update(custom_metrics)
            if self.stream_logs:
                self._log_stream(text, metadata, self.step)
            self.action_records.append(generation_resp)
            self.on_llm_end_records.append(generation_resp)
    # Aggregate text metrics are logged once per response, not per generation.
    self._log_text_metrics(output_complexity_metrics, step=self.step)
    self._log_text_metrics(output_custom_metrics, step=self.step)
|
Run when LLM ends running.
|
__init__
|
super().__init__(**kwargs)
self._validate_uri()
try:
from mlflow.deployments import get_deploy_client
self._client = get_deploy_client(self.target_uri)
except ImportError as e:
raise ImportError(
'Failed to create the client. Please run `pip install mlflow[genai]` to install required dependencies.'
) from e
|
def __init__(self, **kwargs: Any):
    """Validate the target URI and connect an MLflow deployments client.

    Raises:
        ImportError: If ``mlflow[genai]`` is not installed.
    """
    super().__init__(**kwargs)
    self._validate_uri()
    try:
        from mlflow.deployments import get_deploy_client
        self._client = get_deploy_client(self.target_uri)
    except ImportError as e:
        raise ImportError(
            'Failed to create the client. Please run `pip install mlflow[genai]` to install required dependencies.'
            ) from e
| null |
__hash__
|
return id(self)
|
def __hash__(self) ->int:
    """Hash by object identity, so each instance hashes only equal to itself."""
    return id(self)
| null |
fn
|
if url.endswith('/processing/pull'):
return FakePullResponse()
else:
raise Exception('Invalid GET URL')
|
def fn(url: str, **kwargs: Any) ->Any:
    """Fake HTTP GET: only the processing/pull endpoint returns a response."""
    if not url.endswith('/processing/pull'):
        raise Exception('Invalid GET URL')
    return FakePullResponse()
| null |
invoke
|
from langchain_core.beta.runnables.context import config_with_context
config = config_with_context(ensure_config(config), self.steps)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(dumpd(self), input, name=
config.get('run_name') or self.get_name())
try:
for i, step in enumerate(self.steps):
input = step.invoke(input, patch_config(config, callbacks=
run_manager.get_child(f'seq:step:{i + 1}')))
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(input)
return cast(Output, input)
|
def invoke(self, input: Input, config: Optional[RunnableConfig]=None) ->Output:
    """Run each step of the sequence in order, threading the output of one
    step into the next, with callback start/end/error reporting."""
    from langchain_core.beta.runnables.context import config_with_context
    # Fold context setters/getters declared by the steps into the config.
    config = config_with_context(ensure_config(config), self.steps)
    callback_manager = get_callback_manager_for_config(config)
    run_manager = callback_manager.on_chain_start(dumpd(self), input, name=
        config.get('run_name') or self.get_name())
    try:
        # Each step gets a child callback scope named seq:step:<1-based index>.
        for i, step in enumerate(self.steps):
            input = step.invoke(input, patch_config(config, callbacks=
                run_manager.get_child(f'seq:step:{i + 1}')))
    except BaseException as e:
        # Report the failure, then re-raise untouched for the caller.
        run_manager.on_chain_error(e)
        raise
    else:
        run_manager.on_chain_end(input)
        return cast(Output, input)
| null |
add_texts
|
"""
Add a list of texts to the Elasticsearch index.
Args:
texts (Iterable[str]): The texts to add to the index.
metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries
to associate with the texts.
model_id (str, optional): The ID of the model to use for transforming the
texts into vectors.
refresh_indices (bool, optional): Whether to refresh the Elasticsearch
indices after adding the texts.
**kwargs: Arbitrary keyword arguments.
Returns:
A list of IDs for the added texts.
"""
if not self.client.indices.exists(index=self.index_name):
dims = kwargs.get('dims')
if dims is None:
raise ValueError("ElasticKnnSearch requires 'dims' parameter")
similarity = kwargs.get('similarity')
optional_args = {}
if similarity is not None:
optional_args['similarity'] = similarity
mapping = self._default_knn_mapping(dims=dims, **optional_args)
self.create_knn_index(mapping)
embeddings = self.embedding.embed_documents(list(texts))
body: List[Mapping[str, Any]] = []
for text, vector in zip(texts, embeddings):
body.extend([{'index': {'_index': self.index_name}}, {'text': text,
'vector': vector}])
responses = self.client.bulk(operations=body)
ids = [item['index']['_id'] for item in responses['items'] if item['index']
['result'] == 'created']
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[Any,
    Any]]]=None, model_id: Optional[str]=None, refresh_indices: bool=False,
    **kwargs: Any) ->List[str]:
    """
    Add a list of texts to the Elasticsearch index.
    Args:
        texts (Iterable[str]): The texts to add to the index.
        metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries
            to associate with the texts.
        model_id (str, optional): The ID of the model to use for transforming the
            texts into vectors.
        refresh_indices (bool, optional): Whether to refresh the Elasticsearch
            indices after adding the texts.
        **kwargs: Arbitrary keyword arguments.
    Returns:
        A list of IDs for the added texts.

    NOTE(review): ``metadatas`` and ``model_id`` are accepted but never used
    in this body -- metadata is silently dropped; verify against callers.
    """
    # Create the index lazily on first insert; 'dims' is then required.
    if not self.client.indices.exists(index=self.index_name):
        dims = kwargs.get('dims')
        if dims is None:
            raise ValueError("ElasticKnnSearch requires 'dims' parameter")
        similarity = kwargs.get('similarity')
        optional_args = {}
        if similarity is not None:
            optional_args['similarity'] = similarity
        mapping = self._default_knn_mapping(dims=dims, **optional_args)
        self.create_knn_index(mapping)
    embeddings = self.embedding.embed_documents(list(texts))
    # Build the bulk payload: alternating action and document entries.
    body: List[Mapping[str, Any]] = []
    for text, vector in zip(texts, embeddings):
        body.extend([{'index': {'_index': self.index_name}}, {'text': text,
            'vector': vector}])
    responses = self.client.bulk(operations=body)
    # Only report ids for documents that were actually created.
    ids = [item['index']['_id'] for item in responses['items'] if item[
        'index']['result'] == 'created']
    if refresh_indices:
        self.client.indices.refresh(index=self.index_name)
    return ids
|
Add a list of texts to the Elasticsearch index.
Args:
texts (Iterable[str]): The texts to add to the index.
metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries
to associate with the texts.
model_id (str, optional): The ID of the model to use for transforming the
texts into vectors.
refresh_indices (bool, optional): Whether to refresh the Elasticsearch
indices after adding the texts.
**kwargs: Arbitrary keyword arguments.
Returns:
A list of IDs for the added texts.
|
set_debug
|
"""Set a new value for the `debug` global setting."""
try:
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing debug from langchain root module is no longer supported'
)
langchain.debug = value
except ImportError:
pass
global _debug
_debug = value
|
def set_debug(value: bool) ->None:
    """Set a new value for the `debug` global setting.

    Also mirrors the value onto the legacy ``langchain.debug`` attribute
    when the langchain package is importable, suppressing its deprecation
    warning while doing so.
    """
    global _debug
    try:
        import langchain
    except ImportError:
        pass
    else:
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message=
                'Importing debug from langchain root module is no longer supported'
                )
            langchain.debug = value
    _debug = value
|
Set a new value for the `debug` global setting.
|
_identifying_params
|
"""Get the identifying parameters."""
return {'model_id': self.model_id}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters.

    Only ``model_id`` distinguishes instances of this wrapper.
    """
    return {'model_id': self.model_id}
|
Get the identifying parameters.
|
_llm_type
|
"""Return type of llm."""
return 'fake'
|
@property
def _llm_type(self) ->str:
    """Return type of llm: the fixed identifier for this fake test model."""
    return 'fake'
|
Return type of llm.
|
adapt
|
"""Adapts an `ArceeDocument` to a langchain's `Document` object."""
return Document(page_content=arcee_document.source.document, metadata={
'name': arcee_document.source.name, 'source_id': arcee_document.source.
id, 'index': arcee_document.index, 'id': arcee_document.id, 'score':
arcee_document.score})
|
@classmethod
def adapt(cls, arcee_document: ArceeDocument) ->Document:
    """Adapts an `ArceeDocument` to a langchain's `Document` object.

    The source text becomes ``page_content``; the name, source id, index,
    document id and retrieval score are carried over as metadata.
    """
    return Document(page_content=arcee_document.source.document, metadata={
        'name': arcee_document.source.name, 'source_id': arcee_document.
        source.id, 'index': arcee_document.index, 'id': arcee_document.id,
        'score': arcee_document.score})
|
Adapts an `ArceeDocument` to a langchain's `Document` object.
|
_destrip
|
if isinstance(tool_input, dict):
return {k: _destrip(v) for k, v in tool_input.items()}
elif isinstance(tool_input, list):
if isinstance(tool_input[0], str):
if len(tool_input) == 1:
return tool_input[0]
else:
raise ValueError
elif isinstance(tool_input[0], dict):
return [_destrip(v) for v in tool_input]
else:
raise ValueError
else:
raise ValueError
|
def _destrip(tool_input: Any) ->Any:
if isinstance(tool_input, dict):
return {k: _destrip(v) for k, v in tool_input.items()}
elif isinstance(tool_input, list):
if isinstance(tool_input[0], str):
if len(tool_input) == 1:
return tool_input[0]
else:
raise ValueError
elif isinstance(tool_input[0], dict):
return [_destrip(v) for v in tool_input]
else:
raise ValueError
else:
raise ValueError
| null |
_import_file_management_MoveFileTool
|
from langchain_community.tools.file_management import MoveFileTool
return MoveFileTool
|
def _import_file_management_MoveFileTool() ->Any:
    """Lazily import and return the MoveFileTool class."""
    from langchain_community.tools.file_management import MoveFileTool
    return MoveFileTool
| null |
test_load_valid_string_content
|
file_path = '/workspaces/langchain/test.json'
expected_docs = [Document(page_content='value1', metadata={'source':
file_path, 'seq_num': 1}), Document(page_content='value2', metadata={
'source': file_path, 'seq_num': 2})]
mocker.patch('builtins.open', mocker.mock_open())
mocker.patch('pathlib.Path.read_text', return_value=
'[{"text": "value1"}, {"text": "value2"}]')
loader = JSONLoader(file_path=file_path, jq_schema='.[].text', text_content
=True)
result = loader.load()
assert result == expected_docs
|
def test_load_valid_string_content(mocker: MockerFixture) ->None:
    """JSONLoader with text_content=True extracts plain-string values and
    attaches source/seq_num metadata, without touching the real filesystem."""
    file_path = '/workspaces/langchain/test.json'
    expected_docs = [Document(page_content='value1', metadata={'source':
        file_path, 'seq_num': 1}), Document(page_content='value2', metadata
        ={'source': file_path, 'seq_num': 2})]
    # Stub out file access so the loader reads the inline JSON instead.
    mocker.patch('builtins.open', mocker.mock_open())
    mocker.patch('pathlib.Path.read_text', return_value=
        '[{"text": "value1"}, {"text": "value2"}]')
    loader = JSONLoader(file_path=file_path, jq_schema='.[].text',
        text_content=True)
    result = loader.load()
    assert result == expected_docs
| null |
compare
|
"""Compare model outputs on an input text.
If a prompt was provided with starting the laboratory, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"""[1mInput:[0m
{text}
""")
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end='\n')
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end='\n\n')
|
def compare(self, text: str) ->None:
    """Compare model outputs on an input text.
    If a prompt was provided with starting the laboratory, then this text will be
    fed into the prompt. If no prompt was provided, then the input text is the
    entire prompt.
    Args:
        text: input text to run all models on.
    """
    # \x1b[1m / \x1b[0m are ANSI bold on/off escape codes.
    print(f'\x1b[1mInput:\x1b[0m\n{text}\n')
    for i, chain in enumerate(self.chains):
        # Prefer user-supplied names; fall back to the chain's repr.
        if self.names is not None:
            name = self.names[i]
        else:
            name = str(chain)
        print_text(name, end='\n')
        output = chain.run(text)
        # Each chain gets a stable color keyed by its index.
        print_text(output, color=self.chain_colors[str(i)], end='\n\n')
|
Compare model outputs on an input text.
If a prompt was provided with starting the laboratory, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
|
get_documents
|
"""Search documents by their ids or metadata values.
Args:
ids: List of ids of documents to retrieve from the vectorstore.
filter: Filter on metadata properties, e.g.
{
"str_property": "foo",
"int_property": 123
}
Returns:
List of ids from adding the texts into the vectorstore.
"""
if ids and len(ids) > 0:
from google.cloud import bigquery
job_config = bigquery.QueryJobConfig(query_parameters=[bigquery.
ArrayQueryParameter('ids', 'STRING', ids)])
id_expr = f'{self.doc_id_field} IN UNNEST(@ids)'
else:
job_config = None
id_expr = 'TRUE'
if filter:
filter_expressions = []
for i in filter.items():
if isinstance(i[1], float):
expr = (
f"ABS(CAST(JSON_VALUE(`{self.metadata_field}`,'$.{i[0]}') AS FLOAT64) - {i[1]}) <= {sys.float_info.epsilon}"
)
else:
val = str(i[1]).replace('"', '\\"')
expr = (
f'JSON_VALUE(`{self.metadata_field}`,\'$.{i[0]}\') = "{val}"')
filter_expressions.append(expr)
filter_expression_str = ' AND '.join(filter_expressions)
where_filter_expr = f' AND ({filter_expression_str})'
else:
where_filter_expr = ''
job = self.bq_client.query(
f"""
SELECT * FROM `{self.full_table_id}` WHERE {id_expr}
{where_filter_expr}
"""
, job_config=job_config)
docs: List[Document] = []
for row in job:
metadata = None
if self.metadata_field:
metadata = row[self.metadata_field]
if metadata:
metadata = json.loads(metadata)
else:
metadata = {}
metadata['__id'] = row[self.doc_id_field]
doc = Document(page_content=row[self.content_field], metadata=metadata)
docs.append(doc)
return docs
|
def get_documents(self, ids: Optional[List[str]]=None, filter: Optional[
    Dict[str, Any]]=None) ->List[Document]:
    """Search documents by their ids or metadata values.
    Args:
        ids: List of ids of documents to retrieve from the vectorstore.
        filter: Filter on metadata properties, e.g.
                    {
                        "str_property": "foo",
                        "int_property": 123
                    }
    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    # Ids are passed as a proper query parameter (safe); see note on filter
    # keys below.
    if ids and len(ids) > 0:
        from google.cloud import bigquery
        job_config = bigquery.QueryJobConfig(query_parameters=[bigquery.
            ArrayQueryParameter('ids', 'STRING', ids)])
        id_expr = f'{self.doc_id_field} IN UNNEST(@ids)'
    else:
        job_config = None
        id_expr = 'TRUE'
    if filter:
        filter_expressions = []
        for i in filter.items():
            if isinstance(i[1], float):
                # Float equality is done within machine epsilon.
                expr = (
                    f"ABS(CAST(JSON_VALUE(`{self.metadata_field}`,'$.{i[0]}') AS FLOAT64) - {i[1]}) <= {sys.float_info.epsilon}"
                    )
            else:
                val = str(i[1]).replace('"', '\\"')
                expr = (
                    f'JSON_VALUE(`{self.metadata_field}`,\'$.{i[0]}\') = "{val}"'
                    )
            # NOTE(review): filter keys are interpolated into the SQL text
            # unescaped -- only pass trusted metadata property names here.
            filter_expressions.append(expr)
        filter_expression_str = ' AND '.join(filter_expressions)
        where_filter_expr = f' AND ({filter_expression_str})'
    else:
        where_filter_expr = ''
    job = self.bq_client.query(
        f"""
                    SELECT * FROM `{self.full_table_id}` WHERE {id_expr}
                    {where_filter_expr}
                    """
        , job_config=job_config)
    docs: List[Document] = []
    for row in job:
        metadata = None
        if self.metadata_field:
            metadata = row[self.metadata_field]
        if metadata:
            metadata = json.loads(metadata)
        else:
            metadata = {}
        # Surface the document id to callers via reserved metadata key.
        metadata['__id'] = row[self.doc_id_field]
        doc = Document(page_content=row[self.content_field], metadata=metadata)
        docs.append(doc)
    return docs
|
Search documents by their ids or metadata values.
Args:
ids: List of ids of documents to retrieve from the vectorstore.
filter: Filter on metadata properties, e.g.
{
"str_property": "foo",
"int_property": 123
}
Returns:
    List of documents matching the given ids or metadata filter.
|
create_index_if_not_exist
|
index = self.client.tvs_get_index(self.index_name)
if index is not None:
logger.info('Index already exists')
return False
self.client.tvs_create_index(self.index_name, dim, distance_type,
index_type, data_type, **kwargs)
return True
|
def create_index_if_not_exist(self, dim: int, distance_type: str,
    index_type: str, data_type: str, **kwargs: Any) ->bool:
    """Create the vector index unless one with this name already exists.

    Args:
        dim: Dimensionality of the vectors to index.
        distance_type: Distance metric identifier for the index.
        index_type: Index algorithm/type identifier.
        data_type: Element data type of the stored vectors.
        **kwargs: Extra options forwarded to the index-creation call.

    Returns:
        ``True`` if a new index was created, ``False`` if one already existed.
    """
    if self.client.tvs_get_index(self.index_name) is not None:
        # Keep the existing index untouched.
        logger.info('Index already exists')
        return False
    self.client.tvs_create_index(self.index_name, dim, distance_type,
        index_type, data_type, **kwargs)
    return True
| null |
__init__
|
super().__init__(model_name=model_name, gpu=gpu, **kwargs)
|
def __init__(self, model_name: str='paraphrase-multilingual-mpnet-base-v2',
    gpu: bool=False, **kwargs: Any) ->None:
    """Initialize with a model name and GPU flag, forwarding to the parent.

    Args:
        model_name: Name of the embedding model to load (the default is
            presumably a sentence-transformers model — confirm with the
            parent class).
        gpu: Whether to run the model on GPU.
        **kwargs: Forwarded unchanged to the parent initializer.
    """
    super().__init__(model_name=model_name, gpu=gpu, **kwargs)
| null |
_llm_type
|
"""Return type of chat_model."""
return 'baidu-qianfan-chat'
|
@property
def _llm_type(self) ->str:
"""Return type of chat_model."""
return 'baidu-qianfan-chat'
|
Return type of chat_model.
|
parse
|
"""Parse the output text and extract the score and reasoning.
Args:
text (str): The output text to parse.
Returns:
TrajectoryEval: A named tuple containing the normalized score and reasoning.
Raises:
OutputParserException: If the score is not found in the output text or
if the LLM's score is not a digit in the range 1-5.
"""
if 'Score:' not in text:
raise OutputParserException(
f'Could not find score in model eval output: {text}')
reasoning, score_str = text.split('Score: ', maxsplit=1)
reasoning, score_str = reasoning.strip(), score_str.strip()
_score = re.search('(\\d+(\\.\\d+)?)', score_str)
if _score is None or '.' in _score.group(1):
raise OutputParserException(
f'Score is not an integer digit in the range 1-5: {text}')
score = int(_score.group(1))
if not 1 <= score <= 5:
raise OutputParserException(
f'Score is not a digit in the range 1-5: {text}')
normalized_score = (score - 1) / 4
return TrajectoryEval(score=normalized_score, reasoning=reasoning)
|
def parse(self, text: str) ->TrajectoryEval:
    """Parse the output text and extract the score and reasoning.

    Args:
        text (str): The output text to parse.

    Returns:
        TrajectoryEval: A named tuple containing the normalized score and
            reasoning.

    Raises:
        OutputParserException: If the score is not found in the output text
            or if the LLM's score is not a digit in the range 1-5.
    """
    if 'Score:' not in text:
        raise OutputParserException(
            f'Could not find score in model eval output: {text}')
    # Everything before the first "Score: " marker is the reasoning.
    rationale, raw_score = (part.strip() for part in text.split('Score: ',
        maxsplit=1))
    match = re.search('(\\d+(\\.\\d+)?)', raw_score)
    # Reject non-numeric and fractional scores alike.
    if match is None or '.' in match.group(1):
        raise OutputParserException(
            f'Score is not an integer digit in the range 1-5: {text}')
    parsed_score = int(match.group(1))
    if not 1 <= parsed_score <= 5:
        raise OutputParserException(
            f'Score is not a digit in the range 1-5: {text}')
    # Map the 1-5 integer score onto the [0.0, 1.0] range.
    return TrajectoryEval(score=(parsed_score - 1) / 4, reasoning=rationale)
|
Parse the output text and extract the score and reasoning.
Args:
text (str): The output text to parse.
Returns:
TrajectoryEval: A named tuple containing the normalized score and reasoning.
Raises:
OutputParserException: If the score is not found in the output text or
if the LLM's score is not a digit in the range 1-5.
|
__init__
|
self.generator = create_data_generation_chain(llm)
self.sentence_preferences = sentence_preferences or {}
|
def __init__(self, llm: BaseLanguageModel, sentence_preferences: Optional[
    Dict[str, Any]]=None):
    """Set up the data-generation chain and optional sentence preferences.

    Args:
        llm: Language model used to build the generation chain.
        sentence_preferences: Optional preferences mapping; any falsy value
            is replaced with a fresh empty dict.
    """
    self.sentence_preferences = sentence_preferences or {}
    self.generator = create_data_generation_chain(llm)
| null |
test_update_with_delayed_score_with_auto_validator_throws
|
llm, PROMPT = setup()
auto_val_llm = FakeListChatModel(responses=['3'])
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm),
feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=
False, model=MockEncoder()))
actions = ['0', '1', '2']
response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain.
ToSelectFrom(actions))
assert response['response'] == 'hey'
selection_metadata = response['selection_metadata']
assert selection_metadata.selected.score == 3.0
with pytest.raises(RuntimeError):
chain.update_with_delayed_score(chain_response=response, score=100)
|
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_update_with_delayed_score_with_auto_validator_throws() ->None:
    """Delayed scores must be rejected once an auto selection scorer ran."""
    llm, PROMPT = setup()
    scorer_llm = FakeListChatModel(responses=['3'])
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
        selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm),
        feature_embedder=pick_best_chain.PickBestFeatureEmbedder(
        auto_embed=False, model=MockEncoder()))
    candidate_actions = ['0', '1', '2']
    response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain
        .ToSelectFrom(candidate_actions))
    assert response['response'] == 'hey'
    assert response['selection_metadata'].selected.score == 3.0
    # The auto validator already assigned a score, so a delayed external
    # score must fail loudly rather than silently overwrite it.
    with pytest.raises(RuntimeError):
        chain.update_with_delayed_score(chain_response=response, score=100)
| null |
_process_response
|
"""Process API response"""
if response.ok:
data = response.json()
return data['result']['response']
else:
raise ValueError(f'Request failed with status {response.status_code}')
|
def _process_response(self, response: requests.Response) ->str:
"""Process API response"""
if response.ok:
data = response.json()
return data['result']['response']
else:
raise ValueError(f'Request failed with status {response.status_code}')
|
Process API response
|
parse_output
|
text = message.content
if '</tool>' in text:
tool, tool_input = text.split('</tool>')
_tool = tool.split('<tool>')[1]
_tool_input = tool_input.split('<tool_input>')[1]
if '</tool_input>' in _tool_input:
_tool_input = _tool_input.split('</tool_input>')[0]
return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
else:
return AgentFinish(return_values={'output': text}, log=text)
|
def parse_output(message):
    """Parse a model message into an agent action or a final answer.

    Looks for a ``<tool>...</tool><tool_input>...</tool_input>`` pattern in
    the message content. When present, returns an ``AgentAction`` with the
    extracted tool name and input; otherwise the whole text is treated as
    the final answer and an ``AgentFinish`` is returned.
    """
    text = message.content
    if '</tool>' in text:
        # Split only on the FIRST closing tag: without maxsplit, a second
        # '</tool>' occurring later in the text makes split() return three
        # parts and the two-name unpacking raises ValueError.
        tool, tool_input = text.split('</tool>', 1)
        _tool = tool.split('<tool>')[1]
        _tool_input = tool_input.split('<tool_input>')[1]
        if '</tool_input>' in _tool_input:
            _tool_input = _tool_input.split('</tool_input>')[0]
        return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
    else:
        return AgentFinish(return_values={'output': text}, log=text)
| null |
_len_check_if_sized
|
if isinstance(x, Sized) and isinstance(y, Sized) and len(x) != len(y):
raise ValueError(
f'{x_name} and {y_name} expected to be equal length but len({x_name})={len(x)} and len({y_name})={len(y)}'
)
return
|
def _len_check_if_sized(x: Any, y: Any, x_name: str, y_name: str) ->None:
if isinstance(x, Sized) and isinstance(y, Sized) and len(x) != len(y):
raise ValueError(
f'{x_name} and {y_name} expected to be equal length but len({x_name})={len(x)} and len({y_name})={len(y)}'
)
return
| null |
_import_anyscale
|
from langchain_community.llms.anyscale import Anyscale
return Anyscale
|
def _import_anyscale() ->Any:
    """Lazily import and return the ``Anyscale`` LLM class."""
    from langchain_community.llms.anyscale import Anyscale as anyscale_cls
    return anyscale_cls
| null |
astradb_cache
|
cache = AstraDBCache(collection_name='lc_integration_test_cache', token=os.
environ['ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE'))
yield cache
cache.astra_db.delete_collection('lc_integration_test_cache')
|
@pytest.fixture(scope='module')
def astradb_cache() ->Iterator[AstraDBCache]:
    """Module-scoped AstraDB cache; drops its collection on teardown."""
    token = os.environ['ASTRA_DB_APPLICATION_TOKEN']
    endpoint = os.environ['ASTRA_DB_API_ENDPOINT']
    keyspace = os.environ.get('ASTRA_DB_KEYSPACE')
    cache = AstraDBCache(collection_name='lc_integration_test_cache',
        token=token, api_endpoint=endpoint, namespace=keyspace)
    yield cache
    # Teardown: remove the integration-test collection.
    cache.astra_db.delete_collection('lc_integration_test_cache')
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's ``__all__`` must match the expected public API exactly."""
    assert set(__all__) == set(EXPECTED_ALL)
| null |
stream
|
result = self.invoke(input, config)
for i_c, c in enumerate(result):
if self.sleep is not None:
time.sleep(self.sleep)
if (self.error_on_chunk_number is not None and i_c == self.
error_on_chunk_number):
raise Exception('Fake error')
yield c
|
def stream(self, input: LanguageModelInput, config: Optional[RunnableConfig
    ]=None, *, stop: Optional[List[str]]=None, **kwargs: Any) ->Iterator[str]:
    """Yield the ``invoke()`` result one chunk at a time.

    Optionally sleeps between chunks and raises a fake error once the
    configured chunk index is reached (used to simulate streaming
    failures).
    """
    chunks = self.invoke(input, config)
    for index, chunk in enumerate(chunks):
        if self.sleep is not None:
            time.sleep(self.sleep)
        if self.error_on_chunk_number is not None:
            if index == self.error_on_chunk_number:
                raise Exception('Fake error')
        yield chunk
| null |
parse
|
cleaned = text.strip()
finished = self.finished_value in cleaned
return cleaned.replace(self.finished_value, ''), finished
|
def parse(self, text: str) ->Tuple[str, bool]:
    """Strip the text and remove the finished marker.

    Returns:
        A ``(cleaned_text, finished)`` pair, where ``finished`` reports
        whether ``self.finished_value`` was present in the stripped text.
    """
    stripped = text.strip()
    is_finished = self.finished_value in stripped
    cleaned = stripped.replace(self.finished_value, '')
    return cleaned, is_finished
| null |
load
|
"""Load documents."""
try:
from azure.storage.blob import ContainerClient
except ImportError as exc:
raise ImportError(
'Could not import azure storage blob python package. Please install it with `pip install azure-storage-blob`.'
) from exc
container = ContainerClient.from_connection_string(conn_str=self.conn_str,
container_name=self.container)
docs = []
blob_list = container.list_blobs(name_starts_with=self.prefix)
for blob in blob_list:
loader = AzureBlobStorageFileLoader(self.conn_str, self.container, blob
.name)
docs.extend(loader.load())
return docs
|
def load(self) ->List[Document]:
    """Load every blob under the configured prefix as a document list."""
    try:
        from azure.storage.blob import ContainerClient
    except ImportError as exc:
        raise ImportError(
            'Could not import azure storage blob python package. Please install it with `pip install azure-storage-blob`.'
            ) from exc
    container_client = ContainerClient.from_connection_string(conn_str=
        self.conn_str, container_name=self.container)
    documents: List[Document] = []
    # Delegate per-blob loading to the single-file loader.
    for blob in container_client.list_blobs(name_starts_with=self.prefix):
        file_loader = AzureBlobStorageFileLoader(self.conn_str, self.
            container, blob.name)
        documents.extend(file_loader.load())
    return documents
|
Load documents.
|
_import_mlflow_chat
|
from langchain_community.chat_models.mlflow import ChatMlflow
return ChatMlflow
|
def _import_mlflow_chat() ->Any:
    """Lazily import and return the ``ChatMlflow`` chat model class."""
    from langchain_community.chat_models.mlflow import ChatMlflow as chat_cls
    return chat_cls
| null |
now
|
return datetime.datetime(dt_value.year, dt_value.month, dt_value.day,
dt_value.hour, dt_value.minute, dt_value.second, dt_value.microsecond,
dt_value.tzinfo)
|
@classmethod
def now(cls):
    # Rebuild a datetime from the captured ``dt_value`` so every call
    # returns the same frozen timestamp.
    # NOTE(review): ``dt_value`` is a free variable resolved from an
    # enclosing scope not visible here — confirm it is defined wherever
    # this class is constructed.
    return datetime.datetime(dt_value.year, dt_value.month, dt_value.day,
        dt_value.hour, dt_value.minute, dt_value.second, dt_value.
        microsecond, dt_value.tzinfo)
| null |
_import_twilio
|
from langchain_community.utilities.twilio import TwilioAPIWrapper
return TwilioAPIWrapper
|
def _import_twilio() ->Any:
    """Lazily import and return the ``TwilioAPIWrapper`` utility class."""
    from langchain_community.utilities.twilio import TwilioAPIWrapper as wrapper_cls
    return wrapper_cls
| null |
_format_chat_history
|
buffer: List[BaseMessage] = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
|
def _format_chat_history(chat_history: List[Tuple[str, str]]) ->List[
    BaseMessage]:
    """Convert ``(human, ai)`` string pairs into alternating chat messages."""
    messages: List[BaseMessage] = []
    for human_text, ai_text in chat_history:
        messages.extend([HumanMessage(content=human_text), AIMessage(
            content=ai_text)])
    return messages
| null |
__init__
|
"""Initialize the TomlLoader with a source file or directory."""
self.source = Path(source)
|
def __init__(self, source: Union[str, Path]):
    """Initialize the TomlLoader with a source file or directory.

    Args:
        source: Path (or path string) to a TOML file or a directory;
            normalized to a ``pathlib.Path``.
    """
    self.source = Path(source)
|
Initialize the TomlLoader with a source file or directory.
|
output_keys
|
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
return _output_keys
|
@property
def output_keys(self) ->List[str]:
    """Output keys.

    :meta private:
    """
    return [self.output_key]
|
Output keys.
:meta private:
|
test_mosaicml_extra_kwargs
|
llm = MosaicML(model_kwargs={'max_new_tokens': 1})
assert llm.model_kwargs == {'max_new_tokens': 1}
output = llm('Say foo:')
assert isinstance(output, str)
assert len(output.split()) <= 1
|
def test_mosaicml_extra_kwargs() ->None:
    """model_kwargs should be stored on the LLM and respected by generation."""
    llm = MosaicML(model_kwargs={'max_new_tokens': 1})
    assert llm.model_kwargs == {'max_new_tokens': 1}
    result = llm('Say foo:')
    assert isinstance(result, str)
    # At most one new token should yield at most one whitespace word.
    assert len(result.split()) <= 1
| null |
test_weighted_reciprocal_rank
|
doc1 = Document(page_content='1')
doc2 = Document(page_content='2')
dummy_retriever = BM25Retriever.from_texts(['1', '2'])
ensemble_retriever = EnsembleRetriever(retrievers=[dummy_retriever,
dummy_retriever], weights=[0.4, 0.5], c=0)
result = ensemble_retriever.weighted_reciprocal_rank([[doc1, doc2], [doc2,
doc1]])
assert result[0].page_content == '2'
assert result[1].page_content == '1'
ensemble_retriever.weights = [0.5, 0.4]
result = ensemble_retriever.weighted_reciprocal_rank([[doc1, doc2], [doc2,
doc1]])
assert result[0].page_content == '1'
assert result[1].page_content == '2'
|
@pytest.mark.requires('rank_bm25')
def test_weighted_reciprocal_rank() ->None:
    """The higher-weighted retriever's ranking should win the fused order."""
    doc1 = Document(page_content='1')
    doc2 = Document(page_content='2')
    dummy_retriever = BM25Retriever.from_texts(['1', '2'])
    ensemble_retriever = EnsembleRetriever(retrievers=[dummy_retriever,
        dummy_retriever], weights=[0.4, 0.5], c=0)
    fused = ensemble_retriever.weighted_reciprocal_rank([[doc1, doc2], [
        doc2, doc1]])
    assert fused[0].page_content == '2'
    assert fused[1].page_content == '1'
    # Swapping the weights must swap the fused ordering.
    ensemble_retriever.weights = [0.5, 0.4]
    fused = ensemble_retriever.weighted_reciprocal_rank([[doc1, doc2], [
        doc2, doc1]])
    assert fused[0].page_content == '1'
    assert fused[1].page_content == '2'
| null |
visit_operation
|
if operation.operator in UNARY_OPERATORS and len(operation.arguments) == 1:
operator = self._format_func(operation.operator)
return operator + '(' + operation.arguments[0].accept(self) + ')'
elif operation.operator in UNARY_OPERATORS:
raise ValueError(
f'"{operation.operator.value}" can have only one argument in Milvus')
else:
args = [arg.accept(self) for arg in operation.arguments]
operator = self._format_func(operation.operator)
return '(' + (' ' + operator + ' ').join(args) + ')'
|
def visit_operation(self, operation: Operation) ->str:
    """Render an operation node as a Milvus boolean expression string.

    Raises:
        ValueError: If a unary operator is given more than one argument.
    """
    if operation.operator in UNARY_OPERATORS:
        if len(operation.arguments) != 1:
            raise ValueError(
                f'"{operation.operator.value}" can have only one argument in Milvus'
                )
        prefix = self._format_func(operation.operator)
        return prefix + '(' + operation.arguments[0].accept(self) + ')'
    rendered = [argument.accept(self) for argument in operation.arguments]
    joiner = ' ' + self._format_func(operation.operator) + ' '
    return '(' + joiner.join(rendered) + ')'
| null |
test_custom_template_tool_response
|
intermediate_steps = [(AgentAction(tool='Tool1', tool_input='input1', log=
'Log1'), 'Observation1')]
template_tool_response = 'Response: {observation}'
expected_result = [AIMessage(content='Log1'), HumanMessage(content=
'Response: Observation1')]
assert format_log_to_messages(intermediate_steps, template_tool_response=
template_tool_response) == expected_result
|
def test_custom_template_tool_response() ->None:
    """A custom template_tool_response should format the observation text."""
    steps = [(AgentAction(tool='Tool1', tool_input='input1', log='Log1'),
        'Observation1')]
    expected = [AIMessage(content='Log1'), HumanMessage(content=
        'Response: Observation1')]
    actual = format_log_to_messages(steps, template_tool_response=
        'Response: {observation}')
    assert actual == expected
| null |
test_infinity_emb_sync
|
mocker.patch('requests.post', side_effect=mocked_requests_post)
embedder = InfinityEmbeddings(model=_MODEL_ID, infinity_api_url=
_INFINITY_BASE_URL)
assert embedder.infinity_api_url == _INFINITY_BASE_URL
assert embedder.model == _MODEL_ID
response = embedder.embed_documents(_DOCUMENTS)
want = [[1.0, 0.0, 0.0], [1.0, 0.0, 0.1], [0.0, 0.9, 0.0], [1.0, 0.0, 0.1],
[0.0, 0.9, 0.1]]
assert response == want
|
def test_infinity_emb_sync(mocker: MockerFixture) ->None:
    """embed_documents should return the mocked embedding vectors."""
    mocker.patch('requests.post', side_effect=mocked_requests_post)
    embedder = InfinityEmbeddings(model=_MODEL_ID, infinity_api_url=
        _INFINITY_BASE_URL)
    assert embedder.infinity_api_url == _INFINITY_BASE_URL
    assert embedder.model == _MODEL_ID
    expected = [[1.0, 0.0, 0.0], [1.0, 0.0, 0.1], [0.0, 0.9, 0.0], [1.0,
        0.0, 0.1], [0.0, 0.9, 0.1]]
    assert embedder.embed_documents(_DOCUMENTS) == expected
| null |
test__convert_dict_to_message_function
|
message = FunctionMessage(name='foo', content='bar')
with pytest.raises(ValueError) as e:
_convert_message_to_dict(message)
assert 'Got unknown type' in str(e)
|
def test__convert_dict_to_message_function() ->None:
    """Converting a FunctionMessage must raise with an informative error."""
    message = FunctionMessage(name='foo', content='bar')
    with pytest.raises(ValueError) as e:
        _convert_message_to_dict(message)
    # Inspect the exception itself (e.value), not the ExceptionInfo
    # wrapper, so the assertion really checks the error message.
    assert 'Got unknown type' in str(e.value)
| null |
_get_default_params
|
return {'language': 'en', 'format': 'json'}
|
def _get_default_params() ->dict:
return {'language': 'en', 'format': 'json'}
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.