method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
index
|
"""Create the mapping for the Elasticsearch index."""
return {'mappings': {'properties': {vector_query_field: {'type':
'dense_vector', 'dims': dims_length, 'index': False}}}}
|
def index(self, dims_length: Union[int, None], vector_query_field: str,
    similarity: Union[DistanceStrategy, None]) ->Dict:
    """Build the Elasticsearch index mapping for a dense-vector field.

    The vector field is stored but not ANN-indexed (``'index': False``);
    ``similarity`` is accepted for interface compatibility but unused here.
    """
    vector_field_spec = {
        'type': 'dense_vector',
        'dims': dims_length,
        'index': False,
    }
    return {'mappings': {'properties': {vector_query_field: vector_field_spec}}}
|
Create the mapping for the Elasticsearch index.
|
test_runnable_branch_init
|
"""Verify that runnable branch gets initialized properly."""
add = RunnableLambda(lambda x: x + 1)
condition = RunnableLambda(lambda x: x > 0)
with pytest.raises(ValueError):
RunnableBranch((condition, add))
with pytest.raises(ValueError):
RunnableBranch(condition)
|
def test_runnable_branch_init() ->None:
    """Verify that runnable branch gets initialized properly."""
    increment = RunnableLambda(lambda value: value + 1)
    is_positive = RunnableLambda(lambda value: value > 0)
    # A branch needs at least one (condition, runnable) pair plus a
    # default; each of these argument shapes is invalid.
    for bad_args in (((is_positive, increment),), (is_positive,)):
        with pytest.raises(ValueError):
            RunnableBranch(*bad_args)
|
Verify that runnable branch gets initialized properly.
|
validate_environment
|
"""Validate that python package exists in environment."""
try:
from duckduckgo_search import DDGS
except ImportError:
raise ImportError(
'Could not import duckduckgo-search python package. Please install it with `pip install -U duckduckgo-search`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that python package exists in environment."""
    # Presence check only; the imported module is not used directly.
    try:
        import duckduckgo_search  # noqa: F401
    except ImportError:
        raise ImportError(
            'Could not import duckduckgo-search python package. Please install it with `pip install -U duckduckgo-search`.'
        )
    return values
|
Validate that python package exists in environment.
|
get_format_instructions
|
"""Instructions on how the LLM output should be formatted."""
raise NotImplementedError
|
def get_format_instructions(self) ->str:
    """Instructions on how the LLM output should be formatted.

    Abstract; concrete parsers must override this method.
    """
    raise NotImplementedError
|
Instructions on how the LLM output should be formatted.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['eas_service_url'] = get_from_dict_or_env(values, 'eas_service_url',
'EAS_SERVICE_URL')
values['eas_service_token'] = get_from_dict_or_env(values,
'eas_service_token', 'EAS_SERVICE_TOKEN')
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    # Resolve each required setting from `values` or the environment.
    for field_name, env_var in (('eas_service_url', 'EAS_SERVICE_URL'), (
        'eas_service_token', 'EAS_SERVICE_TOKEN')):
        values[field_name] = get_from_dict_or_env(values, field_name, env_var)
    return values
|
Validate that api key and python package exists in environment.
|
test__convert_delta_to_message_assistant
|
delta = {'role': 'assistant', 'content': 'foo'}
result = _convert_delta_to_message_chunk(delta, AIMessageChunk)
expected_output = AIMessageChunk(content='foo')
assert result == expected_output
|
def test__convert_delta_to_message_assistant() ->None:
    """An assistant-role delta converts to an ``AIMessageChunk``."""
    assistant_delta = {'role': 'assistant', 'content': 'foo'}
    converted = _convert_delta_to_message_chunk(assistant_delta, AIMessageChunk)
    assert converted == AIMessageChunk(content='foo')
| null |
similarity_search
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter,
fetch_k=fetch_k, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search(self, query: str, k: int=4, filter: Optional[Dict[str,
    Any]]=None, fetch_k: int=20, **kwargs: Any) ->List[Document]:
    """Return docs most similar to query.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
        fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
            Defaults to 20.
    Returns:
        List of Documents most similar to the query.
    """
    # Delegate to the scored variant and drop the scores.
    scored_docs = self.similarity_search_with_score(query, k, filter=filter,
        fetch_k=fetch_k, **kwargs)
    return [document for document, _score in scored_docs]
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['dashscope_api_key'] = get_from_dict_or_env(values,
'dashscope_api_key', 'DASHSCOPE_API_KEY')
try:
import dashscope
except ImportError:
raise ImportError(
'Could not import dashscope python package. Please install it with `pip install dashscope`.'
)
try:
values['client'] = dashscope.Generation
except AttributeError:
raise ValueError(
'`dashscope` has no `Generation` attribute, this is likely due to an old version of the dashscope package. Try upgrading it with `pip install --upgrade dashscope`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    values['dashscope_api_key'] = get_from_dict_or_env(values,
        'dashscope_api_key', 'DASHSCOPE_API_KEY')
    try:
        import dashscope
    except ImportError:
        raise ImportError(
            'Could not import dashscope python package. Please install it with `pip install dashscope`.'
        )
    # Older dashscope releases lack the Generation entry point.
    try:
        values['client'] = getattr(dashscope, 'Generation')
    except AttributeError:
        raise ValueError(
            '`dashscope` has no `Generation` attribute, this is likely due to an old version of the dashscope package. Try upgrading it with `pip install --upgrade dashscope`.'
        )
    return values
|
Validate that api key and python package exists in environment.
|
max_marginal_relevance_search_by_vector
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
k: Number of Documents to return. Defaults to 4.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
idxs = self.index.get_nns_by_vector(embedding, fetch_k, search_k=-1,
include_distances=False)
embeddings = [self.index.get_item_vector(i) for i in idxs]
mmr_selected = maximal_marginal_relevance(np.array([embedding], dtype=np.
float32), embeddings, k=k, lambda_mult=lambda_mult)
selected_indices = [idxs[i] for i in mmr_selected if i != -1]
docs = []
for i in selected_indices:
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f'Could not find document for id {_id}, got {doc}')
docs.append(doc)
return docs
|
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
    int=4, fetch_k: int=20, lambda_mult: float=0.5, **kwargs: Any) ->List[
    Document]:
    """Return docs selected using the maximal marginal relevance.

    Maximal marginal relevance optimizes for similarity to query AND
    diversity among selected documents.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    # Over-fetch fetch_k nearest neighbours, then let MMR re-rank them.
    candidate_ids = self.index.get_nns_by_vector(embedding, fetch_k,
        search_k=-1, include_distances=False)
    candidate_vectors = [self.index.get_item_vector(idx) for idx in
        candidate_ids]
    query_matrix = np.array([embedding], dtype=np.float32)
    mmr_picks = maximal_marginal_relevance(query_matrix, candidate_vectors,
        k=k, lambda_mult=lambda_mult)
    chosen_ids = [candidate_ids[pick] for pick in mmr_picks if pick != -1]
    documents = []
    for chosen_id in chosen_ids:
        _id = self.index_to_docstore_id[chosen_id]
        doc = self.docstore.search(_id)
        if not isinstance(doc, Document):
            raise ValueError(f'Could not find document for id {_id}, got {doc}'
                )
        documents.append(doc)
    return documents
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
k: Number of Documents to return. Defaults to 4.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
|
_retrieve_ref
|
components = path.split('/')
if components[0] != '#':
raise ValueError(
'ref paths are expected to be URI fragments, meaning they should start with #.'
)
out = schema
for component in components[1:]:
if component.isdigit():
out = out[int(component)]
else:
out = out[component]
return deepcopy(out)
|
def _retrieve_ref(path: str, schema: dict) ->dict:
components = path.split('/')
if components[0] != '#':
raise ValueError(
'ref paths are expected to be URI fragments, meaning they should start with #.'
)
out = schema
for component in components[1:]:
if component.isdigit():
out = out[int(component)]
else:
out = out[component]
return deepcopy(out)
| null |
test_pairwise_embedding_distance_eval_chain_chebyshev_distance
|
"""Test the chebyshev distance."""
from scipy.spatial.distance import chebyshev
pairwise_embedding_distance_eval_chain.distance_metric = (EmbeddingDistance
.CHEBYSHEV)
result = pairwise_embedding_distance_eval_chain._compute_score(np.array(
vectors))
expected = chebyshev(*vectors)
assert np.isclose(result, expected)
|
@pytest.mark.requires('scipy')
def test_pairwise_embedding_distance_eval_chain_chebyshev_distance(
    pairwise_embedding_distance_eval_chain:
    PairwiseEmbeddingDistanceEvalChain, vectors: Tuple[np.ndarray, np.ndarray]
    ) ->None:
    """Test the chebyshev distance."""
    from scipy.spatial.distance import chebyshev
    chain = pairwise_embedding_distance_eval_chain
    chain.distance_metric = EmbeddingDistance.CHEBYSHEV
    # Chain's score must agree with scipy's reference implementation.
    score = chain._compute_score(np.array(vectors))
    assert np.isclose(score, chebyshev(*vectors))
|
Test the chebyshev distance.
|
batch
|
if not inputs:
return []
config = get_config_list(config, len(inputs))
max_concurrency = config[0].get('max_concurrency')
if max_concurrency is None:
try:
llm_result = self.generate_prompt([self._convert_input(input) for
input in inputs], callbacks=[c.get('callbacks') for c in config
], tags=[c.get('tags') for c in config], metadata=[c.get(
'metadata') for c in config], run_name=[c.get('run_name') for c in
config], **kwargs)
return [g[0].text for g in llm_result.generations]
except Exception as e:
if return_exceptions:
return cast(List[str], [e for _ in inputs])
else:
raise e
else:
batches = [inputs[i:i + max_concurrency] for i in range(0, len(inputs),
max_concurrency)]
config = [{**c, 'max_concurrency': None} for c in config]
return [output for i, batch in enumerate(batches) for output in self.
batch(batch, config=config[i * max_concurrency:(i + 1) *
max_concurrency], return_exceptions=return_exceptions, **kwargs)]
|
def batch(self, inputs: List[LanguageModelInput], config: Optional[Union[
    RunnableConfig, List[RunnableConfig]]]=None, *, return_exceptions: bool
    =False, **kwargs: Any) ->List[str]:
    """Generate one text completion per input, honoring `max_concurrency`.

    Args:
        inputs: Prompts, in any accepted language-model input form.
        config: A single config applied to all inputs, or one per input.
        return_exceptions: If True, a failure is returned (the same
            exception object filling every slot) instead of raised.

    Returns:
        One generated string per input, in input order.
    """
    if not inputs:
        return []
    # Normalize to exactly one RunnableConfig per input.
    config = get_config_list(config, len(inputs))
    max_concurrency = config[0].get('max_concurrency')
    if max_concurrency is None:
        # No concurrency cap: issue the whole batch as a single
        # generate_prompt call, fanning out per-input callback options.
        try:
            llm_result = self.generate_prompt([self._convert_input(input) for
                input in inputs], callbacks=[c.get('callbacks') for c in
                config], tags=[c.get('tags') for c in config], metadata=[c.
                get('metadata') for c in config], run_name=[c.get(
                'run_name') for c in config], **kwargs)
            # First generation's text for each prompt.
            return [g[0].text for g in llm_result.generations]
        except Exception as e:
            if return_exceptions:
                # One failed call poisons the whole batch, so every slot
                # carries the same exception object.
                return cast(List[str], [e for _ in inputs])
            else:
                raise e
    else:
        # Concurrency cap set: split inputs into chunks of max_concurrency
        # and recurse with the cap cleared so each chunk runs unchunked.
        batches = [inputs[i:i + max_concurrency] for i in range(0, len(
            inputs), max_concurrency)]
        config = [{**c, 'max_concurrency': None} for c in config]
        return [output for i, batch in enumerate(batches) for output in
            self.batch(batch, config=config[i * max_concurrency:(i + 1) *
            max_concurrency], return_exceptions=return_exceptions, **kwargs)]
| null |
test_news_loader
|
loader = NewsURLLoader([get_random_news_url()])
docs = loader.load()
assert docs[0] is not None
assert hasattr(docs[0], 'page_content')
assert hasattr(docs[0], 'metadata')
metadata = docs[0].metadata
assert 'title' in metadata
assert 'link' in metadata
assert 'authors' in metadata
assert 'language' in metadata
assert 'description' in metadata
assert 'publish_date' in metadata
|
def test_news_loader() ->None:
    """Loading a live news URL yields a document with expected metadata."""
    docs = NewsURLLoader([get_random_news_url()]).load()
    article = docs[0]
    assert article is not None
    assert hasattr(article, 'page_content')
    assert hasattr(article, 'metadata')
    for expected_key in ('title', 'link', 'authors', 'language',
        'description', 'publish_date'):
        assert expected_key in article.metadata
| null |
test_litellm_streaming_callback
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = ChatLiteLLM(model='test', streaming=True, callback_manager=
callback_manager, verbose=True)
message = HumanMessage(content='Write me a sentence with 10 words.')
chat([message])
assert callback_handler.llm_streams > 1
|
def test_litellm_streaming_callback() ->None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    handler = FakeCallbackHandler()
    chat = ChatLiteLLM(model='test', streaming=True, callback_manager=
        CallbackManager([handler]), verbose=True)
    chat([HumanMessage(content='Write me a sentence with 10 words.')])
    # Streaming a multi-token sentence must fire more than one token event.
    assert handler.llm_streams > 1
|
Test that streaming correctly invokes on_llm_new_token callback.
|
_import_deeplake
|
from langchain_community.vectorstores.deeplake import DeepLake
return DeepLake
|
def _import_deeplake() ->Any:
from langchain_community.vectorstores.deeplake import DeepLake
return DeepLake
| null |
load_llm
|
"""Load LLM from file."""
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
if file_path.suffix == '.json':
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == '.yaml':
with open(file_path, 'r') as f:
config = yaml.safe_load(f)
else:
raise ValueError('File type must be json or yaml')
return load_llm_from_config(config)
|
def load_llm(file: Union[str, Path]) ->BaseLLM:
    """Load LLM from file.

    Args:
        file: Path to a serialized LLM config, either JSON (``.json``)
            or YAML (``.yaml`` / ``.yml``).

    Returns:
        The LLM deserialized from the file's configuration.

    Raises:
        ValueError: If the file extension is not a supported format.
    """
    file_path = Path(file) if isinstance(file, str) else file
    if file_path.suffix == '.json':
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix in ('.yaml', '.yml'):
        # Accept both common YAML extensions; safe_load avoids arbitrary
        # object construction from untrusted files.
        with open(file_path, 'r') as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError('File type must be json or yaml')
    return load_llm_from_config(config)
|
Load LLM from file.
|
test_serialize_openai_llm
|
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello',
callbacks=[LangChainTracer()])
llm.temperature = 0.7
assert dumps(llm, pretty=True) == snapshot
|
@pytest.mark.requires('openai')
def test_serialize_openai_llm(snapshot: Any) ->None:
    """Serialized OpenAI LLM matches the stored snapshot."""
    llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello',
        callbacks=[LangChainTracer()])
    # Mutate after construction so the dump reflects the updated value.
    llm.temperature = 0.7
    assert dumps(llm, pretty=True) == snapshot
| null |
create_extraction_chain
|
"""Creates a chain that extracts information from a passage.
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
prompt: The prompt to use for extraction.
verbose: Whether to run in verbose mode. In verbose mode, some intermediate
logs will be printed to the console. Defaults to the global `verbose` value,
accessible via `langchain.globals.get_verbose()`.
Returns:
Chain that can be used to extract information from a passage.
"""
function = _get_extraction_function(schema)
extraction_prompt = prompt or ChatPromptTemplate.from_template(
_EXTRACTION_TEMPLATE)
output_parser = JsonKeyOutputFunctionsParser(key_name='info')
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(llm=llm, prompt=extraction_prompt, llm_kwargs=llm_kwargs,
output_parser=output_parser, tags=tags, verbose=verbose)
return chain
|
def create_extraction_chain(schema: dict, llm: BaseLanguageModel, prompt:
    Optional[BasePromptTemplate]=None, tags: Optional[List[str]]=None,
    verbose: bool=False) ->Chain:
    """Creates a chain that extracts information from a passage.

    Args:
        schema: The schema of the entities to extract.
        llm: The language model to use.
        prompt: The prompt to use for extraction.
        verbose: Whether to run in verbose mode. In verbose mode, some intermediate
            logs will be printed to the console. Defaults to the global `verbose` value,
            accessible via `langchain.globals.get_verbose()`.
    Returns:
        Chain that can be used to extract information from a passage.
    """
    extraction_function = _get_extraction_function(schema)
    extraction_prompt = prompt or ChatPromptTemplate.from_template(
        _EXTRACTION_TEMPLATE)
    # Parse only the 'info' key out of the function-call response.
    return LLMChain(llm=llm, prompt=extraction_prompt, llm_kwargs=
        get_llm_kwargs(extraction_function), output_parser=
        JsonKeyOutputFunctionsParser(key_name='info'), tags=tags, verbose=
        verbose)
|
Creates a chain that extracts information from a passage.
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
prompt: The prompt to use for extraction.
verbose: Whether to run in verbose mode. In verbose mode, some intermediate
logs will be printed to the console. Defaults to the global `verbose` value,
accessible via `langchain.globals.get_verbose()`.
Returns:
Chain that can be used to extract information from a passage.
|
_get_root_referenced_request_body
|
"""Get the root request Body or err."""
from openapi_pydantic import Reference
request_body = self._get_referenced_request_body(ref)
while isinstance(request_body, Reference):
request_body = self._get_referenced_request_body(request_body)
return request_body
|
def _get_root_referenced_request_body(self, ref: Reference) ->Optional[
    RequestBody]:
    """Get the root request Body or err."""
    from openapi_pydantic import Reference
    # Follow chained references until a concrete request body is reached.
    body = self._get_referenced_request_body(ref)
    while isinstance(body, Reference):
        body = self._get_referenced_request_body(body)
    return body
|
Get the root request Body or err.
|
_format_chat_history
|
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
|
def _format_chat_history(chat_history: List[Tuple[str, str]]) ->List:
    """Flatten (human, ai) exchange pairs into an alternating message list."""
    messages = []
    for human_text, ai_text in chat_history:
        messages.extend([HumanMessage(content=human_text), AIMessage(
            content=ai_text)])
    return messages
| null |
get_num_tokens_anthropic
|
"""Get the number of tokens in a string of text."""
client = _get_anthropic_client()
return client.count_tokens(text=text)
|
def get_num_tokens_anthropic(text: str) ->int:
    """Get the number of tokens in a string of text."""
    # Delegate counting to the shared Anthropic client instance.
    return _get_anthropic_client().count_tokens(text=text)
|
Get the number of tokens in a string of text.
|
add_texts
|
"""
Add text to the collection.
Args:
texts: An iterable that contains the text to be added.
metadatas: An optional list of dictionaries,
each dictionary contains the metadata associated with a text.
timeout: Optional timeout, in seconds.
batch_size: The number of texts inserted in each batch, defaults to 1000.
**kwargs: Other optional parameters.
Returns:
A list of strings, containing the unique identifiers of the inserted texts.
Note:
If the collection has not yet been created,
this method will create a new collection.
"""
from transwarp_hippo_api.hippo_client import HippoTable
if not texts or all(t == '' for t in texts):
logger.debug('Nothing to insert, skipping.')
return []
texts = list(texts)
logger.debug(f'[add_texts] texts: {texts}')
try:
embeddings = self.embedding_func.embed_documents(texts)
except NotImplementedError:
embeddings = [self.embedding_func.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug('Nothing to insert, skipping.')
return []
logger.debug(f'[add_texts] len_embeddings:{len(embeddings)}')
if not isinstance(self.col, HippoTable):
self._get_env(embeddings, metadatas)
insert_dict: Dict[str, list] = {self._text_field: texts, self._vector_field:
embeddings}
logger.debug(f'[add_texts] metadatas:{metadatas}')
logger.debug(f'[add_texts] fields:{self.fields}')
if metadatas is not None:
for d in metadatas:
for key, value in d.items():
if key in self.fields:
insert_dict.setdefault(key, []).append(value)
logger.debug(insert_dict[self._text_field])
vectors: list = insert_dict[self._vector_field]
total_count = len(vectors)
if 'pk' in self.fields:
self.fields.remove('pk')
logger.debug(f'[add_texts] total_count:{total_count}')
for i in range(0, total_count, batch_size):
end = min(i + batch_size, total_count)
insert_list = [insert_dict[x][i:end] for x in self.fields]
try:
res = self.col.insert_rows(insert_list)
logger.info(f'05 [add_texts] insert {res}')
except Exception as e:
logger.error('Failed to insert batch starting at entity: %s/%s', i,
total_count)
raise e
return ['']
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, timeout: Optional[int]=None, batch_size: int=1000, **kwargs: Any
    ) ->List[str]:
    """
    Add text to the collection.
    Args:
        texts: An iterable that contains the text to be added.
        metadatas: An optional list of dictionaries,
            each dictionary contains the metadata associated with a text.
        timeout: Optional timeout, in seconds.
        batch_size: The number of texts inserted in each batch, defaults to 1000.
        **kwargs: Other optional parameters.
    Returns:
        A list of strings, containing the unique identifiers of the inserted texts.
    Note:
        If the collection has not yet been created,
        this method will create a new collection.
    """
    from transwarp_hippo_api.hippo_client import HippoTable
    if not texts or all(t == '' for t in texts):
        logger.debug('Nothing to insert, skipping.')
        return []
    texts = list(texts)
    logger.debug(f'[add_texts] texts: {texts}')
    try:
        embeddings = self.embedding_func.embed_documents(texts)
    except NotImplementedError:
        # Fall back to per-text query embedding when the embedding
        # implementation does not support batch document embedding.
        embeddings = [self.embedding_func.embed_query(x) for x in texts]
    if len(embeddings) == 0:
        logger.debug('Nothing to insert, skipping.')
        return []
    logger.debug(f'[add_texts] len_embeddings:{len(embeddings)}')
    # Lazily create/bind the backing table on first insert.
    if not isinstance(self.col, HippoTable):
        self._get_env(embeddings, metadatas)
    insert_dict: Dict[str, list] = {self._text_field: texts, self.
        _vector_field: embeddings}
    logger.debug(f'[add_texts] metadatas:{metadatas}')
    logger.debug(f'[add_texts] fields:{self.fields}')
    if metadatas is not None:
        # Only metadata keys that match declared collection fields are kept.
        for d in metadatas:
            for key, value in d.items():
                if key in self.fields:
                    insert_dict.setdefault(key, []).append(value)
    logger.debug(insert_dict[self._text_field])
    vectors: list = insert_dict[self._vector_field]
    total_count = len(vectors)
    # 'pk' is not supplied client-side, so drop it from the insert fields.
    if 'pk' in self.fields:
        self.fields.remove('pk')
    logger.debug(f'[add_texts] total_count:{total_count}')
    for i in range(0, total_count, batch_size):
        end = min(i + batch_size, total_count)
        # Column-major batch: one value list per field, sliced to [i, end).
        insert_list = [insert_dict[x][i:end] for x in self.fields]
        try:
            res = self.col.insert_rows(insert_list)
            logger.info(f'05 [add_texts] insert {res}')
        except Exception as e:
            logger.error('Failed to insert batch starting at entity: %s/%s',
                i, total_count)
            raise e
    # NOTE(review): returns a single empty string rather than the inserted
    # ids — presumably ids are server-assigned; confirm against callers.
    return ['']
|
Add text to the collection.
Args:
texts: An iterable that contains the text to be added.
metadatas: An optional list of dictionaries,
each dictionary contains the metadata associated with a text.
timeout: Optional timeout, in seconds.
batch_size: The number of texts inserted in each batch, defaults to 1000.
**kwargs: Other optional parameters.
Returns:
A list of strings, containing the unique identifiers of the inserted texts.
Note:
If the collection has not yet been created,
this method will create a new collection.
|
get_executor_for_config
|
"""Get an executor for a config.
Args:
config (RunnableConfig): The config.
Yields:
Generator[Executor, None, None]: The executor.
"""
config = config or {}
with ContextThreadPoolExecutor(max_workers=config.get('max_concurrency')
) as executor:
yield executor
|
@contextmanager
def get_executor_for_config(config: Optional[RunnableConfig]) ->Generator[
    Executor, None, None]:
    """Get an executor for a config.

    Args:
        config (RunnableConfig): The config.
    Yields:
        Generator[Executor, None, None]: The executor.
    """
    effective_config = config or {}
    # max_concurrency of None lets the executor pick its default worker count.
    pool = ContextThreadPoolExecutor(max_workers=effective_config.get(
        'max_concurrency'))
    with pool:
        yield pool
|
Get an executor for a config.
Args:
config (RunnableConfig): The config.
Yields:
Generator[Executor, None, None]: The executor.
|
cache_embeddings
|
"""Create a cache backed embeddings."""
store = InMemoryStore()
embeddings = MockEmbeddings()
return CacheBackedEmbeddings.from_bytes_store(embeddings, store, namespace=
'test_namespace')
|
@pytest.fixture
def cache_embeddings() ->CacheBackedEmbeddings:
    """Create a cache backed embeddings."""
    # Fresh in-memory store per test keeps cache state isolated.
    return CacheBackedEmbeddings.from_bytes_store(MockEmbeddings(),
        InMemoryStore(), namespace='test_namespace')
|
Create a cache backed embeddings.
|
_chain_type
|
return 'chat-vector-db'
|
@property
def _chain_type(self) ->str:
return 'chat-vector-db'
| null |
__init__
|
"""Initialize with Lance DB connection"""
try:
import lancedb
except ImportError:
raise ImportError(
'Could not import lancedb python package. Please install it with `pip install lancedb`.'
)
if not isinstance(connection, lancedb.db.LanceTable):
raise ValueError(
'connection should be an instance of lancedb.db.LanceTable, ',
f'got {type(connection)}')
self._connection = connection
self._embedding = embedding
self._vector_key = vector_key
self._id_key = id_key
self._text_key = text_key
|
def __init__(self, connection: Any, embedding: Embeddings, vector_key:
    Optional[str]='vector', id_key: Optional[str]='id', text_key: Optional[
    str]='text'):
    """Initialize with Lance DB connection.

    Args:
        connection: An open ``lancedb.db.LanceTable`` to store vectors in.
        embedding: Embedding function used to embed texts and queries.
        vector_key: Table column holding the vector data.
        id_key: Table column holding the row id.
        text_key: Table column holding the raw text.

    Raises:
        ImportError: If the ``lancedb`` package is not installed.
        ValueError: If ``connection`` is not a ``LanceTable``.
    """
    try:
        import lancedb
    except ImportError:
        raise ImportError(
            'Could not import lancedb python package. Please install it with `pip install lancedb`.'
        )
    if not isinstance(connection, lancedb.db.LanceTable):
        # Bug fix: the message was previously passed as two positional
        # arguments (trailing comma), so ValueError carried a tuple
        # instead of one readable string.
        raise ValueError(
            'connection should be an instance of lancedb.db.LanceTable, '
            f'got {type(connection)}')
    self._connection = connection
    self._embedding = embedding
    self._vector_key = vector_key
    self._id_key = id_key
    self._text_key = text_key
|
Initialize with Lance DB connection
|
on_retry_common
|
self.retries += 1
|
def on_retry_common(self) ->None:
    """Record one retry occurrence on this handler."""
    self.retries = self.retries + 1
| null |
lookup_with_id
|
"""
Look up based on prompt and llm_string.
If there are hits, return (document_id, cached_entry) for the top hit
"""
prompt_embedding: List[float] = self._get_embedding(text=prompt)
llm_string_hash = _hash(llm_string)
hit = self.collection.vector_find_one(vector=prompt_embedding, filter={
'llm_string_hash': llm_string_hash}, fields=['body_blob', '_id'],
include_similarity=True)
if hit is None or hit['$similarity'] < self.similarity_threshold:
return None
else:
generations = _loads_generations(hit['body_blob'])
if generations is not None:
return hit['_id'], generations
else:
return None
|
def lookup_with_id(self, prompt: str, llm_string: str) ->Optional[Tuple[str,
    RETURN_VAL_TYPE]]:
    """
    Look up based on prompt and llm_string.
    If there are hits, return (document_id, cached_entry) for the top hit
    """
    embedding_vector: List[float] = self._get_embedding(text=prompt)
    # Restrict the vector search to entries produced by the same LLM config.
    hit = self.collection.vector_find_one(vector=embedding_vector, filter={
        'llm_string_hash': _hash(llm_string)}, fields=['body_blob', '_id'],
        include_similarity=True)
    if hit is None or hit['$similarity'] < self.similarity_threshold:
        return None
    cached_generations = _loads_generations(hit['body_blob'])
    if cached_generations is None:
        return None
    return hit['_id'], cached_generations
|
Look up based on prompt and llm_string.
If there are hits, return (document_id, cached_entry) for the top hit
|
on_llm_start
|
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
prompt as an LLM run.
"""
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
handle_event(self.handlers, 'on_llm_start', 'ignore_llm', serialized, [
prompt], run_id=run_id_, parent_run_id=self.parent_run_id, tags=
self.tags, metadata=self.metadata, **kwargs)
managers.append(CallbackManagerForLLMRun(run_id=run_id_, handlers=self.
handlers, inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=
self.inheritable_tags, metadata=self.metadata, inheritable_metadata
=self.inheritable_metadata))
return managers
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
    kwargs: Any) ->List[CallbackManagerForLLMRun]:
    """Run when LLM starts running.
    Args:
        serialized (Dict[str, Any]): The serialized LLM.
        prompts (List[str]): The list of prompts.
    Returns:
        List[CallbackManagerForLLMRun]: A callback manager for each
        prompt as an LLM run.
    """
    managers = []
    for prompt in prompts:
        # Each prompt is treated as its own run with a freshly generated id.
        run_id_ = uuid.uuid4()
        handle_event(self.handlers, 'on_llm_start', 'ignore_llm',
            serialized, [prompt], run_id=run_id_, parent_run_id=self.
            parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs)
        # The per-run manager inherits this manager's handlers, tags and
        # metadata so child events propagate consistently.
        managers.append(CallbackManagerForLLMRun(run_id=run_id_, handlers=
            self.handlers, inheritable_handlers=self.inheritable_handlers,
            parent_run_id=self.parent_run_id, tags=self.tags,
            inheritable_tags=self.inheritable_tags, metadata=self.metadata,
            inheritable_metadata=self.inheritable_metadata))
    return managers
|
Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
prompt as an LLM run.
|
_call
|
return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **
kwargs)
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Delegate generation to the client with the bound pipeline reference."""
    # run_manager is accepted for interface compatibility but unused here.
    return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop,
        **kwargs)
| null |
output_parser
|
"""Output parser for testing."""
return BashOutputParser()
|
@pytest.fixture
def output_parser() ->BashOutputParser:
    """Provide a fresh ``BashOutputParser`` for each test."""
    return BashOutputParser()
|
Output parser for testing.
|
_call_after_predict_before_llm
|
import numpy as np
prob_sum = sum(prob for _, prob in prediction)
probabilities = [(prob / prob_sum) for _, prob in prediction]
sampled_index = np.random.choice(len(prediction), p=probabilities)
sampled_ap = prediction[sampled_index]
sampled_action = sampled_ap[0]
sampled_prob = sampled_ap[1]
selected = PickBestSelected(index=sampled_action, probability=sampled_prob)
event.selected = selected
key, value = next(iter(event.to_select_from.items()))
next_chain_inputs = inputs.copy()
next_chain_inputs.update({key: value[event.selected.index]})
return next_chain_inputs, event
|
def _call_after_predict_before_llm(self, inputs: Dict[str, Any], event:
    PickBestEvent, prediction: List[Tuple[int, float]]) ->Tuple[Dict[str,
    Any], PickBestEvent]:
    """Sample one action from the prediction and inject it into the inputs.

    The (action, probability) pairs are normalized into a probability
    distribution, one index is drawn at random, the selection is recorded
    on ``event``, and the chosen candidate replaces the to-select-from
    value in a copy of ``inputs``.
    """
    import numpy as np
    total_mass = sum(p for _, p in prediction)
    distribution = [p / total_mass for _, p in prediction]
    drawn = np.random.choice(len(prediction), p=distribution)
    action_index, action_prob = prediction[drawn]
    event.selected = PickBestSelected(index=action_index, probability=
        action_prob)
    # Only the first to-select-from entry drives the substitution.
    selection_key, candidates = next(iter(event.to_select_from.items()))
    updated_inputs = dict(inputs)
    updated_inputs[selection_key] = candidates[event.selected.index]
    return updated_inputs, event
| null |
test_llamacpp_streaming
|
"""Test streaming tokens from LlamaCpp."""
model_path = get_model()
llm = LlamaCpp(model_path=model_path, max_tokens=10)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["'"])
stream_results_string = ''
assert isinstance(generator, Generator)
for chunk in generator:
assert not isinstance(chunk, str)
assert isinstance(chunk['choices'][0]['text'], str)
stream_results_string += chunk['choices'][0]['text']
assert len(stream_results_string.strip()) > 1
|
def test_llamacpp_streaming() ->None:
    """Test streaming tokens from LlamaCpp."""
    # Integration test: requires a local model file resolved by get_model().
    model_path = get_model()
    llm = LlamaCpp(model_path=model_path, max_tokens=10)
    # Stop on the closing quote so the stream terminates quickly.
    generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop
        =["'"])
    stream_results_string = ''
    assert isinstance(generator, Generator)
    for chunk in generator:
        # Chunks are raw llama.cpp response dicts, not plain strings.
        assert not isinstance(chunk, str)
        assert isinstance(chunk['choices'][0]['text'], str)
        stream_results_string += chunk['choices'][0]['text']
    # Some non-whitespace text should have been produced overall.
    assert len(stream_results_string.strip()) > 1
|
Test streaming tokens from LlamaCpp.
|
from_llm
|
"""Create a `LabeledCriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
LabeledCriteriaEvalChain
An instance of the `LabeledCriteriaEvalChain` class.
Examples
--------
>>> from langchain_community.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
"""
prompt = cls._resolve_prompt(prompt)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = '\n'.join(f'{k}: {v}' for k, v in criteria_.items())
prompt_ = prompt.partial(criteria=criteria_str)
return cls(llm=llm, prompt=prompt_, criterion_name='-'.join(criteria_), **
kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, criteria: Optional[CRITERIA_TYPE]
    =None, *, prompt: Optional[BasePromptTemplate]=None, **kwargs: Any
    ) ->CriteriaEvalChain:
    """Create a criteria evaluation chain from a language model.

    Parameters
    ----------
    llm : BaseLanguageModel
        The language model used to grade submissions.
    criteria : CRITERIA_TYPE, optional
        The criteria to evaluate the runs against. May be a mapping of
        criterion name to description, a single default criterion name,
        or a single ``ConstitutionalPrinciple`` instance. Defaults to
        ``None`` (helpfulness).
    prompt : Optional[BasePromptTemplate], optional
        Prompt template used to generate prompts; a default is used when
        not provided.
    **kwargs : Any
        Additional keyword arguments passed to the chain constructor.

    Returns
    -------
    CriteriaEvalChain
        The configured evaluation chain.

    Examples
    --------
    >>> from langchain_community.llms import OpenAI
    >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
    >>> llm = OpenAI()
    >>> criteria = {
            "hallucination": (
                "Does this submission contain information"
                " not present in the input or reference?"
            ),
        }
    >>> chain = LabeledCriteriaEvalChain.from_llm(
            llm=llm,
            criteria=criteria,
        )
    """
    resolved_prompt = cls._resolve_prompt(prompt)
    resolved_criteria = cls.resolve_criteria(criteria)
    # Render the criteria mapping as "name: description" lines and bake
    # them into the prompt up front.
    rendered = '\n'.join(f'{name}: {description}' for name, description in
        resolved_criteria.items())
    return cls(llm=llm, prompt=resolved_prompt.partial(criteria=rendered),
        criterion_name='-'.join(resolved_criteria), **kwargs)
|
Create a `LabeledCriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns
-------
LabeledCriteriaEvalChain
An instance of the `LabeledCriteriaEvalChain` class.
Examples
--------
>>> from langchain_community.llms import OpenAI
>>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
>>> llm = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=llm,
criteria=criteria,
)
|
add_texts
|
"""Insert documents into the instance..
Args:
texts: The text segments to be inserted into the vector storage,
should not be empty.
metadatas: Metadata information.
Returns:
id_list: List of document IDs.
"""
def _upsert(push_doc_list: List[Dict]) ->List[str]:
if push_doc_list is None or len(push_doc_list) == 0:
return []
try:
push_request = models.PushDocumentsRequest(self.options_headers,
push_doc_list)
push_response = self.ha3_engine_client.push_documents(self.config.
opt_table_name, field_name_map['id'], push_request)
json_response = json.loads(push_response.body)
if json_response['status'] == 'OK':
return [push_doc['fields'][field_name_map['id']] for push_doc in
push_doc_list]
return []
except Exception as e:
logger.error(
f'add doc to endpoint:{self.config.endpoint} instance_id:{self.config.instance_id} failed.'
, e)
raise e
from alibabacloud_ha3engine_vector import models
id_list = [sha1(t.encode('utf-8')).hexdigest() for t in texts]
embeddings = self.embedding.embed_documents(list(texts))
metadatas = metadatas or [{} for _ in texts]
field_name_map = self.config.field_name_mapping
add_doc_list = []
text_list = list(texts)
for idx, doc_id in enumerate(id_list):
embedding = embeddings[idx] if idx < len(embeddings) else None
metadata = metadatas[idx] if idx < len(metadatas) else None
text = text_list[idx] if idx < len(text_list) else None
add_doc: Dict[str, Any] = dict()
add_doc_fields: Dict[str, Any] = dict()
add_doc_fields.__setitem__(field_name_map['id'], doc_id)
add_doc_fields.__setitem__(field_name_map['document'], text)
if embedding is not None:
add_doc_fields.__setitem__(field_name_map['embedding'], self.config
.embedding_field_separator.join(str(unit) for unit in embedding))
if metadata is not None:
for md_key, md_value in metadata.items():
add_doc_fields.__setitem__(field_name_map[md_key].split(',')[0],
md_value)
add_doc.__setitem__('fields', add_doc_fields)
add_doc.__setitem__('cmd', 'add')
add_doc_list.append(add_doc)
return _upsert(add_doc_list)
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, **kwargs: Any) ->List[str]:
    """Insert documents into the instance.

    Args:
        texts: The text segments to be inserted into the vector storage,
            should not be empty. May be any iterable (it is materialized
            exactly once).
        metadatas: Optional per-text metadata dicts, aligned with ``texts``.

    Returns:
        id_list: List of document IDs (SHA-1 hex digests of the texts).
    """

    def _upsert(push_doc_list: List[Dict]) ->List[str]:
        # Push prepared documents to the engine; return their ids on 'OK'.
        if not push_doc_list:
            return []
        try:
            push_request = models.PushDocumentsRequest(self.
                options_headers, push_doc_list)
            push_response = self.ha3_engine_client.push_documents(self.
                config.opt_table_name, field_name_map['id'], push_request)
            json_response = json.loads(push_response.body)
            if json_response['status'] == 'OK':
                return [push_doc['fields'][field_name_map['id']] for
                    push_doc in push_doc_list]
            return []
        except Exception as e:
            logger.error(
                f'add doc to endpoint:{self.config.endpoint} instance_id:{self.config.instance_id} failed.'
                , e)
            raise e
    from alibabacloud_ha3engine_vector import models
    # Materialize once: ``texts`` may be a one-shot iterator, and the
    # previous code iterated it twice (ids first, then ``list(texts)``),
    # which exhausted the iterator and silently dropped every document.
    text_list = list(texts)
    id_list = [sha1(t.encode('utf-8')).hexdigest() for t in text_list]
    embeddings = self.embedding.embed_documents(text_list)
    metadatas = metadatas or [{} for _ in text_list]
    field_name_map = self.config.field_name_mapping
    add_doc_list = []
    for idx, doc_id in enumerate(id_list):
        embedding = embeddings[idx] if idx < len(embeddings) else None
        metadata = metadatas[idx] if idx < len(metadatas) else None
        text = text_list[idx]
        add_doc_fields: Dict[str, Any] = {field_name_map['id']: doc_id,
            field_name_map['document']: text}
        if embedding is not None:
            # Embeddings are stored as one separator-joined string field.
            add_doc_fields[field_name_map['embedding']] = (self.config.
                embedding_field_separator.join(str(unit) for unit in
                embedding))
        if metadata is not None:
            for md_key, md_value in metadata.items():
                # Mapping values may carry extra info after a comma; only
                # the first token is the destination field name.
                add_doc_fields[field_name_map[md_key].split(',')[0]
                    ] = md_value
        add_doc_list.append({'fields': add_doc_fields, 'cmd': 'add'})
    return _upsert(add_doc_list)
|
Insert documents into the instance.
Args:
texts: The text segments to be inserted into the vector storage,
should not be empty.
metadatas: Metadata information.
Returns:
id_list: List of document IDs.
|
register_configure_hook
|
"""Register a configure hook.
Args:
context_var (ContextVar[Optional[Any]]): The context variable.
inheritable (bool): Whether the context variable is inheritable.
handle_class (Optional[Type[BaseCallbackHandler]], optional):
The callback handler class. Defaults to None.
env_var (Optional[str], optional): The environment variable. Defaults to None.
Raises:
ValueError: If env_var is set, handle_class must also be set
to a non-None value.
"""
if env_var is not None and handle_class is None:
raise ValueError(
'If env_var is set, handle_class must also be set to a non-None value.'
)
from langchain_core.callbacks.base import BaseCallbackHandler
_configure_hooks.append((cast(ContextVar[Optional[BaseCallbackHandler]],
context_var), inheritable, handle_class, env_var))
|
def register_configure_hook(context_var: ContextVar[Optional[Any]],
    inheritable: bool, handle_class: Optional[Type[BaseCallbackHandler]]=
    None, env_var: Optional[str]=None) ->None:
    """Register a configure hook.

    Args:
        context_var: The context variable the hook is attached to.
        inheritable: Whether the context variable is inheritable.
        handle_class: The callback handler class, if any. Defaults to None.
        env_var: Environment variable gating the hook; requires
            ``handle_class`` to be set. Defaults to None.

    Raises:
        ValueError: If ``env_var`` is set without a non-None
            ``handle_class``.
    """
    if env_var is not None and handle_class is None:
        raise ValueError(
            'If env_var is set, handle_class must also be set to a non-None value.'
        )
    # Imported lazily to avoid an import cycle with the base module.
    from langchain_core.callbacks.base import BaseCallbackHandler
    hook = (cast(ContextVar[Optional[BaseCallbackHandler]], context_var),
        inheritable, handle_class, env_var)
    _configure_hooks.append(hook)
|
Register a configure hook.
Args:
context_var (ContextVar[Optional[Any]]): The context variable.
inheritable (bool): Whether the context variable is inheritable.
handle_class (Optional[Type[BaseCallbackHandler]], optional):
The callback handler class. Defaults to None.
env_var (Optional[str], optional): The environment variable. Defaults to None.
Raises:
ValueError: If env_var is set, handle_class must also be set
to a non-None value.
|
from_documents
|
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
return cls.from_texts(texts=texts, tfidf_params=tfidf_params, metadatas=
metadatas, **kwargs)
|
@classmethod
def from_documents(cls, documents: Iterable[Document], *, tfidf_params:
    Optional[Dict[str, Any]]=None, **kwargs: Any) ->TFIDFRetriever:
    """Build a ``TFIDFRetriever`` from documents, preserving metadata."""
    # Unzip (content, metadata) pairs; ``zip(*...)`` keeps tuple semantics.
    pairs = ((doc.page_content, doc.metadata) for doc in documents)
    texts, metadatas = zip(*pairs)
    return cls.from_texts(texts=texts, tfidf_params=tfidf_params,
        metadatas=metadatas, **kwargs)
| null |
_end_trace
|
"""End a trace for a run."""
if not run.parent_run_id:
self._persist_run(run)
else:
parent_run = self.run_map.get(str(run.parent_run_id))
if parent_run is None:
logger.debug(f'Parent run with UUID {run.parent_run_id} not found.')
elif run.child_execution_order is not None and parent_run.child_execution_order is not None and run.child_execution_order > parent_run.child_execution_order:
parent_run.child_execution_order = run.child_execution_order
self.run_map.pop(str(run.id))
self._on_run_update(run)
|
def _end_trace(self, run: Run) ->None:
    """End a trace for a run."""
    if not run.parent_run_id:
        # Top-level run: persist it directly.
        self._persist_run(run)
    else:
        parent_run = self.run_map.get(str(run.parent_run_id))
        if parent_run is None:
            logger.debug(f'Parent run with UUID {run.parent_run_id} not found.'
                )
        else:
            child_order = run.child_execution_order
            parent_order = parent_run.child_execution_order
            # Propagate the highest child execution order to the parent.
            if (child_order is not None and parent_order is not None and
                    child_order > parent_order):
                parent_run.child_execution_order = child_order
    self.run_map.pop(str(run.id))
    self._on_run_update(run)
|
End a trace for a run.
|
test_few_shot_chat_message_prompt_template
|
"""Tests for few shot chat message template."""
examples = [{'input': '2+2', 'output': '4'}, {'input': '2+3', 'output': '5'}]
example_prompt = ChatPromptTemplate.from_messages([
HumanMessagePromptTemplate.from_template('{input}'),
AIMessagePromptTemplate.from_template('{output}')])
few_shot_prompt = FewShotChatMessagePromptTemplate(input_variables=['input'
], example_prompt=example_prompt, examples=examples)
final_prompt: ChatPromptTemplate = SystemMessagePromptTemplate.from_template(
'You are a helpful AI Assistant'
) + few_shot_prompt + HumanMessagePromptTemplate.from_template('{input}')
messages = final_prompt.format_messages(input='100 + 1')
assert messages == [SystemMessage(content='You are a helpful AI Assistant',
additional_kwargs={}), HumanMessage(content='2+2', additional_kwargs={},
example=False), AIMessage(content='4', additional_kwargs={}, example=
False), HumanMessage(content='2+3', additional_kwargs={}, example=False
), AIMessage(content='5', additional_kwargs={}, example=False),
HumanMessage(content='100 + 1', additional_kwargs={}, example=False)]
|
def test_few_shot_chat_message_prompt_template() ->None:
    """Tests for few shot chat message template."""
    # Two arithmetic examples rendered as human/AI message pairs.
    examples = [{'input': '2+2', 'output': '4'}, {'input': '2+3', 'output':
        '5'}]
    example_prompt = ChatPromptTemplate.from_messages([
        HumanMessagePromptTemplate.from_template('{input}'),
        AIMessagePromptTemplate.from_template('{output}')])
    few_shot_prompt = FewShotChatMessagePromptTemplate(input_variables=[
        'input'], example_prompt=example_prompt, examples=examples)
    # Compose: system preamble + rendered examples + the live user input.
    final_prompt: ChatPromptTemplate = (SystemMessagePromptTemplate.
        from_template('You are a helpful AI Assistant') + few_shot_prompt +
        HumanMessagePromptTemplate.from_template('{input}'))
    messages = final_prompt.format_messages(input='100 + 1')
    # The examples must appear verbatim, in order, before the new input.
    assert messages == [SystemMessage(content=
        'You are a helpful AI Assistant', additional_kwargs={}),
        HumanMessage(content='2+2', additional_kwargs={}, example=False),
        AIMessage(content='4', additional_kwargs={}, example=False),
        HumanMessage(content='2+3', additional_kwargs={}, example=False),
        AIMessage(content='5', additional_kwargs={}, example=False),
        HumanMessage(content='100 + 1', additional_kwargs={}, example=False)]
|
Tests for few shot chat message template.
|
test_create
|
"""
Create a vector with vector index 'v' of dimension 10
and 'v:text' to hold text and metadatas author and category
"""
metadata_str = 'author char(32), category char(16)'
self.vectorstore.create(metadata_str, 1024)
podstore = self.pod + '.' + self.store
js = self.vectorstore.run(f'desc {podstore}')
jd = json.loads(js[0])
assert podstore in jd['data']
|
def test_create(self) ->None:
    """
    Create a vector store with embedding dimension 1024 and metadata
    columns ``author`` and ``category``, then verify the pod store
    appears in the engine's description output.
    """
    metadata_str = 'author char(32), category char(16)'
    self.vectorstore.create(metadata_str, 1024)
    podstore = self.pod + '.' + self.store
    # 'desc' returns JSON strings describing the store.
    js = self.vectorstore.run(f'desc {podstore}')
    jd = json.loads(js[0])
    assert podstore in jd['data']
|
Create a vector store with embedding dimension 1024
and metadata columns author and category
|
load
|
"""Load data into document objects."""
docs = []
try:
with open(self.file_path, newline='', encoding=self.encoding) as csvfile:
docs = self.__read_file(csvfile)
except UnicodeDecodeError as e:
if self.autodetect_encoding:
detected_encodings = detect_file_encodings(self.file_path)
for encoding in detected_encodings:
try:
with open(self.file_path, newline='', encoding=encoding.
encoding) as csvfile:
docs = self.__read_file(csvfile)
break
except UnicodeDecodeError:
continue
else:
raise RuntimeError(f'Error loading {self.file_path}') from e
except Exception as e:
raise RuntimeError(f'Error loading {self.file_path}') from e
return docs
|
def load(self) ->List[Document]:
    """Load data into document objects.

    Tries the configured encoding first; when ``autodetect_encoding`` is
    enabled, retries with each detected encoding until one parses.

    Raises:
        RuntimeError: On any non-decoding read error, or when every
            detected encoding also fails to decode.
    """
    docs = []
    try:
        with open(self.file_path, newline='', encoding=self.encoding
            ) as csvfile:
            docs = self.__read_file(csvfile)
    except UnicodeDecodeError as e:
        if self.autodetect_encoding:
            detected_encodings = detect_file_encodings(self.file_path)
            for encoding in detected_encodings:
                try:
                    with open(self.file_path, newline='', encoding=encoding
                        .encoding) as csvfile:
                        docs = self.__read_file(csvfile)
                    break
                except UnicodeDecodeError:
                    continue
            # NOTE(review): this ``else`` binds to the ``for`` (runs only
            # when no encoding succeeded). Upstream variants bind it to the
            # ``if`` instead, which would raise when autodetect is off;
            # here autodetect=False plus a decode error returns []. Confirm
            # which behavior is intended.
            else:
                raise RuntimeError(f'Error loading {self.file_path}') from e
    except Exception as e:
        raise RuntimeError(f'Error loading {self.file_path}') from e
    return docs
|
Load data into document objects.
|
from_texts
|
"""
Return VectorStore initialized from texts and embeddings.
Hologres connection string is required
"Either pass it as a parameter
or set the HOLOGRES_CONNECTION_STRING environment variable.
Create the connection string by calling
HologresVector.connection_string_from_db_params
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=
ids, ndims=ndims, table_name=table_name, pre_delete_table=
pre_delete_table, **kwargs)
|
@classmethod
def from_texts(cls: Type[Hologres], texts: List[str], embedding: Embeddings,
    metadatas: Optional[List[dict]]=None, ndims: int=ADA_TOKEN_COUNT,
    table_name: str=_LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]]
    =None, pre_delete_table: bool=False, **kwargs: Any) ->Hologres:
    """Return a ``Hologres`` vector store initialized from texts.

    A Hologres connection string is required: either pass it as a
    parameter or set the ``HOLOGRES_CONNECTION_STRING`` environment
    variable. It can be built with
    ``HologresVector.connection_string_from_db_params``.
    """
    text_embeddings = embedding.embed_documents(list(texts))
    return cls.__from(texts, text_embeddings, embedding, metadatas=
        metadatas, ids=ids, ndims=ndims, table_name=table_name,
        pre_delete_table=pre_delete_table, **kwargs)
|
Return VectorStore initialized from texts and embeddings.
A Hologres connection string is required:
either pass it as a parameter
or set the HOLOGRES_CONNECTION_STRING environment variable.
Create the connection string by calling
HologresVector.connection_string_from_db_params
|
__str__
|
"""Return the query syntax for a RedisText filter expression."""
if not self._value:
return '*'
return self.OPERATOR_MAP[self._operator] % (self._field, self._value)
|
def __str__(self) ->str:
    """Return the query syntax for a RedisText filter expression."""
    # An unset value matches everything.
    if not self._value:
        return '*'
    template = self.OPERATOR_MAP[self._operator]
    return template % (self._field, self._value)
|
Return the query syntax for a RedisText filter expression.
|
mock_lakefs_client_no_presign_local
|
with patch('langchain_community.document_loaders.lakefs.LakeFSClient'
) as mock_lakefs_client:
mock_lakefs_client.return_value.ls_objects.return_value = [(
'path_bla.txt', 'local:///physical_address_bla')]
mock_lakefs_client.return_value.is_presign_supported.return_value = False
yield mock_lakefs_client.return_value
|
@pytest.fixture
def mock_lakefs_client_no_presign_local() ->Any:
    """Mocked LakeFSClient listing one object at a ``local://`` physical
    address, with pre-signed URL support disabled."""
    with patch('langchain_community.document_loaders.lakefs.LakeFSClient'
        ) as mock_lakefs_client:
        mock_lakefs_client.return_value.ls_objects.return_value = [(
            'path_bla.txt', 'local:///physical_address_bla')]
        # Presign disabled: the loader must fall back to local access.
        (mock_lakefs_client.return_value.is_presign_supported.return_value
            ) = False
        yield mock_lakefs_client.return_value
| null |
save_context
|
"""Nothing should be saved or changed"""
pass
|
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
    """Nothing should be saved or changed.

    This memory implementation is intentionally a no-op: it accepts the
    chain's inputs and outputs and discards them.
    """
    pass
|
Nothing should be saved or changed
|
test_chat_google_palm_multiple_completions
|
"""Test Google PaLM Chat API wrapper with multiple completions."""
chat = ChatGooglePalm(n=5, temperature=1.0)
message = HumanMessage(content='Hello')
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
|
def test_chat_google_palm_multiple_completions() ->None:
    """Test Google PaLM Chat API wrapper with multiple completions."""
    # n=5 requests five candidate completions for the single prompt.
    chat = ChatGooglePalm(n=5, temperature=1.0)
    message = HumanMessage(content='Hello')
    response = chat._generate([message])
    assert isinstance(response, ChatResult)
    assert len(response.generations) == 5
    # Every candidate must be a message with string content.
    for generation in response.generations:
        assert isinstance(generation.message, BaseMessage)
        assert isinstance(generation.message.content, str)
|
Test Google PaLM Chat API wrapper with multiple completions.
|
create
|
...
|
@overload
@staticmethod
def create(messages: Sequence[Dict[str, Any]], *, provider: str=
    'ChatOpenAI', stream: Literal[False]=False, **kwargs: Any
    ) ->ChatCompletions:
    # Typing-only overload: a non-streaming call returns ChatCompletions.
    ...
| null |
embed_documents
|
"""Call out to LocalAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
return [self._embedding_func(text, engine=self.deployment) for text in texts]
|
def embed_documents(self, texts: List[str], chunk_size: Optional[int]=0
    ) ->List[List[float]]:
    """Call out to LocalAI's embedding endpoint for embedding search docs.

    Args:
        texts: The list of texts to embed.
        chunk_size: The chunk size of embeddings. If None, will use the
            chunk size specified by the class. (Each text is embedded
            individually here.)

    Returns:
        List of embeddings, one for each text.
    """
    embeddings: List[List[float]] = []
    for text in texts:
        embeddings.append(self._embedding_func(text, engine=self.deployment))
    return embeddings
|
Call out to LocalAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
|
_to_chat_prompt
|
"""Convert a list of messages into a prompt format expected by wrapped LLM."""
if not messages:
raise ValueError('at least one HumanMessage must be provided')
if not isinstance(messages[0], SystemMessage):
messages = [self.system_message] + messages
if not isinstance(messages[1], HumanMessage):
raise ValueError(
'messages list must start with a SystemMessage or UserMessage')
if not isinstance(messages[-1], HumanMessage):
raise ValueError('last message must be a HumanMessage')
prompt_parts = []
if self.usr_0_beg is None:
self.usr_0_beg = self.usr_n_beg
if self.usr_0_end is None:
self.usr_0_end = self.usr_n_end
prompt_parts.append(self.sys_beg + cast(str, messages[0].content) + self.
sys_end)
prompt_parts.append(self.usr_0_beg + cast(str, messages[1].content) + self.
usr_0_end)
for ai_message, human_message in zip(messages[2::2], messages[3::2]):
if not isinstance(ai_message, AIMessage) or not isinstance(human_message,
HumanMessage):
raise ValueError(
'messages must be alternating human- and ai-messages, optionally prepended by a system message'
)
prompt_parts.append(self.ai_n_beg + cast(str, ai_message.content) +
self.ai_n_end)
prompt_parts.append(self.usr_n_beg + cast(str, human_message.content) +
self.usr_n_end)
return ''.join(prompt_parts)
|
def _to_chat_prompt(self, messages: List[BaseMessage]) ->str:
    """Convert a list of messages into a prompt format expected by wrapped LLM."""
    if not messages:
        raise ValueError('at least one HumanMessage must be provided')
    # Prepend the default system message when the caller supplied none.
    if not isinstance(messages[0], SystemMessage):
        messages = [self.system_message] + messages
    # After the (possibly injected) system message, a human turn must follow.
    if not isinstance(messages[1], HumanMessage):
        raise ValueError(
            'messages list must start with a SystemMessage or UserMessage')
    if not isinstance(messages[-1], HumanMessage):
        raise ValueError('last message must be a HumanMessage')
    prompt_parts = []
    # First-user-turn delimiters default to the regular user delimiters.
    # NOTE: this mutates self, so the defaults persist across calls.
    if self.usr_0_beg is None:
        self.usr_0_beg = self.usr_n_beg
    if self.usr_0_end is None:
        self.usr_0_end = self.usr_n_end
    prompt_parts.append(self.sys_beg + cast(str, messages[0].content) +
        self.sys_end)
    prompt_parts.append(self.usr_0_beg + cast(str, messages[1].content) +
        self.usr_0_end)
    # Remaining turns must strictly alternate AI (even idx) / human (odd idx).
    for ai_message, human_message in zip(messages[2::2], messages[3::2]):
        if not isinstance(ai_message, AIMessage) or not isinstance(
            human_message, HumanMessage):
            raise ValueError(
                'messages must be alternating human- and ai-messages, optionally prepended by a system message'
                )
        prompt_parts.append(self.ai_n_beg + cast(str, ai_message.content) +
            self.ai_n_end)
        prompt_parts.append(self.usr_n_beg + cast(str, human_message.
            content) + self.usr_n_end)
    return ''.join(prompt_parts)
|
Convert a list of messages into a prompt format expected by wrapped LLM.
|
get_structured_schema
|
"""Returns the structured schema of the Graph"""
return self.structured_schema
|
@property
def get_structured_schema(self) ->Dict[str, Any]:
    """Returns the structured schema of the Graph.

    Simple accessor over the cached ``structured_schema`` attribute; no
    refresh is performed here.
    """
    return self.structured_schema
|
Returns the structured schema of the Graph
|
_run_persistent
|
"""
Runs commands in a persistent environment
and returns the output.
Args:
command: the command to execute
"""
pexpect = self._lazy_import_pexpect()
if self.process is None:
raise ValueError('Process not initialized')
self.process.sendline(command)
self.process.expect(self.prompt, timeout=10)
self.process.sendline('')
try:
self.process.expect([self.prompt, pexpect.EOF], timeout=10)
except pexpect.TIMEOUT:
return f'Timeout error while executing command {command}'
if self.process.after == pexpect.EOF:
return f'Exited with error status: {self.process.exitstatus}'
output = self.process.before
output = self.process_output(output, command)
if self.strip_newlines:
return output.strip()
return output
|
def _run_persistent(self, command: str) ->str:
    """
    Runs commands in a persistent environment
    and returns the output.

    Args:
        command: the command to execute

    Returns:
        The captured output (stripped when ``strip_newlines`` is set), or
        a descriptive error string on timeout or process exit.
    """
    pexpect = self._lazy_import_pexpect()
    if self.process is None:
        raise ValueError('Process not initialized')
    self.process.sendline(command)
    # Wait for the prompt so the command has been accepted by the shell.
    self.process.expect(self.prompt, timeout=10)
    # Empty line — presumably forces the next prompt so output can be
    # captured between prompts; confirm against the pexpect setup.
    self.process.sendline('')
    try:
        self.process.expect([self.prompt, pexpect.EOF], timeout=10)
    except pexpect.TIMEOUT:
        return f'Timeout error while executing command {command}'
    if self.process.after == pexpect.EOF:
        return f'Exited with error status: {self.process.exitstatus}'
    output = self.process.before
    # Post-process the raw capture (e.g. strip the echoed command).
    output = self.process_output(output, command)
    if self.strip_newlines:
        return output.strip()
    return output
|
Runs commands in a persistent environment
and returns the output.
Args:
command: the command to execute
|
OutputType
|
"""The type of the output of this runnable as a type annotation."""
func = getattr(self, 'func', None) or getattr(self, 'afunc')
try:
sig = inspect.signature(func)
if sig.return_annotation != inspect.Signature.empty:
if getattr(sig.return_annotation, '__origin__', None) in (collections
.abc.Iterator, collections.abc.AsyncIterator):
return getattr(sig.return_annotation, '__args__', (Any,))[0]
return sig.return_annotation
else:
return Any
except ValueError:
return Any
|
@property
def OutputType(self) ->Any:
    """The type of the output of this runnable as a type annotation."""
    target = getattr(self, 'func', None) or getattr(self, 'afunc')
    try:
        annotation = inspect.signature(target).return_annotation
    except ValueError:
        # Some callables expose no retrievable signature.
        return Any
    if annotation == inspect.Signature.empty:
        return Any
    iterator_origins = (collections.abc.Iterator, collections.abc.
        AsyncIterator)
    if getattr(annotation, '__origin__', None) in iterator_origins:
        # Generator-style functions are annotated Iterator[T]; the
        # runnable's output type is the yielded T.
        return getattr(annotation, '__args__', (Any,))[0]
    return annotation
|
The type of the output of this runnable as a type annotation.
|
_call
|
"""Call the IBM watsonx.ai inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = watsonx_llm("What is a molecule")
"""
result = self._generate(prompts=[prompt], stop=stop, run_manager=
run_manager, **kwargs)
return result.generations[0][0].text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call the IBM watsonx.ai inference endpoint.

    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.
        run_manager: Optional callback manager.

    Returns:
        The string generated by the model.

    Example:
        .. code-block:: python

            response = watsonx_llm("What is a molecule")
    """
    # Delegate to the batch path with a single prompt, then unwrap the
    # first generation's text.
    outcome = self._generate(prompts=[prompt], stop=stop, run_manager=
        run_manager, **kwargs)
    first_generation = outcome.generations[0][0]
    return first_generation.text
|
Call the IBM watsonx.ai inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = watsonx_llm("What is a molecule")
|
_process_page_content
|
"""Process the page content based on dedupe."""
if self.dedupe:
return page.dedupe_chars().extract_text(**self.text_kwargs)
return page.extract_text(**self.text_kwargs)
|
def _process_page_content(self, page: pdfplumber.page.Page) ->str:
    """Process the page content based on dedupe."""
    # Optionally collapse duplicated characters before extracting text.
    source = page.dedupe_chars() if self.dedupe else page
    return source.extract_text(**self.text_kwargs)
|
Process the page content based on dedupe.
|
test_messages_to_prompt_dict_raises_with_misplaced_system_message
|
pytest.importorskip('google.generativeai')
with pytest.raises(ChatGooglePalmError) as e:
_messages_to_prompt_dict([HumanMessage(content='Real human message'),
SystemMessage(content='Prompt')])
assert 'System message must be first' in str(e)
|
def test_messages_to_prompt_dict_raises_with_misplaced_system_message() ->None:
    """A SystemMessage that is not first must raise ChatGooglePalmError."""
    pytest.importorskip('google.generativeai')
    misordered = [HumanMessage(content='Real human message'), SystemMessage
        (content='Prompt')]
    with pytest.raises(ChatGooglePalmError) as excinfo:
        _messages_to_prompt_dict(misordered)
    assert 'System message must be first' in str(excinfo)
| null |
validate_channel_or_videoIds_is_set
|
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get('channel_name') and not values.get('video_ids'):
raise ValueError('Must specify either channel_name or video_ids')
return values
|
@root_validator
def validate_channel_or_videoIds_is_set(cls, values: Dict[str, Any]) ->Dict[
    str, Any]:
    """Validate that at least one of channel_name or video_ids is set.

    Note: the previous docstring referred to ``folder_id``/``document_ids``
    and claimed mutual exclusivity, but the check actually requires that at
    least one of ``channel_name`` or ``video_ids`` is provided (supplying
    both is allowed).
    """
    if not values.get('channel_name') and not values.get('video_ids'):
        raise ValueError('Must specify either channel_name or video_ids')
    return values
|
Validate that at least one of channel_name or video_ids is set.
|
__getattr__
|
try:
return self[name]
except KeyError:
raise AttributeError(f"'EvalError' object has no attribute '{name}'")
|
def __getattr__(self, name: str) ->Any:
    """Fall back to item lookup so stored keys read like attributes."""
    try:
        value = self[name]
    except KeyError:
        # Surface the conventional attribute error for missing keys.
        raise AttributeError(f"'EvalError' object has no attribute '{name}'")
    return value
| null |
get_return_intermediate_steps
|
"""For backwards compatibility."""
if 'return_refine_steps' in values:
values['return_intermediate_steps'] = values['return_refine_steps']
del values['return_refine_steps']
return values
|
@root_validator(pre=True)
def get_return_intermediate_steps(cls, values: Dict) ->Dict:
    """For backwards compatibility."""
    # Migrate the legacy `return_refine_steps` key to its current name.
    if 'return_refine_steps' in values:
        values['return_intermediate_steps'] = values.pop('return_refine_steps'
            )
    return values
|
For backwards compatibility.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """Verify the module's public ``__all__`` matches the expected names."""
    exported = set(__all__)
    expected = set(EXPECTED_ALL)
    assert exported == expected
| null |
get_summary
|
"""Return a descriptive summary of the agent."""
current_time = datetime.now() if now is None else now
since_refresh = (current_time - self.last_refreshed).seconds
if not self.summary or since_refresh >= self.summary_refresh_seconds or force_refresh:
self.summary = self._compute_agent_summary()
self.last_refreshed = current_time
age = self.age if self.age is not None else 'N/A'
return f'Name: {self.name} (age: {age})' + f"""
Innate traits: {self.traits}""" + f'\n{self.summary}'
|
def get_summary(self, force_refresh: bool=False, now: Optional[datetime]=None
    ) ->str:
    """Return a descriptive summary of the agent.

    The cached summary is recomputed when it is empty, stale (older than
    ``summary_refresh_seconds``), or when ``force_refresh`` is requested.
    """
    moment = datetime.now() if now is None else now
    staleness = (moment - self.last_refreshed).seconds
    is_stale = staleness >= self.summary_refresh_seconds
    if force_refresh or is_stale or not self.summary:
        self.summary = self._compute_agent_summary()
        self.last_refreshed = moment
    age_repr = 'N/A' if self.age is None else self.age
    header = f'Name: {self.name} (age: {age_repr})'
    return header + f'\nInnate traits: {self.traits}' + f'\n{self.summary}'
|
Return a descriptive summary of the agent.
|
_diff
|
return jsonpatch.make_patch(prev, next).patch
|
def _diff(self, prev: Optional[Any], next: Any) ->Any:
    """Return the JSON Patch operations that transform *prev* into *next*."""
    patch = jsonpatch.make_patch(prev, next)
    return patch.patch
| null |
get_prompt
|
"""Generates a prompt string.
It includes various constraints, commands, resources, and performance evaluations.
Returns:
str: The generated prompt string.
"""
prompt_generator = PromptGenerator()
prompt_generator.add_constraint(
'~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.'
)
prompt_generator.add_constraint(
'If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.'
)
prompt_generator.add_constraint('No user assistance')
prompt_generator.add_constraint(
'Exclusively use the commands listed in double quotes e.g. "command name"')
for tool in tools:
prompt_generator.add_tool(tool)
prompt_generator.add_resource(
'Internet access for searches and information gathering.')
prompt_generator.add_resource('Long Term memory management.')
prompt_generator.add_resource(
'GPT-3.5 powered Agents for delegation of simple tasks.')
prompt_generator.add_resource('File output.')
prompt_generator.add_performance_evaluation(
'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.'
)
prompt_generator.add_performance_evaluation(
'Constructively self-criticize your big-picture behavior constantly.')
prompt_generator.add_performance_evaluation(
'Reflect on past decisions and strategies to refine your approach.')
prompt_generator.add_performance_evaluation(
'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.'
)
prompt_string = prompt_generator.generate_prompt_string()
return prompt_string
|
def get_prompt(tools: List[BaseTool]) ->str:
    """Generates a prompt string.
    It includes various constraints, commands, resources, and performance evaluations.
    Returns:
        str: The generated prompt string.
    """
    constraints = [
        '~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.'
        ,
        'If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.'
        , 'No user assistance',
        'Exclusively use the commands listed in double quotes e.g. "command name"'
        ]
    resources = ['Internet access for searches and information gathering.',
        'Long Term memory management.',
        'GPT-3.5 powered Agents for delegation of simple tasks.',
        'File output.']
    evaluations = [
        'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.'
        ,
        'Constructively self-criticize your big-picture behavior constantly.',
        'Reflect on past decisions and strategies to refine your approach.',
        'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.'
        ]
    generator = PromptGenerator()
    # Registration order matters: constraints, tools, resources, evaluations.
    for constraint in constraints:
        generator.add_constraint(constraint)
    for tool in tools:
        generator.add_tool(tool)
    for resource in resources:
        generator.add_resource(resource)
    for evaluation in evaluations:
        generator.add_performance_evaluation(evaluation)
    return generator.generate_prompt_string()
|
Generates a prompt string.
It includes various constraints, commands, resources, and performance evaluations.
Returns:
str: The generated prompt string.
|
test_xml_output_parser_fail
|
"""Test XMLOutputParser where complete output is not in XML format."""
xml_parser = XMLOutputParser()
with pytest.raises(ValueError) as e:
xml_parser.parse_folder(result)
assert 'Could not parse output' in str(e)
|
@pytest.mark.parametrize('result', ['foo></foo>', '<foo></foo', 'foo></foo',
    'foofoo'])
def test_xml_output_parser_fail(result: str) ->None:
    """Test XMLOutputParser where complete output is not in XML format."""
    xml_parser = XMLOutputParser()
    with pytest.raises(ValueError) as e:
        # Fix: the parser's entry point is `parse`; `parse_folder` does not
        # exist on XMLOutputParser and would raise AttributeError instead of
        # the ValueError this test is meant to exercise.
        xml_parser.parse(result)
    assert 'Could not parse output' in str(e)
|
Test XMLOutputParser where complete output is not in XML format.
|
_identifying_params
|
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'endpoint_url': self.endpoint_url}, **{'model_kwargs':
_model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    # Treat a missing/None model_kwargs as an empty mapping.
    extra_kwargs = self.model_kwargs or {}
    return {'endpoint_url': self.endpoint_url, 'model_kwargs': extra_kwargs}
|
Get the identifying parameters.
|
format
|
return self.format_prompt(**kwargs).to_string()
|
def format(self, **kwargs: Any) ->str:
    """Render the prompt with *kwargs* and return it as a plain string."""
    prompt_value = self.format_prompt(**kwargs)
    return prompt_value.to_string()
| null |
_llm_type
|
"""Return type of llm."""
return 'cerebriumai'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    # Stable identifier string for this provider, used by serialization
    # and callback/telemetry machinery — presumably; confirm against callers.
    return 'cerebriumai'
|
Return type of llm.
|
test_sim_search
|
"""Test end to end construction and simple similarity search."""
texts = ['foo', 'bar', 'baz']
in_memory_vec_store = DocArrayInMemorySearch.from_texts(texts=texts,
embedding=FakeEmbeddings(), metric=metric)
output = in_memory_vec_store.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
@pytest.mark.parametrize('metric', ['cosine_sim', 'euclidean_dist',
    'sqeuclidean_dist'])
def test_sim_search(metric: str, texts: List[str]) ->None:
    """End-to-end store construction plus a basic similarity search."""
    corpus = ['foo', 'bar', 'baz']
    store = DocArrayInMemorySearch.from_texts(texts=corpus, embedding=
        FakeEmbeddings(), metric=metric)
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo')]
|
Test end to end construction and simple similarity search.
|
_on_tool_end
|
crumbs = self.get_breadcrumbs(run)
if run.outputs:
self.function_callback(
f"{get_colored_text('[tool/end]', color='blue')} " +
get_bolded_text(
f"""[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:
""") +
f'"{run.outputs[\'output\'].strip()}"')
|
def _on_tool_end(self, run: Run) ->None:
    """Emit a colored '[tool/end]' log line when a tool run finishes."""
    crumbs = self.get_breadcrumbs(run)
    # Runs with no outputs (e.g. errored tools) are not logged here.
    if run.outputs:
        self.function_callback(
            f"{get_colored_text('[tool/end]', color='blue')} " +
            get_bolded_text(
                f"""[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:
"""
                ) + f'"{run.outputs[\'output\'].strip()}"')
| null |
__init__
|
"""
Initializes the GoogleSpeechToTextLoader.
Args:
project_id: Google Cloud Project ID.
file_path: A Google Cloud Storage URI or a local file path.
location: Speech-to-Text recognizer location.
recognizer_id: Speech-to-Text recognizer id.
config: Recognition options and features.
For more information:
https://cloud.google.com/python/docs/reference/speech/latest/google.cloud.speech_v2.types.RecognitionConfig
config_mask: The list of fields in config that override the values in the
``default_recognition_config`` of the recognizer during this
recognition request.
For more information:
https://cloud.google.com/python/docs/reference/speech/latest/google.cloud.speech_v2.types.RecognizeRequest
"""
try:
from google.api_core.client_options import ClientOptions
from google.cloud.speech_v2 import AutoDetectDecodingConfig, RecognitionConfig, RecognitionFeatures, SpeechClient
except ImportError as exc:
raise ImportError(
'Could not import google-cloud-speech python package. Please install it with `pip install google-cloud-speech`.'
) from exc
self.project_id = project_id
self.file_path = file_path
self.location = location
self.recognizer_id = recognizer_id
self.config = config or RecognitionConfig(auto_decoding_config=
AutoDetectDecodingConfig(), language_codes=['en-US'], model='chirp',
features=RecognitionFeatures(enable_automatic_punctuation=True))
self.config_mask = config_mask
self._client = SpeechClient(client_info=get_client_info(module=
'speech-to-text'), client_options=ClientOptions(api_endpoint=
f'{location}-speech.googleapis.com') if location != 'global' else None)
self._recognizer_path = self._client.recognizer_path(project_id, location,
recognizer_id)
|
def __init__(self, project_id: str, file_path: str, location: str=
    'us-central1', recognizer_id: str='_', config: Optional[
    RecognitionConfig]=None, config_mask: Optional[FieldMask]=None):
    """
    Initializes the GoogleSpeechToTextLoader.
    Args:
        project_id: Google Cloud Project ID.
        file_path: A Google Cloud Storage URI or a local file path.
        location: Speech-to-Text recognizer location.
        recognizer_id: Speech-to-Text recognizer id.
        config: Recognition options and features.
            For more information:
            https://cloud.google.com/python/docs/reference/speech/latest/google.cloud.speech_v2.types.RecognitionConfig
        config_mask: The list of fields in config that override the values in the
            ``default_recognition_config`` of the recognizer during this
            recognition request.
            For more information:
            https://cloud.google.com/python/docs/reference/speech/latest/google.cloud.speech_v2.types.RecognizeRequest
    """
    # Lazy import so google-cloud-speech is only required when this loader
    # is actually instantiated.
    try:
        from google.api_core.client_options import ClientOptions
        from google.cloud.speech_v2 import AutoDetectDecodingConfig, RecognitionConfig, RecognitionFeatures, SpeechClient
    except ImportError as exc:
        raise ImportError(
            'Could not import google-cloud-speech python package. Please install it with `pip install google-cloud-speech`.'
            ) from exc
    self.project_id = project_id
    self.file_path = file_path
    self.location = location
    self.recognizer_id = recognizer_id
    # Default config: auto-detect audio encoding, en-US, 'chirp' model with
    # automatic punctuation enabled.
    self.config = config or RecognitionConfig(auto_decoding_config=
        AutoDetectDecodingConfig(), language_codes=['en-US'], model='chirp',
        features=RecognitionFeatures(enable_automatic_punctuation=True))
    self.config_mask = config_mask
    # Non-global locations require the matching regional API endpoint.
    self._client = SpeechClient(client_info=get_client_info(module=
        'speech-to-text'), client_options=ClientOptions(api_endpoint=
        f'{location}-speech.googleapis.com') if location != 'global' else None)
    self._recognizer_path = self._client.recognizer_path(project_id,
        location, recognizer_id)
|
Initializes the GoogleSpeechToTextLoader.
Args:
project_id: Google Cloud Project ID.
file_path: A Google Cloud Storage URI or a local file path.
location: Speech-to-Text recognizer location.
recognizer_id: Speech-to-Text recognizer id.
config: Recognition options and features.
For more information:
https://cloud.google.com/python/docs/reference/speech/latest/google.cloud.speech_v2.types.RecognitionConfig
config_mask: The list of fields in config that override the values in the
``default_recognition_config`` of the recognizer during this
recognition request.
For more information:
https://cloud.google.com/python/docs/reference/speech/latest/google.cloud.speech_v2.types.RecognizeRequest
|
test_visit_operation_or
|
op = Operation(operator=Operator.OR, arguments=[Comparison(comparator=
Comparator.EQ, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz')])
expected = {'bool': {'should': [{'term': {'metadata.foo': 2}}, {'term': {
'metadata.bar.keyword': 'baz'}}]}}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
|
def test_visit_operation_or() ->None:
    """An OR operation must map onto a bool/should Elasticsearch clause."""
    operation = Operation(operator=Operator.OR, arguments=[Comparison(
        comparator=Comparator.EQ, attribute='foo', value=2), Comparison(
        comparator=Comparator.EQ, attribute='bar', value='baz')])
    expected_query = {'bool': {'should': [{'term': {'metadata.foo': 2}}, {
        'term': {'metadata.bar.keyword': 'baz'}}]}}
    translated = DEFAULT_TRANSLATOR.visit_operation(operation)
    assert translated == expected_query
| null |
test_md_header_text_splitter_preserve_headers_1
|
"""Test markdown splitter by header: Preserve Headers."""
markdown_document = """# Foo
## Bat
Hi this is Jim
Hi Joe
## Baz
# Bar
This is Alice
This is Bob"""
headers_to_split_on = [('#', 'Header 1')]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
headers_to_split_on, strip_headers=False)
output = markdown_splitter.split_text(markdown_document)
expected_output = [Document(page_content=
"""# Foo
## Bat
Hi this is Jim
Hi Joe
## Baz""", metadata={
'Header 1': 'Foo'}), Document(page_content=
"""# Bar
This is Alice
This is Bob""", metadata={'Header 1': 'Bar'})]
assert output == expected_output
|
def test_md_header_text_splitter_preserve_headers_1() ->None:
    """Test markdown splitter by header: Preserve Headers."""
    markdown_document = """# Foo
## Bat
Hi this is Jim
Hi Joe
## Baz
# Bar
This is Alice
This is Bob"""
    headers_to_split_on = [('#', 'Header 1')]
    # strip_headers=False keeps the header lines inside the emitted chunks.
    markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
        headers_to_split_on, strip_headers=False)
    output = markdown_splitter.split_text(markdown_document)
    # Only '#' splits; '##' subsections stay embedded in their parent chunk.
    expected_output = [Document(page_content=
        """# Foo
## Bat
Hi this is Jim
Hi Joe
## Baz""", metadata={
        'Header 1': 'Foo'}), Document(page_content=
        """# Bar
This is Alice
This is Bob""", metadata={'Header 1':
        'Bar'})]
    assert output == expected_output
|
Test markdown splitter by header: Preserve Headers.
|
__or__
|
"""Compose this runnable with another object to create a RunnableSequence."""
return RunnableSequence(self, coerce_to_runnable(other))
|
def __or__(self, other: Union[Runnable[Any, Other], Callable[[Any], Other],
    Callable[[Iterator[Any]], Iterator[Other]], Mapping[str, Union[Runnable
    [Any, Other], Callable[[Any], Other], Any]]]) ->RunnableSerializable[
    Input, Other]:
    """Compose this runnable with another object to create a RunnableSequence."""
    # Coerce callables/mappings into runnables before chaining.
    successor = coerce_to_runnable(other)
    return RunnableSequence(self, successor)
|
Compose this runnable with another object to create a RunnableSequence.
|
test_example_id_assignment_threadsafe
|
"""Test that example assigned at callback start/end is honored."""
example_ids = {}
def mock_create_run(**kwargs: Any) ->Any:
example_ids[kwargs.get('id')] = kwargs.get('reference_example_id')
return unittest.mock.MagicMock()
client = unittest.mock.MagicMock(spec=Client)
client.create_run = mock_create_run
tracer = LangChainTracer(client=client)
old_persist_run_single = tracer._persist_run_single
def new_persist_run_single(run: Run) ->None:
time.sleep(0.01)
old_persist_run_single(run)
with unittest.mock.patch.object(tracer, '_persist_run_single', new=
new_persist_run_single):
run_id_1 = UUID('9d878ab3-e5ca-4218-aef6-44cbdc90160a')
run_id_2 = UUID('f1f9fa53-8b2f-4742-bdbc-38215f7bd1e1')
example_id_1 = UUID('57e42c57-8c79-4d9f-8765-bf6cd3a98055')
tracer.example_id = example_id_1
tracer.on_llm_start({'name': 'example_1'}, ['foo'], run_id=run_id_1)
tracer.on_llm_end(LLMResult(generations=[], llm_output={}), run_id=run_id_1
)
example_id_2 = UUID('4f31216e-7c26-4027-a5fd-0bbf9ace17dc')
tracer.example_id = example_id_2
tracer.on_llm_start({'name': 'example_2'}, ['foo'], run_id=run_id_2)
tracer.on_llm_end(LLMResult(generations=[], llm_output={}), run_id=run_id_2
)
tracer.example_id = None
expected_example_ids = {run_id_1: example_id_1, run_id_2: example_id_2}
tracer.wait_for_futures()
assert example_ids == expected_example_ids
|
def test_example_id_assignment_threadsafe() ->None:
    """Test that example assigned at callback start/end is honored."""
    example_ids = {}
    # Record which reference_example_id each created run is given.
    def mock_create_run(**kwargs: Any) ->Any:
        example_ids[kwargs.get('id')] = kwargs.get('reference_example_id')
        return unittest.mock.MagicMock()
    client = unittest.mock.MagicMock(spec=Client)
    client.create_run = mock_create_run
    tracer = LangChainTracer(client=client)
    old_persist_run_single = tracer._persist_run_single
    # Slow down persistence so the two runs overlap in background execution.
    def new_persist_run_single(run: Run) ->None:
        time.sleep(0.01)
        old_persist_run_single(run)
    with unittest.mock.patch.object(tracer, '_persist_run_single', new=
        new_persist_run_single):
        run_id_1 = UUID('9d878ab3-e5ca-4218-aef6-44cbdc90160a')
        run_id_2 = UUID('f1f9fa53-8b2f-4742-bdbc-38215f7bd1e1')
        example_id_1 = UUID('57e42c57-8c79-4d9f-8765-bf6cd3a98055')
        tracer.example_id = example_id_1
        tracer.on_llm_start({'name': 'example_1'}, ['foo'], run_id=run_id_1)
        tracer.on_llm_end(LLMResult(generations=[], llm_output={}), run_id=
            run_id_1)
        example_id_2 = UUID('4f31216e-7c26-4027-a5fd-0bbf9ace17dc')
        tracer.example_id = example_id_2
        tracer.on_llm_start({'name': 'example_2'}, ['foo'], run_id=run_id_2)
        tracer.on_llm_end(LLMResult(generations=[], llm_output={}), run_id=
            run_id_2)
    tracer.example_id = None
    expected_example_ids = {run_id_1: example_id_1, run_id_2: example_id_2}
    # Flush the tracer's background futures before inspecting the results.
    tracer.wait_for_futures()
    assert example_ids == expected_example_ids
|
Test that example assigned at callback start/end is honored.
|
__init__
|
"""
Args:
repo_path: The path to the Git repository.
clone_url: Optional. The URL to clone the repository from.
branch: Optional. The branch to load files from. Defaults to `main`.
file_filter: Optional. A function that takes a file path and returns
a boolean indicating whether to load the file. Defaults to None.
"""
self.repo_path = repo_path
self.clone_url = clone_url
self.branch = branch
self.file_filter = file_filter
|
def __init__(self, repo_path: str, clone_url: Optional[str]=None, branch:
    Optional[str]='main', file_filter: Optional[Callable[[str], bool]]=None):
    """
    Args:
        repo_path: The path to the Git repository.
        clone_url: Optional. The URL to clone the repository from.
        branch: Optional. The branch to load files from. Defaults to `main`.
        file_filter: Optional. A function that takes a file path and returns
            a boolean indicating whether to load the file. Defaults to None.
    """
    # Only stores configuration; presumably cloning/reading happens in the
    # loader's load() method — confirm against the rest of the class.
    self.repo_path = repo_path
    self.clone_url = clone_url
    self.branch = branch
    self.file_filter = file_filter
|
Args:
repo_path: The path to the Git repository.
clone_url: Optional. The URL to clone the repository from.
branch: Optional. The branch to load files from. Defaults to `main`.
file_filter: Optional. A function that takes a file path and returns
a boolean indicating whether to load the file. Defaults to None.
|
_llm_type
|
"""Return type of chat model."""
return 'konko-chat'
|
@property
def _llm_type(self) ->str:
    """Return type of chat model."""
    # Stable identifier string for this provider, used by serialization
    # and callback/telemetry machinery — presumably; confirm against callers.
    return 'konko-chat'
|
Return type of chat model.
|
llm_prefix
|
"""Prefix to append the llm call with."""
return 'Thought:'
|
@property
def llm_prefix(self) ->str:
    """Prefix to append the llm call with."""
    # Marker placed before the model's next reasoning segment.
    return 'Thought:'
|
Prefix to append the llm call with.
|
delete
|
del self.store[key]
|
def delete(self, key: str) ->None:
    """Remove the entry stored under *key*; a missing key raises KeyError."""
    backing = self.store
    del backing[key]
| null |
test_pgvector_delete_docs
|
"""Add and delete documents."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, ids=['1', '2', '3'], connection_string=
CONNECTION_STRING, pre_delete_collection=True)
docsearch.delete(['1', '2'])
with docsearch._make_session() as session:
records = list(session.query(docsearch.EmbeddingStore).all())
assert sorted(record.custom_id for record in records) == ['3']
docsearch.delete(['2', '3'])
with docsearch._make_session() as session:
records = list(session.query(docsearch.EmbeddingStore).all())
assert sorted(record.custom_id for record in records) == []
|
def test_pgvector_delete_docs() ->None:
    """Add and delete documents."""
    corpus = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(i)} for i in range(len(corpus))]
    store = PGVector.from_texts(texts=corpus, collection_name=
        'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(
        ), metadatas=metadatas, ids=['1', '2', '3'], connection_string=
        CONNECTION_STRING, pre_delete_collection=True)
    store.delete(['1', '2'])
    with store._make_session() as session:
        survivors = list(session.query(store.EmbeddingStore).all())
    assert sorted(record.custom_id for record in survivors) == ['3']
    store.delete(['2', '3'])
    with store._make_session() as session:
        survivors = list(session.query(store.EmbeddingStore).all())
    assert sorted(record.custom_id for record in survivors) == []
|
Add and delete documents.
|
_create_chat_result
|
generations = []
for res in response['choices']:
message = _convert_dict_to_message(res['message'])
gen = ChatGeneration(message=message, generation_info=dict(
finish_reason=res.get('finish_reason')))
generations.append(gen)
return ChatResult(generations=generations)
|
def _create_chat_result(self, response: Mapping[str, Any]) ->ChatResult:
    """Convert a raw chat-completion response mapping into a ChatResult."""
    # One ChatGeneration per returned choice, carrying its finish reason.
    generations = [ChatGeneration(message=_convert_dict_to_message(choice[
        'message']), generation_info={'finish_reason': choice.get(
        'finish_reason')}) for choice in response['choices']]
    return ChatResult(generations=generations)
| null |
test_saving_loading_llm
|
"""Test saving/loading an Cohere LLM."""
llm = Cohere(max_tokens=10)
llm.save(file_path=tmp_path / 'cohere.yaml')
loaded_llm = load_llm(tmp_path / 'cohere.yaml')
assert_llm_equality(llm, loaded_llm)
|
def test_saving_loading_llm(tmp_path: Path) ->None:
    """Round-trip a Cohere LLM through save()/load_llm()."""
    original = Cohere(max_tokens=10)
    target = tmp_path / 'cohere.yaml'
    original.save(file_path=target)
    restored = load_llm(target)
    assert_llm_equality(original, restored)
|
Test saving/loading a Cohere LLM.
|
_identifying_params
|
"""Get the identifying parameters."""
return {**{'endpoint_url': self.endpoint_url}, **{'model_kwargs': self.
model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    return {'endpoint_url': self.endpoint_url, 'model_kwargs': self.
        model_kwargs}
|
Get the identifying parameters.
|
on_text
|
"""Run on arbitrary text."""
|
def on_text(self, text: str, **kwargs: Any) ->None:
    """Run on arbitrary text."""
    # No-op by default; presumably subclasses override this hook — confirm.
|
Run on arbitrary text.
|
execute_task
|
"""Execute a task."""
context = self._get_top_tasks(query=objective, k=k)
return self.execution_chain.run(objective=objective, context='\n'.join(
context), task=task, **kwargs)
|
def execute_task(self, objective: str, task: str, k: int=5, **kwargs: Any
    ) ->str:
    """Execute *task* against the top-k context retrieved for *objective*."""
    top_tasks = self._get_top_tasks(query=objective, k=k)
    joined_context = '\n'.join(top_tasks)
    return self.execution_chain.run(objective=objective, context=
        joined_context, task=task, **kwargs)
|
Execute a task.
|
from_es_connection
|
"""
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch cluster.
es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
connection object. input_field (str, optional): The name of the key for the
input text field in the document. Defaults to 'text_field'.
Returns:
ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
from langchain_community.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings_generator.embed_documents(documents)
"""
from elasticsearch.client import MlClient
client = MlClient(es_connection)
return cls(client, model_id, input_field=input_field)
|
@classmethod
def from_es_connection(cls, model_id: str, es_connection: Elasticsearch,
    input_field: str='text_field') ->ElasticsearchEmbeddings:
    """Build an ElasticsearchEmbeddings from an existing ES connection.

    An ``MlClient`` is created from *es_connection* and used to talk to the
    embedding model deployed in the cluster.

    Args:
        model_id: The id of the model deployed in the Elasticsearch cluster.
        es_connection: An existing ``elasticsearch.Elasticsearch`` connection
            object.
        input_field: Name of the key for the input text field in the
            document. Defaults to ``'text_field'``.

    Returns:
        ElasticsearchEmbeddings: A configured embeddings instance.

    Example:
        .. code-block:: python

            from elasticsearch import Elasticsearch
            from langchain_community.embeddings import ElasticsearchEmbeddings

            es_connection = Elasticsearch(
                hosts=["localhost:9200"], http_auth=("user", "password")
            )
            embeddings = ElasticsearchEmbeddings.from_es_connection(
                "your_model_id",
                es_connection,
                input_field="your_input_field",
            )
            embeddings.embed_documents(["This is an example document."])
    """
    # Import here so the elasticsearch client is only needed when used.
    from elasticsearch.client import MlClient
    ml_client = MlClient(es_connection)
    return cls(ml_client, model_id, input_field=input_field)
|
Instantiate embeddings from an existing Elasticsearch connection.
This method provides a way to create an instance of the ElasticsearchEmbeddings
class using an existing Elasticsearch connection. The connection object is used
to create an MlClient, which is then used to initialize the
ElasticsearchEmbeddings instance.
Args:
model_id (str): The model_id of the model deployed in the Elasticsearch cluster.
es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
connection object. input_field (str, optional): The name of the key for the
input text field in the document. Defaults to 'text_field'.
Returns:
ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.
Example:
.. code-block:: python
from elasticsearch import Elasticsearch
from langchain_community.embeddings import ElasticsearchEmbeddings
# Define the model ID and input field name (if different from default)
model_id = "your_model_id"
# Optional, only if different from 'text_field'
input_field = "your_input_field"
# Create Elasticsearch connection
es_connection = Elasticsearch(
hosts=["localhost:9200"], http_auth=("user", "password")
)
# Instantiate ElasticsearchEmbeddings using the existing connection
embeddings = ElasticsearchEmbeddings.from_es_connection(
model_id,
es_connection,
input_field=input_field,
)
documents = [
"This is an example document.",
"Another example document to generate embeddings for.",
]
embeddings_generator.embed_documents(documents)
|
test_sequential_overlapping_inputs
|
"""Test error is raised when input variables are overlapping."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar', 'test'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
with pytest.raises(ValueError):
SequentialChain(chains=[chain_1, chain_2], input_variables=['foo', 'test'])
|
def test_sequential_overlapping_inputs() ->None:
    """Test error is raised when input variables are overlapping."""
    first = FakeChain(input_variables=['foo'], output_variables=['bar',
        'test'])
    second = FakeChain(input_variables=['bar'], output_variables=['baz'])
    with pytest.raises(ValueError):
        SequentialChain(chains=[first, second], input_variables=['foo',
            'test'])
|
Test error is raised when input variables are overlapping.
|
_run
|
return 'foo'
|
def _run(self, *args: Any, run_manager: Optional[CallbackManagerForToolRun]
    =None, **kwargs: Any) ->str:
    """Return the constant string 'foo', ignoring all arguments."""
    return 'foo'
| null |
test_tiledb_vector_sim
|
"""Test vector similarity."""
texts = ['foo', 'bar', 'baz']
docsearch = TileDB.from_texts(texts=texts, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat',
index_type='FLAT')
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content='foo')]
docsearch = TileDB.from_texts(texts=texts, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat',
index_type='IVF_FLAT')
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_by_vector(query_vec, k=1, nprobe=
docsearch.vector_index.partitions)
assert output == [Document(page_content='foo')]
|
@pytest.mark.requires('tiledb-vector-search')
def test_tiledb_vector_sim(tmp_path: Path) ->None:
    """Similarity search by vector for both FLAT and IVF_FLAT indexes."""
    corpus = ['foo', 'bar', 'baz']
    flat_store = TileDB.from_texts(texts=corpus, embedding=
        ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat',
        index_type='FLAT')
    query_vec = FakeEmbeddings().embed_query(text='foo')
    hits = flat_store.similarity_search_by_vector(query_vec, k=1)
    assert hits == [Document(page_content='foo')]
    ivf_store = TileDB.from_texts(texts=corpus, embedding=
        ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat',
        index_type='IVF_FLAT')
    query_vec = FakeEmbeddings().embed_query(text='foo')
    hits = ivf_store.similarity_search_by_vector(query_vec, k=1, nprobe=
        ivf_store.vector_index.partitions)
    assert hits == [Document(page_content='foo')]
|
Test vector similarity.
|
_get_dataforseo_api_search
|
return DataForSeoAPISearchRun(api_wrapper=DataForSeoAPIWrapper(**kwargs))
|
def _get_dataforseo_api_search(**kwargs: Any) ->BaseTool:
    """Build a DataForSeo search tool around a freshly configured wrapper."""
    wrapper = DataForSeoAPIWrapper(**kwargs)
    return DataForSeoAPISearchRun(api_wrapper=wrapper)
| null |
convert_prompt
|
return self._wrap_prompt(prompt.to_string())
|
def convert_prompt(self, prompt: PromptValue) ->str:
    """Render the prompt value as text and apply the model's prompt wrapper."""
    rendered = prompt.to_string()
    return self._wrap_prompt(rendered)
| null |
load_memory_variables
|
if self.return_messages:
return {self.memory_key: self.chat_memory.messages}
else:
return {self.memory_key: get_buffer_string(self.chat_memory.messages)}
|
def load_memory_variables(self, values: Dict[str, Any]) ->Dict[str, Any]:
    """Expose chat history under the configured memory key.

    Returns raw message objects when ``return_messages`` is set, otherwise
    a single buffer string rendering of the history.
    """
    history = self.chat_memory.messages
    if not self.return_messages:
        history = get_buffer_string(history)
    return {self.memory_key: history}
| null |
list_files_in_main_branch
|
"""
Fetches all files in the main branch of the repo.
Returns:
str: A plaintext report containing the paths and names of the files.
"""
files: List[str] = []
try:
contents = self.github_repo_instance.get_contents('', ref=self.
github_base_branch)
for content in contents:
if content.type == 'dir':
files.extend(self.get_files_from_directory(content.path))
else:
files.append(content.path)
if files:
files_str = '\n'.join(files)
return f'Found {len(files)} files in the main branch:\n{files_str}'
else:
return 'No files found in the main branch'
except Exception as e:
return str(e)
|
def list_files_in_main_branch(self) ->str:
    """
    Fetches all files in the main branch of the repo.
    Returns:
        str: A plaintext report containing the paths and names of the files.
    """
    collected: List[str] = []
    try:
        # Walk the repository root on the configured base branch;
        # directories are expanded recursively via the helper.
        root_entries = self.github_repo_instance.get_contents('', ref=self
            .github_base_branch)
        for entry in root_entries:
            if entry.type == 'dir':
                collected.extend(self.get_files_from_directory(entry.path))
            else:
                collected.append(entry.path)
        if not collected:
            return 'No files found in the main branch'
        listing = '\n'.join(collected)
        return f'Found {len(collected)} files in the main branch:\n{listing}'
    except Exception as e:
        # Surface API failures as text so tool callers get a readable error.
        return str(e)
|
Fetches all files in the main branch of the repo.
Returns:
str: A plaintext report containing the paths and names of the files.
|
add_the_end
|
return text + 'THE END!'
|
def add_the_end(text: str) ->str:
    """Append the literal marker 'THE END!' to the given text."""
    return ''.join((text, 'THE END!'))
| null |
test_few_shot_chat_message_prompt_template_with_selector
|
"""Tests for few shot chat message template with an example selector."""
examples = [{'input': '2+2', 'output': '4'}, {'input': '2+3', 'output': '5'}]
example_selector = AsIsSelector(examples)
example_prompt = ChatPromptTemplate.from_messages([
HumanMessagePromptTemplate.from_template('{input}'),
AIMessagePromptTemplate.from_template('{output}')])
few_shot_prompt = FewShotChatMessagePromptTemplate(input_variables=['input'
], example_prompt=example_prompt, example_selector=example_selector)
final_prompt: ChatPromptTemplate = SystemMessagePromptTemplate.from_template(
'You are a helpful AI Assistant'
) + few_shot_prompt + HumanMessagePromptTemplate.from_template('{input}')
messages = final_prompt.format_messages(input='100 + 1')
assert messages == [SystemMessage(content='You are a helpful AI Assistant',
additional_kwargs={}), HumanMessage(content='2+2', additional_kwargs={},
example=False), AIMessage(content='4', additional_kwargs={}, example=
False), HumanMessage(content='2+3', additional_kwargs={}, example=False
), AIMessage(content='5', additional_kwargs={}, example=False),
HumanMessage(content='100 + 1', additional_kwargs={}, example=False)]
|
def test_few_shot_chat_message_prompt_template_with_selector() ->None:
    """Tests for few shot chat message template with an example selector."""
    shot_data = [{'input': '2+2', 'output': '4'}, {'input': '2+3',
        'output': '5'}]
    selector = AsIsSelector(shot_data)
    # Each example renders as a human/AI message pair.
    shot_prompt = ChatPromptTemplate.from_messages([
        HumanMessagePromptTemplate.from_template('{input}'),
        AIMessagePromptTemplate.from_template('{output}')])
    shots = FewShotChatMessagePromptTemplate(input_variables=['input'],
        example_prompt=shot_prompt, example_selector=selector)
    system = SystemMessagePromptTemplate.from_template(
        'You are a helpful AI Assistant')
    final_prompt: ChatPromptTemplate = system + shots + (
        HumanMessagePromptTemplate.from_template('{input}'))
    rendered = final_prompt.format_messages(input='100 + 1')
    expected = [SystemMessage(content='You are a helpful AI Assistant',
        additional_kwargs={}), HumanMessage(content='2+2',
        additional_kwargs={}, example=False), AIMessage(content='4',
        additional_kwargs={}, example=False), HumanMessage(content='2+3',
        additional_kwargs={}, example=False), AIMessage(content='5',
        additional_kwargs={}, example=False), HumanMessage(content=
        '100 + 1', additional_kwargs={}, example=False)]
    assert rendered == expected
|
Tests for few shot chat message template with an example selector.
|
input_keys
|
"""Expect input keys.
:meta private:
"""
return self.input_variables
|
@property
def input_keys(self) ->List[str]:
    """Expect input keys.

    Mirrors the configured ``input_variables`` of this object.

    :meta private:
    """
    return self.input_variables
|
Expect input keys.
:meta private:
|
test_singlestoredb_from_existing
|
"""Test adding a new document"""
table_name = 'test_singlestoredb_from_existing'
drop(table_name)
SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
docsearch2 = SingleStoreDB(NormilizedFakeEmbeddings(), table_name=
'test_singlestoredb_from_existing', host=TEST_SINGLESTOREDB_URL)
output = docsearch2.similarity_search('foo', k=1)
assert output == TEST_SINGLE_RESULT
drop(table_name)
|
@pytest.mark.skipif(not singlestoredb_installed, reason=
    'singlestoredb not installed')
def test_singlestoredb_from_existing(texts: List[str]) ->None:
    """Test adding a new document"""
    table = 'test_singlestoredb_from_existing'
    drop(table)
    # Populate the table once, then re-open it through a fresh client
    # instance to prove data survives across connections.
    SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name
        =table, host=TEST_SINGLESTOREDB_URL)
    reopened = SingleStoreDB(NormilizedFakeEmbeddings(), table_name=
        'test_singlestoredb_from_existing', host=TEST_SINGLESTOREDB_URL)
    assert reopened.similarity_search('foo', k=1) == TEST_SINGLE_RESULT
    drop(table)
|
Test adding a new document
|
_embedding_func
|
"""
Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
"""
response = self.client.infer_trained_model(model_id=self.model_id, docs=[{
self.input_field: text} for text in texts])
embeddings = [doc['predicted_value'] for doc in response['inference_results']]
return embeddings
|
def _embedding_func(self, texts: List[str]) ->List[List[float]]:
    """
    Generate embeddings for the given texts using the Elasticsearch model.
    Args:
        texts (List[str]): A list of text strings to generate embeddings for.
    Returns:
        List[List[float]]: A list of embeddings, one for each text in the input
        list.
    """
    docs = [{self.input_field: text} for text in texts]
    response = self.client.infer_trained_model(model_id=self.model_id,
        docs=docs)
    # One inference result per input document, in order.
    return [record['predicted_value'] for record in response[
        'inference_results']]
|
Generate embeddings for the given texts using the Elasticsearch model.
Args:
texts (List[str]): A list of text strings to generate embeddings for.
Returns:
List[List[float]]: A list of embeddings, one for each text in the input
list.
|
simplify_code
|
import esprima
tree = esprima.parseScript(self.code, loc=True)
simplified_lines = self.source_lines[:]
for node in tree.body:
if isinstance(node, (esprima.nodes.FunctionDeclaration, esprima.nodes.
ClassDeclaration)):
start = node.loc.start.line - 1
simplified_lines[start] = f'// Code for: {simplified_lines[start]}'
for line_num in range(start + 1, node.loc.end.line):
simplified_lines[line_num] = None
return '\n'.join(line for line in simplified_lines if line is not None)
|
def simplify_code(self) ->str:
    """Collapse top-level JS function/class declarations into placeholders.

    Each declaration's first line is replaced by a '// Code for: ...' marker
    and the remaining body lines are dropped.
    """
    import esprima
    ast = esprima.parseScript(self.code, loc=True)
    kept = self.source_lines[:]
    decl_types = (esprima.nodes.FunctionDeclaration, esprima.nodes.
        ClassDeclaration)
    for top_node in ast.body:
        if isinstance(top_node, decl_types):
            # esprima line numbers are 1-based; convert to list indices.
            first = top_node.loc.start.line - 1
            kept[first] = f'// Code for: {kept[first]}'
            for idx in range(first + 1, top_node.loc.end.line):
                kept[idx] = None
    return '\n'.join(ln for ln in kept if ln is not None)
| null |
_llm_type
|
"""Return type of llm."""
return 'fake_list'
|
@property
def _llm_type(self) ->str:
    """Return type of llm.

    Constant identifier ``"fake_list"`` for this implementation.
    """
    return 'fake_list'
|
Return type of llm.
|
test__filter_similar_embeddings
|
threshold = 0.79
embedded_docs = [[1.0, 2.0], [1.0, 2.0], [2.0, 1.0], [2.0, 0.5], [0.0, 0.0]]
expected = [1, 3, 4]
actual = _filter_similar_embeddings(embedded_docs, cosine_similarity, threshold
)
assert expected == actual
|
def test__filter_similar_embeddings() ->None:
    """Near-duplicate vectors above the similarity cutoff are dropped."""
    cutoff = 0.79
    # Vectors 0 and 1 are identical; only one representative should survive.
    vectors = [[1.0, 2.0], [1.0, 2.0], [2.0, 1.0], [2.0, 0.5], [0.0, 0.0]]
    kept = _filter_similar_embeddings(vectors, cosine_similarity, cutoff)
    assert kept == [1, 3, 4]
| null |
yield_blobs
|
"""A lazy loader for raw data represented by LangChain's Blob object.
Returns:
A generator over blobs
"""
|
@abstractmethod
def yield_blobs(self) ->Iterable[Blob]:
    """A lazy loader for raw data represented by LangChain's Blob object.

    Subclasses implement this to stream blobs one at a time rather than
    materializing them all in memory.

    Returns:
        A generator over blobs
    """
|
A lazy loader for raw data represented by LangChain's Blob object.
Returns:
A generator over blobs
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.