method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
test_embedding_documents
|
documents = ['foo bar']
model = VertexAIEmbeddings()
output = model.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
assert model.model_name == model.client._model_id
assert model.model_name == 'textembedding-gecko@001'
|
def test_embedding_documents() ->None:
documents = ['foo bar']
model = VertexAIEmbeddings()
output = model.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
assert model.model_name == model.client._model_id
assert model.model_name == 'textembedding-gecko@001'
| null |
test_init_fail_text_column_mismatch
|
index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS)
with pytest.raises(ValueError) as ex:
DatabricksVectorSearch(index, text_column='some_other_column')
assert f"text_column 'some_other_column' does not match with the source column of the index: '{DEFAULT_TEXT_COLUMN}'." in str(
ex.value)
|
@pytest.mark.requires('databricks', 'databricks.vector_search')
def test_init_fail_text_column_mismatch() ->None:
index = mock_index(DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS)
with pytest.raises(ValueError) as ex:
DatabricksVectorSearch(index, text_column='some_other_column')
assert f"text_column 'some_other_column' does not match with the source column of the index: '{DEFAULT_TEXT_COLUMN}'." in str(
ex.value)
| null |
_type
|
"""Return the type key."""
return 'regex_dict_parser'
|
@property
def _type(self) ->str:
"""Return the type key."""
return 'regex_dict_parser'
|
Return the type key.
|
test_telegram_chat_loader
|
_check_telegram_chat_loader(path)
|
@pytest.mark.parametrize('path', ['telegram_chat_json',
'telegram_chat_json.zip', 'telegram_chat_json/result.json'])
def test_telegram_chat_loader(path: str) ->None:
_check_telegram_chat_loader(path)
| null |
get_spaces
|
"""
Get all spaces for the team.
"""
url = f'{DEFAULT_URL}/team/{self.team_id}/space'
response = requests.get(url, headers=self.get_headers(), params=self.
get_default_params())
data = response.json()
parsed_spaces = parse_dict_through_component(data, Space, fault_tolerant=True)
return parsed_spaces
|
def get_spaces(self) ->Dict:
"""
Get all spaces for the team.
"""
url = f'{DEFAULT_URL}/team/{self.team_id}/space'
response = requests.get(url, headers=self.get_headers(), params=self.
get_default_params())
data = response.json()
parsed_spaces = parse_dict_through_component(data, Space,
fault_tolerant=True)
return parsed_spaces
|
Get all spaces for the team.
|
as_retriever
|
return super().as_retriever(**kwargs)
|
def as_retriever(self, **kwargs: Any) ->VectorStoreRetriever:
return super().as_retriever(**kwargs)
| null |
load
|
from bs4 import BeautifulSoup
"""Load MHTML document into document objects."""
with open(self.file_path, 'r', encoding=self.open_encoding) as f:
message = email.message_from_string(f.read())
parts = message.get_payload()
if not isinstance(parts, list):
parts = [message]
for part in parts:
if part.get_content_type() == 'text/html':
html = part.get_payload(decode=True).decode()
soup = BeautifulSoup(html, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ''
metadata: Dict[str, Union[str, None]] = {'source': self.
file_path, 'title': title}
return [Document(page_content=text, metadata=metadata)]
return []
|
def load(self) ->List[Document]:
from bs4 import BeautifulSoup
"""Load MHTML document into document objects."""
with open(self.file_path, 'r', encoding=self.open_encoding) as f:
message = email.message_from_string(f.read())
parts = message.get_payload()
if not isinstance(parts, list):
parts = [message]
for part in parts:
if part.get_content_type() == 'text/html':
html = part.get_payload(decode=True).decode()
soup = BeautifulSoup(html, **self.bs_kwargs)
text = soup.get_text(self.get_text_separator)
if soup.title:
title = str(soup.title.string)
else:
title = ''
metadata: Dict[str, Union[str, None]] = {'source': self.
file_path, 'title': title}
return [Document(page_content=text, metadata=metadata)]
return []
| null |
test_max_marginal_relevance_search
|
"""Test max marginal relevance search."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=3)
sim_output = docsearch.similarity_search(texts[0], k=3)
assert mmr_output == sim_output
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k=3)
assert len(mmr_output) == 2
assert mmr_output[0].page_content == texts[0]
assert mmr_output[1].page_content == texts[1]
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2, fetch_k
=3, lambda_mult=0.1)
assert len(mmr_output) == 2
assert mmr_output[0].page_content == texts[0]
assert mmr_output[1].page_content == texts[2]
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3, fetch_k=2)
assert len(mmr_output) == 2
assert drop(docsearch.index_name)
|
def test_max_marginal_relevance_search(texts: List[str]) ->None:
"""Test max marginal relevance search."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
TEST_REDIS_URL)
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3,
fetch_k=3)
sim_output = docsearch.similarity_search(texts[0], k=3)
assert mmr_output == sim_output
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2,
fetch_k=3)
assert len(mmr_output) == 2
assert mmr_output[0].page_content == texts[0]
assert mmr_output[1].page_content == texts[1]
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=2,
fetch_k=3, lambda_mult=0.1)
assert len(mmr_output) == 2
assert mmr_output[0].page_content == texts[0]
assert mmr_output[1].page_content == texts[2]
mmr_output = docsearch.max_marginal_relevance_search(texts[0], k=3,
fetch_k=2)
assert len(mmr_output) == 2
assert drop(docsearch.index_name)
|
Test max marginal relevance search.
|
test_astradb_vectorstore_mmr
|
"""
MMR testing. We work on the unit circle with angle multiples
of 2*pi/20 and prepare a store with known vectors for a controlled
MMR outcome.
"""
def _v_from_i(i: int, N: int) ->str:
angle = 2 * math.pi * i / N
vector = [math.cos(angle), math.sin(angle)]
return json.dumps(vector)
i_vals = [0, 4, 5, 13]
N_val = 20
store_parseremb.add_texts([_v_from_i(i, N_val) for i in i_vals], metadatas=
[{'i': i} for i in i_vals])
res1 = store_parseremb.max_marginal_relevance_search(_v_from_i(3, N_val), k
=2, fetch_k=3)
res_i_vals = {doc.metadata['i'] for doc in res1}
assert res_i_vals == {0, 4}
|
def test_astradb_vectorstore_mmr(self, store_parseremb: AstraDB) ->None:
"""
MMR testing. We work on the unit circle with angle multiples
of 2*pi/20 and prepare a store with known vectors for a controlled
MMR outcome.
"""
def _v_from_i(i: int, N: int) ->str:
angle = 2 * math.pi * i / N
vector = [math.cos(angle), math.sin(angle)]
return json.dumps(vector)
i_vals = [0, 4, 5, 13]
N_val = 20
store_parseremb.add_texts([_v_from_i(i, N_val) for i in i_vals],
metadatas=[{'i': i} for i in i_vals])
res1 = store_parseremb.max_marginal_relevance_search(_v_from_i(3, N_val
), k=2, fetch_k=3)
res_i_vals = {doc.metadata['i'] for doc in res1}
assert res_i_vals == {0, 4}
|
MMR testing. We work on the unit circle with angle multiples
of 2*pi/20 and prepare a store with known vectors for a controlled
MMR outcome.
|
assert_query
|
assert query_body == {'knn': {'field': 'vector', 'filter': [], 'k': 1,
'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 0.0]}}
return query_body
|
def assert_query(query_body: dict, query: str) ->dict:
assert query_body == {'knn': {'field': 'vector', 'filter': [], 'k': 1,
'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 0.0]}}
return query_body
| null |
test_from_texts_with_metadatas
|
"""Test end to end construction and search."""
from momento import CredentialProvider, VectorIndexConfigurations
random_text = random_string()
random_document = f'Hello world {random_text} goodbye world!'
texts.insert(0, random_document)
metadatas = [{'page': f'{i}', 'source': 'user'} for i in range(len(texts))]
vector_store = None
try:
vector_store = MomentoVectorIndex.from_texts(texts=texts, embedding=
embedding_openai, index_name=random_index_name, metadatas=metadatas,
configuration=VectorIndexConfigurations.Default.latest(),
credential_provider=CredentialProvider.from_environment_variable(
API_KEY_ENV_VAR))
wait()
documents = vector_store.similarity_search(query=random_text, k=1)
assert documents == [Document(page_content=random_document, metadata={
'page': '0', 'source': 'user'})]
finally:
if vector_store is not None:
vector_store._client.delete_index(random_index_name)
|
def test_from_texts_with_metadatas(random_index_name: str, embedding_openai:
OpenAIEmbeddings, texts: List[str]) ->None:
"""Test end to end construction and search."""
from momento import CredentialProvider, VectorIndexConfigurations
random_text = random_string()
random_document = f'Hello world {random_text} goodbye world!'
texts.insert(0, random_document)
metadatas = [{'page': f'{i}', 'source': 'user'} for i in range(len(texts))]
vector_store = None
try:
vector_store = MomentoVectorIndex.from_texts(texts=texts, embedding
=embedding_openai, index_name=random_index_name, metadatas=
metadatas, configuration=VectorIndexConfigurations.Default.
latest(), credential_provider=CredentialProvider.
from_environment_variable(API_KEY_ENV_VAR))
wait()
documents = vector_store.similarity_search(query=random_text, k=1)
assert documents == [Document(page_content=random_document,
metadata={'page': '0', 'source': 'user'})]
finally:
if vector_store is not None:
vector_store._client.delete_index(random_index_name)
|
Test end to end construction and search.
|
is_openai_v1
|
"""Return whether OpenAI API is v1 or more."""
_version = parse(version('openai'))
return _version.major >= 1
|
def is_openai_v1() ->bool:
"""Return whether OpenAI API is v1 or more."""
_version = parse(version('openai'))
return _version.major >= 1
|
Return whether OpenAI API is v1 or more.
|
_NamedExpr
|
self.write('(')
self.dispatch(tree.target)
self.write(' := ')
self.dispatch(tree.value)
self.write(')')
|
def _NamedExpr(self, tree):
self.write('(')
self.dispatch(tree.target)
self.write(' := ')
self.dispatch(tree.value)
self.write(')')
| null |
_generate_field_schema
|
"""
Generate a schema for the search index in Redis based on the input metadata.
Given a dictionary of metadata, this function categorizes each metadata
field into one of the three categories:
- text: The field contains textual data.
- numeric: The field contains numeric data (either integer or float).
- tag: The field contains list of tags (strings).
Args
data (Dict[str, Any]): A dictionary where keys are metadata field names
and values are the metadata values.
Returns:
Dict[str, Any]: A dictionary with three keys "text", "numeric", and "tag".
Each key maps to a list of fields that belong to that category.
Raises:
ValueError: If a metadata field cannot be categorized into any of
the three known types.
"""
result: Dict[str, Any] = {'text': [], 'numeric': [], 'tag': []}
for key, value in data.items():
try:
int(value)
result['numeric'].append({'name': key})
continue
except (ValueError, TypeError):
pass
if value is None:
continue
if isinstance(value, (list, tuple)):
if not value or isinstance(value[0], str):
result['tag'].append({'name': key})
else:
name = type(value[0]).__name__
raise ValueError(
f"List/tuple values should contain strings: '{key}': {name}")
continue
if isinstance(value, str):
result['text'].append({'name': key})
continue
name = type(value).__name__
raise ValueError('Could not generate Redis index field type mapping ' +
f"for metadata: '{key}': {name}")
return result
|
def _generate_field_schema(data: Dict[str, Any]) ->Dict[str, Any]:
"""
Generate a schema for the search index in Redis based on the input metadata.
Given a dictionary of metadata, this function categorizes each metadata
field into one of the three categories:
- text: The field contains textual data.
- numeric: The field contains numeric data (either integer or float).
- tag: The field contains list of tags (strings).
Args
data (Dict[str, Any]): A dictionary where keys are metadata field names
and values are the metadata values.
Returns:
Dict[str, Any]: A dictionary with three keys "text", "numeric", and "tag".
Each key maps to a list of fields that belong to that category.
Raises:
ValueError: If a metadata field cannot be categorized into any of
the three known types.
"""
result: Dict[str, Any] = {'text': [], 'numeric': [], 'tag': []}
for key, value in data.items():
try:
int(value)
result['numeric'].append({'name': key})
continue
except (ValueError, TypeError):
pass
if value is None:
continue
if isinstance(value, (list, tuple)):
if not value or isinstance(value[0], str):
result['tag'].append({'name': key})
else:
name = type(value[0]).__name__
raise ValueError(
f"List/tuple values should contain strings: '{key}': {name}"
)
continue
if isinstance(value, str):
result['text'].append({'name': key})
continue
name = type(value).__name__
raise ValueError(
'Could not generate Redis index field type mapping ' +
f"for metadata: '{key}': {name}")
return result
|
Generate a schema for the search index in Redis based on the input metadata.
Given a dictionary of metadata, this function categorizes each metadata
field into one of the three categories:
- text: The field contains textual data.
- numeric: The field contains numeric data (either integer or float).
- tag: The field contains list of tags (strings).
Args
data (Dict[str, Any]): A dictionary where keys are metadata field names
and values are the metadata values.
Returns:
Dict[str, Any]: A dictionary with three keys "text", "numeric", and "tag".
Each key maps to a list of fields that belong to that category.
Raises:
ValueError: If a metadata field cannot be categorized into any of
the three known types.
|
_on_llm_error
|
crumbs = self.get_breadcrumbs(run)
self.function_callback(f"{get_colored_text('[llm/error]', color='red')} " +
get_bolded_text(
f"""[{crumbs}] [{elapsed(run)}] LLM run errored with error:
""") +
f"{try_json_stringify(run.error, '[error]')}")
|
def _on_llm_error(self, run: Run) ->None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[llm/error]', color='red')} " +
get_bolded_text(
f"""[{crumbs}] [{elapsed(run)}] LLM run errored with error:
""") +
f"{try_json_stringify(run.error, '[error]')}")
| null |
_create_chat_stream
|
payload = {'messages': self._convert_messages_to_ollama_messages(messages)}
yield from self._create_stream(payload=payload, stop=stop, api_url=
f'{self.base_url}/api/chat/', **kwargs)
|
def _create_chat_stream(self, messages: List[BaseMessage], stop: Optional[
List[str]]=None, **kwargs: Any) ->Iterator[str]:
payload = {'messages': self._convert_messages_to_ollama_messages(messages)}
yield from self._create_stream(payload=payload, stop=stop, api_url=
f'{self.base_url}/api/chat/', **kwargs)
| null |
_import_rwkv
|
from langchain_community.llms.rwkv import RWKV
return RWKV
|
def _import_rwkv() ->Any:
from langchain_community.llms.rwkv import RWKV
return RWKV
| null |
save
|
if self.get_chat_history:
raise ValueError('Chain not saveable when `get_chat_history` is not None.')
super().save(file_path)
|
def save(self, file_path: Union[Path, str]) ->None:
if self.get_chat_history:
raise ValueError(
'Chain not saveable when `get_chat_history` is not None.')
super().save(file_path)
| null |
get_access_token
|
"""Get the access token."""
url = f'{DEFAULT_URL}/oauth/token'
params = {'client_id': oauth_client_id, 'client_secret':
oauth_client_secret, 'code': code}
response = requests.post(url, params=params)
data = response.json()
if 'access_token' not in data:
print(f'Error: {data}')
if 'ECODE' in data and data['ECODE'] == 'OAUTH_014':
url = ClickupAPIWrapper.get_access_code_url(oauth_client_id)
print('You already used this code once. Generate a new one.',
f"""Our best guess for the url to get a new code is:
{url}""")
return None
return data['access_token']
|
@classmethod
def get_access_token(cls, oauth_client_id: str, oauth_client_secret: str,
code: str) ->Optional[str]:
"""Get the access token."""
url = f'{DEFAULT_URL}/oauth/token'
params = {'client_id': oauth_client_id, 'client_secret':
oauth_client_secret, 'code': code}
response = requests.post(url, params=params)
data = response.json()
if 'access_token' not in data:
print(f'Error: {data}')
if 'ECODE' in data and data['ECODE'] == 'OAUTH_014':
url = ClickupAPIWrapper.get_access_code_url(oauth_client_id)
print('You already used this code once. Generate a new one.',
f"""Our best guess for the url to get a new code is:
{url}""")
return None
return data['access_token']
|
Get the access token.
|
test_scholar_call
|
"""Test that call gives correct answer for scholar search."""
search = SearchApiAPIWrapper(engine='google_scholar')
output = search.run('large language models')
assert 'state of large language models and their applications' in output
|
def test_scholar_call() ->None:
"""Test that call gives correct answer for scholar search."""
search = SearchApiAPIWrapper(engine='google_scholar')
output = search.run('large language models')
assert 'state of large language models and their applications' in output
|
Test that call gives correct answer for scholar search.
|
make_dict
|
i = 0
di = {}
while i < len(values) - 1:
di[values[i]] = values[i + 1]
i += 2
return di
|
def make_dict(values: List[Any]) ->dict:
i = 0
di = {}
while i < len(values) - 1:
di[values[i]] = values[i + 1]
i += 2
return di
| null |
__call__
|
"""Check Cache and run the LLM on the given prompt and input."""
if not isinstance(prompt, str):
raise ValueError(
f'Argument `prompt` is expected to be a string. Instead found {type(prompt)}. If you want to run the LLM on multiple prompts, use `generate` instead.'
)
return self.generate([prompt], stop=stop, callbacks=callbacks, tags=tags,
metadata=metadata, **kwargs).generations[0][0].text
|
def __call__(self, prompt: str, stop: Optional[List[str]]=None, callbacks:
Callbacks=None, *, tags: Optional[List[str]]=None, metadata: Optional[
Dict[str, Any]]=None, **kwargs: Any) ->str:
"""Check Cache and run the LLM on the given prompt and input."""
if not isinstance(prompt, str):
raise ValueError(
f'Argument `prompt` is expected to be a string. Instead found {type(prompt)}. If you want to run the LLM on multiple prompts, use `generate` instead.'
)
return self.generate([prompt], stop=stop, callbacks=callbacks, tags=
tags, metadata=metadata, **kwargs).generations[0][0].text
|
Check Cache and run the LLM on the given prompt and input.
|
__init__
|
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.ignore_retriever_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
|
def __init__(self) ->None:
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.ignore_retriever_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
| null |
test_spreedly_loader
|
"""Test Spreedly Loader."""
access_token = ''
resource = 'gateways_options'
spreedly_loader = SpreedlyLoader(access_token, resource)
documents = spreedly_loader.load()
assert len(documents) == 1
|
def test_spreedly_loader() ->None:
"""Test Spreedly Loader."""
access_token = ''
resource = 'gateways_options'
spreedly_loader = SpreedlyLoader(access_token, resource)
documents = spreedly_loader.load()
assert len(documents) == 1
|
Test Spreedly Loader.
|
_text_analysis
|
poller = self.text_analytics_client.begin_analyze_healthcare_entities([{
'id': '1', 'language': 'en', 'text': text}])
result = poller.result()
res_dict = {}
docs = [doc for doc in result if not doc.is_error]
if docs is not None:
res_dict['entities'] = [
f'{x.text} is a healthcare entity of type {x.category}' for y in
docs for x in y.entities]
return res_dict
|
def _text_analysis(self, text: str) ->Dict:
poller = self.text_analytics_client.begin_analyze_healthcare_entities([
{'id': '1', 'language': 'en', 'text': text}])
result = poller.result()
res_dict = {}
docs = [doc for doc in result if not doc.is_error]
if docs is not None:
res_dict['entities'] = [
f'{x.text} is a healthcare entity of type {x.category}' for y in
docs for x in y.entities]
return res_dict
| null |
search_engine
|
""""A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events or about recent information. Input should be a search query. If the user is asking about something that you don't know about, you should probably use this tool to see if that can provide any information."""
return TavilySearchAPIWrapper().results(query, max_results=max_results)
|
@tool
def search_engine(query: str, max_results: int=5) ->str:
""""A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events or about recent information. Input should be a search query. If the user is asking about something that you don't know about, you should probably use this tool to see if that can provide any information."""
return TavilySearchAPIWrapper().results(query, max_results=max_results)
|
"A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events or about recent information. Input should be a search query. If the user is asking about something that you don't know about, you should probably use this tool to see if that can provide any information.
|
_validate_location
|
if location not in SUPPORTED_LOCATIONS:
raise NotImplementedError(INVALID_LOCATION_TEMPL.format(location=
location, name=name))
|
@staticmethod
def _validate_location(location: APIPropertyLocation, name: str) ->None:
if location not in SUPPORTED_LOCATIONS:
raise NotImplementedError(INVALID_LOCATION_TEMPL.format(location=
location, name=name))
| null |
test_call
|
"""Test that call runs."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run('zsh: command not found: python')
assert output != 'hello'
|
def test_call() ->None:
"""Test that call runs."""
stackexchange = StackExchangeAPIWrapper()
output = stackexchange.run('zsh: command not found: python')
assert output != 'hello'
|
Test that call runs.
|
_replace_template_var
|
"""Replace a template variable with a placeholder."""
placeholder = f'__TEMPLATE_VAR_{len(placeholders)}__'
placeholders[placeholder] = match.group(1)
return placeholder
|
def _replace_template_var(self, placeholders: Dict[str, str], match: re.Match
) ->str:
"""Replace a template variable with a placeholder."""
placeholder = f'__TEMPLATE_VAR_{len(placeholders)}__'
placeholders[placeholder] = match.group(1)
return placeholder
|
Replace a template variable with a placeholder.
|
mock_read_dataset
|
return dataset
|
def mock_read_dataset(*args: Any, **kwargs: Any) ->Dataset:
return dataset
| null |
get_headers
|
"""Get the headers for the request."""
if not isinstance(self.access_token, str):
raise TypeError(f'Access Token: {self.access_token}, must be str.')
headers = {'Authorization': str(self.access_token), 'Content-Type':
'application/json'}
return headers
|
def get_headers(self) ->Mapping[str, Union[str, bytes]]:
"""Get the headers for the request."""
if not isinstance(self.access_token, str):
raise TypeError(f'Access Token: {self.access_token}, must be str.')
headers = {'Authorization': str(self.access_token), 'Content-Type':
'application/json'}
return headers
|
Get the headers for the request.
|
test_mdelete
|
"""Test that deletion works as expected."""
store = UpstashRedisByteStore(client=redis_client, ttl=None)
keys = ['key1', 'key2']
redis_client.mset({'key1': 'value1', 'key2': 'value2'})
store.mdelete(keys)
result = redis_client.mget(*keys)
assert result == [None, None]
|
def test_mdelete(redis_client: Redis) ->None:
"""Test that deletion works as expected."""
store = UpstashRedisByteStore(client=redis_client, ttl=None)
keys = ['key1', 'key2']
redis_client.mset({'key1': 'value1', 'key2': 'value2'})
store.mdelete(keys)
result = redis_client.mget(*keys)
assert result == [None, None]
|
Test that deletion works as expected.
|
test_tags_in_page_content
|
"""Verify a doc with tags are included in the metadata"""
doc = next(doc for doc in docs if doc.metadata['source'] == 'no_frontmatter.md'
)
assert set(doc.metadata) == STANDARD_METADATA_FIELDS | {'tags'}
|
def test_tags_in_page_content() ->None:
"""Verify a doc with tags are included in the metadata"""
doc = next(doc for doc in docs if doc.metadata['source'] ==
'no_frontmatter.md')
assert set(doc.metadata) == STANDARD_METADATA_FIELDS | {'tags'}
|
Verify a doc with tags are included in the metadata
|
_sanitize_input
|
return re.sub('[^a-zA-Z0-9_]', '', input_str)
|
def _sanitize_input(self, input_str: str) ->str:
return re.sub('[^a-zA-Z0-9_]', '', input_str)
| null |
v_args
|
"""Dummy decorator for when lark is not installed."""
return lambda _: None
|
def v_args(*args: Any, **kwargs: Any) ->Any:
"""Dummy decorator for when lark is not installed."""
return lambda _: None
|
Dummy decorator for when lark is not installed.
|
bind
|
"""Bind additional kwargs to a Runnable, returning a new Runnable.
Args:
**kwargs: The kwargs to bind to the Runnable.
Returns:
A new Runnable with the same type and config as the original,
but with the additional kwargs bound.
"""
return self.__class__(bound=self.bound, config=self.config, kwargs={**self.
kwargs, **kwargs}, custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type)
|
def bind(self, **kwargs: Any) ->Runnable[Input, Output]:
"""Bind additional kwargs to a Runnable, returning a new Runnable.
Args:
**kwargs: The kwargs to bind to the Runnable.
Returns:
A new Runnable with the same type and config as the original,
but with the additional kwargs bound.
"""
return self.__class__(bound=self.bound, config=self.config, kwargs={**
self.kwargs, **kwargs}, custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type)
|
Bind additional kwargs to a Runnable, returning a new Runnable.
Args:
**kwargs: The kwargs to bind to the Runnable.
Returns:
A new Runnable with the same type and config as the original,
but with the additional kwargs bound.
|
test_init_with_local_pipeline
|
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
llm = SelfHostedPipeline.from_pipeline(pipeline=pipeline, hardware=gpu,
model_reqs=model_reqs, inference_fn=inference_fn)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_init_with_local_pipeline() ->None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
llm = SelfHostedPipeline.from_pipeline(pipeline=pipeline, hardware=gpu,
model_reqs=model_reqs, inference_fn=inference_fn)
output = llm('Say foo:')
assert isinstance(output, str)
|
Test initialization with a self-hosted HF pipeline.
|
_get_pipeline_name
|
return f'{self.model_id}_sparse_embedding'
|
def _get_pipeline_name(self) ->str:
return f'{self.model_id}_sparse_embedding'
| null |
test_similarity_search_with_filter
|
"""Test similarity search."""
output = deeplake_datastore.similarity_search('foo', k=1, distance_metric=
distance_metric, filter={'metadata': {'page': '1'}})
assert output == [Document(page_content='bar', metadata={'page': '1'})]
deeplake_datastore.delete_dataset()
|
def test_similarity_search_with_filter(deeplake_datastore: DeepLake,
distance_metric: str) ->None:
"""Test similarity search."""
output = deeplake_datastore.similarity_search('foo', k=1,
distance_metric=distance_metric, filter={'metadata': {'page': '1'}})
assert output == [Document(page_content='bar', metadata={'page': '1'})]
deeplake_datastore.delete_dataset()
|
Test similarity search.
|
_import_forefrontai
|
from langchain_community.llms.forefrontai import ForefrontAI
return ForefrontAI
|
def _import_forefrontai() ->Any:
from langchain_community.llms.forefrontai import ForefrontAI
return ForefrontAI
| null |
prep_inputs
|
"""Validate and prep inputs."""
if 'reference' not in inputs:
inputs['reference'] = self._format_reference(inputs.get('reference'))
return super().prep_inputs(inputs)
|
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) ->Dict[str, str]:
"""Validate and prep inputs."""
if 'reference' not in inputs:
inputs['reference'] = self._format_reference(inputs.get('reference'))
return super().prep_inputs(inputs)
|
Validate and prep inputs.
|
test_root_question
|
"""Test irrational number that should need sympy."""
question = 'What is the square root of 2?'
output = fake_llm_symbolic_math_chain.run(question)
assert output == f'Answer: {sympy.sqrt(2)}'
|
def test_root_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain
) ->None:
"""Test irrational number that should need sympy."""
question = 'What is the square root of 2?'
output = fake_llm_symbolic_math_chain.run(question)
assert output == f'Answer: {sympy.sqrt(2)}'
|
Test irrational number that should need sympy.
|
add_texts
|
"""Add texts data to an existing index."""
keys = kwargs.get('keys')
ids = []
data = []
for i, text in enumerate(texts):
key = keys[i] if keys else str(uuid.uuid4())
key = base64.urlsafe_b64encode(bytes(key, 'utf-8')).decode('ascii')
metadata = metadatas[i] if metadatas else {}
doc = {'@search.action': 'upload', FIELDS_ID: key, FIELDS_CONTENT: text,
FIELDS_CONTENT_VECTOR: np.array(self.embedding_function(text),
dtype=np.float32).tolist(), FIELDS_METADATA: json.dumps(metadata)}
if metadata:
additional_fields = {k: v for k, v in metadata.items() if k in [x.
name for x in self.fields]}
doc.update(additional_fields)
data.append(doc)
ids.append(key)
if len(data) == MAX_UPLOAD_BATCH_SIZE:
response = self.client.upload_documents(documents=data)
if not all([r.succeeded for r in response]):
raise Exception(response)
data = []
if len(data) == 0:
return ids
response = self.client.upload_documents(documents=data)
if all([r.succeeded for r in response]):
return ids
else:
raise Exception(response)
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, **kwargs: Any) ->List[str]:
"""Add texts data to an existing index."""
keys = kwargs.get('keys')
ids = []
data = []
for i, text in enumerate(texts):
key = keys[i] if keys else str(uuid.uuid4())
key = base64.urlsafe_b64encode(bytes(key, 'utf-8')).decode('ascii')
metadata = metadatas[i] if metadatas else {}
doc = {'@search.action': 'upload', FIELDS_ID: key, FIELDS_CONTENT:
text, FIELDS_CONTENT_VECTOR: np.array(self.embedding_function(
text), dtype=np.float32).tolist(), FIELDS_METADATA: json.dumps(
metadata)}
if metadata:
additional_fields = {k: v for k, v in metadata.items() if k in
[x.name for x in self.fields]}
doc.update(additional_fields)
data.append(doc)
ids.append(key)
if len(data) == MAX_UPLOAD_BATCH_SIZE:
response = self.client.upload_documents(documents=data)
if not all([r.succeeded for r in response]):
raise Exception(response)
data = []
if len(data) == 0:
return ids
response = self.client.upload_documents(documents=data)
if all([r.succeeded for r in response]):
return ids
else:
raise Exception(response)
|
Add texts data to an existing index.
|
_format_definition
|
if 'hwi' in definition:
headword = definition['hwi']['hw'].replace('*', '-')
else:
headword = definition['meta']['id'].split(':')[0]
if 'fl' in definition:
functional_label = definition['fl']
if 'shortdef' in definition:
for short_def in definition['shortdef']:
yield f'{headword}, {functional_label}: {short_def}'
else:
yield f'{headword}, {functional_label}'
|
def _format_definition(self, definition: Dict) ->Iterator[str]:
    """Yield formatted definition lines for one dictionary API entry.

    Args:
        definition: One entry from the dictionary API response, possibly
            containing 'hwi' (headword info), 'meta', 'fl' (functional
            label) and 'shortdef' keys.

    Yields:
        Strings of the form ``"<headword>, <label>: <short definition>"``,
        or ``"<headword>, <label>"`` when no short definitions exist.
    """
    if 'hwi' in definition:
        # '*' marks syllable breaks in the API's headword spelling.
        headword = definition['hwi']['hw'].replace('*', '-')
    else:
        headword = definition['meta']['id'].split(':')[0]
    # Bug fix: the original assigned ``functional_label`` only when 'fl'
    # was present but used it unconditionally below, raising NameError
    # for entries without a functional label.
    functional_label = definition.get('fl', '')
    if 'shortdef' in definition:
        for short_def in definition['shortdef']:
            yield f'{headword}, {functional_label}: {short_def}'
    else:
        yield f'{headword}, {functional_label}'
| null |
get_num_tokens
|
"""Calculate number of tokens."""
return len(self.client.tokenize(text).tokens)
|
def get_num_tokens(self, text: str) ->int:
    """Count tokens by delegating tokenization to the backing client."""
    tokenization = self.client.tokenize(text)
    return len(tokenization.tokens)
|
Calculate number of tokens.
|
dict
|
"""Dictionary representation of chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
**kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`
method.
Returns:
A dictionary representation of the chain.
Example:
.. code-block:: python
chain.dict(exclude_unset=True)
# -> {"_type": "foo", "verbose": False, ...}
"""
_dict = super().dict(**kwargs)
try:
_dict['_type'] = self._chain_type
except NotImplementedError:
pass
return _dict
|
def dict(self, **kwargs: Any) ->Dict:
    """Serialize the chain to a dictionary.

    Expects the `Chain._chain_type` property to be implemented and the
    chain's memory to be null.

    Args:
        **kwargs: Forwarded to the default `pydantic.BaseModel.dict` method.

    Returns:
        A dictionary representation of the chain, including a `_type` entry
        when the chain type is known.

    Example:
        .. code-block:: python

            chain.dict(exclude_unset=True)
            # -> {"_type": "foo", "verbose": False, ...}
    """
    serialized = super().dict(**kwargs)
    try:
        serialized['_type'] = self._chain_type
    except NotImplementedError:
        # Chains without a registered type simply omit the `_type` key.
        pass
    return serialized
|
Dictionary representation of chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
**kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`
method.
Returns:
A dictionary representation of the chain.
Example:
.. code-block:: python
chain.dict(exclude_unset=True)
# -> {"_type": "foo", "verbose": False, ...}
|
validate_environment
|
"""Validate that AWS credentials to and python package exists in environment."""
if values['client'] is not None:
return values
try:
import boto3
if values['credentials_profile_name'] is not None:
session = boto3.Session(profile_name=values['credentials_profile_name']
)
else:
session = boto3.Session()
client_params = {}
if values['region_name']:
client_params['region_name'] = values['region_name']
if values['endpoint_url']:
client_params['endpoint_url'] = values['endpoint_url']
values['client'] = session.client('bedrock-runtime', **client_params)
except ImportError:
raise ModuleNotFoundError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that AWS credentials and the boto3 package are available.

    Leaves ``values['client']`` untouched when a client was supplied by the
    caller; otherwise builds a ``bedrock-runtime`` client from the
    configured profile, region and endpoint.

    Raises:
        ModuleNotFoundError: If boto3 is not installed.
        ValueError: If AWS credentials cannot be loaded.
    """
    if values['client'] is not None:
        # Respect a pre-configured client injected by the caller.
        return values
    try:
        import boto3
        if values['credentials_profile_name'] is not None:
            session = boto3.Session(profile_name=values[
                'credentials_profile_name'])
        else:
            # Fall back to boto3's default credential resolution chain.
            session = boto3.Session()
        client_params = {}
        if values['region_name']:
            client_params['region_name'] = values['region_name']
        if values['endpoint_url']:
            client_params['endpoint_url'] = values['endpoint_url']
        values['client'] = session.client('bedrock-runtime', **client_params)
    except ImportError:
        raise ModuleNotFoundError(
            'Could not import boto3 python package. Please install it with `pip install boto3`.'
            )
    except Exception as e:
        raise ValueError(
            'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
            ) from e
    return values
|
Validate that AWS credentials and the boto3 python package are available in the environment.
|
completion_with_retry
|
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**_kwargs: Any) ->Any:
return _make_request(llm, **_kwargs)
return _completion_with_retry(**kwargs)
|
def completion_with_retry(llm: ChatYandexGPT, **kwargs: Any) ->Any:
    """Invoke the YandexGPT completion call, retrying transient failures."""
    decorate = _create_retry_decorator(llm)

    @decorate
    def _attempt(**_kwargs: Any) ->Any:
        return _make_request(llm, **_kwargs)
    return _attempt(**kwargs)
|
Use tenacity to retry the completion call.
|
__init__
|
"""
Args:
contract_address: The address of the smart contract.
blockchainType: The blockchain type.
api_key: The Alchemy API key.
startToken: The start token for pagination.
get_all_tokens: Whether to get all tokens on the contract.
max_execution_time: The maximum execution time (sec).
"""
self.contract_address = contract_address
self.blockchainType = blockchainType.value
self.api_key = os.environ.get('ALCHEMY_API_KEY') or api_key
self.startToken = startToken
self.get_all_tokens = get_all_tokens
self.max_execution_time = max_execution_time
if not self.api_key:
raise ValueError('Alchemy API key not provided.')
if not re.match('^0x[a-fA-F0-9]{40}$', self.contract_address):
raise ValueError(f'Invalid contract address {self.contract_address}')
|
def __init__(self, contract_address: str, blockchainType: BlockchainType=
    BlockchainType.ETH_MAINNET, api_key: str='docs-demo', startToken: str=
    '', get_all_tokens: bool=False, max_execution_time: Optional[int]=None):
    """
    Args:
        contract_address: The address of the smart contract.
        blockchainType: The blockchain type.
        api_key: The Alchemy API key.
        startToken: The start token for pagination.
        get_all_tokens: Whether to get all tokens on the contract.
        max_execution_time: The maximum execution time (sec).

    Raises:
        ValueError: If no API key is available, or if the contract address
            is not a 0x-prefixed string of 40 hex digits.
    """
    self.contract_address = contract_address
    self.blockchainType = blockchainType.value
    # NOTE(review): the ALCHEMY_API_KEY environment variable takes
    # precedence over an explicitly passed api_key argument here.
    self.api_key = os.environ.get('ALCHEMY_API_KEY') or api_key
    self.startToken = startToken
    self.get_all_tokens = get_all_tokens
    self.max_execution_time = max_execution_time
    if not self.api_key:
        raise ValueError('Alchemy API key not provided.')
    # EVM addresses are '0x' followed by exactly 40 hex characters.
    if not re.match('^0x[a-fA-F0-9]{40}$', self.contract_address):
        raise ValueError(f'Invalid contract address {self.contract_address}')
|
Args:
contract_address: The address of the smart contract.
blockchainType: The blockchain type.
api_key: The Alchemy API key.
startToken: The start token for pagination.
get_all_tokens: Whether to get all tokens on the contract.
max_execution_time: The maximum execution time (sec).
|
parse_result
|
_result = super().parse_result(result)
if self.args_only:
pydantic_args = self.pydantic_schema.parse_raw(_result)
else:
fn_name = _result['name']
_args = _result['arguments']
pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args)
return pydantic_args
|
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
    """Parse the function-call payload into the configured pydantic model.

    In args-only mode the whole payload is the arguments blob; otherwise
    the payload carries a function name used to select the schema.
    """
    parsed = super().parse_result(result)
    if self.args_only:
        return self.pydantic_schema.parse_raw(parsed)
    fn_name = parsed['name']
    raw_args = parsed['arguments']
    return self.pydantic_schema[fn_name].parse_raw(raw_args)
| null |
file_store
|
with tempfile.TemporaryDirectory() as temp_dir:
store = LocalFileStore(temp_dir)
yield store
|
@pytest.fixture
def file_store() ->Generator[LocalFileStore, None, None]:
    """Yield a LocalFileStore rooted in a fresh temporary directory.

    The directory (and everything stored in it) is removed automatically
    when the test using this fixture finishes.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        store = LocalFileStore(temp_dir)
        yield store
| null |
_import_rwkv
|
from langchain_community.llms.rwkv import RWKV
return RWKV
|
def _import_rwkv() ->Any:
    """Lazily import and return the RWKV LLM class.

    The function-scope import defers loading langchain_community until the
    class is actually requested.
    """
    from langchain_community.llms.rwkv import RWKV
    return RWKV
| null |
_process_content
|
if isinstance(content, str):
return content
string_array: list = []
for part in content:
if isinstance(part, str):
string_array.append(part)
elif isinstance(part, Mapping):
if _is_openai_parts_format(part):
if part['type'] == 'text':
string_array.append(str(part['text']))
elif part['type'] == 'image_url':
img_url = part['image_url']
if isinstance(img_url, dict):
if 'url' not in img_url:
raise ValueError(
f'Unrecognized message image format: {img_url}')
img_url = img_url['url']
b64_string = _url_to_b64_string(img_url)
string_array.append(f'<img src="{b64_string}" />')
else:
raise ValueError(
f"Unrecognized message part type: {part['type']}")
else:
raise ValueError(f'Unrecognized message part format: {part}')
return ''.join(string_array)
|
def _process_content(self, content: Union[str, List[Union[dict, str]]]) ->str:
    """Flatten message content into a single string.

    String content is returned unchanged. List content may mix plain
    strings and OpenAI-style content parts: 'text' parts are appended
    verbatim, and 'image_url' parts are inlined as HTML ``<img>`` tags.

    Raises:
        ValueError: For unrecognized part types or malformed image parts.
    """
    if isinstance(content, str):
        return content
    string_array: list = []
    for part in content:
        if isinstance(part, str):
            string_array.append(part)
        elif isinstance(part, Mapping):
            if _is_openai_parts_format(part):
                if part['type'] == 'text':
                    string_array.append(str(part['text']))
                elif part['type'] == 'image_url':
                    img_url = part['image_url']
                    if isinstance(img_url, dict):
                        # Dict form must carry the URL under 'url'.
                        if 'url' not in img_url:
                            raise ValueError(
                                f'Unrecognized message image format: {img_url}'
                                )
                        img_url = img_url['url']
                    # presumably resolves the URL to a base64 payload for
                    # the data URI — confirm in _url_to_b64_string.
                    b64_string = _url_to_b64_string(img_url)
                    string_array.append(f'<img src="{b64_string}" />')
                else:
                    raise ValueError(
                        f"Unrecognized message part type: {part['type']}")
            else:
                raise ValueError(f'Unrecognized message part format: {part}')
    return ''.join(string_array)
| null |
format_response_payload
|
response_json = json.loads(output)
return response_json[0]['summary_text']
|
def format_response_payload(self, output: bytes) ->str:
    """Extract the summary text from a raw endpoint response body."""
    parsed = json.loads(output)
    first_result = parsed[0]
    return first_result['summary_text']
| null |
lookup
|
"""Look up based on prompt and llm_string."""
stmt = select(self.cache_schema.response).where(self.cache_schema.prompt ==
prompt).where(self.cache_schema.llm == llm_string).order_by(self.
cache_schema.idx)
with Session(self.engine) as session:
rows = session.execute(stmt).fetchall()
if rows:
try:
return [loads(row[0]) for row in rows]
except Exception:
logger.warning(
'Retrieving a cache value that could not be deserialized properly. This is likely due to the cache being in an older format. Please recreate your cache to avoid this error.'
)
return [Generation(text=row[0]) for row in rows]
return None
|
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
    """Look up based on prompt and llm_string.

    Returns the cached generations for the exact (prompt, llm_string)
    pair, ordered by their stored index, or None on a cache miss.
    """
    stmt = select(self.cache_schema.response).where(self.cache_schema.
        prompt == prompt).where(self.cache_schema.llm == llm_string).order_by(
        self.cache_schema.idx)
    with Session(self.engine) as session:
        rows = session.execute(stmt).fetchall()
        if rows:
            try:
                # Rows normally hold serialized Generation objects.
                return [loads(row[0]) for row in rows]
            except Exception:
                # Backward compatibility: older caches stored plain text.
                logger.warning(
                    'Retrieving a cache value that could not be deserialized properly. This is likely due to the cache being in an older format. Please recreate your cache to avoid this error.'
                    )
                return [Generation(text=row[0]) for row in rows]
    return None
|
Look up based on prompt and llm_string.
|
test_get_salient_docs
|
query = 'Test query'
docs_and_scores = time_weighted_retriever.get_salient_docs(query)
want = [(doc, 0.5) for doc in _get_example_memories()]
assert isinstance(docs_and_scores, dict)
assert len(docs_and_scores) == len(want)
for k, doc in docs_and_scores.items():
assert doc in want
|
def test_get_salient_docs(time_weighted_retriever:
    TimeWeightedVectorStoreRetriever) ->None:
    """Every example memory comes back as a salient doc with score 0.5."""
    query = 'Test query'
    docs_and_scores = time_weighted_retriever.get_salient_docs(query)
    # Expected (doc, score) pairs; the fixture's backing store scores
    # everything 0.5.
    want = [(doc, 0.5) for doc in _get_example_memories()]
    assert isinstance(docs_and_scores, dict)
    assert len(docs_and_scores) == len(want)
    for k, doc in docs_and_scores.items():
        assert doc in want
| null |
_load_transformer
|
"""Inference function to send to the remote hardware.
Accepts a huggingface model_id and returns a pipeline for the task.
"""
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers import pipeline as hf_pipeline
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == 'text-generation':
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ('text2text-generation', 'summarization'):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs
)
else:
raise ValueError(
f'Got invalid task {task}, currently only {VALID_TASKS} are supported'
)
except ImportError as e:
raise ValueError(
f'Could not load the {task} model due to missing dependencies.') from e
if importlib.util.find_spec('torch') is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or device >= cuda_device_count:
raise ValueError(
f'Got device=={device}, device is required to be within [-1, {cuda_device_count})'
)
if device < 0 and cuda_device_count > 0:
logger.warning(
'Device has %d GPUs available. Provide device={deviceId} to `from_model_id` to use availableGPUs for execution. deviceId is -1 for CPU and can be a positive integer associated with CUDA device id.'
, cuda_device_count)
pipeline = hf_pipeline(task=task, model=model, tokenizer=tokenizer, device=
device, model_kwargs=_model_kwargs)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f'Got invalid task {pipeline.task}, currently only {VALID_TASKS} are supported'
)
return pipeline
|
def _load_transformer(model_id: str=DEFAULT_MODEL_ID, task: str=
    DEFAULT_TASK, device: int=0, model_kwargs: Optional[dict]=None) ->Any:
    """Inference function to send to the remote hardware.

    Accepts a huggingface model_id and returns a pipeline for the task.

    Args:
        model_id: Hugging Face hub model identifier.
        task: One of the supported pipeline tasks (see ``VALID_TASKS``).
        device: CUDA device id; -1 selects CPU.
        model_kwargs: Extra kwargs forwarded to tokenizer/model loading.

    Raises:
        ValueError: For an unsupported task, missing model dependencies,
            or an out-of-range device id.
    """
    from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
    from transformers import pipeline as hf_pipeline
    _model_kwargs = model_kwargs or {}
    tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
    try:
        # Pick the model class matching the requested pipeline task.
        if task == 'text-generation':
            model = AutoModelForCausalLM.from_pretrained(model_id, **
                _model_kwargs)
        elif task in ('text2text-generation', 'summarization'):
            model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **
                _model_kwargs)
        else:
            raise ValueError(
                f'Got invalid task {task}, currently only {VALID_TASKS} are supported'
                )
    except ImportError as e:
        raise ValueError(
            f'Could not load the {task} model due to missing dependencies.'
            ) from e
    if importlib.util.find_spec('torch') is not None:
        import torch
        # Validate the requested device against the GPUs actually present.
        cuda_device_count = torch.cuda.device_count()
        if device < -1 or device >= cuda_device_count:
            raise ValueError(
                f'Got device=={device}, device is required to be within [-1, {cuda_device_count})'
                )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                'Device has %d GPUs available. Provide device={deviceId} to `from_model_id` to use availableGPUs for execution. deviceId is -1 for CPU and can be a positive integer associated with CUDA device id.'
                , cuda_device_count)
    pipeline = hf_pipeline(task=task, model=model, tokenizer=tokenizer,
        device=device, model_kwargs=_model_kwargs)
    if pipeline.task not in VALID_TASKS:
        raise ValueError(
            f'Got invalid task {pipeline.task}, currently only {VALID_TASKS} are supported'
            )
    return pipeline
|
Inference function to send to the remote hardware.
Accepts a huggingface model_id and returns a pipeline for the task.
|
_get_search_client
|
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import SearchIndex, SemanticConfiguration, SemanticField, VectorSearch
try:
from azure.search.documents.indexes.models import HnswAlgorithmConfiguration, SemanticPrioritizedFields, SemanticSearch
NEW_VERSION = True
except ImportError:
from azure.search.documents.indexes.models import HnswVectorSearchAlgorithmConfiguration, PrioritizedFields, SemanticSettings
NEW_VERSION = False
default_fields = default_fields or []
if key is None:
credential = DefaultAzureCredential()
elif key.upper() == 'INTERACTIVE':
credential = InteractiveBrowserCredential()
credential.get_token('https://search.azure.com/.default')
else:
credential = AzureKeyCredential(key)
index_client: SearchIndexClient = SearchIndexClient(endpoint=endpoint,
credential=credential, user_agent=user_agent)
try:
index_client.get_index(name=index_name)
except ResourceNotFoundError:
if fields is not None:
fields_types = {f.name: f.type for f in fields}
mandatory_fields = {df.name: df.type for df in default_fields}
missing_fields = {key: mandatory_fields[key] for key, value in set(
mandatory_fields.items()) - set(fields_types.items())}
if len(missing_fields) > 0:
def fmt_err(x: str) ->str:
return (
f"{x} current type: '{fields_types.get(x, 'MISSING')}'. It has to be '{mandatory_fields.get(x)}' or you can point to a different '{mandatory_fields.get(x)}' field name by using the env variable 'AZURESEARCH_FIELDS_{x.upper()}'"
)
error = '\n'.join([fmt_err(x) for x in missing_fields])
raise ValueError(
f"""You need to specify at least the following fields {missing_fields} or provide alternative field names in the env variables.
{error}"""
)
else:
fields = default_fields
if vector_search is None:
if NEW_VERSION:
vector_search = VectorSearch(algorithms=[
HnswAlgorithmConfiguration(name='default', kind='hnsw',
parameters={'m': 4, 'efConstruction': 400, 'efSearch': 500,
'metric': 'cosine'})])
else:
vector_search = VectorSearch(algorithm_configurations=[
HnswVectorSearchAlgorithmConfiguration(name='default', kind
='hnsw', parameters={'m': 4, 'efConstruction': 400,
'efSearch': 500, 'metric': 'cosine'})])
if semantic_settings is None and semantic_configuration_name is not None:
if NEW_VERSION:
semantic_settings = SemanticSearch(configurations=[
SemanticConfiguration(name=semantic_configuration_name,
prioritized_fields=SemanticPrioritizedFields(content_fields
=[SemanticField(field_name=FIELDS_CONTENT)]))])
else:
semantic_settings = SemanticSettings(configurations=[
SemanticConfiguration(name=semantic_configuration_name,
prioritized_fields=PrioritizedFields(
prioritized_content_fields=[SemanticField(field_name=
FIELDS_CONTENT)]))])
index = SearchIndex(name=index_name, fields=fields, vector_search=
vector_search, semantic_settings=semantic_settings,
scoring_profiles=scoring_profiles, default_scoring_profile=
default_scoring_profile, cors_options=cors_options)
index_client.create_index(index)
return SearchClient(endpoint=endpoint, index_name=index_name, credential=
credential, user_agent=user_agent)
|
def _get_search_client(endpoint: str, key: str, index_name: str,
    semantic_configuration_name: Optional[str]=None, fields: Optional[List[
    SearchField]]=None, vector_search: Optional[VectorSearch]=None,
    semantic_settings: Optional[Union[SemanticSearch, SemanticSettings]]=
    None, scoring_profiles: Optional[List[ScoringProfile]]=None,
    default_scoring_profile: Optional[str]=None, default_fields: Optional[
    List[SearchField]]=None, user_agent: Optional[str]='langchain',
    cors_options: Optional[CorsOptions]=None) ->SearchClient:
    """Build an Azure Search client, creating the index if it is missing.

    Args:
        endpoint: Search service endpoint URL.
        key: API key; None selects DefaultAzureCredential and the literal
            'interactive' (any case) selects browser-based login.
        index_name: Name of the index to open or create.
        semantic_configuration_name: Optional semantic config to create.
        fields: Explicit index fields, validated against ``default_fields``.
        vector_search, semantic_settings, scoring_profiles,
            default_scoring_profile, cors_options: Optional index settings
            used only when the index has to be created.
        user_agent: User agent string sent with requests.

    Returns:
        A SearchClient bound to ``index_name``.

    Raises:
        ValueError: If ``fields`` is missing mandatory default fields.
    """
    from azure.core.credentials import AzureKeyCredential
    from azure.core.exceptions import ResourceNotFoundError
    from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
    from azure.search.documents import SearchClient
    from azure.search.documents.indexes import SearchIndexClient
    from azure.search.documents.indexes.models import SearchIndex, SemanticConfiguration, SemanticField, VectorSearch
    # Newer azure-search-documents releases renamed several model classes;
    # detect which generation of the SDK is installed.
    try:
        from azure.search.documents.indexes.models import HnswAlgorithmConfiguration, SemanticPrioritizedFields, SemanticSearch
        NEW_VERSION = True
    except ImportError:
        from azure.search.documents.indexes.models import HnswVectorSearchAlgorithmConfiguration, PrioritizedFields, SemanticSettings
        NEW_VERSION = False
    default_fields = default_fields or []
    if key is None:
        credential = DefaultAzureCredential()
    elif key.upper() == 'INTERACTIVE':
        credential = InteractiveBrowserCredential()
        # Trigger the browser login eagerly so later calls do not block.
        credential.get_token('https://search.azure.com/.default')
    else:
        credential = AzureKeyCredential(key)
    index_client: SearchIndexClient = SearchIndexClient(endpoint=endpoint,
        credential=credential, user_agent=user_agent)
    try:
        index_client.get_index(name=index_name)
    except ResourceNotFoundError:
        # Index does not exist yet: validate/derive fields and create it.
        if fields is not None:
            fields_types = {f.name: f.type for f in fields}
            mandatory_fields = {df.name: df.type for df in default_fields}
            # Mandatory (name, type) pairs absent from the user's fields.
            # NOTE(review): this comprehension shadows the ``key`` param.
            missing_fields = {key: mandatory_fields[key] for key, value in
                set(mandatory_fields.items()) - set(fields_types.items())}
            if len(missing_fields) > 0:
                def fmt_err(x: str) ->str:
                    return (
                        f"{x} current type: '{fields_types.get(x, 'MISSING')}'. It has to be '{mandatory_fields.get(x)}' or you can point to a different '{mandatory_fields.get(x)}' field name by using the env variable 'AZURESEARCH_FIELDS_{x.upper()}'"
                        )
                error = '\n'.join([fmt_err(x) for x in missing_fields])
                raise ValueError(
                    f"""You need to specify at least the following fields {missing_fields} or provide alternative field names in the env variables.
{error}"""
                    )
        else:
            fields = default_fields
        if vector_search is None:
            if NEW_VERSION:
                vector_search = VectorSearch(algorithms=[
                    HnswAlgorithmConfiguration(name='default', kind='hnsw',
                    parameters={'m': 4, 'efConstruction': 400, 'efSearch':
                    500, 'metric': 'cosine'})])
            else:
                vector_search = VectorSearch(algorithm_configurations=[
                    HnswVectorSearchAlgorithmConfiguration(name='default',
                    kind='hnsw', parameters={'m': 4, 'efConstruction': 400,
                    'efSearch': 500, 'metric': 'cosine'})])
        if (semantic_settings is None and semantic_configuration_name is not
            None):
            if NEW_VERSION:
                semantic_settings = SemanticSearch(configurations=[
                    SemanticConfiguration(name=semantic_configuration_name,
                    prioritized_fields=SemanticPrioritizedFields(
                    content_fields=[SemanticField(field_name=FIELDS_CONTENT
                    )]))])
            else:
                semantic_settings = SemanticSettings(configurations=[
                    SemanticConfiguration(name=semantic_configuration_name,
                    prioritized_fields=PrioritizedFields(
                    prioritized_content_fields=[SemanticField(field_name=
                    FIELDS_CONTENT)]))])
        index = SearchIndex(name=index_name, fields=fields, vector_search=
            vector_search, semantic_settings=semantic_settings,
            scoring_profiles=scoring_profiles, default_scoring_profile=
            default_scoring_profile, cors_options=cors_options)
        index_client.create_index(index)
    return SearchClient(endpoint=endpoint, index_name=index_name,
        credential=credential, user_agent=user_agent)
| null |
_import_weaviate
|
from langchain_community.vectorstores.weaviate import Weaviate
return Weaviate
|
def _import_weaviate() ->Any:
    """Lazily import and return the Weaviate vectorstore class.

    The function-scope import defers loading langchain_community until the
    class is actually requested.
    """
    from langchain_community.vectorstores.weaviate import Weaviate
    return Weaviate
| null |
setUp
|
self.example_code = """const os = require('os');
function hello(text) {
console.log(text);
}
class Simple {
constructor() {
this.a = 1;
}
}
hello("Hello!");"""
self.expected_simplified_code = """const os = require('os');
// Code for: function hello(text) {
// Code for: class Simple {
hello("Hello!");"""
self.expected_extracted_code = [
"""function hello(text) {
console.log(text);
}""",
"""class Simple {
constructor() {
this.a = 1;
}
}"""]
|
def setUp(self) ->None:
    """Prepare a JavaScript sample plus the expected segmenter outputs.

    # example_code: raw JS with one function and one class definition.
    # expected_simplified_code: top-level view with bodies elided.
    # expected_extracted_code: the two extracted definition bodies.
    """
    self.example_code = """const os = require('os');

function hello(text) {
    console.log(text);
}

class Simple {
    constructor() {
        this.a = 1;
    }
}

hello("Hello!");"""
    self.expected_simplified_code = """const os = require('os');


// Code for: function hello(text) {


// Code for: class Simple {


hello("Hello!");"""
    self.expected_extracted_code = [
        'function hello(text) {\n    console.log(text);\n}',
        """class Simple {
    constructor() {
        this.a = 1;
    }
}"""]
| null |
_get_keys
|
input_key = _determine_input_key(config, run_inputs)
prediction_key = _determine_prediction_key(config, run_outputs)
reference_key = _determine_reference_key(config, example_outputs)
return input_key, prediction_key, reference_key
|
def _get_keys(config: smith_eval.RunEvalConfig, run_inputs: Optional[List[
    str]], run_outputs: Optional[List[str]], example_outputs: Optional[List
    [str]]) ->Tuple[Optional[str], Optional[str], Optional[str]]:
    """Resolve the input, prediction and reference key names for evaluation."""
    return (_determine_input_key(config, run_inputs),
        _determine_prediction_key(config, run_outputs),
        _determine_reference_key(config, example_outputs))
| null |
create_retrieval_chain
|
"""Create retrieval chain that retrieves documents and then passes them on.
Args:
retriever: Retriever-like object that returns list of documents. Should
either be a subclass of BaseRetriever or a Runnable that returns
a list of documents. If a subclass of BaseRetriever, then it
        is expected that an `input` key be passed in - this is what
        will be used to pass into the retriever. If this is NOT a
subclass of BaseRetriever, then all the inputs will be passed
into this runnable, meaning that runnable should take a dictionary
as input.
combine_docs_chain: Runnable that takes inputs and produces a string output.
The inputs to this will be any original inputs to this chain, a new
context key with the retrieved documents, and chat_history (if not present
in the inputs) with a value of `[]` (to easily enable conversational
retrieval.
Returns:
An LCEL Runnable. The Runnable return is a dictionary containing at the very
least a `context` and `answer` key.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
llm = ChatOpenAI()
retriever = ...
combine_docs_chain = create_stuff_documents_chain(
llm, retrieval_qa_chat_prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
chain.invoke({"input": "..."})
"""
if not isinstance(retriever, BaseRetriever):
retrieval_docs: Runnable[dict, RetrieverOutput] = retriever
else:
retrieval_docs = (lambda x: x['input']) | retriever
retrieval_chain = RunnablePassthrough.assign(context=retrieval_docs.
with_config(run_name='retrieve_documents')).assign(answer=
combine_docs_chain).with_config(run_name='retrieval_chain')
return retrieval_chain
|
def create_retrieval_chain(retriever: Union[BaseRetriever, Runnable[dict,
    RetrieverOutput]], combine_docs_chain: Runnable[Dict[str, Any], str]
    ) ->Runnable:
    """Create retrieval chain that retrieves documents and then passes them on.

    Args:
        retriever: Retriever-like object that returns list of documents. Should
            either be a subclass of BaseRetriever or a Runnable that returns
            a list of documents. If a subclass of BaseRetriever, then it
            is expected that an `input` key be passed in - this is what
            will be used to pass into the retriever. If this is NOT a
            subclass of BaseRetriever, then all the inputs will be passed
            into this runnable, meaning that runnable should take a dictionary
            as input.
        combine_docs_chain: Runnable that takes inputs and produces a string output.
            The inputs to this will be any original inputs to this chain, a new
            context key with the retrieved documents, and chat_history (if not present
            in the inputs) with a value of `[]` (to easily enable conversational
            retrieval).

    Returns:
        An LCEL Runnable. The Runnable return is a dictionary containing at the very
        least a `context` and `answer` key.

    Example:
        .. code-block:: python

            # pip install -U langchain langchain-community

            from langchain_community.chat_models import ChatOpenAI
            from langchain.chains.combine_documents import create_stuff_documents_chain
            from langchain.chains import create_retrieval_chain
            from langchain import hub

            retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
            llm = ChatOpenAI()
            retriever = ...
            combine_docs_chain = create_stuff_documents_chain(
                llm, retrieval_qa_chat_prompt
            )
            retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)

            chain.invoke({"input": "..."})
    """
    if not isinstance(retriever, BaseRetriever):
        # Generic runnables receive the whole input dict unchanged.
        retrieval_docs: Runnable[dict, RetrieverOutput] = retriever
    else:
        # BaseRetriever expects the bare query string stored under 'input'.
        retrieval_docs = (lambda x: x['input']) | retriever
    retrieval_chain = RunnablePassthrough.assign(context=retrieval_docs.
        with_config(run_name='retrieve_documents')).assign(answer=
        combine_docs_chain).with_config(run_name='retrieval_chain')
    return retrieval_chain
|
Create retrieval chain that retrieves documents and then passes them on.
Args:
retriever: Retriever-like object that returns list of documents. Should
either be a subclass of BaseRetriever or a Runnable that returns
a list of documents. If a subclass of BaseRetriever, then it
        is expected that an `input` key be passed in - this is what
        will be used to pass into the retriever. If this is NOT a
subclass of BaseRetriever, then all the inputs will be passed
into this runnable, meaning that runnable should take a dictionary
as input.
combine_docs_chain: Runnable that takes inputs and produces a string output.
The inputs to this will be any original inputs to this chain, a new
context key with the retrieved documents, and chat_history (if not present
in the inputs) with a value of `[]` (to easily enable conversational
retrieval.
Returns:
An LCEL Runnable. The Runnable return is a dictionary containing at the very
least a `context` and `answer` key.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain import hub
retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
llm = ChatOpenAI()
retriever = ...
combine_docs_chain = create_stuff_documents_chain(
llm, retrieval_qa_chat_prompt
)
retrieval_chain = create_retrieval_chain(retriever, combine_docs_chain)
chain.invoke({"input": "..."})
|
_generate_random_datetime_strings
|
"""Generates n random datetime strings conforming to the
given pattern within the specified date range.
Pattern should be a string containing the desired format codes.
start_date and end_date should be datetime objects representing
the start and end of the date range.
"""
examples = []
delta = end_date - start_date
for i in range(n):
random_delta = random.uniform(0, delta.total_seconds())
dt = start_date + timedelta(seconds=random_delta)
date_string = dt.strftime(pattern)
examples.append(date_string)
return examples
|
def _generate_random_datetime_strings(pattern: str, n: int=3, start_date:
datetime=datetime(1, 1, 1), end_date: datetime=datetime.now() +
timedelta(days=3650)) ->List[str]:
"""Generates n random datetime strings conforming to the
given pattern within the specified date range.
Pattern should be a string containing the desired format codes.
start_date and end_date should be datetime objects representing
the start and end of the date range.
"""
examples = []
delta = end_date - start_date
for i in range(n):
random_delta = random.uniform(0, delta.total_seconds())
dt = start_date + timedelta(seconds=random_delta)
date_string = dt.strftime(pattern)
examples.append(date_string)
return examples
|
Generates n random datetime strings conforming to the
given pattern within the specified date range.
Pattern should be a string containing the desired format codes.
start_date and end_date should be datetime objects representing
the start and end of the date range.
|
query
|
"""Query the graph."""
pass
|
@abstractmethod
def query(self, query: str, params: Optional[dict]=None) ->List[Dict[str, Any]]:
    """Query the graph.

    Args:
        query: The query string to execute.
        params: Optional query parameters; implementations should treat
            None as an empty parameter set. (Replaces the original mutable
            ``{}`` default, which is a shared-state hazard.)

    Returns:
        A list of result records, one dict per row.
    """
    pass
|
Query the graph.
|
similarity_search
|
res = self.store.get(query)
if res is None:
return []
return [res]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Return the stored document exactly matching *query*, or an empty list."""
    hit = self.store.get(query)
    return [] if hit is None else [hit]
| null |
add_embeddings
|
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
try:
schema_datas = [{'document': t} for t in texts]
self.storage.upsert_vectors(embeddings, ids, metadatas, schema_datas)
except Exception as e:
self.logger.exception(e)
|
def add_embeddings(self, texts: Iterable[str], embeddings: List[List[float]
    ], metadatas: List[dict], ids: List[str], **kwargs: Any) ->None:
    """Add embeddings to the vectorstore.
    Args:
        texts: Iterable of strings to add to the vectorstore.
        embeddings: List of list of embedding vectors.
        metadatas: List of metadatas associated with the texts.
        ids: List of ids paired positionally with each embedding.
        kwargs: vectorstore specific parameters
    """
    try:
        # Wrap each raw text in the storage backend's schema shape.
        schema_datas = [{'document': t} for t in texts]
        self.storage.upsert_vectors(embeddings, ids, metadatas, schema_datas)
    except Exception as e:
        # NOTE(review): failures are logged and swallowed, so callers get
        # no signal that the upsert was lost -- confirm this best-effort
        # contract is intentional.
        self.logger.exception(e)
|
Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
|
test_dict_in_list_throws
|
with pytest.raises(ValueError):
base.embed({'test_namespace': [{'a': 1}, {'b': 2}]}, MockEncoder())
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_dict_in_list_throws() ->None:
    """A dict nested inside a namespace list is invalid embed input."""
    with pytest.raises(ValueError):
        base.embed({'test_namespace': [{'a': 1}, {'b': 2}]}, MockEncoder())
| null |
_get_default_bash_process
|
"""Get default bash process."""
try:
from langchain_experimental.llm_bash.bash import BashProcess
except ImportError:
raise ImportError(
'BashProcess has been moved to langchain experimental.To use this tool, install langchain-experimental with `pip install langchain-experimental`.'
)
return BashProcess(return_err_output=True)
|
def _get_default_bash_process() ->Any:
"""Get default bash process."""
try:
from langchain_experimental.llm_bash.bash import BashProcess
except ImportError:
raise ImportError(
'BashProcess has been moved to langchain experimental.To use this tool, install langchain-experimental with `pip install langchain-experimental`.'
)
return BashProcess(return_err_output=True)
|
Get default bash process.
|
test_calling_chain_w_reserved_inputs_throws
|
llm, PROMPT = setup()
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=
False, model=MockEncoder()))
with pytest.raises(ValueError):
chain.run(User=rl_chain.BasedOn('Context'), rl_chain_selected_based_on=
rl_chain.ToSelectFrom(['0', '1', '2']))
with pytest.raises(ValueError):
chain.run(User=rl_chain.BasedOn('Context'), rl_chain_selected=rl_chain.
ToSelectFrom(['0', '1', '2']))
|
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_calling_chain_w_reserved_inputs_throws() ->None:
    """Chain inputs must not reuse the reserved rl_chain_selected* keys."""
    llm, PROMPT = setup()
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
        feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed
        =False, model=MockEncoder()))
    with pytest.raises(ValueError):
        chain.run(User=rl_chain.BasedOn('Context'),
            rl_chain_selected_based_on=rl_chain.ToSelectFrom(['0', '1', '2']))
    with pytest.raises(ValueError):
        chain.run(User=rl_chain.BasedOn('Context'), rl_chain_selected=
            rl_chain.ToSelectFrom(['0', '1', '2']))
| null |
_unique_list
|
visited_keys: Set[U] = set()
new_lst = []
for item in lst:
item_key = key(item)
if item_key not in visited_keys:
visited_keys.add(item_key)
new_lst.append(item)
return new_lst
|
def _unique_list(lst: List[T], key: Callable[[T], U]) ->List[T]:
    """Deduplicate *lst* by ``key(item)``, keeping first occurrences in order."""
    seen: Set[U] = set()
    deduped: List[T] = []
    for element in lst:
        marker = key(element)
        if marker in seen:
            continue
        seen.add(marker)
        deduped.append(element)
    return deduped
| null |
test__get_messages_invalid
|
with pytest.raises(InputFormatError):
_get_messages(inputs)
|
@pytest.mark.parametrize('inputs', [{'one_key': [_EXAMPLE_MESSAGE],
    'other_key': 'value'}, {'messages': [[_EXAMPLE_MESSAGE,
    _EXAMPLE_MESSAGE], _EXAMPLE_MESSAGE], 'other_key': 'value'}, {'prompts':
    'foo'}, {}])
def test__get_messages_invalid(inputs: Dict[str, Any]) ->None:
    """Malformed chat payloads must raise InputFormatError."""
    with pytest.raises(InputFormatError):
        _get_messages(inputs)
| null |
similarity_search_with_relevance_scores
|
vector = self._embedding.embed_query(query)
return self.similarity_search_by_vector_with_relevance_scores(vector, k=k,
filter=filter, **kwargs)
|
def similarity_search_with_relevance_scores(self, query: str, k: int=4,
    filter: Optional[Dict[str, Any]]=None, **kwargs: Any) ->List[Tuple[
    Document, float]]:
    """Return up to ``k`` (document, relevance score) pairs for ``query``.

    The query text is embedded with the store's embedding model, then the
    search is delegated to the vector-based variant.
    """
    vector = self._embedding.embed_query(query)
    return self.similarity_search_by_vector_with_relevance_scores(vector, k
        =k, filter=filter, **kwargs)
| null |
test_semantic_search_filter_fruits
|
"""Test on semantic similarity with metadata filter."""
docs = store.similarity_search('food', filter={'kind': 'fruit'})
kinds = [d.metadata['kind'] for d in docs]
assert 'fruit' in kinds
assert 'treat' not in kinds
assert 'planet' not in kinds
|
def test_semantic_search_filter_fruits(self, store: BigQueryVectorSearch
    ) ->None:
    """Test on semantic similarity with metadata filter."""
    docs = store.similarity_search('food', filter={'kind': 'fruit'})
    kinds = [d.metadata['kind'] for d in docs]
    # Only fruit documents may survive the metadata filter.
    assert 'fruit' in kinds
    assert 'treat' not in kinds
    assert 'planet' not in kinds
|
Test on semantic similarity with metadata filter.
|
load_agent_from_config
|
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
if '_type' not in config:
raise ValueError('Must specify an agent Type in config')
load_from_tools = config.pop('load_from_llm_and_tools', False)
if load_from_tools:
if llm is None:
raise ValueError(
'If `load_from_llm_and_tools` is set to True, then LLM must be provided'
)
if tools is None:
raise ValueError(
'If `load_from_llm_and_tools` is set to True, then tools must be provided'
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop('_type')
if config_type not in AGENT_TO_CLASS:
raise ValueError(f'Loading {config_type} agent not supported')
agent_cls = AGENT_TO_CLASS[config_type]
if 'llm_chain' in config:
config['llm_chain'] = load_chain_from_config(config.pop('llm_chain'))
elif 'llm_chain_path' in config:
config['llm_chain'] = load_chain(config.pop('llm_chain_path'))
else:
raise ValueError(
'One of `llm_chain` and `llm_chain_path` should be specified.')
if 'output_parser' in config:
logger.warning(
'Currently loading output parsers on agent is not supported, will just use the default one.'
)
del config['output_parser']
combined_config = {**config, **kwargs}
return agent_cls(**combined_config)
|
def load_agent_from_config(config: dict, llm: Optional[BaseLanguageModel]=
    None, tools: Optional[List[Tool]]=None, **kwargs: Any) ->Union[
    BaseSingleActionAgent, BaseMultiActionAgent]:
    """Load agent from Config Dict.
    Args:
        config: Config dict to load agent from.
        llm: Language model to use as the agent.
        tools: List of tools this agent has access to.
        **kwargs: Additional keyword arguments passed to the agent executor.
    Returns:
        An agent executor.
    Raises:
        ValueError: if no agent type is given, the type is unsupported,
            or neither `llm_chain` nor `llm_chain_path` is provided.
    """
    if '_type' not in config:
        raise ValueError('Must specify an agent Type in config')
    load_from_tools = config.pop('load_from_llm_and_tools', False)
    if load_from_tools:
        # Alternate path: build the agent directly from the caller-supplied
        # LLM and tools instead of a serialized llm_chain.
        if llm is None:
            raise ValueError(
                'If `load_from_llm_and_tools` is set to True, then LLM must be provided'
                )
        if tools is None:
            raise ValueError(
                'If `load_from_llm_and_tools` is set to True, then tools must be provided'
                )
        return _load_agent_from_tools(config, llm, tools, **kwargs)
    config_type = config.pop('_type')
    if config_type not in AGENT_TO_CLASS:
        raise ValueError(f'Loading {config_type} agent not supported')
    agent_cls = AGENT_TO_CLASS[config_type]
    # The llm_chain may be inlined in the config or referenced by path.
    if 'llm_chain' in config:
        config['llm_chain'] = load_chain_from_config(config.pop('llm_chain'))
    elif 'llm_chain_path' in config:
        config['llm_chain'] = load_chain(config.pop('llm_chain_path'))
    else:
        raise ValueError(
            'One of `llm_chain` and `llm_chain_path` should be specified.')
    if 'output_parser' in config:
        # Serialized output parsers are not supported; drop and warn.
        logger.warning(
            'Currently loading output parsers on agent is not supported, will just use the default one.'
            )
        del config['output_parser']
    combined_config = {**config, **kwargs}
    return agent_cls(**combined_config)
|
Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
|
parse
|
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
raise ValueError('action not found')
action = found.group(1)
response = json.loads(action.strip())
includes_action = 'action' in response
if includes_answer and includes_action:
raise OutputParserException(
f'Parsing LLM output produced a final answer and a parse-able action: {text}'
)
return AgentAction(response['action'], response.get('action_input', {}),
text)
except Exception:
if not includes_answer:
raise OutputParserException(f'Could not parse LLM output: {text}')
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({'output': output}, text)
|
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
    """Parse an LLM reply into an AgentAction or a final AgentFinish.

    The reply is expected to contain a JSON action blob matched by
    ``self.pattern``; presence of FINAL_ANSWER_ACTION marks a final
    answer instead. Ambiguous replies containing both are rejected.
    """
    includes_answer = FINAL_ANSWER_ACTION in text
    try:
        found = self.pattern.search(text)
        if not found:
            raise ValueError('action not found')
        action = found.group(1)
        response = json.loads(action.strip())
        includes_action = 'action' in response
        if includes_answer and includes_action:
            # Both a final answer and an action: refuse rather than guess.
            raise OutputParserException(
                f'Parsing LLM output produced a final answer and a parse-able action: {text}'
                )
        return AgentAction(response['action'], response.get('action_input',
            {}), text)
    except Exception:
        # Any parse failure falls back to treating the text as a final
        # answer -- but only if the final-answer marker is present.
        if not includes_answer:
            raise OutputParserException(f'Could not parse LLM output: {text}')
        output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
        return AgentFinish({'output': output}, text)
| null |
_call
|
params = self._invocation_params(stop, **kwargs)
prompt = prompt.strip()
response = None
try:
if self.streaming:
completion = ''
for chunk in self._stream(prompt, stop, run_manager, **params):
completion += chunk.text
return completion
else:
response = self._call_eas(prompt, params)
_stop = params.get('stop')
return self._process_response(response, _stop, self.version)
except Exception as error:
raise ValueError(f'Error raised by the service: {error}')
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call the EAS endpoint with ``prompt`` and return the completion.

    In streaming mode, chunks are concatenated and returned as one
    string; otherwise a single blocking request is made.

    Raises:
        ValueError: wrapping any error raised by the remote service.
    """
    params = self._invocation_params(stop, **kwargs)
    prompt = prompt.strip()
    response = None
    try:
        if self.streaming:
            completion = ''
            for chunk in self._stream(prompt, stop, run_manager, **params):
                completion += chunk.text
            return completion
        else:
            response = self._call_eas(prompt, params)
            _stop = params.get('stop')
            return self._process_response(response, _stop, self.version)
    except Exception as error:
        raise ValueError(f'Error raised by the service: {error}')
| null |
test_qianfan_key_masked_when_passed_via_constructor
|
"""Test initialization with an API key provided via the initializer"""
chat = QianfanChatEndpoint(qianfan_ak='test-api-key', qianfan_sk=
'test-secret-key')
print(chat.qianfan_ak, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
print(chat.qianfan_sk, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_qianfan_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """Test initialization with an API key provided via the initializer"""
    chat = QianfanChatEndpoint(qianfan_ak='test-api-key', qianfan_sk=
        'test-secret-key')
    # Secret fields must render as asterisks, never the raw value.
    print(chat.qianfan_ak, end='')
    captured = capsys.readouterr()
    assert captured.out == '**********'
    print(chat.qianfan_sk, end='')
    captured = capsys.readouterr()
    assert captured.out == '**********'
|
Test initialization with an API key provided via the initializer
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object.

    The class is serialized under ``langchain.schema.runnable``.
    """
    return ['langchain', 'schema', 'runnable']
|
Get the namespace of the langchain object.
|
__init__
|
self.file_path = Path(file_path)
if not self.file_path.exists():
self.file_path.touch()
self.file_path.write_text(json.dumps([]))
|
def __init__(self, file_path: str):
self.file_path = Path(file_path)
if not self.file_path.exists():
self.file_path.touch()
self.file_path.write_text(json.dumps([]))
| null |
validate_environment
|
values['gpt_router_api_base'] = get_from_dict_or_env(values,
'gpt_router_api_base', 'GPT_ROUTER_API_BASE', DEFAULT_API_BASE_URL)
values['gpt_router_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'gpt_router_api_key', 'GPT_ROUTER_API_KEY'))
try:
from gpt_router.client import GPTRouterClient
except ImportError:
raise GPTRouterException(
'Could not import GPTRouter python package. Please install it with `pip install GPTRouter`.'
)
gpt_router_client = GPTRouterClient(values['gpt_router_api_base'], values[
'gpt_router_api_key'].get_secret_value())
values['client'] = gpt_router_client
return values
|
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve GPTRouter credentials and construct the API client.

    Base URL and API key come from ``values`` or the GPT_ROUTER_API_BASE /
    GPT_ROUTER_API_KEY environment variables; the key is wrapped as a
    secret string.

    Raises:
        GPTRouterException: if the GPTRouter package is not installed.
    """
    values['gpt_router_api_base'] = get_from_dict_or_env(values,
        'gpt_router_api_base', 'GPT_ROUTER_API_BASE', DEFAULT_API_BASE_URL)
    values['gpt_router_api_key'] = convert_to_secret_str(get_from_dict_or_env
        (values, 'gpt_router_api_key', 'GPT_ROUTER_API_KEY'))
    try:
        from gpt_router.client import GPTRouterClient
    except ImportError:
        raise GPTRouterException(
            'Could not import GPTRouter python package. Please install it with `pip install GPTRouter`.'
            )
    # The secret is unwrapped only at client-construction time.
    gpt_router_client = GPTRouterClient(values['gpt_router_api_base'],
        values['gpt_router_api_key'].get_secret_value())
    values['client'] = gpt_router_client
    return values
| null |
_get_docs
|
"""Get docs."""
vectordbkwargs = inputs.get('vectordbkwargs', {})
full_kwargs = {**self.search_kwargs, **vectordbkwargs}
return self.vectorstore.similarity_search(question, k=self.
top_k_docs_for_context, **full_kwargs)
|
def _get_docs(self, question: str, inputs: Dict[str, Any], *, run_manager:
    CallbackManagerForChainRun) ->List[Document]:
    """Get docs.

    Runs a similarity search for ``question``, merging the chain's
    configured ``search_kwargs`` with any per-call ``vectordbkwargs``
    from ``inputs`` (per-call values win on key collisions).
    """
    vectordbkwargs = inputs.get('vectordbkwargs', {})
    full_kwargs = {**self.search_kwargs, **vectordbkwargs}
    return self.vectorstore.similarity_search(question, k=self.
        top_k_docs_for_context, **full_kwargs)
|
Get docs.
|
test__get_messages_valid
|
{'messages': []}
_get_messages(inputs)
|
@pytest.mark.parametrize('inputs', _VALID_MESSAGES)
def test__get_messages_valid(inputs: Dict[str, Any]) ->None:
    """Every payload in _VALID_MESSAGES must be accepted without error."""
    # The stray no-op expression `{'messages': []}` that used to sit here
    # was dead code and has been removed; the test simply asserts that
    # _get_messages does not raise.
    _get_messages(inputs)
| null |
add_metadata
|
self.metadata.update(metadata)
if inherit:
self.inheritable_metadata.update(metadata)
|
def add_metadata(self, metadata: Dict[str, Any], inherit: bool=True) ->None:
    """Merge *metadata* into this run's metadata.

    When ``inherit`` is true the entries are also merged into
    ``inheritable_metadata`` so they propagate to child runs.
    """
    self.metadata.update(metadata)
    if not inherit:
        return
    self.inheritable_metadata.update(metadata)
| null |
__init__
|
assert cols > 1
assert lines > 1
self.cols = cols
self.lines = lines
self.canvas = [([' '] * cols) for line in range(lines)]
|
def __init__(self, cols: int, lines: int) ->None:
assert cols > 1
assert lines > 1
self.cols = cols
self.lines = lines
self.canvas = [([' '] * cols) for line in range(lines)]
| null |
test_pairwise_embedding_distance_eval_chain_hamming_distance
|
"""Test the hamming distance."""
from scipy.spatial.distance import hamming
pairwise_embedding_distance_eval_chain.distance_metric = (EmbeddingDistance
.HAMMING)
result = pairwise_embedding_distance_eval_chain._compute_score(np.array(
vectors))
expected = hamming(*vectors)
assert np.isclose(result, expected)
|
@pytest.mark.requires('scipy')
def test_pairwise_embedding_distance_eval_chain_hamming_distance(
    pairwise_embedding_distance_eval_chain:
    PairwiseEmbeddingDistanceEvalChain, vectors: Tuple[np.ndarray, np.ndarray]
    ) ->None:
    """Test the hamming distance.

    The chain's score must match scipy's reference implementation.
    """
    from scipy.spatial.distance import hamming
    pairwise_embedding_distance_eval_chain.distance_metric = (EmbeddingDistance
        .HAMMING)
    result = pairwise_embedding_distance_eval_chain._compute_score(np.array
        (vectors))
    expected = hamming(*vectors)
    # Floating point comparison, hence isclose rather than ==.
    assert np.isclose(result, expected)
|
Test the hamming distance.
|
test_redis_add_texts_to_existing
|
"""Test adding a new document"""
docsearch = Redis.from_existing_index(FakeEmbeddings(), index_name=
TEST_INDEX_NAME, redis_url=TEST_REDIS_URL, schema='test_schema.yml')
docsearch.add_texts(['foo'])
output = docsearch.similarity_search('foo', k=2, return_metadata=False)
assert output == TEST_RESULT
assert drop(TEST_INDEX_NAME)
os.remove('test_schema.yml')
|
def test_redis_add_texts_to_existing() ->None:
    """Test adding a new document"""
    # Reconnect to an index created earlier via its on-disk schema file.
    docsearch = Redis.from_existing_index(FakeEmbeddings(), index_name=
        TEST_INDEX_NAME, redis_url=TEST_REDIS_URL, schema='test_schema.yml')
    docsearch.add_texts(['foo'])
    output = docsearch.similarity_search('foo', k=2, return_metadata=False)
    assert output == TEST_RESULT
    # Clean up both the Redis index and the schema file.
    assert drop(TEST_INDEX_NAME)
    os.remove('test_schema.yml')
|
Test adding a new document
|
test_fixed_message_response_when_no_docs_found
|
fixed_resp = "I don't know"
answer = 'I know the answer!'
llm = FakeListLLM(responses=[answer])
retriever = SequentialRetriever(sequential_responses=[[]])
memory = ConversationBufferMemory(k=1, output_key='answer', memory_key=
'chat_history', return_messages=True)
qa_chain = ConversationalRetrievalChain.from_llm(llm=llm, memory=memory,
retriever=retriever, return_source_documents=True, rephrase_question=
False, response_if_no_docs_found=fixed_resp, verbose=True)
got = qa_chain('What is the answer?')
assert got['chat_history'][1].content == fixed_resp
assert got['answer'] == fixed_resp
|
def test_fixed_message_response_when_no_docs_found() ->None:
    """When the retriever returns no documents, the chain must reply with
    the configured ``response_if_no_docs_found`` string rather than the
    LLM's canned answer."""
    fixed_resp = "I don't know"
    answer = 'I know the answer!'
    llm = FakeListLLM(responses=[answer])
    # SequentialRetriever yields an empty result set on the first call.
    retriever = SequentialRetriever(sequential_responses=[[]])
    memory = ConversationBufferMemory(k=1, output_key='answer', memory_key=
        'chat_history', return_messages=True)
    qa_chain = ConversationalRetrievalChain.from_llm(llm=llm, memory=memory,
        retriever=retriever, return_source_documents=True,
        rephrase_question=False, response_if_no_docs_found=fixed_resp,
        verbose=True)
    got = qa_chain('What is the answer?')
    assert got['chat_history'][1].content == fixed_resp
    assert got['answer'] == fixed_resp
| null |
test_validating_cypher_statements
|
cypher_file = 'tests/unit_tests/data/cypher_corrector.csv'
examples = pd.read_csv(cypher_file)
examples.fillna('', inplace=True)
for _, row in examples.iterrows():
schema = load_schemas(row['schema'])
corrector = CypherQueryCorrector(schema)
assert corrector(row['statement']) == row['correct_query']
|
def test_validating_cypher_statements() ->None:
    """Every statement in the corrector fixture CSV must be rewritten to
    its expected corrected query."""
    cypher_file = 'tests/unit_tests/data/cypher_corrector.csv'
    examples = pd.read_csv(cypher_file)
    # Empty CSV cells read as NaN; normalize them to empty strings.
    examples.fillna('', inplace=True)
    for _, row in examples.iterrows():
        schema = load_schemas(row['schema'])
        corrector = CypherQueryCorrector(schema)
        assert corrector(row['statement']) == row['correct_query']
| null |
_run
|
"""Use the Wikipedia tool."""
return self.api_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the Wikipedia tool.

    Delegates to the wrapped API client; ``run_manager`` is accepted for
    the tool interface but unused here.
    """
    return self.api_wrapper.run(query)
|
Use the Wikipedia tool.
|
_fetch_text_complexity_metrics
|
textstat = import_textstat()
text_complexity_metrics = {'flesch_reading_ease': textstat.
flesch_reading_ease(text), 'flesch_kincaid_grade': textstat.
flesch_kincaid_grade(text), 'smog_index': textstat.smog_index(text),
'coleman_liau_index': textstat.coleman_liau_index(text),
'automated_readability_index': textstat.automated_readability_index(
text), 'dale_chall_readability_score': textstat.
dale_chall_readability_score(text), 'difficult_words': textstat.
difficult_words(text), 'linsear_write_formula': textstat.
linsear_write_formula(text), 'gunning_fog': textstat.gunning_fog(text),
'text_standard': textstat.text_standard(text), 'fernandez_huerta':
textstat.fernandez_huerta(text), 'szigriszt_pazos': textstat.
szigriszt_pazos(text), 'gutierrez_polini': textstat.gutierrez_polini(
text), 'crawford': textstat.crawford(text), 'gulpease_index': textstat.
gulpease_index(text), 'osman': textstat.osman(text)}
return text_complexity_metrics
|
def _fetch_text_complexity_metrics(text: str) ->dict:
    """Compute a battery of textstat readability metrics for *text*.

    Returns a dict keyed by metric name; key order matches the original
    literal, so consumers relying on insertion order are unaffected.
    """
    textstat = import_textstat()
    metric_names = ['flesch_reading_ease', 'flesch_kincaid_grade',
        'smog_index', 'coleman_liau_index', 'automated_readability_index',
        'dale_chall_readability_score', 'difficult_words',
        'linsear_write_formula', 'gunning_fog', 'text_standard',
        'fernandez_huerta', 'szigriszt_pazos', 'gutierrez_polini',
        'crawford', 'gulpease_index', 'osman']
    # Every metric is exposed as textstat.<name>(text), so the big dict
    # literal collapses into a single dispatch comprehension.
    return {name: getattr(textstat, name)(text) for name in metric_names}
| null |
is_lc_serializable
|
"""Return whether or not the class is serializable."""
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return whether or not the class is serializable.

    Always False: this class opts out of langchain serialization.
    """
    return False
|
Return whether or not the class is serializable.
|
similarity_search_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(embedding=
embedding, k=k, filter=filter)
return _results_to_docs(docs_and_scores)
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
    filter: Optional[dict]=None, **kwargs: Any) ->List[Document]:
    """Return docs most similar to embedding vector.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
    Returns:
        List of Documents most similar to the query vector.
    """
    # Scores are computed by the scored variant, then stripped here.
    docs_and_scores = self.similarity_search_with_score_by_vector(embedding
        =embedding, k=k, filter=filter)
    return _results_to_docs(docs_and_scores)
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
|
test_konko_llm_model_name_test
|
"""Check if llm_output has model info."""
chat_instance = ChatKonko(max_tokens=10)
msg = HumanMessage(content='Hi')
llm_data = chat_instance.generate([[msg]])
assert llm_data.llm_output is not None
assert llm_data.llm_output['model_name'] == chat_instance.model
|
def test_konko_llm_model_name_test() ->None:
    """Check if llm_output has model info."""
    chat_instance = ChatKonko(max_tokens=10)
    msg = HumanMessage(content='Hi')
    llm_data = chat_instance.generate([[msg]])
    # llm_output must exist and echo the configured model name.
    assert llm_data.llm_output is not None
    assert llm_data.llm_output['model_name'] == chat_instance.model
|
Check if llm_output has model info.
|
__init__
|
"""Initialize with a file path."""
self.file_path = path
self.encoding = encoding
|
def __init__(self, path: str, *, encoding: str='utf-8') ->None:
"""Initialize with a file path."""
self.file_path = path
self.encoding = encoding
|
Initialize with a file path.
|
test_delete
|
"""Test the similarity search with normalized similarities."""
ids = ['a', 'b', 'c']
docsearch = FAISS.from_texts(['foo', 'bar', 'baz'], FakeEmbeddings(), ids=ids)
docsearch.delete(ids[1:2])
result = docsearch.similarity_search('bar', k=2)
assert sorted([d.page_content for d in result]) == ['baz', 'foo']
assert docsearch.index_to_docstore_id == {(0): ids[0], (1): ids[2]}
|
@pytest.mark.requires('faiss')
def test_delete() ->None:
    """Deleting by id removes the document and re-keys the docstore map.

    (The previous docstring about "normalized similarities" was copied
    from an unrelated test.)
    """
    ids = ['a', 'b', 'c']
    docsearch = FAISS.from_texts(['foo', 'bar', 'baz'], FakeEmbeddings(),
        ids=ids)
    docsearch.delete(ids[1:2])
    result = docsearch.similarity_search('bar', k=2)
    assert sorted([d.page_content for d in result]) == ['baz', 'foo']
    # Remaining ids are re-keyed by their new positional index.
    assert docsearch.index_to_docstore_id == {(0): ids[0], (1): ids[2]}
|
Test the similarity search with normalized similarities.
|
_create_retry_decorator
|
"""Returns a tenacity retry decorator, preconfigured to handle exceptions"""
errors = [MistralException, MistralAPIException, MistralConnectionException]
return create_base_retry_decorator(error_types=errors, max_retries=llm.
max_retries, run_manager=run_manager)
|
def _create_retry_decorator(llm: ChatMistralAI, run_manager: Optional[Union
    [AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None
    ) ->Callable[[Any], Any]:
    """Returns a tenacity retry decorator, preconfigured to handle exceptions

    Retries the Mistral API / connection error family up to
    ``llm.max_retries`` times, reporting retries to ``run_manager``.
    """
    errors = [MistralException, MistralAPIException, MistralConnectionException
        ]
    return create_base_retry_decorator(error_types=errors, max_retries=llm.
        max_retries, run_manager=run_manager)
|
Returns a tenacity retry decorator, preconfigured to handle exceptions
|
__init__
|
"""Initialize UnstructuredLakeFSLoader.
Args:
:param lakefs_access_key:
:param lakefs_secret_key:
:param lakefs_endpoint:
:param repo:
:param ref:
"""
super().__init__(**unstructured_kwargs)
self.url = url
self.repo = repo
self.ref = ref
self.path = path
self.presign = presign
|
def __init__(self, url: str, repo: str, ref: str='main', path: str='',
    presign: bool=True, **unstructured_kwargs: Any):
    """Initialize UnstructuredLakeFSLoader.

    Args:
        url: lakeFS endpoint URL.
        repo: Repository to read from.
        ref: Branch, tag, or commit id. Defaults to ``'main'``.
        path: Object path within the repository.
        presign: Whether to access objects via pre-signed URLs.
        **unstructured_kwargs: Passed through to the Unstructured base loader.

    NOTE(review): the previous docstring listed ``lakefs_access_key`` /
    ``lakefs_secret_key`` / ``lakefs_endpoint`` parameters that do not
    exist on this initializer.
    """
    super().__init__(**unstructured_kwargs)
    self.url = url
    self.repo = repo
    self.ref = ref
    self.path = path
    self.presign = presign
|
Initialize UnstructuredLakeFSLoader.
Args:
:param lakefs_access_key:
:param lakefs_secret_key:
:param lakefs_endpoint:
:param repo:
:param ref:
|
run_on_dataset
|
input_mapper = kwargs.pop('input_mapper', None)
if input_mapper:
warn_deprecated('0.0.305', message=_INPUT_MAPPER_DEP_WARNING, pending=True)
if kwargs:
warn_deprecated('0.0.305', message=
f'The following arguments are deprecated and will be removed in a future release: {kwargs.keys()}.'
, removal='0.0.305')
client = client or Client()
container = _DatasetRunContainer.prepare(client, dataset_name,
llm_or_chain_factory, project_name, evaluation, tags, input_mapper,
concurrency_level, project_metadata=project_metadata)
if concurrency_level == 0:
batch_results = [_run_llm_or_chain(example, config,
llm_or_chain_factory=container.wrapped_model, input_mapper=
input_mapper) for example, config in zip(container.examples,
container.configs)]
else:
with runnable_config.get_executor_for_config(container.configs[0]
) as executor:
batch_results = list(executor.map(functools.partial(
_run_llm_or_chain, llm_or_chain_factory=container.wrapped_model,
input_mapper=input_mapper), container.examples, container.configs))
return container.finish(batch_results, verbose=verbose)
|
def run_on_dataset(client: Optional[Client], dataset_name: str,
    llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, *, evaluation: Optional[
    smith_eval.RunEvalConfig]=None, concurrency_level: int=5, project_name:
    Optional[str]=None, project_metadata: Optional[Dict[str, Any]]=None,
    verbose: bool=False, tags: Optional[List[str]]=None, **kwargs: Any) ->Dict[
    str, Any]:
    """Run the model or chain over every example in a LangSmith dataset.

    ``concurrency_level == 0`` executes serially in-process; any positive
    value fans out over an executor. Extra keyword arguments are
    deprecated and only trigger warnings.
    """
    input_mapper = kwargs.pop('input_mapper', None)
    if input_mapper:
        warn_deprecated('0.0.305', message=_INPUT_MAPPER_DEP_WARNING,
            pending=True)
    if kwargs:
        warn_deprecated('0.0.305', message=
            f'The following arguments are deprecated and will be removed in a future release: {kwargs.keys()}.'
            , removal='0.0.305')
    client = client or Client()
    container = _DatasetRunContainer.prepare(client, dataset_name,
        llm_or_chain_factory, project_name, evaluation, tags, input_mapper,
        concurrency_level, project_metadata=project_metadata)
    if concurrency_level == 0:
        # Serial path: run each example directly in this process.
        batch_results = [_run_llm_or_chain(example, config,
            llm_or_chain_factory=container.wrapped_model, input_mapper=
            input_mapper) for example, config in zip(container.examples,
            container.configs)]
    else:
        # Concurrent path: executor configuration is derived from the
        # first run config.
        with runnable_config.get_executor_for_config(container.configs[0]
            ) as executor:
            batch_results = list(executor.map(functools.partial(
                _run_llm_or_chain, llm_or_chain_factory=container.
                wrapped_model, input_mapper=input_mapper), container.
                examples, container.configs))
    return container.finish(batch_results, verbose=verbose)
| null |
_stream
|
"""Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","
"]):
print(chunk, end='', flush=True)
"""
try:
import websocket
except ImportError:
raise ImportError(
'The `websocket-client` package is required for streaming.')
params = {**self._get_parameters(stop), **kwargs}
url = f'{self.model_url}/api/v1/stream'
request = params.copy()
request['prompt'] = prompt
websocket_client = websocket.WebSocket()
websocket_client.connect(url)
websocket_client.send(json.dumps(request))
while True:
result = websocket_client.recv()
result = json.loads(result)
if result['event'] == 'text_stream':
chunk = GenerationChunk(text=result['text'], generation_info=None)
yield chunk
elif result['event'] == 'stream_end':
websocket_client.close()
return
if run_manager:
run_manager.on_llm_new_token(token=chunk.text)
|
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
    GenerationChunk]:
    """Yields results objects as they are generated in real time.
    It also calls the callback manager's on_llm_new_token event with
    similar parameters to the OpenAI LLM class method of the same name.
    Args:
        prompt: The prompts to pass into the model.
        stop: Optional list of stop words to use when generating.
    Returns:
        A generator representing the stream of tokens being generated.
    Yields:
        A dictionary like objects containing a string token and metadata.
        See text-generation-webui docs and below for more.
    Example:
        .. code-block:: python
            from langchain_community.llms import TextGen
            llm = TextGen(
                model_url = "ws://localhost:5005"
                streaming=True
            )
            for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
                    stop=["'","
"]):
                print(chunk, end='', flush=True)
    """
    try:
        import websocket
    except ImportError:
        raise ImportError(
            'The `websocket-client` package is required for streaming.')
    params = {**self._get_parameters(stop), **kwargs}
    url = f'{self.model_url}/api/v1/stream'
    request = params.copy()
    request['prompt'] = prompt
    websocket_client = websocket.WebSocket()
    websocket_client.connect(url)
    websocket_client.send(json.dumps(request))
    try:
        while True:
            result = json.loads(websocket_client.recv())
            if result['event'] == 'text_stream':
                chunk = GenerationChunk(text=result['text'],
                    generation_info=None)
                yield chunk
                # Fire the token callback only for genuine text chunks.
                # Previously this ran for every event, re-emitting the
                # last token on unknown events and raising
                # UnboundLocalError when the first event was not a
                # text_stream.
                if run_manager:
                    run_manager.on_llm_new_token(token=chunk.text)
            elif result['event'] == 'stream_end':
                return
    finally:
        # Always release the socket: normal end, error, or generator close.
        websocket_client.close()
|
Yields results objects as they are generated in real time.
It also calls the callback manager's on_llm_new_token event with
similar parameters to the OpenAI LLM class method of the same name.
Args:
prompt: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
A generator representing the stream of tokens being generated.
Yields:
A dictionary like objects containing a string token and metadata.
See text-generation-webui docs and below for more.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(
model_url = "ws://localhost:5005"
streaming=True
)
for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
stop=["'","
"]):
print(chunk, end='', flush=True)
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's __all__ must exactly match the expected export list."""
    assert set(__all__) == set(EXPECTED_ALL)
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.