method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
json
|
return self.json_data
|
def json(self) ->Dict:
return self.json_data
| null |
_parse_generation
|
if self.prompt.output_parser is not None:
return [self.prompt.output_parser.parse_folder(res[self.output_key]) for
res in generation]
else:
return generation
|
def _parse_generation(self, generation: List[Dict[str, str]]) ->Sequence[Union
[str, List[str], Dict[str, str]]]:
if self.prompt.output_parser is not None:
return [self.prompt.output_parser.parse_folder(res[self.output_key]
) for res in generation]
else:
return generation
| null |
normalize
|
"""Normalize vectors to unit length."""
x /= np.clip(np.linalg.norm(x, axis=-1, keepdims=True), 1e-12, None)
return x
|
def normalize(x: np.ndarray) ->np.ndarray:
"""Normalize vectors to unit length."""
x /= np.clip(np.linalg.norm(x, axis=-1, keepdims=True), 1e-12, None)
return x
|
Normalize vectors to unit length.
|
_persist_run
|
pass
|
def _persist_run(self, run: Run) ->None:
pass
| null |
test_visit_structured_query
|
query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1', '2'])
structured_query = StructuredQuery(query=query, filter=comp)
expected = query, {'filter': {'foo': {'$lt': ['1', '2']}}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz')])
structured_query = StructuredQuery(query=query, filter=op)
expected = query, {'filter': {'$and': [{'foo': {'$lt': 2}}, {'bar': {'$eq':
'baz'}}]}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
|
def test_visit_structured_query() ->None:
query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1',
'2'])
structured_query = StructuredQuery(query=query, filter=comp)
expected = query, {'filter': {'foo': {'$lt': ['1', '2']}}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz')])
structured_query = StructuredQuery(query=query, filter=op)
expected = query, {'filter': {'$and': [{'foo': {'$lt': 2}}, {'bar': {
'$eq': 'baz'}}]}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
| null |
_import_docarray_inmemory
|
from langchain_community.vectorstores.docarray import DocArrayInMemorySearch
return DocArrayInMemorySearch
|
def _import_docarray_inmemory() ->Any:
from langchain_community.vectorstores.docarray import DocArrayInMemorySearch
return DocArrayInMemorySearch
| null |
from_llm
|
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'input', 'answer' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
QAEvalChain: the loaded QA eval chain.
"""
prompt = prompt or PROMPT
expected_input_vars = {'query', 'answer', 'result'}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f'Input variables should be {expected_input_vars}, but got {prompt.input_variables}'
)
return cls(llm=llm, prompt=prompt, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate]=
None, **kwargs: Any) ->QAEvalChain:
"""Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'input', 'answer' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
QAEvalChain: the loaded QA eval chain.
"""
prompt = prompt or PROMPT
expected_input_vars = {'query', 'answer', 'result'}
if expected_input_vars != set(prompt.input_variables):
raise ValueError(
f'Input variables should be {expected_input_vars}, but got {prompt.input_variables}'
)
return cls(llm=llm, prompt=prompt, **kwargs)
|
Load QA Eval Chain from LLM.
Args:
llm (BaseLanguageModel): the base language model to use.
prompt (PromptTemplate): A prompt template containing the input_variables:
'input', 'answer' and 'result' that will be used as the prompt
for evaluation.
Defaults to PROMPT.
**kwargs: additional keyword arguments.
Returns:
QAEvalChain: the loaded QA eval chain.
|
_prepare_input
|
"""Prepare the input for the chain.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
"""
input_ = {'prediction': prediction, 'input': input}
if self.requires_reference:
input_['reference'] = reference
return input_
|
def _prepare_input(self, prediction: str, input: Optional[str], reference:
Optional[str]) ->dict:
"""Prepare the input for the chain.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
"""
input_ = {'prediction': prediction, 'input': input}
if self.requires_reference:
input_['reference'] = reference
return input_
|
Prepare the input for the chain.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
|
test_shell_tool_init
|
placeholder = PlaceholderProcess()
shell_tool = ShellTool(process=placeholder)
assert shell_tool.name == 'terminal'
assert isinstance(shell_tool.description, str)
assert shell_tool.args_schema == ShellInput
assert shell_tool.process is not None
|
def test_shell_tool_init() ->None:
placeholder = PlaceholderProcess()
shell_tool = ShellTool(process=placeholder)
assert shell_tool.name == 'terminal'
assert isinstance(shell_tool.description, str)
assert shell_tool.args_schema == ShellInput
assert shell_tool.process is not None
| null |
_run
|
raise self.exception
|
def _run(self) ->str:
raise self.exception
| null |
_make_request
|
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {'source': url}
return [Document(page_content=text, metadata=metadata)]
|
def _make_request(self, url: str) ->List[Document]:
request = urllib.request.Request(url, headers=self.headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
text = stringify_dict(json_data)
metadata = {'source': url}
return [Document(page_content=text, metadata=metadata)]
| null |
embed_documents
|
"""Return simple embeddings."""
return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in range(
len(texts))]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Return simple embeddings."""
return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in
range(len(texts))]
|
Return simple embeddings.
|
similarity_search_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
metadata: Optional, metadata filter
Returns:
List of Documents most similar to the query vector.
"""
if not self._collection:
raise ValueError(
'collection should be an instance of a Zep DocumentCollection')
results = self._collection.search(embedding=embedding, limit=k, metadata=
metadata, **kwargs)
return [Document(page_content=doc.content, metadata=doc.metadata) for doc in
results]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
metadata: Optional[Dict[str, Any]]=None, **kwargs: Any) ->List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
metadata: Optional, metadata filter
Returns:
List of Documents most similar to the query vector.
"""
if not self._collection:
raise ValueError(
'collection should be an instance of a Zep DocumentCollection')
results = self._collection.search(embedding=embedding, limit=k,
metadata=metadata, **kwargs)
return [Document(page_content=doc.content, metadata=doc.metadata) for
doc in results]
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
metadata: Optional, metadata filter
Returns:
List of Documents most similar to the query vector.
|
lazy_load
|
"""Lazily load weather data for the given locations."""
for place in self.places:
metadata = {'queried_at': datetime.now()}
content = self.client.run(place)
yield Document(page_content=content, metadata=metadata)
|
def lazy_load(self) ->Iterator[Document]:
"""Lazily load weather data for the given locations."""
for place in self.places:
metadata = {'queried_at': datetime.now()}
content = self.client.run(place)
yield Document(page_content=content, metadata=metadata)
|
Lazily load weather data for the given locations.
|
mock_lakefs_client_no_presign_not_local
|
with patch('langchain_community.document_loaders.lakefs.LakeFSClient'
) as mock_lakefs_client:
mock_lakefs_client.return_value.ls_objects.return_value = [(
'path_bla.txt', 'https://physical_address_bla')]
mock_lakefs_client.return_value.is_presign_supported.return_value = False
yield mock_lakefs_client.return_value
|
@pytest.fixture
def mock_lakefs_client_no_presign_not_local() ->Any:
with patch('langchain_community.document_loaders.lakefs.LakeFSClient'
) as mock_lakefs_client:
mock_lakefs_client.return_value.ls_objects.return_value = [(
'path_bla.txt', 'https://physical_address_bla')]
(mock_lakefs_client.return_value.is_presign_supported.return_value
) = False
yield mock_lakefs_client.return_value
| null |
similarity_search_with_score_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
'Using dict as a `filter` is deprecated. Please use qdrant-client filters directly: https://qdrant.tech/documentation/concepts/filtering/'
, DeprecationWarning)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = self.vector_name, embedding
results = self.client.search(collection_name=self.collection_name,
query_vector=query_vector, query_filter=qdrant_filter, search_params=
search_params, limit=k, offset=offset, with_payload=True, with_vectors=
False, score_threshold=score_threshold, consistency=consistency, **kwargs)
return [(self._document_from_scored_point(result, self.content_payload_key,
self.metadata_payload_key), result.score) for result in results]
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
int=4, filter: Optional[MetadataFilter]=None, search_params: Optional[
common_types.SearchParams]=None, offset: int=0, score_threshold:
Optional[float]=None, consistency: Optional[common_types.
ReadConsistency]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
"""
if filter is not None and isinstance(filter, dict):
warnings.warn(
'Using dict as a `filter` is deprecated. Please use qdrant-client filters directly: https://qdrant.tech/documentation/concepts/filtering/'
, DeprecationWarning)
qdrant_filter = self._qdrant_filter_from_dict(filter)
else:
qdrant_filter = filter
query_vector = embedding
if self.vector_name is not None:
query_vector = self.vector_name, embedding
results = self.client.search(collection_name=self.collection_name,
query_vector=query_vector, query_filter=qdrant_filter,
search_params=search_params, limit=k, offset=offset, with_payload=
True, with_vectors=False, score_threshold=score_threshold,
consistency=consistency, **kwargs)
return [(self._document_from_scored_point(result, self.
content_payload_key, self.metadata_payload_key), result.score) for
result in results]
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
offset:
Offset of the first result to return.
May be used to paginate results.
Note: large offset values may cause performance issues.
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of documents most similar to the query text and distance for each.
|
download_audio_from_url
|
"""Download audio from url to local."""
ext = audio_url.split('.')[-1]
response = requests.get(audio_url, stream=True)
response.raise_for_status()
with tempfile.NamedTemporaryFile(mode='wb', suffix=f'.{ext}', delete=False
) as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
return f.name
|
def download_audio_from_url(audio_url: str) ->str:
"""Download audio from url to local."""
ext = audio_url.split('.')[-1]
response = requests.get(audio_url, stream=True)
response.raise_for_status()
with tempfile.NamedTemporaryFile(mode='wb', suffix=f'.{ext}', delete=False
) as f:
for chunk in response.iter_content(chunk_size=8192):
f.write(chunk)
return f.name
|
Download audio from url to local.
|
lazy_load
|
if self.global_id and 'GL' in self.global_id:
for d in self._load_pdf():
yield d
elif self.global_id and 'SD' in self.global_id:
for d in self._load_structured_doc():
yield d
elif self.global_id and self.global_id[0:2] in ['FL', 'NB']:
for d in self._load_folder_tree():
yield d
else:
raise ValueError('Unknown global ID type')
|
def lazy_load(self) ->Iterator[Document]:
if self.global_id and 'GL' in self.global_id:
for d in self._load_pdf():
yield d
elif self.global_id and 'SD' in self.global_id:
for d in self._load_structured_doc():
yield d
elif self.global_id and self.global_id[0:2] in ['FL', 'NB']:
for d in self._load_folder_tree():
yield d
else:
raise ValueError('Unknown global ID type')
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model': self.model}, **self._default_params}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {**{'model': self.model}, **self._default_params}
|
Get the identifying parameters.
|
_create_empty_doc
|
"""Creates or replaces a document for this message history with no
messages"""
self.client.Documents.add_documents(collection=self.collection, workspace=
self.workspace, data=[{'_id': self.session_id, self.messages_key: []}])
|
def _create_empty_doc(self) ->None:
"""Creates or replaces a document for this message history with no
messages"""
self.client.Documents.add_documents(collection=self.collection,
workspace=self.workspace, data=[{'_id': self.session_id, self.
messages_key: []}])
|
Creates or replaces a document for this message history with no
messages
|
create_schema
|
"""Create the database schema."""
if isinstance(self.engine, AsyncEngine):
raise AssertionError('This method is not supported for async engines.')
Base.metadata.create_all(self.engine)
|
def create_schema(self) ->None:
"""Create the database schema."""
if isinstance(self.engine, AsyncEngine):
raise AssertionError('This method is not supported for async engines.')
Base.metadata.create_all(self.engine)
|
Create the database schema.
|
_validate_example_inputs_for_language_model
|
if input_mapper:
prompt_input = input_mapper(first_example.inputs)
if not isinstance(prompt_input, str) and not (isinstance(prompt_input,
list) and all(isinstance(msg, BaseMessage) for msg in prompt_input)):
raise InputFormatError(
f"""When using an input_mapper to prepare dataset example inputs for an LLM or chat model, the output must a single string or a list of chat messages.
Got: {prompt_input} of type {type(prompt_input)}."""
)
else:
try:
_get_prompt(first_example.inputs)
except InputFormatError:
try:
_get_messages(first_example.inputs)
except InputFormatError:
raise InputFormatError(
f'Example inputs do not match language model input format. Expected a dictionary with messages or a single prompt. Got: {first_example.inputs} Please update your dataset OR provide an input_mapper to convert the example.inputs to a compatible format for the llm or chat model you wish to evaluate.'
)
|
def _validate_example_inputs_for_language_model(first_example: Example,
input_mapper: Optional[Callable[[Dict], Any]]) ->None:
if input_mapper:
prompt_input = input_mapper(first_example.inputs)
if not isinstance(prompt_input, str) and not (isinstance(
prompt_input, list) and all(isinstance(msg, BaseMessage) for
msg in prompt_input)):
raise InputFormatError(
f"""When using an input_mapper to prepare dataset example inputs for an LLM or chat model, the output must a single string or a list of chat messages.
Got: {prompt_input} of type {type(prompt_input)}."""
)
else:
try:
_get_prompt(first_example.inputs)
except InputFormatError:
try:
_get_messages(first_example.inputs)
except InputFormatError:
raise InputFormatError(
f'Example inputs do not match language model input format. Expected a dictionary with messages or a single prompt. Got: {first_example.inputs} Please update your dataset OR provide an input_mapper to convert the example.inputs to a compatible format for the llm or chat model you wish to evaluate.'
)
| null |
format
|
if self.auto_embed:
return self.format_auto_embed_on(event)
else:
return self.format_auto_embed_off(event)
|
def format(self, event: PickBestEvent) ->str:
if self.auto_embed:
return self.format_auto_embed_on(event)
else:
return self.format_auto_embed_off(event)
| null |
_is_visible
|
return not any(part.startswith('.') for part in path.parts)
|
@staticmethod
def _is_visible(path: Path) ->bool:
return not any(part.startswith('.') for part in path.parts)
| null |
_bes_vector_store
|
index_name = kwargs.get('index_name')
if index_name is None:
raise ValueError('Please provide an index_name.')
bes_url = kwargs.get('bes_url')
if bes_url is None:
raise ValueError('Please provided a valid bes connection url')
return BESVectorStore(embedding=embedding, **kwargs)
|
@staticmethod
def _bes_vector_store(embedding: Optional[Embeddings]=None, **kwargs: Any
) ->'BESVectorStore':
index_name = kwargs.get('index_name')
if index_name is None:
raise ValueError('Please provide an index_name.')
bes_url = kwargs.get('bes_url')
if bes_url is None:
raise ValueError('Please provided a valid bes connection url')
return BESVectorStore(embedding=embedding, **kwargs)
| null |
test_run_success_all_meta
|
api_client.load_all_available_meta = True
responses.add(responses.POST, api_client.outline_instance_url + api_client.
outline_search_endpoint, json=OUTLINE_SUCCESS_RESPONSE, status=200)
docs = api_client.run('Testing')
assert_docs(docs, all_meta=True)
|
@responses.activate
def test_run_success_all_meta(api_client: OutlineAPIWrapper) ->None:
api_client.load_all_available_meta = True
responses.add(responses.POST, api_client.outline_instance_url +
api_client.outline_search_endpoint, json=OUTLINE_SUCCESS_RESPONSE,
status=200)
docs = api_client.run('Testing')
assert_docs(docs, all_meta=True)
| null |
time_weighted_retriever
|
vectorstore = MockVectorStore()
return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore,
memory_stream=_get_example_memories())
|
@pytest.fixture
def time_weighted_retriever() ->TimeWeightedVectorStoreRetriever:
vectorstore = MockVectorStore()
return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore,
memory_stream=_get_example_memories())
| null |
_import_elastic_vector_search
|
from langchain_community.vectorstores.elastic_vector_search import ElasticVectorSearch
return ElasticVectorSearch
|
def _import_elastic_vector_search() ->Any:
from langchain_community.vectorstores.elastic_vector_search import ElasticVectorSearch
return ElasticVectorSearch
| null |
_If
|
self.fill('if ')
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
while t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If):
t = t.orelse[0]
self.fill('elif ')
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill('else')
self.enter()
self.dispatch(t.orelse)
self.leave()
|
def _If(self, t):
self.fill('if ')
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
while t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If):
t = t.orelse[0]
self.fill('elif ')
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill('else')
self.enter()
self.dispatch(t.orelse)
self.leave()
| null |
test_bedrock_invoke
|
"""Test invoke tokens from BedrockChat."""
result = chat.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str)
|
@pytest.mark.scheduled
def test_bedrock_invoke(chat: BedrockChat) ->None:
"""Test invoke tokens from BedrockChat."""
result = chat.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str)
|
Test invoke tokens from BedrockChat.
|
_import_baidu_qianfan_endpoint
|
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
|
def _import_baidu_qianfan_endpoint() ->Any:
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
| null |
_identifying_params
|
return {'model_name': self.model}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
return {'model_name': self.model}
| null |
test_infer_variables
|
messages = [HumanMessagePromptTemplate.from_template('{foo}')]
prompt = ChatPromptTemplate(messages=messages)
assert prompt.input_variables == ['foo']
|
def test_infer_variables() ->None:
messages = [HumanMessagePromptTemplate.from_template('{foo}')]
prompt = ChatPromptTemplate(messages=messages)
assert prompt.input_variables == ['foo']
| null |
memory_variables
|
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
|
@property
def memory_variables(self) ->List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
|
Will always return list of memory variables.
:meta private:
|
test_dereference_refs_no_refs
|
schema = {'type': 'object', 'properties': {'first_name': {'type': 'string'}}}
actual = dereference_refs(schema)
assert actual == schema
|
def test_dereference_refs_no_refs() ->None:
schema = {'type': 'object', 'properties': {'first_name': {'type':
'string'}}}
actual = dereference_refs(schema)
assert actual == schema
| null |
_prepare_request
|
return {'url': 'https://www.searchapi.io/api/v1/search', 'headers': {
'Authorization': f'Bearer {self.searchapi_api_key}'}, 'params': {
'engine': self.engine, 'q': query, **{key: value for key, value in
kwargs.items() if value is not None}}}
|
def _prepare_request(self, query: str, **kwargs: Any) ->dict:
return {'url': 'https://www.searchapi.io/api/v1/search', 'headers': {
'Authorization': f'Bearer {self.searchapi_api_key}'}, 'params': {
'engine': self.engine, 'q': query, **{key: value for key, value in
kwargs.items() if value is not None}}}
| null |
test_ip
|
"""Test inner product distance."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
TEST_REDIS_URL, vector_schema=ip_schema)
output = docsearch.similarity_search_with_score('far', k=2)
_, score = output[1]
assert score == IP_SCORE
assert drop(docsearch.index_name)
|
def test_ip(texts: List[str]) ->None:
"""Test inner product distance."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
TEST_REDIS_URL, vector_schema=ip_schema)
output = docsearch.similarity_search_with_score('far', k=2)
_, score = output[1]
assert score == IP_SCORE
assert drop(docsearch.index_name)
|
Test inner product distance.
|
create_qa_with_structure_chain
|
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
"""
if output_parser == 'pydantic':
if not (isinstance(schema, type) and issubclass(schema, BaseModel)):
raise ValueError(
"Must provide a pydantic class for schema when output_parser is 'pydantic'."
)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema)
elif output_parser == 'base':
_output_parser = OutputFunctionsParser()
else:
raise ValueError(
f'Got unexpected output_parser: {output_parser}. Should be one of `pydantic` or `base`.'
)
if isinstance(schema, type) and issubclass(schema, BaseModel):
schema_dict = schema.schema()
else:
schema_dict = schema
function = {'name': schema_dict['title'], 'description': schema_dict[
'description'], 'parameters': schema_dict}
llm_kwargs = get_llm_kwargs(function)
messages = [SystemMessage(content=
'You are a world class algorithm to answer questions in a specific format.'
), HumanMessage(content='Answer question using the following context'),
HumanMessagePromptTemplate.from_template('{context}'),
HumanMessagePromptTemplate.from_template('Question: {question}'),
HumanMessage(content='Tips: Make sure to answer in the correct format')]
prompt = prompt or ChatPromptTemplate(messages=messages)
chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs=llm_kwargs,
output_parser=_output_parser, verbose=verbose)
return chain
|
def create_qa_with_structure_chain(llm: BaseLanguageModel, schema: Union[
dict, Type[BaseModel]], output_parser: str='base', prompt: Optional[
Union[PromptTemplate, ChatPromptTemplate]]=None, verbose: bool=False
) ->LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
"""
if output_parser == 'pydantic':
if not (isinstance(schema, type) and issubclass(schema, BaseModel)):
raise ValueError(
"Must provide a pydantic class for schema when output_parser is 'pydantic'."
)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema)
elif output_parser == 'base':
_output_parser = OutputFunctionsParser()
else:
raise ValueError(
f'Got unexpected output_parser: {output_parser}. Should be one of `pydantic` or `base`.'
)
if isinstance(schema, type) and issubclass(schema, BaseModel):
schema_dict = schema.schema()
else:
schema_dict = schema
function = {'name': schema_dict['title'], 'description': schema_dict[
'description'], 'parameters': schema_dict}
llm_kwargs = get_llm_kwargs(function)
messages = [SystemMessage(content=
'You are a world class algorithm to answer questions in a specific format.'
), HumanMessage(content=
'Answer question using the following context'),
HumanMessagePromptTemplate.from_template('{context}'),
HumanMessagePromptTemplate.from_template('Question: {question}'),
HumanMessage(content='Tips: Make sure to answer in the correct format')
]
prompt = prompt or ChatPromptTemplate(messages=messages)
chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs=llm_kwargs,
output_parser=_output_parser, verbose=verbose)
return chain
|
Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
|
stream
|
return self.transform(iter([input]), config, **kwargs)
|
def stream(self, input: Other, config: Optional[RunnableConfig]=None, **
    kwargs: Any) ->Iterator[Other]:
    """Stream output for a single input.

    The input is wrapped in a one-element iterator and delegated to
    ``transform``, so the streaming behavior is whatever ``transform``
    implements.
    """
    return self.transform(iter([input]), config, **kwargs)
| null |
embed_query
|
"""Embed query text."""
raise NotImplementedError
|
def embed_query(self, text: str) ->List[float]:
    """Embed query text.

    Args:
        text: The query text to embed.

    Returns:
        The embedding vector for the query.

    Raises:
        NotImplementedError: Always; subclasses must override this method.
    """
    raise NotImplementedError
|
Embed query text.
|
on_retriever_start
|
self.on_retriever_start_common()
|
def on_retriever_start(self, *args: Any, **kwargs: Any) ->Any:
self.on_retriever_start_common()
| null |
_get_builtin_translator
|
"""Get the translator class corresponding to the vector store class."""
BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = {Pinecone:
PineconeTranslator, Chroma: ChromaTranslator, DashVector:
DashvectorTranslator, Weaviate: WeaviateTranslator, Vectara:
VectaraTranslator, Qdrant: QdrantTranslator, MyScale: MyScaleTranslator,
DeepLake: DeepLakeTranslator, ElasticsearchStore:
ElasticsearchTranslator, Milvus: MilvusTranslator, SupabaseVectorStore:
SupabaseVectorTranslator, TimescaleVector: TimescaleVectorTranslator,
OpenSearchVectorSearch: OpenSearchTranslator, MongoDBAtlasVectorSearch:
MongoDBAtlasTranslator}
if isinstance(vectorstore, Qdrant):
return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
elif isinstance(vectorstore, MyScale):
return MyScaleTranslator(metadata_key=vectorstore.metadata_column)
elif isinstance(vectorstore, Redis):
return RedisTranslator.from_vectorstore(vectorstore)
elif vectorstore.__class__ in BUILTIN_TRANSLATORS:
return BUILTIN_TRANSLATORS[vectorstore.__class__]()
else:
raise ValueError(
f'Self query retriever with Vector Store type {vectorstore.__class__} not supported.'
)
|
def _get_builtin_translator(vectorstore: VectorStore) ->Visitor:
    """Get the translator class corresponding to the vector store class.

    Args:
        vectorstore: Vector store instance to find a query translator for.

    Returns:
        A ``Visitor`` translator instance appropriate for the store.

    Raises:
        ValueError: If no translator is registered for the store's type.
    """
    # Zero-argument translators, keyed by exact vector store class.
    # Qdrant and MyScale are intentionally NOT listed here: they need
    # per-instance state and are handled by the isinstance checks below
    # (in the original they were also listed here, but those entries were
    # unreachable because the isinstance branches always matched first).
    BUILTIN_TRANSLATORS: Dict[Type[VectorStore], Type[Visitor]] = {Pinecone:
        PineconeTranslator, Chroma: ChromaTranslator, DashVector:
        DashvectorTranslator, Weaviate: WeaviateTranslator, Vectara:
        VectaraTranslator, DeepLake: DeepLakeTranslator, ElasticsearchStore:
        ElasticsearchTranslator, Milvus: MilvusTranslator,
        SupabaseVectorStore: SupabaseVectorTranslator, TimescaleVector:
        TimescaleVectorTranslator, OpenSearchVectorSearch:
        OpenSearchTranslator, MongoDBAtlasVectorSearch: MongoDBAtlasTranslator}
    # These translators are constructed from instance state, so they
    # cannot go through the zero-argument mapping above.
    if isinstance(vectorstore, Qdrant):
        return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
    elif isinstance(vectorstore, MyScale):
        return MyScaleTranslator(metadata_key=vectorstore.metadata_column)
    elif isinstance(vectorstore, Redis):
        return RedisTranslator.from_vectorstore(vectorstore)
    elif vectorstore.__class__ in BUILTIN_TRANSLATORS:
        return BUILTIN_TRANSLATORS[vectorstore.__class__]()
    else:
        raise ValueError(
            f'Self query retriever with Vector Store type {vectorstore.__class__} not supported.'
            )
|
Get the translator class corresponding to the vector store class.
|
test_serialize_llmchain_chat
|
llm = ChatOpenAI(model='davinci', temperature=0.5, openai_api_key='hello')
prompt = ChatPromptTemplate.from_messages([HumanMessagePromptTemplate.
from_template('hello {name}!')])
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
import os
has_env = 'OPENAI_API_KEY' in os.environ
if not has_env:
os.environ['OPENAI_API_KEY'] = 'env_variable'
llm_2 = ChatOpenAI(model='davinci', temperature=0.5)
prompt_2 = ChatPromptTemplate.from_messages([HumanMessagePromptTemplate.
from_template('hello {name}!')])
chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)
assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)
if not has_env:
del os.environ['OPENAI_API_KEY']
|
@pytest.mark.requires('openai')
def test_serialize_llmchain_chat(snapshot: Any) ->None:
    """Serialization of a ChatOpenAI LLMChain matches the snapshot, and a
    chain whose API key comes from the environment serializes identically
    to one with an explicitly passed key (the two dumps compare equal even
    though the key values differ, so the raw secret is presumably not
    embedded in the dump — confirmed only indirectly by this equality).
    """
    llm = ChatOpenAI(model='davinci', temperature=0.5, openai_api_key='hello')
    prompt = ChatPromptTemplate.from_messages([HumanMessagePromptTemplate.
        from_template('hello {name}!')])
    chain = LLMChain(llm=llm, prompt=prompt)
    assert dumps(chain, pretty=True) == snapshot
    import os
    # Temporarily inject the key via the environment so the second chain
    # picks it up implicitly; remember whether it was already set so we
    # only clean up what we added.
    has_env = 'OPENAI_API_KEY' in os.environ
    if not has_env:
        os.environ['OPENAI_API_KEY'] = 'env_variable'
    llm_2 = ChatOpenAI(model='davinci', temperature=0.5)
    prompt_2 = ChatPromptTemplate.from_messages([HumanMessagePromptTemplate
        .from_template('hello {name}!')])
    chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)
    assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)
    if not has_env:
        # Restore the environment for subsequent tests.
        del os.environ['OPENAI_API_KEY']
| null |
test_parse_case_matched_and_final_answer
|
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "Final Answer",
"action_input": "This is the final answer"
}
```
"""
output, log = get_action_and_input(llm_output)
assert output == 'This is the final answer'
assert log == llm_output
|
def test_parse_case_matched_and_final_answer() ->None:
    """A fenced JSON action block whose action is "Final Answer" parses to
    the final answer string, with the full LLM output returned as the log.
    """
    llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
    "action": "Final Answer",
    "action_input": "This is the final answer"
}
```
"""
    output, log = get_action_and_input(llm_output)
    assert output == 'This is the final answer'
    assert log == llm_output
| null |
_wait
|
"""Wait for a response from API after an initial response is made."""
i = 1
while response.status_code == 202:
request_id = response.headers.get('NVCF-REQID', '')
response = session.get(self.fetch_url_format + request_id, headers=self
.headers['call'])
if response.status_code == 202:
try:
body = response.json()
except ValueError:
body = str(response)
if i > self.max_tries:
raise ValueError(f'Failed to get response with {i} tries: {body}')
self._try_raise(response)
return response
|
def _wait(self, response: Response, session: Any) ->Response:
"""Wait for a response from API after an initial response is made."""
i = 1
while response.status_code == 202:
request_id = response.headers.get('NVCF-REQID', '')
response = session.get(self.fetch_url_format + request_id, headers=
self.headers['call'])
if response.status_code == 202:
try:
body = response.json()
except ValueError:
body = str(response)
if i > self.max_tries:
raise ValueError(
f'Failed to get response with {i} tries: {body}')
self._try_raise(response)
return response
|
Wait for a response from API after an initial response is made.
|
on_tool_start
|
self.on_tool_start_common()
|
def on_tool_start(self, *args: Any, **kwargs: Any) ->Any:
    """Callback hook fired when a tool run starts.

    Ignores its arguments and delegates to the shared
    ``on_tool_start_common`` handler.
    """
    self.on_tool_start_common()
| null |
get_custom_callback_meta
|
return {'step': self.step, 'starts': self.starts, 'ends': self.ends,
'errors': self.errors, 'text_ctr': self.text_ctr, 'chain_starts': self.
chain_starts, 'chain_ends': self.chain_ends, 'llm_starts': self.
llm_starts, 'llm_ends': self.llm_ends, 'llm_streams': self.llm_streams,
'tool_starts': self.tool_starts, 'tool_ends': self.tool_ends,
'agent_ends': self.agent_ends}
|
def get_custom_callback_meta(self) ->Dict[str, Any]:
    """Collect this handler's run counters into a single metadata dict.

    Returns:
        A dict mapping each counter name to its current value on ``self``.
    """
    counter_names = ('step', 'starts', 'ends', 'errors', 'text_ctr',
        'chain_starts', 'chain_ends', 'llm_starts', 'llm_ends',
        'llm_streams', 'tool_starts', 'tool_ends', 'agent_ends')
    return {name: getattr(self, name) for name in counter_names}
| null |
_call
|
try:
import openllm
except ImportError as e:
raise ImportError(
"Could not import openllm. Make sure to install it with 'pip install openllm'."
) from e
copied = copy.deepcopy(self.llm_kwargs)
copied.update(kwargs)
config = openllm.AutoConfig.for_model(self._identifying_params['model_name'
], **copied)
if self._client:
res = self._client.generate(prompt, **config.model_dump(flatten=True)
).responses[0]
else:
assert self._runner is not None
res = self._runner(prompt, **config.model_dump(flatten=True))
if isinstance(res, dict) and 'text' in res:
return res['text']
elif isinstance(res, str):
return res
else:
raise ValueError(
f"Expected result to be a dict with key 'text' or a string. Received {res}"
)
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Generate a completion for ``prompt`` via OpenLLM.

    Args:
        prompt: The prompt to complete.
        stop: Accepted for interface compatibility; not referenced by this
            implementation.
        run_manager: Accepted for interface compatibility; not referenced
            by this implementation.
        **kwargs: Extra generation options merged over ``self.llm_kwargs``.

    Returns:
        The generated text.

    Raises:
        ImportError: If the ``openllm`` package is not installed.
        ValueError: If the backend returns neither a string nor a dict
            with a 'text' key.
    """
    try:
        import openllm
    except ImportError as e:
        raise ImportError(
            "Could not import openllm. Make sure to install it with 'pip install openllm'."
            ) from e
    # Deep-copy so neither self.llm_kwargs nor the caller's kwargs are
    # mutated by the merge.
    copied = copy.deepcopy(self.llm_kwargs)
    copied.update(kwargs)
    config = openllm.AutoConfig.for_model(self._identifying_params[
        'model_name'], **copied)
    # Prefer the remote HTTP client when configured; otherwise fall back
    # to the in-process runner.
    if self._client:
        res = self._client.generate(prompt, **config.model_dump(flatten=True)
            ).responses[0]
    else:
        assert self._runner is not None
        res = self._runner(prompt, **config.model_dump(flatten=True))
    if isinstance(res, dict) and 'text' in res:
        return res['text']
    elif isinstance(res, str):
        return res
    else:
        raise ValueError(
            f"Expected result to be a dict with key 'text' or a string. Received {res}"
            )
| null |
_identifying_params
|
return self._default_params
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Parameters identifying this LLM instance; mirrors the default
    call parameters."""
    return self._default_params
| null |
authenticate
|
"""Authenticate using the AIN Blockchain"""
try:
from ain.ain import Ain
except ImportError as e:
raise ImportError(
'Cannot import ain-py related modules. Please install the package with `pip install ain-py`.'
) from e
if network == 'mainnet':
provider_url = 'https://mainnet-api.ainetwork.ai/'
chain_id = 1
if 'AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY' in os.environ:
private_key = os.environ['AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY']
else:
raise EnvironmentError(
'Error: The AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY environmental variable has not been set.'
)
elif network == 'testnet':
provider_url = 'https://testnet-api.ainetwork.ai/'
chain_id = 0
if 'AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY' in os.environ:
private_key = os.environ['AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY']
else:
raise EnvironmentError(
'Error: The AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY environmental variable has not been set.'
)
elif network is None:
if ('AIN_BLOCKCHAIN_PROVIDER_URL' in os.environ and
'AIN_BLOCKCHAIN_CHAIN_ID' in os.environ and
'AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY' in os.environ):
provider_url = os.environ['AIN_BLOCKCHAIN_PROVIDER_URL']
chain_id = int(os.environ['AIN_BLOCKCHAIN_CHAIN_ID'])
private_key = os.environ['AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY']
else:
raise EnvironmentError(
'Error: The AIN_BLOCKCHAIN_PROVIDER_URL and AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY and AIN_BLOCKCHAIN_CHAIN_ID environmental variable has not been set.'
)
else:
raise ValueError(f"Unsupported 'network': {network}")
ain = Ain(provider_url, chain_id)
ain.wallet.addAndSetDefaultAccount(private_key)
return ain
|
def authenticate(network: Optional[Literal['mainnet', 'testnet']]='testnet'
    ) ->Ain:
    """Authenticate using the AIN Blockchain.

    Args:
        network: 'mainnet' or 'testnet' to use the public endpoints (the
            private key is then read from the
            AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY environment variable), or
            None to take provider URL, chain id and private key entirely
            from environment variables.

    Returns:
        An ``Ain`` client with the default account set from the private key.

    Raises:
        ImportError: If the ``ain-py`` package is not installed.
        EnvironmentError: If a required environment variable is not set.
        ValueError: If ``network`` is not one of the supported values.
    """
    try:
        from ain.ain import Ain
    except ImportError as e:
        raise ImportError(
            'Cannot import ain-py related modules. Please install the package with `pip install ain-py`.'
            ) from e
    if network in ('mainnet', 'testnet'):
        # The two public networks differ only in endpoint and chain id;
        # both read the private key from the same environment variable
        # (the original duplicated this lookup in each branch).
        if network == 'mainnet':
            provider_url = 'https://mainnet-api.ainetwork.ai/'
            chain_id = 1
        else:
            provider_url = 'https://testnet-api.ainetwork.ai/'
            chain_id = 0
        if 'AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY' in os.environ:
            private_key = os.environ['AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY']
        else:
            raise EnvironmentError(
                'Error: The AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY environmental variable has not been set.'
                )
    elif network is None:
        # Fully custom network: everything must come from the environment.
        if ('AIN_BLOCKCHAIN_PROVIDER_URL' in os.environ and
            'AIN_BLOCKCHAIN_CHAIN_ID' in os.environ and
            'AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY' in os.environ):
            provider_url = os.environ['AIN_BLOCKCHAIN_PROVIDER_URL']
            chain_id = int(os.environ['AIN_BLOCKCHAIN_CHAIN_ID'])
            private_key = os.environ['AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY']
        else:
            raise EnvironmentError(
                'Error: The AIN_BLOCKCHAIN_PROVIDER_URL and AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY and AIN_BLOCKCHAIN_CHAIN_ID environmental variable has not been set.'
                )
    else:
        raise ValueError(f"Unsupported 'network': {network}")
    ain = Ain(provider_url, chain_id)
    ain.wallet.addAndSetDefaultAccount(private_key)
    return ain
|
Authenticate using the AIN Blockchain
|
_url
|
return f'https://api.deepinfra.com/v1/inference/{self.model_id}'
|
def _url(self) ->str:
return f'https://api.deepinfra.com/v1/inference/{self.model_id}'
| null |
test_saving_loading_llm
|
"""Test saving/loading an promptlayer OpenAPI LLM."""
llm = PromptLayerOpenAIChat(max_tokens=10)
llm.save(file_path=tmp_path / 'openai.yaml')
loaded_llm = load_llm(tmp_path / 'openai.yaml')
assert loaded_llm == llm
|
def test_saving_loading_llm(tmp_path: Path) ->None:
    """Test saving/loading a PromptLayer OpenAI chat LLM.

    Round-trips the model through a YAML file and checks equality with
    the original instance.
    """
    llm = PromptLayerOpenAIChat(max_tokens=10)
    llm.save(file_path=tmp_path / 'openai.yaml')
    loaded_llm = load_llm(tmp_path / 'openai.yaml')
    assert loaded_llm == llm
|
Test saving/loading a PromptLayer OpenAI LLM.
|
visit_structured_query
|
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
'Cannot import qdrant_client. Please install with `pip install qdrant-client`.'
) from e
if structured_query.filter is None:
kwargs = {}
else:
filter = structured_query.filter.accept(self)
if isinstance(filter, rest.FieldCondition):
filter = rest.Filter(must=[filter])
kwargs = {'filter': filter}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
    str, dict]:
    """Translate a ``StructuredQuery`` into a Qdrant query and kwargs.

    Args:
        structured_query: The parsed query, optionally carrying a filter.

    Returns:
        Tuple of the raw query string and a kwargs dict; the dict holds a
        Qdrant ``Filter`` under 'filter' when a filter is present, and is
        empty otherwise.

    Raises:
        ImportError: If ``qdrant-client`` is not installed.
    """
    try:
        from qdrant_client.http import models as rest
    except ImportError as e:
        raise ImportError(
            'Cannot import qdrant_client. Please install with `pip install qdrant-client`.'
            ) from e
    if structured_query.filter is None:
        kwargs = {}
    else:
        filter = structured_query.filter.accept(self)
        # A bare FieldCondition is wrapped in a Filter so the result is
        # always a top-level Filter object.
        if isinstance(filter, rest.FieldCondition):
            filter = rest.Filter(must=[filter])
        kwargs = {'filter': filter}
    return structured_query.query, kwargs
| null |
__init__
|
"""Initialize with necessary components."""
self.embedding = embedding
self.embedding_function = embedding.embed_query
self.index_uri = index_uri
self.metric = metric
self.config = config
tiledb_vs, tiledb = dependable_tiledb_import()
with tiledb.scope_ctx(ctx_or_config=config):
index_group = tiledb.Group(self.index_uri, 'r')
self.vector_index_uri = (vector_index_uri if vector_index_uri != '' else
get_vector_index_uri_from_group(index_group))
self.docs_array_uri = (docs_array_uri if docs_array_uri != '' else
get_documents_array_uri_from_group(index_group))
index_group.close()
group = tiledb.Group(self.vector_index_uri, 'r')
self.index_type = group.meta.get('index_type')
group.close()
self.timestamp = timestamp
if self.index_type == 'FLAT':
self.vector_index = tiledb_vs.flat_index.FlatIndex(uri=self.
vector_index_uri, config=self.config, timestamp=self.timestamp,
**kwargs)
elif self.index_type == 'IVF_FLAT':
self.vector_index = tiledb_vs.ivf_flat_index.IVFFlatIndex(uri=self.
vector_index_uri, config=self.config, timestamp=self.timestamp,
**kwargs)
|
def __init__(self, embedding: Embeddings, index_uri: str, metric: str, *,
    vector_index_uri: str='', docs_array_uri: str='', config: Optional[
    Mapping[str, Any]]=None, timestamp: Any=None, **kwargs: Any):
    """Initialize with necessary components.

    Args:
        embedding: Embedding wrapper; its ``embed_query`` is used as the
            embedding function.
        index_uri: URI of the TileDB index group.
        metric: Distance metric name.
        vector_index_uri: Explicit vector index URI; when empty it is
            resolved from the index group.
        docs_array_uri: Explicit documents array URI; when empty it is
            resolved from the index group.
        config: Optional TileDB config mapping.
        timestamp: Optional timestamp at which to open the vector index.
        **kwargs: Passed through to the underlying TileDB vector index.
    """
    self.embedding = embedding
    self.embedding_function = embedding.embed_query
    self.index_uri = index_uri
    self.metric = metric
    self.config = config
    tiledb_vs, tiledb = dependable_tiledb_import()
    with tiledb.scope_ctx(ctx_or_config=config):
        # Resolve component URIs from the index group unless they were
        # given explicitly.
        index_group = tiledb.Group(self.index_uri, 'r')
        self.vector_index_uri = (vector_index_uri if vector_index_uri != ''
            else get_vector_index_uri_from_group(index_group))
        self.docs_array_uri = (docs_array_uri if docs_array_uri != '' else
            get_documents_array_uri_from_group(index_group))
        index_group.close()
        # The index type is stored in the group metadata and selects which
        # index implementation ('FLAT' or 'IVF_FLAT') is opened below.
        group = tiledb.Group(self.vector_index_uri, 'r')
        self.index_type = group.meta.get('index_type')
        group.close()
        self.timestamp = timestamp
        if self.index_type == 'FLAT':
            self.vector_index = tiledb_vs.flat_index.FlatIndex(uri=self.
                vector_index_uri, config=self.config, timestamp=self.
                timestamp, **kwargs)
        elif self.index_type == 'IVF_FLAT':
            self.vector_index = tiledb_vs.ivf_flat_index.IVFFlatIndex(uri=
                self.vector_index_uri, config=self.config, timestamp=self.
                timestamp, **kwargs)
|
Initialize with necessary components.
|
lc_secrets
|
return {'replicate_api_token': 'REPLICATE_API_TOKEN'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map the constructor field holding the API token to the environment
    variable that supplies it."""
    secret_env_map = dict(replicate_api_token='REPLICATE_API_TOKEN')
    return secret_env_map
| null |
resolve_criteria
|
"""Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples
--------
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
"""
if criteria is None:
return {'helpfulness': _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]}
if isinstance(criteria, Criteria):
criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, str):
criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
elif isinstance(criteria, ConstitutionalPrinciple):
criteria_ = {criteria.name: criteria.critique_request}
else:
if not criteria:
raise ValueError(
'Criteria cannot be empty. Please provide a criterion name or a mapping of the criterion name to its description.'
)
criteria_ = dict(criteria)
return criteria_
|
def resolve_criteria(criteria: Optional[Union[CRITERIA_TYPE, str]]) ->Dict[
    str, str]:
    """Resolve the criteria to evaluate.

    Parameters
    ----------
    criteria : CRITERIA_TYPE
        The criteria to evaluate the runs against. It can be:
            - a mapping of a criterion name to its description
            - a single criterion name present in one of the default criteria
            - a single `ConstitutionalPrinciple` instance
        When None, defaults to the 'helpfulness' criterion.

    Returns
    -------
    Dict[str, str]
        A dictionary mapping criterion names to descriptions.

    Examples
    --------
    >>> criteria = "relevance"
    >>> CriteriaEvalChain.resolve_criteria(criteria)
    {'relevance': 'Is the submission referring to a real quote from the text?'}
    """
    if criteria is None:
        return {'helpfulness': _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]}
    if isinstance(criteria, Criteria):
        # NOTE(review): checked before plain str — Criteria appears to be
        # a string-based enum, so this branch order matters; confirm.
        criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
    elif isinstance(criteria, str):
        # Plain name: look it up among the default criteria.
        criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
    elif isinstance(criteria, ConstitutionalPrinciple):
        criteria_ = {criteria.name: criteria.critique_request}
    else:
        # Remaining case: a mapping of name -> description.
        if not criteria:
            raise ValueError(
                'Criteria cannot be empty. Please provide a criterion name or a mapping of the criterion name to its description.'
                )
        criteria_ = dict(criteria)
    return criteria_
|
Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples
--------
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
|
test_ideation
|
responses = ['Idea 1', 'Idea 2', 'Idea 3']
llm = FakeListLLM(responses=responses)
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = SmartLLMChain(llm=llm, prompt=prompt)
prompt_value, _ = chain.prep_prompts({'product': 'socks'})
chain.history.question = prompt_value.to_string()
results = chain._ideate()
assert results == responses
for i in range(1, 5):
responses = [f'Idea {j + 1}' for j in range(i)]
llm = FakeListLLM(responses=responses)
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=i)
prompt_value, _ = chain.prep_prompts({'product': 'socks'})
chain.history.question = prompt_value.to_string()
results = chain._ideate()
assert len(results) == i
|
def test_ideation() ->None:
    """SmartLLMChain._ideate returns the fake LLM's canned responses
    verbatim, and the number of ideas produced tracks ``n_ideas``."""
    responses = ['Idea 1', 'Idea 2', 'Idea 3']
    llm = FakeListLLM(responses=responses)
    prompt = PromptTemplate(input_variables=['product'], template=
        'What is a good name for a company that makes {product}?')
    chain = SmartLLMChain(llm=llm, prompt=prompt)
    prompt_value, _ = chain.prep_prompts({'product': 'socks'})
    chain.history.question = prompt_value.to_string()
    results = chain._ideate()
    assert results == responses
    # Vary n_ideas and check the idea count follows it.
    for i in range(1, 5):
        responses = [f'Idea {j + 1}' for j in range(i)]
        llm = FakeListLLM(responses=responses)
        chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=i)
        prompt_value, _ = chain.prep_prompts({'product': 'socks'})
        chain.history.question = prompt_value.to_string()
        results = chain._ideate()
        assert len(results) == i
| null |
test_chroma_mmr_by_vector
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
embeddings = FakeEmbeddings()
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=embeddings)
embedded_query = embeddings.embed_query('foo')
output = docsearch.max_marginal_relevance_search_by_vector(embedded_query, k=1)
assert output == [Document(page_content='foo')]
|
def test_chroma_mmr_by_vector() ->None:
    """Test end to end construction and search.

    Runs max-marginal-relevance search by an embedded query vector and
    expects the matching document back.
    """
    texts = ['foo', 'bar', 'baz']
    embeddings = FakeEmbeddings()
    docsearch = Chroma.from_texts(collection_name='test_collection', texts=
        texts, embedding=embeddings)
    embedded_query = embeddings.embed_query('foo')
    output = docsearch.max_marginal_relevance_search_by_vector(embedded_query,
        k=1)
    assert output == [Document(page_content='foo')]
|
Test end to end construction and search.
|
step
|
"""Take step."""
|
@abstractmethod
def step(self, inputs: dict, callbacks: Callbacks=None, **kwargs: Any
    ) ->StepResponse:
    """Take step.

    Args:
        inputs: Inputs for this step.
        callbacks: Optional callbacks to pass through to the step.
        **kwargs: Additional implementation-specific arguments.

    Returns:
        A ``StepResponse`` describing the outcome of the step.
    """
|
Take step.
|
_get_mock_confluence_loader
|
confluence_loader = ConfluenceLoader(self.CONFLUENCE_URL, username=self.
MOCK_USERNAME, api_key=self.MOCK_API_TOKEN)
confluence_loader.confluence = mock_confluence
return confluence_loader
|
def _get_mock_confluence_loader(self, mock_confluence: MagicMock
    ) ->ConfluenceLoader:
    """Build a ConfluenceLoader whose underlying client is replaced by the
    given mock, so tests never hit a real Confluence server."""
    confluence_loader = ConfluenceLoader(self.CONFLUENCE_URL, username=self
        .MOCK_USERNAME, api_key=self.MOCK_API_TOKEN)
    confluence_loader.confluence = mock_confluence
    return confluence_loader
| null |
_get_functions_multi_prompt
|
if suffix is not None:
suffix_to_use = suffix
if include_df_in_prompt:
dfs_head = '\n\n'.join([d.head(number_of_head_rows).to_markdown() for
d in dfs])
suffix_to_use = suffix_to_use.format(dfs_head=dfs_head)
elif include_df_in_prompt:
dfs_head = '\n\n'.join([d.head(number_of_head_rows).to_markdown() for d in
dfs])
suffix_to_use = FUNCTIONS_WITH_MULTI_DF.format(dfs_head=dfs_head)
else:
suffix_to_use = ''
if prefix is None:
prefix = MULTI_DF_PREFIX_FUNCTIONS
prefix = prefix.format(num_dfs=str(len(dfs)))
df_locals = {}
for i, dataframe in enumerate(dfs):
df_locals[f'df{i + 1}'] = dataframe
tools = [PythonAstREPLTool(locals=df_locals)]
system_message = SystemMessage(content=prefix + suffix_to_use)
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
return prompt, tools
|
def _get_functions_multi_prompt(dfs: Any, prefix: Optional[str]=None,
    suffix: Optional[str]=None, include_df_in_prompt: Optional[bool]=True,
    number_of_head_rows: int=5) ->Tuple[BasePromptTemplate, List[
    PythonAstREPLTool]]:
    """Build the prompt and tools for an OpenAI-functions agent over
    multiple dataframes.

    Args:
        dfs: Sequence of dataframes, exposed to the agent as df1..dfN.
        prefix: Optional prompt prefix; defaults to
            MULTI_DF_PREFIX_FUNCTIONS. May contain a {num_dfs} placeholder.
        suffix: Optional prompt suffix; may contain a {dfs_head} placeholder
            filled with the dataframes' head rows.
        include_df_in_prompt: Whether to embed the dataframes' head rows
            (as markdown) in the suffix.
        number_of_head_rows: Number of head rows per dataframe to embed.

    Returns:
        Tuple of the chat prompt and a single-element list holding a
        PythonAstREPLTool with df1..dfN bound in its locals.
    """
    # Compute the markdown preview once; the original duplicated this
    # join expression in two branches.
    dfs_head = ''
    if include_df_in_prompt:
        dfs_head = '\n\n'.join(d.head(number_of_head_rows).to_markdown() for
            d in dfs)
    if suffix is not None:
        suffix_to_use = suffix.format(dfs_head=dfs_head
            ) if include_df_in_prompt else suffix
    elif include_df_in_prompt:
        suffix_to_use = FUNCTIONS_WITH_MULTI_DF.format(dfs_head=dfs_head)
    else:
        suffix_to_use = ''
    if prefix is None:
        prefix = MULTI_DF_PREFIX_FUNCTIONS
    prefix = prefix.format(num_dfs=str(len(dfs)))
    df_locals = {}
    for i, dataframe in enumerate(dfs):
        df_locals[f'df{i + 1}'] = dataframe
    tools = [PythonAstREPLTool(locals=df_locals)]
    system_message = SystemMessage(content=prefix + suffix_to_use)
    prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
    return prompt, tools
| null |
test_non_zero_distance_pairwise
|
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = 'I like to eat apples.'
reference = 'I like apples.'
result = eval_chain.evaluate_string_pairs(prediction=prediction,
prediction_b=reference)
assert 'score' in result
assert 0 < result['score'] < 1.0
|
@pytest.mark.requires('rapidfuzz')
@pytest.mark.parametrize('distance', valid_distances)
def test_non_zero_distance_pairwise(distance: StringDistance) ->None:
    """For similar-but-unequal strings, every pairwise string-distance
    metric yields a score strictly between 0 and 1."""
    eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
    prediction = 'I like to eat apples.'
    reference = 'I like apples.'
    result = eval_chain.evaluate_string_pairs(prediction=prediction,
        prediction_b=reference)
    assert 'score' in result
    assert 0 < result['score'] < 1.0
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(values,
'huggingfacehub_api_token', 'HUGGINGFACEHUB_API_TOKEN')
try:
from huggingface_hub.inference_api import InferenceApi
repo_id = values['repo_id']
client = InferenceApi(repo_id=repo_id, token=huggingfacehub_api_token,
task=values.get('task'))
if client.task not in VALID_TASKS:
raise ValueError(
f'Got invalid task {client.task}, currently only {VALID_TASKS} are supported'
)
values['client'] = client
except ImportError:
raise ValueError(
'Could not import huggingface_hub python package. Please install it with `pip install huggingface_hub`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves the Hugging Face Hub token, constructs the InferenceApi
    client, checks its task is supported, and stores the client under
    ``values['client']``.

    Raises:
        ValueError: If huggingface_hub is missing or the task is invalid.
    """
    huggingfacehub_api_token = get_from_dict_or_env(values,
        'huggingfacehub_api_token', 'HUGGINGFACEHUB_API_TOKEN')
    # Keep the try block minimal: previously the client construction and
    # task validation were also inside it, obscuring which failures were
    # genuinely import errors. ValueError is kept (rather than
    # ImportError) to preserve the exception type callers may catch.
    try:
        from huggingface_hub.inference_api import InferenceApi
    except ImportError:
        raise ValueError(
            'Could not import huggingface_hub python package. Please install it with `pip install huggingface_hub`.'
            )
    repo_id = values['repo_id']
    client = InferenceApi(repo_id=repo_id, token=huggingfacehub_api_token,
        task=values.get('task'))
    if client.task not in VALID_TASKS:
        raise ValueError(
            f'Got invalid task {client.task}, currently only {VALID_TASKS} are supported'
            )
    values['client'] = client
    return values
|
Validate that api key and python package exists in environment.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
octoai_api_token = get_from_dict_or_env(values, 'octoai_api_token',
'OCTOAI_API_TOKEN')
values['endpoint_url'] = get_from_dict_or_env(values, 'endpoint_url',
'ENDPOINT_URL')
values['octoai_api_token'] = octoai_api_token
return values
|
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    # Resolve both settings from the provided values or the environment
    # and store them back on the values dict.
    values['octoai_api_token'] = get_from_dict_or_env(values,
        'octoai_api_token', 'OCTOAI_API_TOKEN')
    values['endpoint_url'] = get_from_dict_or_env(values, 'endpoint_url',
        'ENDPOINT_URL')
    return values
|
Validate that api key and python package exists in environment.
|
test_load_returns_list_of_documents
|
import xorbits.pandas as pd
data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date': [
'2022-01-01', '2022-01-02']}
loader = XorbitsLoader(pd.DataFrame(data))
docs = loader.load()
assert isinstance(docs, list)
assert all(isinstance(doc, Document) for doc in docs)
assert len(docs) == 2
|
@pytest.mark.skipif(not xorbits_installed, reason='xorbits not installed')
def test_load_returns_list_of_documents() ->None:
    """XorbitsLoader.load returns a list of Document objects, one per row
    of the source dataframe."""
    import xorbits.pandas as pd
    data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date':
        ['2022-01-01', '2022-01-02']}
    loader = XorbitsLoader(pd.DataFrame(data))
    docs = loader.load()
    assert isinstance(docs, list)
    assert all(isinstance(doc, Document) for doc in docs)
    assert len(docs) == 2
| null |
test_llamacpp_embedding_query
|
"""Test llamacpp embeddings."""
document = 'foo bar'
model_path = get_model()
embedding = LlamaCppEmbeddings(model_path=model_path)
output = embedding.embed_query(document)
assert len(output) == 512
|
def test_llamacpp_embedding_query() ->None:
    """Test llamacpp embeddings.

    Embeds a short query and checks the vector length (512 is presumably
    the embedding dimension of the test model — confirm against the model
    used by ``get_model``).
    """
    document = 'foo bar'
    model_path = get_model()
    embedding = LlamaCppEmbeddings(model_path=model_path)
    output = embedding.embed_query(document)
    assert len(output) == 512
|
Test llamacpp embeddings.
|
_attribute_to_filter_field
|
if attribute in [tf.name for tf in self._schema.text]:
return RedisText(attribute)
elif attribute in [tf.name for tf in self._schema.tag or []]:
return RedisTag(attribute)
elif attribute in [tf.name for tf in self._schema.numeric or []]:
return RedisNum(attribute)
else:
raise ValueError(
f"""Invalid attribute {attribute} not in vector store schema. Schema is:
{self._schema.as_dict()}"""
)
|
def _attribute_to_filter_field(self, attribute: str) ->RedisFilterField:
    """Map a schema attribute name to its typed Redis filter field.

    Checks the schema's text, tag, then numeric fields in that order.

    Raises:
        ValueError: If the attribute does not appear in the schema.
    """
    if attribute in [tf.name for tf in self._schema.text]:
        return RedisText(attribute)
    # NOTE(review): tag/numeric guard a possibly-None field list with
    # `or []`, but the text check above does not — confirm text can
    # never be None.
    elif attribute in [tf.name for tf in self._schema.tag or []]:
        return RedisTag(attribute)
    elif attribute in [tf.name for tf in self._schema.numeric or []]:
        return RedisNum(attribute)
    else:
        raise ValueError(
            f"""Invalid attribute {attribute} not in vector store schema. Schema is:
        {self._schema.as_dict()}"""
            )
| null |
_create_retry_decorator
|
"""
Creates and returns a preconfigured tenacity retry decorator.
The retry decorator is configured to handle specific Google API exceptions
such as ResourceExhausted and ServiceUnavailable. It uses an exponential
backoff strategy for retries.
Returns:
Callable[[Any], Any]: A retry decorator configured for handling specific
Google API exceptions.
"""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=
wait_exponential(multiplier=multiplier, min=min_seconds, max=
max_seconds), retry=retry_if_exception_type(google.api_core.exceptions.
ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions
.ServiceUnavailable) | retry_if_exception_type(google.api_core.
exceptions.GoogleAPIError), before_sleep=before_sleep_log(logger,
logging.WARNING))
|
def _create_retry_decorator() ->Callable[[Any], Any]:
    """
    Creates and returns a preconfigured tenacity retry decorator.

    The retry decorator is configured to handle specific Google API
    exceptions — ResourceExhausted, ServiceUnavailable, and GoogleAPIError
    — using an exponential backoff strategy for retries.

    Returns:
        Callable[[Any], Any]: A retry decorator configured for handling specific
        Google API exceptions.
    """
    import google.api_core.exceptions
    # Exponential backoff: delays of 1s doubling per attempt, capped at
    # 60s, for at most 10 attempts; the final exception is re-raised.
    multiplier = 2
    min_seconds = 1
    max_seconds = 60
    max_retries = 10
    return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=
        wait_exponential(multiplier=multiplier, min=min_seconds, max=
        max_seconds), retry=retry_if_exception_type(google.api_core.
        exceptions.ResourceExhausted) | retry_if_exception_type(google.
        api_core.exceptions.ServiceUnavailable) | retry_if_exception_type(
        google.api_core.exceptions.GoogleAPIError), before_sleep=
        before_sleep_log(logger, logging.WARNING))
|
Creates and returns a preconfigured tenacity retry decorator.
The retry decorator is configured to handle specific Google API exceptions
such as ResourceExhausted and ServiceUnavailable. It uses an exponential
backoff strategy for retries.
Returns:
Callable[[Any], Any]: A retry decorator configured for handling specific
Google API exceptions.
|
texts_metadatas_as_zep_documents
|
from zep_python.document import Document as ZepDocument
return [ZepDocument(content='Test Document', metadata={'key': 'value'}) for
_ in range(2)]
|
@pytest.fixture
def texts_metadatas_as_zep_documents() ->List['ZepDocument']:
    """Fixture: two ZepDocuments sharing the same content and metadata."""
    from zep_python.document import Document as ZepDocument
    make_doc = lambda : ZepDocument(content='Test Document', metadata={
        'key': 'value'})
    return [make_doc(), make_doc()]
| null |
_generate
|
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = completion_with_retry(self, messages=messages, run_manager=
run_manager, **params)
if not isinstance(full_response, dict):
full_response = full_response.dict()
llm_output = {'token_usage': full_response['usage'], 'model_name': self.
model_name}
return LLMResult(generations=[[Generation(text=full_response['choices'][0][
'message']['content'])]], llm_output=llm_output)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
    run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->LLMResult:
    """Generate a completion, optionally via streaming.

    In streaming mode the chunks from ``_stream`` are concatenated into a
    single generation. Otherwise a chat-style request is issued through
    ``completion_with_retry`` and token usage is recorded in llm_output.

    NOTE(review): the streaming path only consumes ``prompts[0]`` — confirm
    callers never pass more than one prompt when streaming is enabled.
    """
    if self.streaming:
        generation: Optional[GenerationChunk] = None
        for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
            if generation is None:
                generation = chunk
            else:
                # GenerationChunk supports '+' to append streamed content.
                generation += chunk
        assert generation is not None
        return LLMResult(generations=[[generation]])
    messages, params = self._get_chat_params(prompts, stop)
    params = {**params, **kwargs}
    full_response = completion_with_retry(self, messages=messages,
        run_manager=run_manager, **params)
    if not isinstance(full_response, dict):
        # Normalize object responses (anything with a .dict() method) to a dict.
        full_response = full_response.dict()
    llm_output = {'token_usage': full_response['usage'], 'model_name': self
        .model_name}
    return LLMResult(generations=[[Generation(text=full_response['choices']
        [0]['message']['content'])]], llm_output=llm_output)
| null |
_get_invocation_params
|
"""Get the parameters used to invoke the model."""
return {'model': self.model, **super()._get_invocation_params(stop=stop),
**self._default_params, **kwargs}
|
def _get_invocation_params(self, stop: Optional[List[str]]=None, **kwargs: Any
    ) ->Dict[str, Any]:
    """Assemble the full parameter dict used to invoke the model."""
    # Later updates win, mirroring the original merge order:
    # model name < base-class params < default params < per-call kwargs.
    params: Dict[str, Any] = {'model': self.model}
    params.update(super()._get_invocation_params(stop=stop))
    params.update(self._default_params)
    params.update(kwargs)
    return params
|
Get the parameters used to invoke the model.
|
on_llm_start
|
"""Start a trace for an LLM run."""
parent_run_id_ = str(parent_run_id) if parent_run_id else None
execution_order = self._get_execution_order(parent_run_id_)
start_time = datetime.now(timezone.utc)
if metadata:
kwargs.update({'metadata': metadata})
llm_run = Run(id=run_id, parent_run_id=parent_run_id, serialized=serialized,
inputs={'prompts': prompts}, extra=kwargs, events=[{'name': 'start',
'time': start_time}], start_time=start_time, execution_order=
execution_order, child_execution_order=execution_order, run_type='llm',
tags=tags or [], name=name)
self._start_trace(llm_run)
self._on_llm_start(llm_run)
return llm_run
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], *,
    run_id: UUID, tags: Optional[List[str]]=None, parent_run_id: Optional[
    UUID]=None, metadata: Optional[Dict[str, Any]]=None, name: Optional[str
    ]=None, **kwargs: Any) ->Run:
    """Start a trace for an LLM run.

    Builds a ``Run`` record with a 'start' event, registers it with the
    tracer via ``_start_trace``, fires the ``_on_llm_start`` hook, and
    returns the record.
    """
    parent_run_id_ = str(parent_run_id) if parent_run_id else None
    # Execution order is looked up relative to the parent run, if any.
    execution_order = self._get_execution_order(parent_run_id_)
    start_time = datetime.now(timezone.utc)
    if metadata:
        # Metadata is carried inside the run's extra kwargs.
        kwargs.update({'metadata': metadata})
    llm_run = Run(id=run_id, parent_run_id=parent_run_id, serialized=
        serialized, inputs={'prompts': prompts}, extra=kwargs, events=[{
        'name': 'start', 'time': start_time}], start_time=start_time,
        execution_order=execution_order, child_execution_order=
        execution_order, run_type='llm', tags=tags or [], name=name)
    self._start_trace(llm_run)
    self._on_llm_start(llm_run)
    return llm_run
|
Start a trace for an LLM run.
|
from_llm
|
llm_chain = LLMChain(llm=llm, prompt=prompt)
if selection_scorer is SENTINEL:
selection_scorer = base.AutoSelectionScorer(llm=llm_chain.llm)
return PickBest(llm_chain=llm_chain, prompt=prompt, selection_scorer=
selection_scorer, **kwargs)
|
@classmethod
def from_llm(cls: Type[PickBest], llm: BaseLanguageModel, prompt:
    BasePromptTemplate, selection_scorer: Union[base.AutoSelectionScorer,
    object]=SENTINEL, **kwargs: Any) ->PickBest:
    """Construct a PickBest chain from an LLM and a prompt.

    When no scorer is supplied (the SENTINEL default), an
    AutoSelectionScorer backed by the same LLM is created.
    """
    chain = LLMChain(llm=llm, prompt=prompt)
    scorer = selection_scorer
    if scorer is SENTINEL:
        scorer = base.AutoSelectionScorer(llm=chain.llm)
    return PickBest(llm_chain=chain, prompt=prompt, selection_scorer=
        scorer, **kwargs)
| null |
test_api_key_masked_when_passed_via_constructor
|
"""Test initialization with an API key provided via the initializer"""
llm = Anyscale(anyscale_api_key='secret-api-key', anyscale_api_base='test',
model_name='test')
print(llm.anyscale_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
@pytest.mark.requires('openai')
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """Test initialization with an API key provided via the initializer"""
    llm = Anyscale(anyscale_api_key='secret-api-key', anyscale_api_base=
        'test', model_name='test')
    print(llm.anyscale_api_key, end='')
    # The SecretStr repr must mask the key when rendered.
    assert capsys.readouterr().out == '**********'
|
Test initialization with an API key provided via the initializer
|
_call
|
if self.be_correct:
return {'bar': 'baz'}
else:
return {'baz': 'bar'}
|
def _call(self, inputs: Dict[str, str], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, str]:
    """Return a fixed mapping; {'bar': 'baz'} only when ``be_correct`` is set."""
    return {'bar': 'baz'} if self.be_correct else {'baz': 'bar'}
| null |
_call
|
"""Call out to an AzureML Managed Online endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
request_payload = self.content_formatter.format_request_payload(prompt,
_model_kwargs)
response_payload = self.http_client.call(request_payload, **kwargs)
generated_text = self.content_formatter.format_response_payload(
response_payload)
return generated_text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call out to an AzureML Managed Online endpoint.

    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.
            NOTE(review): not referenced anywhere in this body — confirm
            whether the content formatter is expected to handle stops.

    Returns:
        The string generated by the model.

    Example:
        .. code-block:: python

            response = azureml_model("Tell me a joke.")
    """
    _model_kwargs = self.model_kwargs or {}
    # The content formatter adapts the prompt to the endpoint's wire format.
    request_payload = self.content_formatter.format_request_payload(prompt,
        _model_kwargs)
    response_payload = self.http_client.call(request_payload, **kwargs)
    # ...and extracts the generated text back out of the raw response.
    generated_text = self.content_formatter.format_response_payload(
        response_payload)
    return generated_text
|
Call out to an AzureML Managed Online endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
|
config_specs
|
return self.bound.config_specs
|
@property
def config_specs(self) ->List[ConfigurableFieldSpec]:
    """Delegate configurable-field specs to the wrapped runnable."""
    bound_specs = self.bound.config_specs
    return bound_specs
| null |
test_awadb_with_metadatas
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts,
embedding=FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
def test_awadb_with_metadatas() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': idx} for idx in range(len(texts))]
    store = AwaDB.from_texts(table_name='test_awadb', texts=texts,
        embedding=FakeEmbeddings(), metadatas=metadatas)
    results = store.similarity_search('foo', k=1)
    assert results == [Document(page_content='foo', metadata={'page': 0})]
|
Test end to end construction and search.
|
load
|
"""Eagerly load the content."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Eagerly load the content."""
    return [*self.lazy_load()]
|
Eagerly load the content.
|
lazy_load
|
ps = list(Path(self.file_path).glob('**/*.md'))
for p in ps:
with open(p, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
text = self._remove_front_matter(text)
text = self._process_acreom_content(text)
metadata = {'source': str(p.name), 'path': str(p), **front_matter}
yield Document(page_content=text, metadata=metadata)
|
def lazy_load(self) ->Iterator[Document]:
    """Lazily yield one Document per markdown file under ``file_path``.

    Front matter is parsed into metadata and stripped from the text, and
    the remaining content is run through ``_process_acreom_content``.
    """
    # Recursively collect every .md file below the root path.
    ps = list(Path(self.file_path).glob('**/*.md'))
    for p in ps:
        with open(p, encoding=self.encoding) as f:
            text = f.read()
        front_matter = self._parse_front_matter(text)
        text = self._remove_front_matter(text)
        text = self._process_acreom_content(text)
        # Front-matter keys are merged alongside the file name and full path.
        metadata = {'source': str(p.name), 'path': str(p), **front_matter}
        yield Document(page_content=text, metadata=metadata)
| null |
always_verbose
|
"""Whether to call verbose callbacks even if verbose is False."""
return True
|
@property
def always_verbose(self) ->bool:
    """Always fire verbose callbacks, even when verbose is False."""
    return True
|
Whether to call verbose callbacks even if verbose is False.
|
_identifying_params
|
"""Get the identifying parameters."""
return {'model': self.model, 'model_type': self.model_type, 'model_file':
self.model_file, 'config': self.config}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Get the identifying parameters."""
    return dict(model=self.model, model_type=self.model_type, model_file=
        self.model_file, config=self.config)
|
Get the identifying parameters.
|
test_chat_openai_generate
|
"""Test AzureChatOpenAI wrapper with generate."""
chat = _get_llm(max_tokens=10, n=2)
message = HumanMessage(content='Hello')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 2
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
|
@pytest.mark.scheduled
def test_chat_openai_generate() ->None:
    """Test AzureChatOpenAI wrapper with generate."""
    chat = _get_llm(max_tokens=10, n=2)
    message = HumanMessage(content='Hello')
    response = chat.generate([[message], [message]])
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    # Each of the two prompts must yield n=2 chat generations.
    for batch in response.generations:
        assert len(batch) == 2
        for gen in batch:
            assert isinstance(gen, ChatGeneration)
            assert isinstance(gen.text, str)
            assert gen.text == gen.message.content
|
Test AzureChatOpenAI wrapper with generate.
|
test_forefrontai_api_key_is_secret_string
|
"""Test that the API key is stored as a SecretStr."""
llm = ForefrontAI(forefrontai_api_key='secret-api-key', temperature=0.2)
assert isinstance(llm.forefrontai_api_key, SecretStr)
|
def test_forefrontai_api_key_is_secret_string() ->None:
    """Test that the API key is stored as a SecretStr."""
    model = ForefrontAI(forefrontai_api_key='secret-api-key', temperature=0.2)
    assert isinstance(model.forefrontai_api_key, SecretStr)
|
Test that the API key is stored as a SecretStr.
|
is_lc_serializable
|
"""Return whether this model can be serialized by Langchain."""
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Whether Langchain can serialize this model (always True here)."""
    return True
|
Return whether this model can be serialized by Langchain.
|
clear
|
"""Clear memory contents."""
|
def clear(self) ->None:
    """Clear memory contents.

    Intentionally a no-op in this implementation: the body is empty.
    """
|
Clear memory contents.
|
on_chain_error
|
"""Run when chain errors."""
self.step += 1
self.errors += 1
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when chain errors: advance the step counter and count the error."""
    self.step, self.errors = self.step + 1, self.errors + 1
|
Run when chain errors.
|
_get_relevant_documents
|
from pinecone_text.hybrid import hybrid_convex_scale
sparse_vec = self.sparse_encoder.encode_queries(query)
dense_vec = self.embeddings.embed_query(query)
dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
sparse_vec['values'] = [float(s1) for s1 in sparse_vec['values']]
result = self.index.query(vector=dense_vec, sparse_vector=sparse_vec, top_k
=self.top_k, include_metadata=True, namespace=self.namespace)
final_result = []
for res in result['matches']:
context = res['metadata'].pop('context')
final_result.append(Document(page_content=context, metadata=res[
'metadata']))
return final_result
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Hybrid (dense + sparse) retrieval from a Pinecone index.

    The query is encoded both sparsely and densely, the two vectors are
    rescaled by ``alpha`` via ``hybrid_convex_scale``, and the top_k
    matches become Documents whose page content is the stored 'context'
    metadata field.
    """
    from pinecone_text.hybrid import hybrid_convex_scale
    sparse_vec = self.sparse_encoder.encode_queries(query)
    dense_vec = self.embeddings.embed_query(query)
    # alpha balances the dense vs. sparse contribution to the final scores.
    dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self
        .alpha)
    # Coerce values to plain floats — presumably to keep the query payload
    # JSON-serializable; confirm against the encoder's output type.
    sparse_vec['values'] = [float(s1) for s1 in sparse_vec['values']]
    result = self.index.query(vector=dense_vec, sparse_vector=sparse_vec,
        top_k=self.top_k, include_metadata=True, namespace=self.namespace)
    final_result = []
    for res in result['matches']:
        # 'context' holds the original text; pop removes it from the metadata.
        context = res['metadata'].pop('context')
        final_result.append(Document(page_content=context, metadata=res[
            'metadata']))
    return final_result
| null |
_get_functions_prompt_and_tools
|
try:
import pandas as pd
pd.set_option('display.max_columns', None)
except ImportError:
raise ImportError(
'pandas package not found, please install with `pip install pandas`')
if input_variables is not None:
raise ValueError('`input_variables` is not supported at the moment.')
if include_df_in_prompt is not None and suffix is not None:
raise ValueError(
'If suffix is specified, include_df_in_prompt should not be.')
if isinstance(df, list):
for item in df:
if not isinstance(item, pd.DataFrame):
raise ValueError(f'Expected pandas object, got {type(df)}')
return _get_functions_multi_prompt(df, prefix=prefix, suffix=suffix,
include_df_in_prompt=include_df_in_prompt, number_of_head_rows=
number_of_head_rows)
else:
if not isinstance(df, pd.DataFrame):
raise ValueError(f'Expected pandas object, got {type(df)}')
return _get_functions_single_prompt(df, prefix=prefix, suffix=suffix,
include_df_in_prompt=include_df_in_prompt, number_of_head_rows=
number_of_head_rows)
|
def _get_functions_prompt_and_tools(df: Any, prefix: Optional[str]=None,
    suffix: Optional[str]=None, input_variables: Optional[List[str]]=None,
    include_df_in_prompt: Optional[bool]=True, number_of_head_rows: int=5
    ) ->Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
    """Build the functions-agent prompt and tools for one or more DataFrames.

    Args:
        df: A pandas DataFrame, or a list of DataFrames.
        prefix: Optional prompt prefix override.
        suffix: Optional prompt suffix override; mutually exclusive with
            ``include_df_in_prompt``.
        input_variables: Unsupported; must be None.
        include_df_in_prompt: Whether to embed head rows of the frame(s)
            in the prompt.
        number_of_head_rows: How many head rows to embed.

    Returns:
        The prompt template and the Python REPL tools.

    Raises:
        ImportError: If pandas is not installed.
        ValueError: On unsupported or conflicting arguments, or when an
            input is not a DataFrame.
    """
    try:
        import pandas as pd
        pd.set_option('display.max_columns', None)
    except ImportError:
        raise ImportError(
            'pandas package not found, please install with `pip install pandas`'
            )
    if input_variables is not None:
        raise ValueError('`input_variables` is not supported at the moment.')
    if include_df_in_prompt is not None and suffix is not None:
        raise ValueError(
            'If suffix is specified, include_df_in_prompt should not be.')
    if isinstance(df, list):
        for item in df:
            if not isinstance(item, pd.DataFrame):
                # Bug fix: report the offending element's type, not the
                # type of the enclosing list.
                raise ValueError(f'Expected pandas object, got {type(item)}')
        return _get_functions_multi_prompt(df, prefix=prefix, suffix=suffix,
            include_df_in_prompt=include_df_in_prompt, number_of_head_rows=
            number_of_head_rows)
    else:
        if not isinstance(df, pd.DataFrame):
            raise ValueError(f'Expected pandas object, got {type(df)}')
        return _get_functions_single_prompt(df, prefix=prefix, suffix=
            suffix, include_df_in_prompt=include_df_in_prompt,
            number_of_head_rows=number_of_head_rows)
| null |
clear
|
def scan_and_delete(cursor: int) ->int:
cursor, keys_to_delete = self.redis_client.scan(cursor,
f'{self.full_key_prefix}:*')
self.redis_client.delete(*keys_to_delete)
return cursor
cursor = scan_and_delete(0)
while cursor != 0:
scan_and_delete(cursor)
|
def clear(self) ->None:
    """Delete every Redis key under this session's key prefix.

    Iterates with SCAN so large keyspaces are removed in batches, looping
    until the server returns cursor 0 (scan complete).
    """

    def scan_and_delete(cursor: int) ->int:
        # SCAN returns (next_cursor, matching_keys) for this batch.
        cursor, keys_to_delete = self.redis_client.scan(cursor,
            f'{self.full_key_prefix}:*')
        if keys_to_delete:
            # DEL requires at least one key; skip empty batches.
            self.redis_client.delete(*keys_to_delete)
        return cursor
    cursor = scan_and_delete(0)
    while cursor != 0:
        # Bug fix: advance the cursor, otherwise this loop never terminates.
        cursor = scan_and_delete(cursor)
| null |
_invocation_params
|
params = self._default_params
if self.stop_sequences is not None and stop_sequences is not None:
raise ValueError('`stop` found in both the input and default params.')
elif self.stop_sequences is not None:
params['stop'] = self.stop_sequences
else:
params['stop'] = stop_sequences
if self.model_kwargs:
params.update(self.model_kwargs)
return {**params, **kwargs}
|
def _invocation_params(self, stop_sequences: Optional[List[str]], **kwargs: Any
    ) ->dict:
    """Build the final parameter dict for one model invocation.

    Merges default params, the resolved stop sequences, any model_kwargs,
    and per-call overrides — later sources win.

    Raises:
        ValueError: If stop sequences are supplied both on the instance
            and as an argument.
    """
    # NOTE(review): this mutates the object returned by `_default_params`;
    # presumably that property builds a fresh dict per call — confirm.
    params = self._default_params
    if self.stop_sequences is not None and stop_sequences is not None:
        raise ValueError('`stop` found in both the input and default params.')
    elif self.stop_sequences is not None:
        params['stop'] = self.stop_sequences
    else:
        params['stop'] = stop_sequences
    if self.model_kwargs:
        params.update(self.model_kwargs)
    return {**params, **kwargs}
| null |
on_tool_end
|
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update({'action': 'on_tool_end', 'output': output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html('### Tool End'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp]
)) + '\n')
|
def on_tool_end(self, output: str, **kwargs: Any) ->None:
    """Run when tool ends running.

    Bumps the step/end counters, then appends a 'Tool End' heading and a
    one-row table of the callback data to the deck.
    """
    self.step += 1
    self.tool_ends += 1
    self.ends += 1
    resp: Dict[str, Any] = {}
    resp.update({'action': 'on_tool_end', 'output': output})
    # Merge in the shared callback metadata (counters etc.).
    resp.update(self.get_custom_callback_meta())
    self.deck.append(self.markdown_renderer().to_html('### Tool End'))
    self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([
        resp])) + '\n')
|
Run when tool ends running.
|
load
|
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Materialize all documents from the lazy iterator."""
    return [*self.lazy_load()]
| null |
_import_clarifai
|
from langchain_community.llms.clarifai import Clarifai
return Clarifai
|
def _import_clarifai() ->Any:
    """Lazily import and return the Clarifai LLM class."""
    from langchain_community.llms import clarifai
    return clarifai.Clarifai
| null |
_load_documents_from_folder
|
"""Load documents from a folder."""
from googleapiclient.discovery import build
creds = self._load_credentials()
service = build('drive', 'v3', credentials=creds)
files = self._fetch_files_recursive(service, folder_id)
if file_types:
_files = [f for f in files if f['mimeType'] in file_types]
else:
_files = files
returns = []
for file in _files:
if file['trashed'] and not self.load_trashed_files:
continue
elif file['mimeType'] == 'application/vnd.google-apps.document':
returns.append(self._load_document_from_id(file['id']))
elif file['mimeType'] == 'application/vnd.google-apps.spreadsheet':
returns.extend(self._load_sheet_from_id(file['id']))
elif file['mimeType'
] == 'application/pdf' or self.file_loader_cls is not None:
returns.extend(self._load_file_from_id(file['id']))
else:
pass
return returns
|
def _load_documents_from_folder(self, folder_id: str, *, file_types:
    Optional[Sequence[str]]=None) ->List[Document]:
    """Load documents from a folder.

    Recursively lists the Drive folder, optionally filters by MIME type,
    and dispatches each file to a per-type loader. Trashed files are
    skipped unless ``load_trashed_files`` is set; MIME types with no
    matching branch are silently ignored.
    """
    from googleapiclient.discovery import build
    creds = self._load_credentials()
    service = build('drive', 'v3', credentials=creds)
    files = self._fetch_files_recursive(service, folder_id)
    if file_types:
        _files = [f for f in files if f['mimeType'] in file_types]
    else:
        _files = files
    returns = []
    for file in _files:
        if file['trashed'] and not self.load_trashed_files:
            continue
        elif file['mimeType'] == 'application/vnd.google-apps.document':
            returns.append(self._load_document_from_id(file['id']))
        elif file['mimeType'] == 'application/vnd.google-apps.spreadsheet':
            # Sheets yield one document per worksheet, hence extend().
            returns.extend(self._load_sheet_from_id(file['id']))
        elif file['mimeType'
            ] == 'application/pdf' or self.file_loader_cls is not None:
            returns.extend(self._load_file_from_id(file['id']))
        else:
            # Unsupported MIME type: deliberately skipped.
            pass
    return returns
|
Load documents from a folder.
|
__init__
|
self.inputs = inputs
self.selected = selected
|
def __init__(self, inputs: Dict[str, Any], selected: Optional[TSelected]=None):
    """Store the raw inputs and, optionally, the selection made.

    Args:
        inputs: Raw input mapping.
        selected: The chosen item, if a selection has already been made.
    """
    self.inputs = inputs
    self.selected = selected
| null |
get_parameters_for_operation
|
"""Get the components for a given operation."""
from openapi_pydantic import Reference
parameters = []
if operation.parameters:
for parameter in operation.parameters:
if isinstance(parameter, Reference):
parameter = self._get_root_referenced_parameter(parameter)
parameters.append(parameter)
return parameters
|
def get_parameters_for_operation(self, operation: Operation) ->List[Parameter]:
    """Resolve and return the parameters declared on an operation.

    Reference entries are dereferenced via ``_get_root_referenced_parameter``.
    """
    from openapi_pydantic import Reference
    if not operation.parameters:
        return []
    resolved = []
    for param in operation.parameters:
        if isinstance(param, Reference):
            param = self._get_root_referenced_parameter(param)
        resolved.append(param)
    return resolved
|
Get the components for a given operation.
|
_response_json
|
"""Use requests to run request to DataForSEO SERP API and return results."""
request_details = self._prepare_request(url)
response = requests.post(request_details['url'], headers=request_details[
'headers'], json=request_details['data'])
response.raise_for_status()
return self._check_response(response.json())
|
def _response_json(self, url: str) ->dict:
    """Use requests to run request to DataForSEO SERP API and return results."""
    details = self._prepare_request(url)
    resp = requests.post(details['url'], headers=details['headers'], json=
        details['data'])
    resp.raise_for_status()
    return self._check_response(resp.json())
|
Use requests to run request to DataForSEO SERP API and return results.
|
test_forefrontai_call
|
"""Test valid call to forefrontai."""
llm = ForefrontAI(length=10)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_forefrontai_call() ->None:
    """Test valid call to forefrontai."""
    model = ForefrontAI(length=10)
    result = model('Say foo:')
    assert isinstance(result, str)
|
Test valid call to forefrontai.
|
ignore_agent
|
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
|
@property
def ignore_agent(self) ->bool:
    """Whether to ignore agent callbacks."""
    flag = self.ignore_agent_
    return flag
|
Whether to ignore agent callbacks.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.