method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
save_context
|
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
|
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
    """Save context from this conversation to buffer.

    Delegates to the parent memory implementation, then refreshes the
    knowledge graph from the new inputs.
    """
    # Persist the turn in the base conversation buffer first.
    super().save_context(inputs, outputs)
    # Extract entities/triples from the inputs and merge them into the KG.
    self._get_and_update_kg(inputs)
|
Save context from this conversation to buffer.
|
is_relative_to
|
"""Check if path is relative to root."""
if sys.version_info >= (3, 9):
return path.is_relative_to(root)
try:
path.relative_to(root)
return True
except ValueError:
return False
|
def is_relative_to(path: Path, root: Path) ->bool:
    """Return True when ``path`` lies under (or equals) ``root``."""
    if sys.version_info >= (3, 9):
        # Python 3.9+ provides this check natively.
        return path.is_relative_to(root)
    # Older interpreters: relative_to() raises ValueError for unrelated paths.
    try:
        path.relative_to(root)
    except ValueError:
        return False
    return True
|
Check if path is relative to root.
|
input_keys
|
"""Input keys."""
return ['question', 'chat_history']
|
@property
def input_keys(self) ->List[str]:
    """Keys this chain expects in its input mapping."""
    expected = ['question', 'chat_history']
    return expected
|
Input keys.
|
get_token_ids
|
"""Get the tokens present in the text with tiktoken package."""
if sys.version_info[1] <= 7:
return super().get_token_ids(text)
_, encoding_model = self._get_encoding_model()
return encoding_model.encode(text)
|
def get_token_ids(self, text: str) ->List[int]:
    """Get the tokens present in the text with tiktoken package.

    Falls back to the parent implementation on Python < 3.8, where
    tiktoken is not available.
    """
    # Fixed: the original compared only sys.version_info[1] (the minor
    # component), which ignores the major version; compare the full tuple.
    if sys.version_info < (3, 8):
        return super().get_token_ids(text)
    _, encoding_model = self._get_encoding_model()
    return encoding_model.encode(text)
|
Get the tokens present in the text with tiktoken package.
|
_set_value
|
if operator not in self.OPERATORS:
raise ValueError(
f'Operator {operator} not supported by {self.__class__.__name__}. ' +
f'Supported operators are {self.OPERATORS.values()}.')
if not isinstance(val, val_type):
raise TypeError(
f'Right side argument passed to operator {self.OPERATORS[operator]} with left side argument {self.__class__.__name__} must be of type {val_type}, received value {val}'
)
self._value = val
self._operator = operator
|
def _set_value(self, val: Any, val_type: Tuple[Any], operator:
    RedisFilterOperator) ->None:
    """Validate the operator and value, then store both on this filter."""
    if operator not in self.OPERATORS:
        raise ValueError(
            f'Operator {operator} not supported by {self.__class__.__name__}. Supported operators are {self.OPERATORS.values()}.'
            )
    if not isinstance(val, val_type):
        raise TypeError(
            f'Right side argument passed to operator {self.OPERATORS[operator]} with left side argument {self.__class__.__name__} must be of type {val_type}, received value {val}'
            )
    self._operator = operator
    self._value = val
| null |
_collapse_docs_func
|
return self._collapse_chain.run(input_documents=docs, callbacks=callbacks,
**kwargs)
|
def _collapse_docs_func(docs: List[Document], **kwargs: Any) ->str:
    # NOTE(review): `callbacks` and `self` are not parameters here — both are
    # closed over from the enclosing scope; this function is evidently defined
    # nested inside a method where they exist. Confirm before moving it.
    return self._collapse_chain.run(input_documents=docs, callbacks=
        callbacks, **kwargs)
| null |
_merge_splits
|
separator_len = self._length_function(separator)
docs = []
current_doc: List[str] = []
total = 0
for d in splits:
_len = self._length_function(d)
if total + _len + (separator_len if len(current_doc) > 0 else 0
) > self._chunk_size:
if total > self._chunk_size:
logger.warning(
f'Created a chunk of size {total}, which is longer than the specified {self._chunk_size}'
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
while total > self._chunk_overlap or total + _len + (separator_len
if len(current_doc) > 0 else 0
) > self._chunk_size and total > 0:
total -= self._length_function(current_doc[0]) + (separator_len
if len(current_doc) > 1 else 0)
current_doc = current_doc[1:]
current_doc.append(d)
total += _len + (separator_len if len(current_doc) > 1 else 0)
doc = self._join_docs(current_doc, separator)
if doc is not None:
docs.append(doc)
return docs
|
def _merge_splits(self, splits: Iterable[str], separator: str) ->List[str]:
    """Greedily merge splits into chunks no longer than self._chunk_size.

    Splits are accumulated into a sliding window (joined by *separator*);
    when the next split would overflow the chunk size, the window is emitted
    as a chunk and shrunk from the left until it fits within the overlap
    budget, so consecutive chunks share trailing content.
    """
    separator_len = self._length_function(separator)
    docs = []
    current_doc: List[str] = []
    total = 0
    for d in splits:
        _len = self._length_function(d)
        # Would appending d (plus a separator, if the window is non-empty)
        # overflow the configured chunk size?
        if total + _len + (separator_len if len(current_doc) > 0 else 0
            ) > self._chunk_size:
            if total > self._chunk_size:
                logger.warning(
                    f'Created a chunk of size {total}, which is longer than the specified {self._chunk_size}'
                    )
            if len(current_doc) > 0:
                doc = self._join_docs(current_doc, separator)
                if doc is not None:
                    docs.append(doc)
                # Pop leading splits while the window exceeds the overlap
                # budget, or while d still would not fit and the window is
                # non-empty. Note operator precedence: this parses as
                # A or (B and C).
                while total > self._chunk_overlap or total + _len + (
                    separator_len if len(current_doc) > 0 else 0
                    ) > self._chunk_size and total > 0:
                    total -= self._length_function(current_doc[0]) + (
                        separator_len if len(current_doc) > 1 else 0)
                    current_doc = current_doc[1:]
        current_doc.append(d)
        total += _len + (separator_len if len(current_doc) > 1 else 0)
    # Flush whatever remains in the window as the final chunk.
    doc = self._join_docs(current_doc, separator)
    if doc is not None:
        docs.append(doc)
    return docs
| null |
convert_messages
|
history = ChatMessageHistory()
for item in input:
history.add_user_message(item['result']['question'])
history.add_ai_message(item['result']['answer'])
return history
|
def convert_messages(input: List[Dict[str, Any]]) ->ChatMessageHistory:
    """Turn a list of question/answer result records into a chat history."""
    history = ChatMessageHistory()
    for record in input:
        result = record['result']
        # Each record contributes one human turn and one AI turn.
        history.add_user_message(result['question'])
        history.add_ai_message(result['answer'])
    return history
| null |
test_openai_model_kwargs
|
llm = OpenAI(model_kwargs={'foo': 'bar'})
assert llm.model_kwargs == {'foo': 'bar'}
|
@pytest.mark.requires('openai')
def test_openai_model_kwargs() ->None:
    """Extra model kwargs are preserved verbatim on the OpenAI wrapper."""
    llm = OpenAI(model_kwargs={'foo': 'bar'})
    assert llm.model_kwargs == {'foo': 'bar'}
| null |
_call
|
"""Return next response"""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Return the next canned response, cycling back to the first."""
    out = self.responses[self.i]
    # Advance the cursor; wrap to 0 after the final entry.
    self.i = self.i + 1 if self.i < len(self.responses) - 1 else 0
    return out
|
Return next response
|
test_runnable_branch_invoke_callbacks
|
"""Verify that callbacks are correctly used in invoke."""
tracer = FakeTracer()
def raise_value_error(x: int) ->int:
"""Raise a value error."""
raise ValueError('x is too large')
branch = RunnableBranch[int, int]((lambda x: x > 100, raise_value_error),
lambda x: x - 1)
assert branch.invoke(1, config={'callbacks': [tracer]}) == 0
assert len(tracer.runs) == 1
assert tracer.runs[0].error is None
assert tracer.runs[0].outputs == {'output': 0}
with pytest.raises(ValueError):
branch.invoke(1000, config={'callbacks': [tracer]})
assert len(tracer.runs) == 2
assert "ValueError('x is too large')" in str(tracer.runs[1].error)
assert tracer.runs[1].outputs is None
|
def test_runnable_branch_invoke_callbacks() ->None:
    """Verify that callbacks are correctly used in invoke."""
    tracer = FakeTracer()

    def raise_value_error(x: int) ->int:
        """Raise a value error."""
        raise ValueError('x is too large')
    branch = RunnableBranch[int, int]((lambda x: x > 100,
        raise_value_error), lambda x: x - 1)
    config = {'callbacks': [tracer]}
    # Small input takes the default branch and succeeds.
    assert branch.invoke(1, config=config) == 0
    assert len(tracer.runs) == 1
    ok_run = tracer.runs[0]
    assert ok_run.error is None
    assert ok_run.outputs == {'output': 0}
    # Large input routes to the raising branch; the tracer records the error.
    with pytest.raises(ValueError):
        branch.invoke(1000, config=config)
    assert len(tracer.runs) == 2
    failed_run = tracer.runs[1]
    assert "ValueError('x is too large')" in str(failed_run.error)
    assert failed_run.outputs is None
|
Verify that callbacks are correctly used in invoke.
|
_chain_type
|
return 'pal_chain'
|
@property
def _chain_type(self) ->str:
    """Return the chain type string, 'pal_chain'."""
    return 'pal_chain'
| null |
_get_top_k_docs
|
top_docs = [item.to_doc(self.page_content_formatter) for item in
result_items[:self.top_k]]
return top_docs
|
def _get_top_k_docs(self, result_items: Sequence[ResultItem]) ->List[Document]:
    """Convert the first ``top_k`` result items into documents."""
    formatter = self.page_content_formatter
    return [item.to_doc(formatter) for item in result_items[:self.top_k]]
| null |
ignore_chain
|
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
@property
def ignore_chain(self) ->bool:
    """Whether to ignore chain callbacks."""
    # Read-only view over the ignore_chain_ attribute.
    return self.ignore_chain_
|
Whether to ignore chain callbacks.
|
lazy_load
|
"""Lazy load given path as pages."""
if self.file_path is not None:
blob = Blob.from_path(self.file_path)
yield from self.parser.parse_folder(blob)
else:
yield from self.parser.parse_url(self.url_path)
|
def lazy_load(self) ->Iterator[Document]:
    """Lazily yield pages parsed from the file path or, failing that, the URL."""
    if self.file_path is None:
        # No local file configured: parse the remote URL instead.
        yield from self.parser.parse_url(self.url_path)
        return
    blob = Blob.from_path(self.file_path)
    yield from self.parser.parse_folder(blob)
|
Lazy load given path as pages.
|
test_multiple_sessions
|
file_chat_message_history.add_user_message('Hello, AI!')
file_chat_message_history.add_ai_message('Hello, how can I help you?')
file_chat_message_history.add_user_message('Tell me a joke.')
file_chat_message_history.add_ai_message(
'Why did the chicken cross the road? To get to the other side!')
messages = file_chat_message_history.messages
assert len(messages) == 4
assert messages[0].content == 'Hello, AI!'
assert messages[1].content == 'Hello, how can I help you?'
assert messages[2].content == 'Tell me a joke.'
expected_content = (
'Why did the chicken cross the road? To get to the other side!')
assert messages[3].content == expected_content
file_path = file_chat_message_history.file_path
second_session_chat_message_history = FileChatMessageHistory(file_path=str(
file_path))
messages = second_session_chat_message_history.messages
assert len(messages) == 4
assert messages[0].content == 'Hello, AI!'
assert messages[1].content == 'Hello, how can I help you?'
assert messages[2].content == 'Tell me a joke.'
expected_content = (
'Why did the chicken cross the road? To get to the other side!')
assert messages[3].content == expected_content
|
def test_multiple_sessions(file_chat_message_history: FileChatMessageHistory
    ) ->None:
    """Messages written in one session are readable from a fresh session."""
    turns = [('user', 'Hello, AI!'), ('ai', 'Hello, how can I help you?'),
        ('user', 'Tell me a joke.'), ('ai',
        'Why did the chicken cross the road? To get to the other side!')]
    for role, text in turns:
        if role == 'user':
            file_chat_message_history.add_user_message(text)
        else:
            file_chat_message_history.add_ai_message(text)
    expected = [text for _, text in turns]
    messages = file_chat_message_history.messages
    assert len(messages) == 4
    assert [m.content for m in messages] == expected
    # Re-open the same backing file as a brand-new history object.
    second_session = FileChatMessageHistory(file_path=str(
        file_chat_message_history.file_path))
    messages = second_session.messages
    assert len(messages) == 4
    assert [m.content for m in messages] == expected
| null |
_ingest
|
loader = PyPDFLoader(url)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = text_splitter.split_documents(data)
_ = MongoDBAtlasVectorSearch.from_documents(documents=docs, embedding=
OpenAIEmbeddings(disallowed_special=()), collection=MONGODB_COLLECTION,
index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME)
return {}
|
def _ingest(url: str) ->dict:
    """Load a PDF from *url*, split it, and index it in MongoDB Atlas."""
    loader = PyPDFLoader(url)
    data = loader.load()
    # Chunk the pages into 500-character pieces with no overlap.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,
        chunk_overlap=0)
    docs = text_splitter.split_documents(data)
    # Embed and store the chunks; the returned store object is not needed.
    _ = MongoDBAtlasVectorSearch.from_documents(documents=docs, embedding=
        OpenAIEmbeddings(disallowed_special=()), collection=
        MONGODB_COLLECTION, index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME)
    return {}
| null |
embeddings
|
"""Access the query embedding object if available."""
if isinstance(self._embedding, Embeddings):
return self._embedding
return None
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """Access the query embedding object if available."""
    candidate = self._embedding
    # Only expose the object when it really is an Embeddings instance.
    return candidate if isinstance(candidate, Embeddings) else None
|
Access the query embedding object if available.
|
similarity_search_with_score
|
"""The most k similar documents and scores of the specified query.
Args:
query: Text query.
k: The k most similar documents to the text query.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter: Filter by metadata. Defaults to None.
kwargs: Any possible extend parameters in the future.
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
embedding = None
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
else:
from awadb import AwaEmbedding
embedding = AwaEmbedding().Embedding(query)
results: List[Tuple[Document, float]] = []
not_include_fields: Set[str] = {'text_embedding', '_id'}
retrieval_docs = self.similarity_search_by_vector(embedding, k,
text_in_page_content=text_in_page_content, meta_filter=meta_filter,
not_include_fields_in_metadata=not_include_fields)
for doc in retrieval_docs:
score = doc.metadata['score']
del doc.metadata['score']
doc_tuple = doc, score
results.append(doc_tuple)
return results
|
def similarity_search_with_score(self, query: str, k: int=DEFAULT_TOPN,
    text_in_page_content: Optional[str]=None, meta_filter: Optional[dict]=
    None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """The most k similar documents and scores of the specified query.
    Args:
        query: Text query.
        k: The k most similar documents to the text query.
        text_in_page_content: Filter by the text in page_content of Document.
        meta_filter: Filter by metadata. Defaults to None.
        kwargs: Any possible extend parameters in the future.
    Returns:
        The k most similar documents to the specified text query.
        0 is dissimilar, 1 is the most similar.
    Raises:
        ValueError: If the AwaDB client has not been initialized.
    """
    if self.awadb_client is None:
        raise ValueError('AwaDB client is None!!!')
    embedding = None
    # Prefer the embedding model registered for the active table; otherwise
    # fall back to AwaDB's built-in embedding.
    if self.using_table_name in self.table2embeddings:
        embedding = self.table2embeddings[self.using_table_name].embed_query(
            query)
    else:
        from awadb import AwaEmbedding
        embedding = AwaEmbedding().Embedding(query)
    results: List[Tuple[Document, float]] = []
    # Exclude bulky/internal fields from the returned document metadata.
    not_include_fields: Set[str] = {'text_embedding', '_id'}
    retrieval_docs = self.similarity_search_by_vector(embedding, k,
        text_in_page_content=text_in_page_content, meta_filter=meta_filter,
        not_include_fields_in_metadata=not_include_fields)
    for doc in retrieval_docs:
        # Move the relevance score out of metadata and pair it with the doc.
        score = doc.metadata['score']
        del doc.metadata['score']
        doc_tuple = doc, score
        results.append(doc_tuple)
    return results
|
The most k similar documents and scores of the specified query.
Args:
query: Text query.
k: The k most similar documents to the text query.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter: Filter by metadata. Defaults to None.
kwargs: Any possible extend parameters in the future.
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
|
make_input_files
|
files = []
for target_path, file_info in self.files.items():
files.append({'pathname': target_path, 'contentsBasesixtyfour':
file_to_base64(file_info.source_path)})
return files
|
def make_input_files(self) ->List[dict]:
    """Serialize tracked files as pathname/base64-content records."""
    return [{'pathname': target_path, 'contentsBasesixtyfour':
        file_to_base64(info.source_path)} for target_path, info in self.
        files.items()]
| null |
stream
|
return self.transform(iter([input]), config, **kwargs)
|
def stream(self, input: Input, config: Optional[RunnableConfig]=None, **
    kwargs: Optional[Any]) ->Iterator[Output]:
    """Stream output for a single input by delegating to transform()."""
    # Wrap the single input in a one-element iterator for transform().
    return self.transform(iter([input]), config, **kwargs)
| null |
_import_office365_send_event
|
from langchain_community.tools.office365.send_event import O365SendEvent
return O365SendEvent
|
def _import_office365_send_event() ->Any:
    """Lazily import and return the O365SendEvent tool class."""
    from langchain_community.tools.office365.send_event import O365SendEvent
    return O365SendEvent
| null |
__str__
|
return str([str(step) for step in self.steps])
|
def __str__(self) ->str:
    """Render as the string form of the list of each step's string form."""
    rendered = [str(step) for step in self.steps]
    return str(rendered)
| null |
test_load_fail_wrong_dataset_name
|
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasets(dataset_name='wrong_dataset_name', split_name='test',
load_max_docs=MAX_DOCS, sample_to_document_function=
mlqaen_example_to_document)
assert 'the dataset name is spelled correctly' in str(exc_info.value)
|
def test_load_fail_wrong_dataset_name() ->None:
    """A misspelled dataset name raises a ValidationError with a helpful hint."""
    with pytest.raises(ValidationError) as exc_info:
        TensorflowDatasets(dataset_name='wrong_dataset_name', split_name=
            'test', load_max_docs=MAX_DOCS, sample_to_document_function=
            mlqaen_example_to_document)
    assert 'the dataset name is spelled correctly' in str(exc_info.value)
|
Test that fails to load
|
test_prompt_template_params
|
prompt = ChatPromptTemplate.from_template(
'Respond to the following question: {question}')
result = prompt.invoke({'question': 'test', 'topic': 'test'})
assert result == ChatPromptValue(messages=[HumanMessage(content=
'Respond to the following question: test')])
with pytest.raises(KeyError):
prompt.invoke({})
|
def test_prompt_template_params() ->None:
    """Extra input params are ignored; missing ones raise KeyError."""
    prompt = ChatPromptTemplate.from_template(
        'Respond to the following question: {question}')
    # 'topic' is not a template variable and is silently dropped.
    result = prompt.invoke({'question': 'test', 'topic': 'test'})
    assert result == ChatPromptValue(messages=[HumanMessage(content=
        'Respond to the following question: test')])
    with pytest.raises(KeyError):
        prompt.invoke({})
| null |
test_mget
|
store = InMemoryStore()
store.mset([('key1', 'value1'), ('key2', 'value2')])
values = store.mget(['key1', 'key2'])
assert values == ['value1', 'value2']
non_existent_value = store.mget(['key3'])
assert non_existent_value == [None]
|
def test_mget() ->None:
    """mget returns stored values in order and None for missing keys."""
    store = InMemoryStore()
    store.mset([('key1', 'value1'), ('key2', 'value2')])
    assert store.mget(['key1', 'key2']) == ['value1', 'value2']
    # A key that was never set yields None rather than raising.
    assert store.mget(['key3']) == [None]
| null |
get_media_metadata_location
|
response = requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + '/metadata/' + query)
return response.json()
|
def get_media_metadata_location(self, query: str) ->str:
    """Fetch media metadata for *query* from the image/video library API.

    NOTE(review): the return annotation says ``str`` but ``response.json()``
    returns the decoded JSON object (typically a dict) — confirm intent.
    """
    response = requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + '/metadata/' + query)
    return response.json()
| null |
_build_insert_sql
|
ks = ','.join(column_names)
_data = []
for n in transac:
n = ','.join([f"'{self.escape_str(str(_n))}'" for _n in n])
_data.append(f'({n})')
i_str = f"""
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
|
def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]
    ) ->str:
    """Build a multi-row INSERT statement for the configured table."""
    ks = ','.join(column_names)
    _data = []
    for n in transac:
        # SECURITY NOTE(review): values are embedded by string formatting and
        # rely entirely on self.escape_str for escaping — prefer parameterized
        # queries if the client supports them.
        n = ','.join([f"'{self.escape_str(str(_n))}'" for _n in n])
        _data.append(f'({n})')
    i_str = f"""
        INSERT INTO TABLE 
            {self.config.database}.{self.config.table}({ks})
        VALUES
        {','.join(_data)}
        """
    return i_str
| null |
process_xls
|
import io
import os
try:
import xlrd
except ImportError:
raise ImportError('`xlrd` package not found, please run `pip install xlrd`'
)
try:
import pandas as pd
except ImportError:
raise ImportError(
'`pandas` package not found, please run `pip install pandas`')
response = self.confluence.request(path=link, absolute=True)
text = ''
if response.status_code != 200 or response.content == b'' or response.content is None:
return text
filename = os.path.basename(link)
file_extension = os.path.splitext(filename)[1]
if file_extension.startswith('.csv'):
content_string = response.content.decode('utf-8')
df = pd.read_csv(io.StringIO(content_string))
text += df.to_string(index=False, header=False) + '\n\n'
else:
workbook = xlrd.open_workbook(file_contents=response.content)
for sheet in workbook.sheets():
text += f'{sheet.name}:\n'
for row in range(sheet.nrows):
for col in range(sheet.ncols):
text += f'{sheet.cell_value(row, col)}\t'
text += '\n'
text += '\n'
return text
|
def process_xls(self, link: str) ->str:
    """Download an attached spreadsheet (xls or csv) and render it as text."""
    import io
    import os
    try:
        import xlrd
    except ImportError:
        raise ImportError(
            '`xlrd` package not found, please run `pip install xlrd`')
    try:
        import pandas as pd
    except ImportError:
        raise ImportError(
            '`pandas` package not found, please run `pip install pandas`')
    response = self.confluence.request(path=link, absolute=True)
    text = ''
    # Bail out with empty text on any failed or empty download.
    if (response.status_code != 200 or response.content == b'' or response.
        content is None):
        return text
    filename = os.path.basename(link)
    file_extension = os.path.splitext(filename)[1]
    if file_extension.startswith('.csv'):
        # CSV attachment: decode and dump via pandas without header/index.
        content_string = response.content.decode('utf-8')
        df = pd.read_csv(io.StringIO(content_string))
        text += df.to_string(index=False, header=False) + '\n\n'
    else:
        # Excel attachment: render every sheet as tab-separated rows.
        workbook = xlrd.open_workbook(file_contents=response.content)
        for sheet in workbook.sheets():
            text += f'{sheet.name}:\n'
            for row in range(sheet.nrows):
                for col in range(sheet.ncols):
                    text += f'{sheet.cell_value(row, col)}\t'
                text += '\n'
            text += '\n'
    return text
| null |
_import_openweathermap_tool
|
from langchain_community.tools.openweathermap.tool import OpenWeatherMapQueryRun
return OpenWeatherMapQueryRun
|
def _import_openweathermap_tool() ->Any:
    """Lazily import and return the OpenWeatherMapQueryRun tool class."""
    from langchain_community.tools.openweathermap.tool import OpenWeatherMapQueryRun
    return OpenWeatherMapQueryRun
| null |
__init__
|
"""Initialize with dataframe object.
Args:
data_frame: Polars DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "text".
"""
import polars as pl
if not isinstance(data_frame, pl.DataFrame):
raise ValueError(
f'Expected data_frame to be a pl.DataFrame, got {type(data_frame)}')
super().__init__(data_frame, page_content_column=page_content_column)
|
def __init__(self, data_frame: Any, *, page_content_column: str='text'):
    """Initialize with dataframe object.

    Args:
        data_frame: Polars DataFrame object.
        page_content_column: Name of the column containing the page content.
            Defaults to "text".

    Raises:
        ValueError: If ``data_frame`` is not a ``polars.DataFrame``.
    """
    # Imported lazily so polars is only required when this loader is used.
    import polars as pl
    if not isinstance(data_frame, pl.DataFrame):
        raise ValueError(
            f'Expected data_frame to be a pl.DataFrame, got {type(data_frame)}'
            )
    super().__init__(data_frame, page_content_column=page_content_column)
|
Initialize with dataframe object.
Args:
data_frame: Polars DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "text".
|
_default_params
|
"""Default parameters"""
return {}
|
@property
def _default_params(self) ->Dict[str, Any]:
    """Default parameters (none for this implementation)."""
    return {}
|
Default parameters
|
_resolve_model_id
|
"""Resolve the model_id from the LLM's inference_server_url"""
from huggingface_hub import list_inference_endpoints
available_endpoints = list_inference_endpoints('*')
if isinstance(self.llm, HuggingFaceTextGenInference):
endpoint_url = self.llm.inference_server_url
elif isinstance(self.llm, HuggingFaceEndpoint):
endpoint_url = self.llm.endpoint_url
elif isinstance(self.llm, HuggingFaceHub):
self.model_id = self.llm.repo_id
return
else:
raise ValueError(f'Unknown LLM type: {type(self.llm)}')
for endpoint in available_endpoints:
if endpoint.url == endpoint_url:
self.model_id = endpoint.repository
if not self.model_id:
raise ValueError(
f'Failed to resolve model_idCould not find model id for inference server provided: {endpoint_url}Make sure that your Hugging Face token has access to the endpoint.'
)
|
def _resolve_model_id(self) ->None:
    """Resolve the model_id from the LLM's inference_server_url.

    Raises:
        ValueError: If the LLM type is unknown, or no endpoint matching the
            server URL can be found among the user's inference endpoints.
    """
    from huggingface_hub import list_inference_endpoints
    available_endpoints = list_inference_endpoints('*')
    if isinstance(self.llm, HuggingFaceTextGenInference):
        endpoint_url = self.llm.inference_server_url
    elif isinstance(self.llm, HuggingFaceEndpoint):
        endpoint_url = self.llm.endpoint_url
    elif isinstance(self.llm, HuggingFaceHub):
        # The hub wrapper carries the repo id directly; nothing to resolve.
        self.model_id = self.llm.repo_id
        return
    else:
        raise ValueError(f'Unknown LLM type: {type(self.llm)}')
    # NOTE(review): no break — if several endpoints share the URL the last
    # match wins; confirm that is intended.
    for endpoint in available_endpoints:
        if endpoint.url == endpoint_url:
            self.model_id = endpoint.repository
    if not self.model_id:
        # Fixed: the original message ran three sentences together with no
        # separators ("model_idCould not find ... endpoint_urlMake sure ...").
        raise ValueError(
            f'Failed to resolve model_id. Could not find model id for inference server provided: {endpoint_url}. Make sure that your Hugging Face token has access to the endpoint.'
            )
|
Resolve the model_id from the LLM's inference_server_url
|
_default_params
|
"""Get the default parameters for calling Ollama."""
return {'model': self.model, 'options': {'mirostat': self.mirostat,
'mirostat_eta': self.mirostat_eta, 'mirostat_tau': self.mirostat_tau,
'num_ctx': self.num_ctx, 'num_gpu': self.num_gpu, 'num_thread': self.
num_thread, 'repeat_last_n': self.repeat_last_n, 'repeat_penalty': self
.repeat_penalty, 'temperature': self.temperature, 'stop': self.stop,
'tfs_z': self.tfs_z, 'top_k': self.top_k, 'top_p': self.top_p}}
|
@property
def _default_params(self) ->Dict[str, Any]:
    """Get the default parameters for calling Ollama."""
    options = {
        'mirostat': self.mirostat,
        'mirostat_eta': self.mirostat_eta,
        'mirostat_tau': self.mirostat_tau,
        'num_ctx': self.num_ctx,
        'num_gpu': self.num_gpu,
        'num_thread': self.num_thread,
        'repeat_last_n': self.repeat_last_n,
        'repeat_penalty': self.repeat_penalty,
        'temperature': self.temperature,
        'stop': self.stop,
        'tfs_z': self.tfs_z,
        'top_k': self.top_k,
        'top_p': self.top_p,
    }
    return {'model': self.model, 'options': options}
|
Get the default parameters for calling Ollama.
|
new
|
"""
Creates a new template package.
"""
computed_name = name if name != '.' else Path.cwd().name
destination_dir = Path.cwd() / name if name != '.' else Path.cwd()
project_template_dir = Path(__file__).parents[1] / 'package_template'
shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=name ==
'.')
package_name_split = computed_name.split('/')
package_name = package_name_split[-2] if len(package_name_split
) > 1 and package_name_split[-1] == '' else package_name_split[-1]
module_name = re.sub('[^a-zA-Z0-9_]', '_', package_name)
chain_name = f'{module_name}_chain'
app_route_code = f"""from {module_name} import chain as {chain_name}
add_routes(app, {chain_name}, path="/{package_name}")"""
pyproject = destination_dir / 'pyproject.toml'
pyproject_contents = pyproject.read_text()
pyproject.write_text(pyproject_contents.replace('__package_name__',
package_name).replace('__module_name__', module_name))
package_dir = destination_dir / module_name
shutil.move(destination_dir / 'package_template', package_dir)
init = package_dir / '__init__.py'
init_contents = init.read_text()
init.write_text(init_contents.replace('__module_name__', module_name))
readme = destination_dir / 'README.md'
readme_contents = readme.read_text()
readme.write_text(readme_contents.replace('__package_name__', package_name)
.replace('__app_route_code__', app_route_code))
if with_poetry:
subprocess.run(['poetry', 'install'], cwd=destination_dir)
|
@package_cli.command()
def new(name: Annotated[str, typer.Argument(help=
    'The name of the folder to create')], with_poetry: Annotated[bool,
    typer.Option('--with-poetry/--no-poetry', help=
    'Run `poetry install` after creating the package')]=False):
    """
    Creates a new template package.
    """
    # Fixed: the --with-poetry help text said "Don't run poetry install",
    # the opposite of what the flag actually does.
    # '.' means "scaffold into the current directory under its own name".
    computed_name = name if name != '.' else Path.cwd().name
    destination_dir = Path.cwd() / name if name != '.' else Path.cwd()
    project_template_dir = Path(__file__).parents[1] / 'package_template'
    shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=
        name == '.')
    # Normalize a possibly slash-terminated name down to its last component.
    package_name_split = computed_name.split('/')
    package_name = package_name_split[-2] if len(package_name_split
        ) > 1 and package_name_split[-1] == '' else package_name_split[-1]
    module_name = re.sub('[^a-zA-Z0-9_]', '_', package_name)
    chain_name = f'{module_name}_chain'
    app_route_code = f"""from {module_name} import chain as {chain_name}
add_routes(app, {chain_name}, path="/{package_name}")"""
    # Substitute placeholders in pyproject.toml, the package dir, __init__.py
    # and README.md copied from the template.
    pyproject = destination_dir / 'pyproject.toml'
    pyproject_contents = pyproject.read_text()
    pyproject.write_text(pyproject_contents.replace('__package_name__',
        package_name).replace('__module_name__', module_name))
    package_dir = destination_dir / module_name
    shutil.move(destination_dir / 'package_template', package_dir)
    init = package_dir / '__init__.py'
    init_contents = init.read_text()
    init.write_text(init_contents.replace('__module_name__', module_name))
    readme = destination_dir / 'README.md'
    readme_contents = readme.read_text()
    readme.write_text(readme_contents.replace('__package_name__',
        package_name).replace('__app_route_code__', app_route_code))
    if with_poetry:
        subprocess.run(['poetry', 'install'], cwd=destination_dir)
|
Creates a new template package.
|
__init__
|
if functions is not None:
assert len(functions) == len(runnables)
assert all(func['name'] in runnables for func in functions)
router = JsonOutputFunctionsParser(args_only=False) | {'key': itemgetter(
'name'), 'input': itemgetter('arguments')} | RouterRunnable(runnables)
super().__init__(bound=router, kwargs={}, functions=functions)
|
def __init__(self, runnables: Mapping[str, Union[Runnable[dict, Any],
    Callable[[dict], Any]]], functions: Optional[List[OpenAIFunction]]=None):
    """Bind a function-call router over the named runnables.

    Args:
        runnables: Mapping from function name to the runnable (or callable)
            handling calls to that function.
        functions: Optional OpenAI function specs; when given, they must
            match the runnables one-to-one by name.
    """
    if functions is not None:
        # NOTE(review): asserts are stripped under `python -O`, so these
        # validations vanish in optimized runs — consider raising instead.
        assert len(functions) == len(runnables)
        assert all(func['name'] in runnables for func in functions)
    # Parse the model's function call, then route its 'arguments' to the
    # runnable registered under its 'name'.
    router = JsonOutputFunctionsParser(args_only=False) | {'key':
        itemgetter('name'), 'input': itemgetter('arguments')} | RouterRunnable(
        runnables)
    super().__init__(bound=router, kwargs={}, functions=functions)
| null |
_kendra_query
|
kendra_kwargs = {'IndexId': self.index_id, 'QueryText': query.strip()[0:999
], 'PageSize': self.top_k}
if self.attribute_filter is not None:
kendra_kwargs['AttributeFilter'] = self.attribute_filter
if self.user_context is not None:
kendra_kwargs['UserContext'] = self.user_context
response = self.client.retrieve(**kendra_kwargs)
r_result = RetrieveResult.parse_obj(response)
if r_result.ResultItems:
return r_result.ResultItems
response = self.client.query(**kendra_kwargs)
q_result = QueryResult.parse_obj(response)
return q_result.ResultItems
|
def _kendra_query(self, query: str) ->Sequence[ResultItem]:
    """Query Kendra, preferring the Retrieve API with Query as fallback."""
    # Truncate the query text to 999 characters — presumably an API limit;
    # confirm against the Kendra service quotas.
    kendra_kwargs = {'IndexId': self.index_id, 'QueryText': query.strip()[0
        :999], 'PageSize': self.top_k}
    if self.attribute_filter is not None:
        kendra_kwargs['AttributeFilter'] = self.attribute_filter
    if self.user_context is not None:
        kendra_kwargs['UserContext'] = self.user_context
    response = self.client.retrieve(**kendra_kwargs)
    r_result = RetrieveResult.parse_obj(response)
    if r_result.ResultItems:
        return r_result.ResultItems
    # Retrieve returned nothing: fall back to the Query API.
    response = self.client.query(**kendra_kwargs)
    q_result = QueryResult.parse_obj(response)
    return q_result.ResultItems
| null |
_llm_type
|
"""Return type of llm."""
return 'fake'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    return 'fake'
|
Return type of llm.
|
_validate_metadata_func
|
"""Check if the metadata_func output is valid"""
sample = data.first()
if self._metadata_func is not None:
sample_metadata = self._metadata_func(sample, {})
if not isinstance(sample_metadata, dict):
raise ValueError(
f'Expected the metadata_func to return a dict but got `{type(sample_metadata)}`'
)
|
def _validate_metadata_func(self, data: Any) ->None:
    """Check if the metadata_func output is valid"""
    sample = data.first()
    if self._metadata_func is None:
        return
    # Probe the user-supplied function on one sample and check its type.
    sample_metadata = self._metadata_func(sample, {})
    if not isinstance(sample_metadata, dict):
        raise ValueError(
            f'Expected the metadata_func to return a dict but got `{type(sample_metadata)}`'
            )
|
Check if the metadata_func output is valid
|
test_load_list
|
"""Loads page_content of type List"""
page_content_column = 'list'
name = 'v1'
loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET,
page_content_column, name)
doc = loader.load()[0]
assert doc.page_content == '["List item 1", "List item 2", "List item 3"]'
assert doc.metadata.keys() == {'split', 'text', 'dict'}
|
@pytest.mark.requires('datasets')
def test_load_list() ->None:
    """Loads page_content of type List"""
    # BUGFIX: this function was also decorated with @pytest.fixture, which
    # registers it as a fixture instead of a test — pytest never collected
    # or ran the assertions below. The fixture decorator is removed so the
    # test actually executes.
    page_content_column = 'list'
    name = 'v1'
    loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET,
        page_content_column, name)
    doc = loader.load()[0]
    assert doc.page_content == '["List item 1", "List item 2", "List item 3"]'
    assert doc.metadata.keys() == {'split', 'text', 'dict'}
|
Loads page_content of type List
|
_generate_clients
|
from qdrant_client import AsyncQdrantClient, QdrantClient
sync_client = QdrantClient(location=location, url=url, port=port, grpc_port
=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key,
prefix=prefix, timeout=timeout, host=host, path=path, **kwargs)
if location == ':memory:' or path is not None:
async_client = None
else:
async_client = AsyncQdrantClient(location=location, url=url, port=port,
grpc_port=grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=
api_key, prefix=prefix, timeout=timeout, host=host, path=path, **kwargs
)
return sync_client, async_client
|
@staticmethod
def _generate_clients(location: Optional[str]=None, url: Optional[str]=None,
    port: Optional[int]=6333, grpc_port: int=6334, prefer_grpc: bool=False,
    https: Optional[bool]=None, api_key: Optional[str]=None, prefix:
    Optional[str]=None, timeout: Optional[float]=None, host: Optional[str]=
    None, path: Optional[str]=None, **kwargs: Any) ->Tuple[Any, Any]:
    """Build a (sync, async) pair of Qdrant clients with shared settings.

    Returns:
        Tuple of (QdrantClient, AsyncQdrantClient or None). The async
        client is omitted for local modes (in-memory or on-disk path),
        which cannot be shared between two client instances.
    """
    from qdrant_client import AsyncQdrantClient, QdrantClient
    common = dict(location=location, url=url, port=port, grpc_port=
        grpc_port, prefer_grpc=prefer_grpc, https=https, api_key=api_key,
        prefix=prefix, timeout=timeout, host=host, path=path)
    sync_client = QdrantClient(**common, **kwargs)
    if location == ':memory:' or path is not None:
        async_client = None
    else:
        async_client = AsyncQdrantClient(**common, **kwargs)
    return sync_client, async_client
| null |
test_hnsw_vector_field_optional_values
|
"""Test optional values for HNSWVectorField."""
hnsw_vector_field_data = {'name': 'example', 'dims': 100, 'algorithm':
'HNSW', 'initial_cap': 2000, 'm': 10, 'ef_construction': 250,
'ef_runtime': 15, 'epsilon': 0.05}
hnsw_vector = HNSWVectorField(**hnsw_vector_field_data)
assert hnsw_vector.initial_cap == 2000
assert hnsw_vector.m == 10
assert hnsw_vector.ef_construction == 250
assert hnsw_vector.ef_runtime == 15
assert hnsw_vector.epsilon == 0.05
|
def test_hnsw_vector_field_optional_values() ->None:
    """Test optional values for HNSWVectorField."""
    field_kwargs = {'name': 'example', 'dims': 100, 'algorithm': 'HNSW',
        'initial_cap': 2000, 'm': 10, 'ef_construction': 250, 'ef_runtime':
        15, 'epsilon': 0.05}
    vector_field = HNSWVectorField(**field_kwargs)
    expected = {'initial_cap': 2000, 'm': 10, 'ef_construction': 250,
        'ef_runtime': 15, 'epsilon': 0.05}
    for attr, value in expected.items():
        assert getattr(vector_field, attr) == value
|
Test optional values for HNSWVectorField.
|
query
|
"""Query the ArangoDB database."""
import itertools
cursor = self.__db.aql.execute(query, **kwargs)
return [doc for doc in itertools.islice(cursor, top_k)]
|
def query(self, query: str, top_k: Optional[int]=None, **kwargs: Any) ->List[
    Dict[str, Any]]:
    """Query the ArangoDB database.

    Runs the AQL *query* and returns at most *top_k* result documents
    (all of them when top_k is None).
    """
    from itertools import islice
    cursor = self.__db.aql.execute(query, **kwargs)
    return list(islice(cursor, top_k))
|
Query the ArangoDB database.
|
__init__
|
"""Initialize the remote inference function."""
load_fn_kwargs = kwargs.pop('load_fn_kwargs', {})
load_fn_kwargs['model_id'] = load_fn_kwargs.get('model_id', DEFAULT_MODEL_NAME)
load_fn_kwargs['instruct'] = load_fn_kwargs.get('instruct', False)
load_fn_kwargs['device'] = load_fn_kwargs.get('device', 0)
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
|
def __init__(self, **kwargs: Any):
    """Initialize the remote inference function.

    Defaults for model_id, instruct, and device are injected into
    ``load_fn_kwargs`` before delegating to the parent initializer.
    """
    load_fn_kwargs = kwargs.pop('load_fn_kwargs', {})
    load_fn_kwargs.setdefault('model_id', DEFAULT_MODEL_NAME)
    load_fn_kwargs.setdefault('instruct', False)
    load_fn_kwargs.setdefault('device', 0)
    super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
|
Initialize the remote inference function.
|
_stream
|
params: Dict[str, Any] = self._invocation_params(messages=messages, stop=
stop, stream=True, **kwargs)
for stream_resp in self.stream_completion_with_retry(**params):
chunk = ChatGenerationChunk(**self._chat_generation_from_qwen_resp(
stream_resp, is_chunk=True))
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
|
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->Iterator[ChatGenerationChunk]:
    """Stream the chat completion, yielding one chunk per response event.

    Args:
        messages: Conversation so far.
        stop: Optional stop sequences forwarded to the model.
        run_manager: Callback manager notified of each new token.
        **kwargs: Extra parameters merged into the invocation params.

    Yields:
        A ChatGenerationChunk for each streamed model response.
    """
    params: Dict[str, Any] = self._invocation_params(messages=messages,
        stop=stop, stream=True, **kwargs)
    for stream_resp in self.stream_completion_with_retry(**params):
        chunk = ChatGenerationChunk(**self._chat_generation_from_qwen_resp(
            stream_resp, is_chunk=True))
        # NOTE(review): the new-token callback fires *after* the chunk is
        # yielded; many _stream implementations notify before yielding —
        # confirm this ordering is intentional.
        yield chunk
        if run_manager:
            run_manager.on_llm_new_token(chunk.text, chunk=chunk)
| null |
test_llm_on_llm_dataset
|
llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.
CRITERIA])
run_on_dataset(client=client, dataset_name=llm_dataset_name,
llm_or_chain_factory=llm, evaluation=eval_config, project_name=
eval_project_name, tags=['shouldpass'])
_check_all_feedback_passed(eval_project_name, client)
|
def test_llm_on_llm_dataset(llm_dataset_name: str, eval_project_name: str,
    client: Client) ->None:
    """Run QA and criteria evaluators over the LLM dataset end to end."""
    model = OpenAI(temperature=0)
    config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.
        CRITERIA])
    run_on_dataset(client=client, dataset_name=llm_dataset_name,
        llm_or_chain_factory=model, evaluation=config, project_name=
        eval_project_name, tags=['shouldpass'])
    _check_all_feedback_passed(eval_project_name, client)
| null |
__init__
|
from surrealdb import Surreal
self.collection = kwargs.pop('collection', 'documents')
self.ns = kwargs.pop('ns', 'langchain')
self.db = kwargs.pop('db', 'database')
self.dburl = kwargs.pop('dburl', 'ws://localhost:8000/rpc')
self.embedding_function = embedding_function
self.sdb = Surreal(self.dburl)
self.kwargs = kwargs
|
def __init__(self, embedding_function: Embeddings, **kwargs: Any) ->None:
    """Set up a SurrealDB-backed store with connection defaults.

    Connection settings (collection, namespace, database, URL) are popped
    from kwargs with defaults; whatever remains is kept for later calls.
    """
    from surrealdb import Surreal
    defaults = {'collection': 'documents', 'ns': 'langchain', 'db':
        'database', 'dburl': 'ws://localhost:8000/rpc'}
    for attr, fallback in defaults.items():
        setattr(self, attr, kwargs.pop(attr, fallback))
    self.embedding_function = embedding_function
    self.sdb = Surreal(self.dburl)
    self.kwargs = kwargs
| null |
test_character_text_splitter
|
"""Test splitting by character count."""
text = 'foo bar baz 123'
splitter = CharacterTextSplitter(separator=' ', chunk_size=7, chunk_overlap=3)
output = splitter.split_text(text)
expected_output = ['foo bar', 'bar baz', 'baz 123']
assert output == expected_output
|
def test_character_text_splitter() ->None:
    """Test splitting by character count."""
    splitter = CharacterTextSplitter(separator=' ', chunk_size=7,
        chunk_overlap=3)
    chunks = splitter.split_text('foo bar baz 123')
    assert chunks == ['foo bar', 'bar baz', 'baz 123']
|
Test splitting by character count.
|
invoke
|
return len(input)
|
def invoke(self, input: str, config: Optional[RunnableConfig]=None) ->int:
    """Return the length of *input*; the config argument is unused."""
    result = len(input)
    return result
| null |
from_texts
|
"""Create a Deep Lake dataset from a raw documents.
If a dataset_path is specified, the dataset will be persisted in that location,
otherwise by default at `./deeplake`
Examples:
>>> # Search using an embedding
>>> vector_store = DeepLake.from_texts(
... texts = <the_texts_that_you_want_to_embed>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
dataset_path (str): - The full path to the dataset. Can be:
- Deep Lake cloud path of the form ``hub://username/dataset_name``.
To write to Deep Lake cloud datasets,
ensure that you are logged in to Deep Lake
(use 'activeloop login' from command line)
- AWS S3 path of the form ``s3://bucketname/path/to/dataset``.
Credentials are required in either the environment
- Google Cloud Storage path of the form
``gcs://bucketname/path/to/dataset`` Credentials are required
in either the environment
- Local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- In-memory path of the form ``mem://path/to/dataset`` which doesn't
save the dataset, but keeps it in memory instead.
Should be used only for testing as it does not persist.
texts (List[Document]): List of documents to add.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
Note, in other places, it is called embedding_function.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
DeepLake: Deep Lake dataset.
"""
deeplake_dataset = cls(dataset_path=dataset_path, embedding=embedding, **kwargs
)
deeplake_dataset.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return deeplake_dataset
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Optional[Embeddings]=None,
    metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None,
    dataset_path: str=_LANGCHAIN_DEFAULT_DEEPLAKE_PATH, **kwargs: Any
    ) ->DeepLake:
    """Create a Deep Lake dataset from raw documents.
    If a dataset_path is specified, the dataset will be persisted in that location,
    otherwise by default at `./deeplake`
    Examples:
        >>> # Search using an embedding
        >>> vector_store = DeepLake.from_texts(
        ...        texts = <the_texts_that_you_want_to_embed>,
        ...        embedding_function = <embedding_function_for_query>,
        ...        k = <number_of_items_to_return>,
        ...        exec_option = <preferred_exec_option>,
        ... )
    Args:
        dataset_path (str): - The full path to the dataset. Can be:
            - Deep Lake cloud path of the form ``hub://username/dataset_name``.
                To write to Deep Lake cloud datasets,
                ensure that you are logged in to Deep Lake
                (use 'activeloop login' from command line)
            - AWS S3 path of the form ``s3://bucketname/path/to/dataset``.
                Credentials are required in either the environment
            - Google Cloud Storage path of the form
                ``gcs://bucketname/path/to/dataset`` Credentials are required
                in either the environment
            - Local file system path of the form ``./path/to/dataset`` or
                ``~/path/to/dataset`` or ``path/to/dataset``.
            - In-memory path of the form ``mem://path/to/dataset`` which doesn't
                save the dataset, but keeps it in memory instead.
                Should be used only for testing as it does not persist.
        texts (List[Document]): List of documents to add.
        embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            Note, in other places, it is called embedding_function.
        metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
        ids (Optional[List[str]]): List of document IDs. Defaults to None.
        **kwargs: Additional keyword arguments.
    Returns:
        DeepLake: Deep Lake dataset.
    """
    # Build the (possibly persisted) dataset first, then embed and add the
    # texts in a single pass.
    deeplake_dataset = cls(dataset_path=dataset_path, embedding=embedding,
        **kwargs)
    deeplake_dataset.add_texts(texts=texts, metadatas=metadatas, ids=ids)
    return deeplake_dataset
|
Create a Deep Lake dataset from raw documents.
If a dataset_path is specified, the dataset will be persisted in that location,
otherwise by default at `./deeplake`
Examples:
>>> # Search using an embedding
>>> vector_store = DeepLake.from_texts(
... texts = <the_texts_that_you_want_to_embed>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
dataset_path (str): - The full path to the dataset. Can be:
- Deep Lake cloud path of the form ``hub://username/dataset_name``.
To write to Deep Lake cloud datasets,
ensure that you are logged in to Deep Lake
(use 'activeloop login' from command line)
- AWS S3 path of the form ``s3://bucketname/path/to/dataset``.
Credentials are required in either the environment
- Google Cloud Storage path of the form
``gcs://bucketname/path/to/dataset`` Credentials are required
in either the environment
- Local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- In-memory path of the form ``mem://path/to/dataset`` which doesn't
save the dataset, but keeps it in memory instead.
Should be used only for testing as it does not persist.
texts (List[Document]): List of documents to add.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
Note, in other places, it is called embedding_function.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
DeepLake: Deep Lake dataset.
|
format_request_payload
|
prompt: Dict[str, Any] = {}
user_content: List[str] = []
assistant_content: List[str] = []
for message in messages:
"""Converts message to a dict according to role"""
content = cast(str, message.content)
if isinstance(message, HumanMessage):
user_content = user_content + [content]
elif isinstance(message, AIMessage):
assistant_content = assistant_content + [content]
elif isinstance(message, SystemMessage):
prompt['system_prompt'] = content
elif isinstance(message, ChatMessage) and message.role in ['user',
'assistant', 'system']:
if message.role == 'system':
prompt['system_prompt'] = content
elif message.role == 'user':
user_content = user_content + [content]
elif message.role == 'assistant':
assistant_content = assistant_content + [content]
else:
supported = ','.join([role for role in ['user', 'assistant', 'system']]
)
raise ValueError(
f"""Received unsupported role.
Supported roles for the LLaMa Foundation Model: {supported}"""
)
prompt['prompt'] = user_content[len(user_content) - 1]
history = [history_item for _, history_item in enumerate(zip(user_content[:
-1], assistant_content))]
prompt['history'] = history
return {**prompt, **model_kwargs}
|
def format_request_payload(self, messages: List[BaseMessage], **
    model_kwargs: Any) ->dict:
    """Build the LLaMa chat request payload from a LangChain message list.

    Messages are bucketed by role: system messages set ``system_prompt``,
    the most recent human message becomes ``prompt``, and earlier
    (human, assistant) pairs are zipped into ``history``. Any extra
    model kwargs are merged into the returned payload.

    Raises:
        ValueError: If a ChatMessage carries a role other than
            user/assistant/system.
        IndexError: If no user message is present (nothing to use as
            the prompt) — same as the previous implementation.
    """
    prompt: Dict[str, Any] = {}
    user_content: List[str] = []
    assistant_content: List[str] = []
    for message in messages:
        """Converts message to a dict according to role"""
        content = cast(str, message.content)
        if isinstance(message, HumanMessage):
            user_content.append(content)
        elif isinstance(message, AIMessage):
            assistant_content.append(content)
        elif isinstance(message, SystemMessage):
            prompt['system_prompt'] = content
        elif isinstance(message, ChatMessage) and message.role in ['user',
            'assistant', 'system']:
            if message.role == 'system':
                prompt['system_prompt'] = content
            elif message.role == 'user':
                user_content.append(content)
            elif message.role == 'assistant':
                assistant_content.append(content)
        else:
            # Idiom fix: a plain join of the literal list — the previous
            # pass-through comprehension added nothing.
            supported = ','.join(['user', 'assistant', 'system'])
            raise ValueError(
                f"""Received unsupported role.
Supported roles for the LLaMa Foundation Model: {supported}"""
                )
    # Idiom fixes: negative indexing instead of len()-1, and a direct
    # list(zip(...)) instead of an enumerate() whose index was discarded.
    prompt['prompt'] = user_content[-1]
    prompt['history'] = list(zip(user_content[:-1], assistant_content))
    return {**prompt, **model_kwargs}
| null |
_get_indices_infos
|
mappings = self.database.indices.get_mapping(index=','.join(indices))
if self.sample_documents_in_index_info > 0:
for k, v in mappings.items():
hits = self.database.search(index=k, query={'match_all': {}}, size=
self.sample_documents_in_index_info)['hits']['hits']
hits = [str(hit['_source']) for hit in hits]
mappings[k]['mappings'] = str(v) + '\n\n/*\n' + '\n'.join(hits
) + '\n*/'
return '\n\n'.join(["""Mapping for index {}:
{}""".format(index, mappings[
index]['mappings']) for index in mappings])
|
def _get_indices_infos(self, indices: List[str]) ->str:
    """Describe the mappings of *indices*, optionally with sample docs.

    When ``sample_documents_in_index_info`` is positive, a few documents
    from each index are appended to its mapping inside a /* ... */ block.
    """
    mappings = self.database.indices.get_mapping(index=','.join(indices))
    if self.sample_documents_in_index_info > 0:
        for index_name, mapping in mappings.items():
            raw_hits = self.database.search(index=index_name, query={
                'match_all': {}}, size=self.sample_documents_in_index_info)[
                'hits']['hits']
            samples = [str(hit['_source']) for hit in raw_hits]
            mappings[index_name]['mappings'] = str(mapping
                ) + '\n\n/*\n' + '\n'.join(samples) + '\n*/'
    return '\n\n'.join(['Mapping for index {}:\n{}'.format(index, mappings
        [index]['mappings']) for index in mappings])
| null |
__init__
|
self.known_texts: List[str] = []
self.dimensionality = dimensionality
|
def __init__(self, dimensionality: int=10) ->None:
    """Record the embedding dimensionality and start with no known texts."""
    self.dimensionality = dimensionality
    self.known_texts: List[str] = []
| null |
test_pyspark_loader_load_valid_data
|
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
data = [(random.choice(string.ascii_letters), random.randint(0, 1)) for _ in
range(3)]
df = spark.createDataFrame(data, ['text', 'label'])
expected_docs = [Document(page_content=data[0][0], metadata={'label': data[
0][1]}), Document(page_content=data[1][0], metadata={'label': data[1][1
]}), Document(page_content=data[2][0], metadata={'label': data[2][1]})]
loader = PySparkDataFrameLoader(spark_session=spark, df=df,
page_content_column='text')
result = loader.load()
assert result == expected_docs
|
def test_pyspark_loader_load_valid_data() ->None:
    """Round-trip a small random DataFrame through PySparkDataFrameLoader."""
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    rows = [(random.choice(string.ascii_letters), random.randint(0, 1)) for
        _ in range(3)]
    df = spark.createDataFrame(rows, ['text', 'label'])
    expected_docs = [Document(page_content=text, metadata={'label': label}) for
        text, label in rows]
    loader = PySparkDataFrameLoader(spark_session=spark, df=df,
        page_content_column='text')
    assert loader.load() == expected_docs
| null |
_call
|
"""Compute the score for a prediction and reference.
Args:
inputs (Dict[str, Any]): The input data.
run_manager (Optional[CallbackManagerForChainRun], optional):
The callback manager.
Returns:
Dict[str, Any]: The computed score.
"""
vectors = np.array(self.embeddings.embed_documents([inputs['prediction'],
inputs['reference']]))
score = self._compute_score(vectors)
return {'score': score}
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Compute the score for a prediction and reference.

    Args:
        inputs (Dict[str, Any]): The input data.
        run_manager (Optional[CallbackManagerForChainRun], optional):
            The callback manager.

    Returns:
        Dict[str, Any]: The computed score.
    """
    texts = [inputs['prediction'], inputs['reference']]
    vectors = np.array(self.embeddings.embed_documents(texts))
    return {'score': self._compute_score(vectors)}
|
Compute the score for a prediction and reference.
Args:
inputs (Dict[str, Any]): The input data.
run_manager (Optional[CallbackManagerForChainRun], optional):
The callback manager.
Returns:
Dict[str, Any]: The computed score.
|
create_collection
|
if self.pre_delete_collection:
self.delete_collection()
with Session(self._conn) as session:
CollectionStore.get_or_create(session, self.collection_name, cmetadata=
self.collection_metadata)
|
def create_collection(self) ->None:
    """Create the collection row in the store.

    If ``pre_delete_collection`` is set, any existing collection is
    dropped first; otherwise an existing collection is reused via
    ``get_or_create``.
    """
    if self.pre_delete_collection:
        self.delete_collection()
    with Session(self._conn) as session:
        CollectionStore.get_or_create(session, self.collection_name,
            cmetadata=self.collection_metadata)
| null |
save
|
if self.example_selector:
raise ValueError('Saving an example selector is not currently supported')
return super().save(file_path)
|
def save(self, file_path: Union[Path, str]) ->None:
    """Save the prompt template to *file_path*.

    Raises:
        ValueError: If an example selector is configured, since
            selectors cannot currently be serialized.
    """
    if not self.example_selector:
        return super().save(file_path)
    raise ValueError('Saving an example selector is not currently supported')
| null |
create_pbi_chat_agent
|
"""Construct a Power BI agent from a Chat LLM and tools.
If you supply only a toolkit and no Power BI dataset, the same LLM is used for both.
"""
from langchain.agents import AgentExecutor
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.memory import ConversationBufferMemory
if toolkit is None:
if powerbi is None:
raise ValueError('Must provide either a toolkit or powerbi dataset')
toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
tools = toolkit.get_tools()
tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names
agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools,
system_message=prefix.format(top_k=top_k).format(tables=tables),
human_message=suffix, input_variables=input_variables, callback_manager
=callback_manager, output_parser=output_parser, verbose=verbose, **kwargs)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
callback_manager=callback_manager, memory=memory or
ConversationBufferMemory(memory_key='chat_history', return_messages=
True), verbose=verbose, **agent_executor_kwargs or {})
|
def create_pbi_chat_agent(llm: BaseChatModel, toolkit: Optional[
    PowerBIToolkit]=None, powerbi: Optional[PowerBIDataset]=None,
    callback_manager: Optional[BaseCallbackManager]=None, output_parser:
    Optional[AgentOutputParser]=None, prefix: str=POWERBI_CHAT_PREFIX,
    suffix: str=POWERBI_CHAT_SUFFIX, examples: Optional[str]=None,
    input_variables: Optional[List[str]]=None, memory: Optional[
    BaseChatMemory]=None, top_k: int=10, verbose: bool=False,
    agent_executor_kwargs: Optional[Dict[str, Any]]=None, **kwargs: Any
    ) ->AgentExecutor:
    """Construct a Power BI agent from a Chat LLM and tools.

    If you supply only a toolkit and no Power BI dataset, the same LLM is used for both.

    Either ``toolkit`` or ``powerbi`` must be provided; when only a
    dataset is given, a toolkit is built from it with this same LLM.
    A conversation-buffer memory is created when none is supplied.

    Raises:
        ValueError: If neither a toolkit nor a powerbi dataset is given.
    """
    from langchain.agents import AgentExecutor
    from langchain.agents.conversational_chat.base import ConversationalChatAgent
    from langchain.memory import ConversationBufferMemory
    if toolkit is None:
        if powerbi is None:
            raise ValueError('Must provide either a toolkit or powerbi dataset'
                )
        toolkit = PowerBIToolkit(powerbi=powerbi, llm=llm, examples=examples)
    tools = toolkit.get_tools()
    tables = powerbi.table_names if powerbi else toolkit.powerbi.table_names
    # NOTE(review): the prefix is formatted twice — first with top_k, then
    # with tables. This only works if POWERBI_CHAT_PREFIX escapes the
    # second placeholder as {{tables}}; confirm against the template.
    agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools,
        system_message=prefix.format(top_k=top_k).format(tables=tables),
        human_message=suffix, input_variables=input_variables,
        callback_manager=callback_manager, output_parser=output_parser,
        verbose=verbose, **kwargs)
    return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
        callback_manager=callback_manager, memory=memory or
        ConversationBufferMemory(memory_key='chat_history', return_messages
        =True), verbose=verbose, **agent_executor_kwargs or {})
|
Construct a Power BI agent from a Chat LLM and tools.
If you supply only a toolkit and no Power BI dataset, the same LLM is used for both.
|
test_openai_streaming_best_of_error
|
"""Test validation for streaming fails if best_of is not 1."""
with pytest.raises(ValueError):
OpenAI(best_of=2, streaming=True)
|
def test_openai_streaming_best_of_error() ->None:
    """Test validation for streaming fails if best_of is not 1."""
    # Constructing with streaming enabled and best_of > 1 must be rejected.
    with pytest.raises(ValueError):
        OpenAI(streaming=True, best_of=2)
|
Test validation for streaming fails if best_of is not 1.
|
completion_with_retry_batching
|
"""Use tenacity to retry the completion call."""
import fireworks.client
prompt = kwargs['prompt']
del kwargs['prompt']
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(prompt: str) ->Any:
return fireworks.client.Completion.create(**kwargs, prompt=prompt)
def batch_sync_run() ->List:
with ThreadPoolExecutor() as executor:
results = list(executor.map(_completion_with_retry, prompt))
return results
return batch_sync_run()
|
def completion_with_retry_batching(llm: Fireworks, use_retry: bool, *,
    run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
    """Use tenacity to retry the completion call.

    ``kwargs['prompt']`` must be a batch (iterable) of prompts; each one
    is submitted concurrently via a thread pool and results come back in
    the original order.

    Args:
        llm: Fireworks LLM used to build the retry policy.
        use_retry: Whether to wrap each call in the retry decorator.
        run_manager: Optional callback manager for the retry policy.
        **kwargs: Completion parameters; must include ``prompt``.

    Returns:
        List of completion responses, one per prompt.
    """
    import fireworks.client
    # Idiom fix: pop() replaces the lookup + del pair; still raises
    # KeyError when 'prompt' is missing, exactly as before.
    prompt = kwargs.pop('prompt')
    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

    @conditional_decorator(use_retry, retry_decorator)
    def _completion_with_retry(prompt: str) ->Any:
        return fireworks.client.Completion.create(**kwargs, prompt=prompt)

    def batch_sync_run() ->List:
        # executor.map preserves the input order of the prompts.
        with ThreadPoolExecutor() as executor:
            return list(executor.map(_completion_with_retry, prompt))
    return batch_sync_run()
|
Use tenacity to retry the completion call.
|
_import_singlestoredb
|
from langchain_community.vectorstores.singlestoredb import SingleStoreDB
return SingleStoreDB
|
def _import_singlestoredb() ->Any:
    """Return the SingleStoreDB vector store class.

    The import is deferred inside the function so langchain_community is
    only required when this entry point is actually used.
    """
    from langchain_community.vectorstores.singlestoredb import SingleStoreDB
    return SingleStoreDB
| null |
test_custom_base_prompt_fail
|
"""Test validating an invalid custom prompt."""
base_prompt = 'Test. {zapier_description}.'
with pytest.raises(ValueError):
ZapierNLARunAction(action_id='test', zapier_description='test', params=
{'test': 'test'}, base_prompt=base_prompt, api_wrapper=
ZapierNLAWrapper(zapier_nla_api_key='test'))
|
def test_custom_base_prompt_fail() ->None:
    """Test validating an invalid custom prompt."""
    # Presumably rejected because required placeholders are missing from
    # the template; the constructor should raise.
    invalid_prompt = 'Test. {zapier_description}.'
    with pytest.raises(ValueError):
        ZapierNLARunAction(action_id='test', zapier_description='test',
            params={'test': 'test'}, base_prompt=invalid_prompt,
            api_wrapper=ZapierNLAWrapper(zapier_nla_api_key='test'))
|
Test validating an invalid custom prompt.
|
from_llm
|
"""Load from LLM."""
llm_chain = LLMChain(llm=llm, prompt=PROMPT)
return cls(llm_chain=llm_chain, objective=objective, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, objective: str, **kwargs: Any
    ) ->NatBotChain:
    """Build a chain for *objective* from the given language model."""
    chain = LLMChain(llm=llm, prompt=PROMPT)
    return cls(llm_chain=chain, objective=objective, **kwargs)
|
Load from LLM.
|
escape_special_characters
|
"""Escapes any special characters in `prompt`"""
escape_map = {'\\': '\\\\', '"': '\\"', '\x08': '\\b', '\x0c': '\\f', '\n':
'\\n', '\r': '\\r', '\t': '\\t'}
for escape_sequence, escaped_sequence in escape_map.items():
prompt = prompt.replace(escape_sequence, escaped_sequence)
return prompt
|
@staticmethod
def escape_special_characters(prompt: str) ->str:
    """Escapes any special characters in `prompt`"""
    # Single-pass translation: backslash, double quote, backspace, form
    # feed, newline, carriage return, and tab each become their
    # two-character escape sequence.
    translation = str.maketrans({'\\': '\\\\', '"': '\\"', '\x08': '\\b',
        '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t'})
    return prompt.translate(translation)
|
Escapes any special characters in `prompt`
|
validate_environment
|
values['hunyuan_api_base'] = get_from_dict_or_env(values,
'hunyuan_api_base', 'HUNYUAN_API_BASE', DEFAULT_API_BASE)
values['hunyuan_app_id'] = get_from_dict_or_env(values, 'hunyuan_app_id',
'HUNYUAN_APP_ID')
values['hunyuan_secret_id'] = get_from_dict_or_env(values,
'hunyuan_secret_id', 'HUNYUAN_SECRET_ID')
values['hunyuan_secret_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'hunyuan_secret_key', 'HUNYUAN_SECRET_KEY'))
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve Hunyuan connection settings from ``values`` or env vars.

    The API base falls back to DEFAULT_API_BASE; app_id, secret_id and
    secret_key have no fallback (get_from_dict_or_env presumably raises
    when they are missing — confirm). The secret key is wrapped as a
    SecretStr so it is not exposed in reprs or logs.
    """
    values['hunyuan_api_base'] = get_from_dict_or_env(values,
        'hunyuan_api_base', 'HUNYUAN_API_BASE', DEFAULT_API_BASE)
    values['hunyuan_app_id'] = get_from_dict_or_env(values,
        'hunyuan_app_id', 'HUNYUAN_APP_ID')
    values['hunyuan_secret_id'] = get_from_dict_or_env(values,
        'hunyuan_secret_id', 'HUNYUAN_SECRET_ID')
    values['hunyuan_secret_key'] = convert_to_secret_str(get_from_dict_or_env
        (values, 'hunyuan_secret_key', 'HUNYUAN_SECRET_KEY'))
    return values
| null |
_chain_type
|
return 'llm_symbolic_math_chain'
|
@property
def _chain_type(self) ->str:
    """Return the chain-type identifier string for this chain."""
    return 'llm_symbolic_math_chain'
| null |
_human_assistant_format
|
if input_text.count('Human:') == 0 or input_text.find('Human:'
) > input_text.find('Assistant:') and 'Assistant:' in input_text:
input_text = HUMAN_PROMPT + ' ' + input_text
if input_text.count('Assistant:') == 0:
input_text = input_text + ASSISTANT_PROMPT
if input_text[:len('Human:')] == 'Human:':
input_text = '\n\n' + input_text
input_text = _add_newlines_before_ha(input_text)
count = 0
for i in range(len(input_text)):
if input_text[i:i + len(HUMAN_PROMPT)] == HUMAN_PROMPT:
if count % 2 == 0:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f' Received {input_text}')
if input_text[i:i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
if count % 2 == 1:
count += 1
else:
warnings.warn(ALTERNATION_ERROR + f' Received {input_text}')
if count % 2 == 1:
input_text = input_text + ASSISTANT_PROMPT
return input_text
|
def _human_assistant_format(input_text: str) ->str:
    """Normalize a prompt into alternating Human:/Assistant: turns.

    Ensures the text starts with a Human turn and ends with an Assistant
    turn, and warns (without fixing) when the markers do not strictly
    alternate. HUMAN_PROMPT/ASSISTANT_PROMPT are module constants defined
    elsewhere — presumably "\\n\\nHuman:" / "\\n\\nAssistant:"; confirm.
    """
    # Prepend a Human turn when there is none, or when Assistant appears
    # before the first Human marker.
    # NOTE(review): this parses as `A or (B and C)` due to and/or
    # precedence — confirm that grouping is intentional.
    if input_text.count('Human:') == 0 or input_text.find('Human:'
        ) > input_text.find('Assistant:') and 'Assistant:' in input_text:
        input_text = HUMAN_PROMPT + ' ' + input_text
    if input_text.count('Assistant:') == 0:
        input_text = input_text + ASSISTANT_PROMPT
    if input_text[:len('Human:')] == 'Human:':
        input_text = '\n\n' + input_text
    input_text = _add_newlines_before_ha(input_text)
    count = 0
    # Walk the text, flipping `count` parity at each marker; a marker that
    # appears out of turn triggers an alternation warning instead.
    for i in range(len(input_text)):
        if input_text[i:i + len(HUMAN_PROMPT)] == HUMAN_PROMPT:
            if count % 2 == 0:
                count += 1
            else:
                warnings.warn(ALTERNATION_ERROR + f' Received {input_text}')
        if input_text[i:i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
            if count % 2 == 1:
                count += 1
            else:
                warnings.warn(ALTERNATION_ERROR + f' Received {input_text}')
    # Odd parity means the final marker was Human: close the exchange with
    # an Assistant turn.
    if count % 2 == 1:
        input_text = input_text + ASSISTANT_PROMPT
    return input_text
| null |
test_analyticdb_with_metadatas_with_scores
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = AnalyticDB.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)]
|
def test_analyticdb_with_metadatas_with_scores() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    store = AnalyticDB.from_texts(texts=texts, collection_name=
        'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=[{'page': str(i)} for i in range(len(texts))],
        connection_string=CONNECTION_STRING, pre_delete_collection=True)
    results = store.similarity_search_with_score('foo', k=1)
    expected = (Document(page_content='foo', metadata={'page': '0'}), 0.0)
    assert results == [expected]
|
Test end to end construction and search.
|
test_visit_structured_query
|
from timescale_vector import client
query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=1)
expected = query, {'predicates': client.Predicates(('foo', '<', 1))}
structured_query = StructuredQuery(query=query, filter=comp)
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.GT, attribute='abc', value=2.0)])
structured_query = StructuredQuery(query=query, filter=op)
expected = query, {'predicates': client.Predicates(client.Predicates(('foo',
'<', 2)), client.Predicates(('bar', '==', 'baz')), client.Predicates((
'abc', '>', 2.0)))}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
|
@pytest.mark.requires('timescale_vector')
def test_visit_structured_query() ->None:
    """Translate StructuredQuery filters into timescale_vector Predicates."""
    from timescale_vector import client
    query = 'What is the capital of France?'
    # No filter: the query passes through with no extra kwargs.
    structured_query = StructuredQuery(query=query, filter=None)
    expected: Tuple[str, Dict] = (query, {})
    actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
    assert expected == actual
    # A single comparison becomes one Predicates instance.
    comp = Comparison(comparator=Comparator.LT, attribute='foo', value=1)
    expected = query, {'predicates': client.Predicates(('foo', '<', 1))}
    structured_query = StructuredQuery(query=query, filter=comp)
    actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
    assert expected == actual
    # An AND over several comparisons nests the per-comparison Predicates.
    op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
        Comparator.LT, attribute='foo', value=2), Comparison(comparator=
        Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator
        =Comparator.GT, attribute='abc', value=2.0)])
    structured_query = StructuredQuery(query=query, filter=op)
    expected = query, {'predicates': client.Predicates(client.Predicates((
        'foo', '<', 2)), client.Predicates(('bar', '==', 'baz')), client.
        Predicates(('abc', '>', 2.0)))}
    actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
    assert expected == actual
| null |
from_llm
|
assert sql_cmd_parser, '`sql_cmd_parser` must be set in VectorSQLDatabaseChain.'
prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, database=db, sql_cmd_parser=sql_cmd_parser,
**kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, db: SQLDatabase, prompt: Optional
    [BasePromptTemplate]=None, sql_cmd_parser: Optional[
    VectorSQLOutputParser]=None, **kwargs: Any) ->VectorSQLDatabaseChain:
    """Construct a VectorSQLDatabaseChain from an LLM and a database.

    Args:
        llm: Language model used to generate SQL commands.
        db: Database the chain will run generated SQL against.
        prompt: Optional prompt; defaults to the dialect-specific prompt.
        sql_cmd_parser: Required parser for the model's SQL output.
        **kwargs: Forwarded to the chain constructor.

    Raises:
        ValueError: If ``sql_cmd_parser`` is not provided. (A plain
            ``assert`` was replaced because asserts are stripped under
            ``python -O``.)
    """
    if sql_cmd_parser is None:
        raise ValueError(
            '`sql_cmd_parser` must be set in VectorSQLDatabaseChain.')
    prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
    llm_chain = LLMChain(llm=llm, prompt=prompt)
    return cls(llm_chain=llm_chain, database=db, sql_cmd_parser=
        sql_cmd_parser, **kwargs)
| null |
from_existing_graph
|
"""
Initialize and return a Neo4jVector instance from an existing graph.
This method initializes a Neo4jVector instance using the provided
parameters and the existing graph. It validates the existence of
the indices and creates new ones if they don't exist.
Returns:
Neo4jVector: An instance of Neo4jVector initialized with the provided parameters
and existing graph.
Example:
>>> neo4j_vector = Neo4jVector.from_existing_graph(
... embedding=my_embedding,
... node_label="Document",
... embedding_node_property="embedding",
... text_node_properties=["title", "content"]
... )
Note:
Neo4j credentials are required in the form of `url`, `username`, and `password`,
and optional `database` parameters passed as additional keyword arguments.
"""
if not text_node_properties:
raise ValueError(
'Parameter `text_node_properties` must not be an empty list')
if not retrieval_query:
retrieval_query = (
f"RETURN reduce(str='', k IN {text_node_properties} | str + '\\n' + k + ': ' + coalesce(node[k], '')) AS text, node {{.*, `"
+ embedding_node_property + '`: Null, id: Null, ' + ', '.join([
f'`{prop}`: Null' for prop in text_node_properties]) +
'} AS metadata, score')
store = cls(embedding=embedding, index_name=index_name, keyword_index_name=
keyword_index_name, search_type=search_type, retrieval_query=
retrieval_query, node_label=node_label, embedding_node_property=
embedding_node_property, **kwargs)
embedding_dimension = store.retrieve_existing_index()
if not embedding_dimension:
store.create_new_index()
elif not store.embedding_dimension == embedding_dimension:
raise ValueError(
f"""Index with name {store.index_name} already exists.The provided embedding function and vector index dimensions do not match.
Embedding function dimension: {store.embedding_dimension}
Vector index dimension: {embedding_dimension}"""
)
if search_type == SearchType.HYBRID:
fts_node_label = store.retrieve_existing_fts_index(text_node_properties)
if not fts_node_label:
store.create_new_keyword_index(text_node_properties)
elif not fts_node_label == store.node_label:
raise ValueError(
"Vector and keyword index don't index the same node label")
while True:
fetch_query = (
f"MATCH (n:`{node_label}`) WHERE n.{embedding_node_property} IS null AND any(k in $props WHERE n[k] IS NOT null) RETURN elementId(n) AS id, reduce(str='',k IN $props | str + '\\n' + k + ':' + coalesce(n[k], '')) AS text LIMIT 1000"
)
data = store.query(fetch_query, params={'props': text_node_properties})
text_embeddings = embedding.embed_documents([el['text'] for el in data])
params = {'data': [{'id': el['id'], 'embedding': embedding} for el,
embedding in zip(data, text_embeddings)]}
store.query(
f"UNWIND $data AS row MATCH (n:`{node_label}`) WHERE elementId(n) = row.id CALL db.create.setVectorProperty(n, '{embedding_node_property}', row.embedding) YIELD node RETURN count(*)"
, params=params)
if len(data) < 1000:
break
return store
|
@classmethod
def from_existing_graph(cls: Type[Neo4jVector], embedding: Embeddings,
    node_label: str, embedding_node_property: str, text_node_properties:
    List[str], *, keyword_index_name: Optional[str]='keyword', index_name:
    str='vector', search_type: SearchType=DEFAULT_SEARCH_TYPE,
    retrieval_query: str='', **kwargs: Any) ->Neo4jVector:
    """
    Initialize and return a Neo4jVector instance from an existing graph.
    This method initializes a Neo4jVector instance using the provided
    parameters and the existing graph. It validates the existence of
    the indices and creates new ones if they don't exist.
    Returns:
        Neo4jVector: An instance of Neo4jVector initialized with the provided parameters
        and existing graph.
    Example:
    >>> neo4j_vector = Neo4jVector.from_existing_graph(
    ...     embedding=my_embedding,
    ...     node_label="Document",
    ...     embedding_node_property="embedding",
    ...     text_node_properties=["title", "content"]
    ... )
    Note:
    Neo4j credentials are required in the form of `url`, `username`, and `password`,
    and optional `database` parameters passed as additional keyword arguments.
    """
    if not text_node_properties:
        raise ValueError(
            'Parameter `text_node_properties` must not be an empty list')
    # Default retrieval query: concatenate all text properties into a single
    # "text" field, and null out the embedding/text/id fields in the returned
    # metadata so raw vectors are not echoed back to callers.
    if not retrieval_query:
        retrieval_query = (
            f"RETURN reduce(str='', k IN {text_node_properties} | str + '\\n' + k + ': ' + coalesce(node[k], '')) AS text, node {{.*, `"
             + embedding_node_property + '`: Null, id: Null, ' + ', '.join(
            [f'`{prop}`: Null' for prop in text_node_properties]) +
            '} AS metadata, score')
    store = cls(embedding=embedding, index_name=index_name,
        keyword_index_name=keyword_index_name, search_type=search_type,
        retrieval_query=retrieval_query, node_label=node_label,
        embedding_node_property=embedding_node_property, **kwargs)
    # Reuse an existing vector index when its dimension matches the
    # embedding function; otherwise create one or fail loudly.
    embedding_dimension = store.retrieve_existing_index()
    if not embedding_dimension:
        store.create_new_index()
    elif not store.embedding_dimension == embedding_dimension:
        # NOTE(review): message lacks a space after 'exists.' — cosmetic only.
        raise ValueError(
            f"""Index with name {store.index_name} already exists.The provided embedding function and vector index dimensions do not match.
Embedding function dimension: {store.embedding_dimension}
Vector index dimension: {embedding_dimension}"""
            )
    # Hybrid search additionally needs a full-text (keyword) index over the
    # same node label.
    if search_type == SearchType.HYBRID:
        fts_node_label = store.retrieve_existing_fts_index(text_node_properties
            )
        if not fts_node_label:
            store.create_new_keyword_index(text_node_properties)
        elif not fts_node_label == store.node_label:
            raise ValueError(
                "Vector and keyword index don't index the same node label")
    # Backfill embeddings in batches of 1000: fetch nodes that have text but
    # no embedding yet, embed them, write the vectors back, and stop once a
    # batch comes back short (i.e. fewer than 1000 remaining).
    while True:
        fetch_query = (
            f"MATCH (n:`{node_label}`) WHERE n.{embedding_node_property} IS null AND any(k in $props WHERE n[k] IS NOT null) RETURN elementId(n) AS id, reduce(str='',k IN $props | str + '\\n' + k + ':' + coalesce(n[k], '')) AS text LIMIT 1000"
            )
        data = store.query(fetch_query, params={'props': text_node_properties})
        text_embeddings = embedding.embed_documents([el['text'] for el in data]
            )
        params = {'data': [{'id': el['id'], 'embedding': embedding} for el,
            embedding in zip(data, text_embeddings)]}
        store.query(
            f"UNWIND $data AS row MATCH (n:`{node_label}`) WHERE elementId(n) = row.id CALL db.create.setVectorProperty(n, '{embedding_node_property}', row.embedding) YIELD node RETURN count(*)"
            , params=params)
        if len(data) < 1000:
            break
    return store
|
Initialize and return a Neo4jVector instance from an existing graph.
This method initializes a Neo4jVector instance using the provided
parameters and the existing graph. It validates the existence of
the indices and creates new ones if they don't exist.
Returns:
Neo4jVector: An instance of Neo4jVector initialized with the provided parameters
and existing graph.
Example:
>>> neo4j_vector = Neo4jVector.from_existing_graph(
... embedding=my_embedding,
... node_label="Document",
... embedding_node_property="embedding",
... text_node_properties=["title", "content"]
... )
Note:
Neo4j credentials are required in the form of `url`, `username`, and `password`,
and optional `database` parameters passed as additional keyword arguments.
|
_async_retry_decorator
|
import openai
min_seconds = 4
max_seconds = 10
async_retrying = AsyncRetrying(reraise=True, stop=stop_after_attempt(
embeddings.max_retries), wait=wait_exponential(multiplier=1, min=
min_seconds, max=max_seconds), retry=retry_if_exception_type(openai.
error.Timeout) | retry_if_exception_type(openai.error.APIError) |
retry_if_exception_type(openai.error.APIConnectionError) |
retry_if_exception_type(openai.error.RateLimitError) |
retry_if_exception_type(openai.error.ServiceUnavailableError),
before_sleep=before_sleep_log(logger, logging.WARNING))
def wrap(func: Callable) ->Callable:
async def wrapped_f(*args: Any, **kwargs: Any) ->Callable:
async for _ in async_retrying:
return await func(*args, **kwargs)
raise AssertionError('this is unreachable')
return wrapped_f
return wrap
|
def _async_retry_decorator(embeddings: LocalAIEmbeddings) ->Any:
    """Build an async retry decorator configured from ``embeddings.max_retries``.

    Retries on transient OpenAI errors with exponential backoff and logs a
    warning before each sleep.
    """
    import openai
    wait_min = 4
    wait_max = 10
    # Union of the transient error types worth retrying.
    retryable = (retry_if_exception_type(openai.error.Timeout) |
        retry_if_exception_type(openai.error.APIError) |
        retry_if_exception_type(openai.error.APIConnectionError) |
        retry_if_exception_type(openai.error.RateLimitError) |
        retry_if_exception_type(openai.error.ServiceUnavailableError))
    async_retrying = AsyncRetrying(reraise=True, stop=stop_after_attempt(
        embeddings.max_retries), wait=wait_exponential(multiplier=1, min=
        wait_min, max=wait_max), retry=retryable, before_sleep=
        before_sleep_log(logger, logging.WARNING))

    def wrap(func: Callable) ->Callable:

        async def wrapped_f(*args: Any, **kwargs: Any) ->Callable:
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError('this is unreachable')
        return wrapped_f
    return wrap
| null |
test_serializable_mapping
|
serializable_modules = import_all_modules('langchain')
missing = set(SERIALIZABLE_MAPPING).difference(serializable_modules)
assert missing == set()
extra = set(serializable_modules).difference(SERIALIZABLE_MAPPING)
assert extra == set()
for k, import_path in serializable_modules.items():
import_dir, import_obj = import_path[:-1], import_path[-1]
mod = importlib.import_module('.'.join(import_dir))
cls = getattr(mod, import_obj)
assert list(k) == cls.lc_id()
|
def test_serializable_mapping() ->None:
    """Every serializable langchain module maps 1:1 onto SERIALIZABLE_MAPPING."""
    serializable_modules = import_all_modules('langchain')
    # Both directions must be empty: nothing missing, nothing extra.
    assert set(SERIALIZABLE_MAPPING) - set(serializable_modules) == set()
    assert set(serializable_modules) - set(SERIALIZABLE_MAPPING) == set()
    # Each mapping key must equal the lc_id of the class it points at.
    for key, import_path in serializable_modules.items():
        *module_parts, class_name = import_path
        module = importlib.import_module('.'.join(module_parts))
        serializable_cls = getattr(module, class_name)
        assert list(key) == serializable_cls.lc_id()
| null |
validate_environment
|
"""Validate that iam token exists in environment."""
iam_token = get_from_dict_or_env(values, 'iam_token', 'YC_IAM_TOKEN', '')
values['iam_token'] = iam_token
api_key = get_from_dict_or_env(values, 'api_key', 'YC_API_KEY', '')
values['api_key'] = api_key
folder_id = get_from_dict_or_env(values, 'folder_id', 'YC_FOLDER_ID', '')
values['folder_id'] = folder_id
if api_key == '' and iam_token == '':
raise ValueError("Either 'YC_API_KEY' or 'YC_IAM_TOKEN' must be provided.")
if values['iam_token']:
values['_grpc_metadata'] = [('authorization',
f"Bearer {values['iam_token']}")]
if values['folder_id']:
values['_grpc_metadata'].append(('x-folder-id', values['folder_id']))
else:
values['_grpc_metadata'] = ('authorization', f"Api-Key {values['api_key']}"
),
if values['model_uri'] == '' and values['folder_id'] == '':
raise ValueError("Either 'model_uri' or 'folder_id' must be provided.")
if not values['model_uri']:
values['model_uri'] = (
f"gpt://{values['folder_id']}/{values['model_name']}/{values['model_version']}"
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that iam token exists in environment."""
    # Pull credentials from explicit values or env vars, defaulting to ''.
    iam_token = get_from_dict_or_env(values, 'iam_token', 'YC_IAM_TOKEN', '')
    values['iam_token'] = iam_token
    api_key = get_from_dict_or_env(values, 'api_key', 'YC_API_KEY', '')
    values['api_key'] = api_key
    folder_id = get_from_dict_or_env(values, 'folder_id', 'YC_FOLDER_ID', '')
    values['folder_id'] = folder_id
    # At least one credential is mandatory.
    if api_key == '' and iam_token == '':
        raise ValueError(
            "Either 'YC_API_KEY' or 'YC_IAM_TOKEN' must be provided.")
    if values['iam_token']:
        # IAM token wins over the API key when both are set.
        values['_grpc_metadata'] = [('authorization',
            f"Bearer {values['iam_token']}")]
        if values['folder_id']:
            values['_grpc_metadata'].append(('x-folder-id', values[
                'folder_id']))
    else:
        # NOTE(review): stored as a tuple-of-one pair here vs a list above,
        # and no x-folder-id header is attached in the API-key branch —
        # confirm downstream gRPC calls accept both shapes.
        values['_grpc_metadata'] = ('authorization',
            f"Api-Key {values['api_key']}"),
    if values['model_uri'] == '' and values['folder_id'] == '':
        raise ValueError("Either 'model_uri' or 'folder_id' must be provided.")
    # Derive the model URI from folder/model/version when not given explicitly.
    if not values['model_uri']:
        values['model_uri'] = (
            f"gpt://{values['folder_id']}/{values['model_name']}/{values['model_version']}"
            )
    return values
|
Validate that iam token exists in environment.
|
get_metadata
|
"""Get metadata for the given entry."""
publication = entry.get('journal') or entry.get('booktitle')
if 'url' in entry:
url = entry['url']
elif 'doi' in entry:
url = f"https://doi.org/{entry['doi']}"
else:
url = None
meta = {'id': entry.get('ID'), 'published_year': entry.get('year'), 'title':
entry.get('title'), 'publication': publication, 'authors': entry.get(
'author'), 'abstract': entry.get('abstract'), 'url': url}
if load_extra:
for field in OPTIONAL_FIELDS:
meta[field] = entry.get(field)
return {k: v for k, v in meta.items() if v is not None}
|
def get_metadata(self, entry: Mapping[str, Any], load_extra: bool=False
    ) ->Dict[str, Any]:
    """Get metadata for the given entry.

    Builds a metadata dict from common bibtex fields, preferring an explicit
    ``url`` over a DOI-derived link, and drops any field that is missing.
    """
    # Journal articles use 'journal'; conference papers use 'booktitle'.
    venue = entry.get('journal') or entry.get('booktitle')
    if 'url' in entry:
        link = entry['url']
    elif 'doi' in entry:
        link = f"https://doi.org/{entry['doi']}"
    else:
        link = None
    metadata = {'id': entry.get('ID'), 'published_year': entry.get('year'),
        'title': entry.get('title'), 'publication': venue, 'authors':
        entry.get('author'), 'abstract': entry.get('abstract'), 'url': link}
    if load_extra:
        for extra_field in OPTIONAL_FIELDS:
            metadata[extra_field] = entry.get(extra_field)
    return {key: value for key, value in metadata.items() if value is not None}
|
Get metadata for the given entry.
|
test_api_key_masked_when_passed_via_constructor
|
llm = Petals(huggingface_api_key='secret-api-key')
print(llm.huggingface_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """Printing the API key must show only the masked placeholder."""
    petals_llm = Petals(huggingface_api_key='secret-api-key')
    print(petals_llm.huggingface_api_key, end='')
    printed = capsys.readouterr()
    assert printed.out == '**********'
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """`__all__` must match the expected public API exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
test_missing_url_raises_validation_error
|
with self.assertRaises(ValueError) as cm:
RSpaceLoader(api_key=TestRSpaceLoader.api_key, global_id=
TestRSpaceLoader.global_id)
e = cm.exception
self.assertRegex(str(e), 'Did not find url')
|
def test_missing_url_raises_validation_error(self) ->None:
    """Constructing RSpaceLoader without a url must fail validation."""
    with self.assertRaises(ValueError) as raised:
        RSpaceLoader(api_key=TestRSpaceLoader.api_key, global_id=
            TestRSpaceLoader.global_id)
    self.assertRegex(str(raised.exception), 'Did not find url')
| null |
test_chat_google_raises_with_invalid_top_p
|
pytest.importorskip('google.generativeai')
with pytest.raises(ValueError) as e:
ChatGooglePalm(google_api_key='fake', top_p=2.0)
assert 'must be in the range' in str(e)
|
def test_chat_google_raises_with_invalid_top_p() ->None:
    """ChatGooglePalm must reject a top_p outside the valid range."""
    pytest.importorskip('google.generativeai')
    with pytest.raises(ValueError) as e:
        ChatGooglePalm(google_api_key='fake', top_p=2.0)
    # Check the raised exception's message via `.value`; `str(e)` on the
    # ExceptionInfo wrapper is not the exception message itself.
    assert 'must be in the range' in str(e.value)
| null |
test_from_texts
|
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayInMemorySearch.from_texts(texts, FakeEmbeddings())
assert isinstance(docsearch, DocArrayInMemorySearch)
assert docsearch.doc_index.num_docs() == 3
|
def test_from_texts(texts: List[str]) ->None:
    """Test end to end construction and simple similarity search."""
    store = DocArrayInMemorySearch.from_texts(texts, FakeEmbeddings())
    # Construction must yield the right type with one doc per input text.
    assert isinstance(store, DocArrayInMemorySearch)
    assert store.doc_index.num_docs() == 3
|
Test end to end construction and simple similarity search.
|
_on_chain_error
|
"""Process the Chain Run upon error."""
|
def _on_chain_error(self, run: Run) ->None:
    """Process the Chain Run upon error.

    No-op hook; subclasses may override to react to a failed chain run.
    """
|
Process the Chain Run upon error.
|
_import_office365_send_message
|
from langchain_community.tools.office365.send_message import O365SendMessage
return O365SendMessage
|
def _import_office365_send_message() ->Any:
    """Lazily import and return the O365SendMessage tool class."""
    from langchain_community.tools.office365.send_message import (
        O365SendMessage)
    return O365SendMessage
| null |
__hash__
|
return hash((self.id, self.annotation))
|
def __hash__(self) ->int:
    """Hash on the (id, annotation) pair, consistent with equality."""
    identity_key = self.id, self.annotation
    return hash(identity_key)
| null |
_evaluate_strings
|
"""Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): The LLM or chain prediction to evaluate.
reference (Optional[str], optional): The reference label to evaluate against.
input (Optional[str], optional): The input to consider during evaluation.
**kwargs: Additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
It is recommended that the dictionary contain the following keys:
- score: the score of the evaluation, if applicable.
- value: the string value of the evaluation, if applicable.
- reasoning: the reasoning for the evaluation, if applicable.
"""
|
@abstractmethod
def _evaluate_strings(self, *, prediction: Union[str, Any], reference:
    Optional[Union[str, Any]]=None, input: Optional[Union[str, Any]]=None,
    **kwargs: Any) ->dict:
    """Evaluate Chain or LLM output, based on optional input and label.

    Abstract: concrete evaluators must implement the actual scoring logic.
    Args:
        prediction (str): The LLM or chain prediction to evaluate.
        reference (Optional[str], optional): The reference label to evaluate against.
        input (Optional[str], optional): The input to consider during evaluation.
        **kwargs: Additional keyword arguments, including callbacks, tags, etc.
    Returns:
        dict: The evaluation results containing the score or value.
        It is recommended that the dictionary contain the following keys:
        - score: the score of the evaluation, if applicable.
        - value: the string value of the evaluation, if applicable.
        - reasoning: the reasoning for the evaluation, if applicable.
    """
|
Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): The LLM or chain prediction to evaluate.
reference (Optional[str], optional): The reference label to evaluate against.
input (Optional[str], optional): The input to consider during evaluation.
**kwargs: Additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
It is recommended that the dictionary contain the following keys:
- score: the score of the evaluation, if applicable.
- value: the string value of the evaluation, if applicable.
- reasoning: the reasoning for the evaluation, if applicable.
|
input_keys
|
"""Will be whatever keys the prompt expects.
:meta private:
"""
return self.prompt.input_variables
|
@property
def input_keys(self) ->List[str]:
    """Expose the prompt's input variables as this chain's input keys.

    :meta private:
    """
    prompt_variables = self.prompt.input_variables
    return prompt_variables
|
Will be whatever keys the prompt expects.
:meta private:
|
map
|
contexts = [copy_context() for _ in range(len(iterables[0]))]
def _wrapped_fn(*args: Any) ->T:
return contexts.pop().run(fn, *args)
return super().map(_wrapped_fn, *iterables, timeout=timeout, chunksize=
chunksize)
|
def map(self, fn: Callable[..., T], *iterables: Iterable[Any], timeout: (
    float | None)=None, chunksize: int=1) ->Iterator[T]:
    """Map `fn` over the iterables, running each call in a copy of the
    caller's context so contextvars propagate into worker threads."""
    captured_contexts = [copy_context() for _ in range(len(iterables[0]))]

    def _run_in_context(*call_args: Any) ->T:
        # Pop one captured context per invocation.
        return captured_contexts.pop().run(fn, *call_args)
    return super().map(_run_in_context, *iterables, timeout=timeout,
        chunksize=chunksize)
| null |
_get_default_output_parser
|
return MRKLOutputParser()
|
@classmethod
def _get_default_output_parser(cls, **kwargs: Any) ->AgentOutputParser:
    """Return the MRKL-style output parser used by default."""
    parser = MRKLOutputParser()
    return parser
| null |
load_messages
|
"""Retrieve the messages from Cosmos"""
if not self._container:
raise ValueError('Container not initialized')
try:
from azure.cosmos.exceptions import CosmosHttpResponseError
except ImportError as exc:
raise ImportError(
'You must install the azure-cosmos package to use the CosmosDBChatMessageHistory.Please install it with `pip install azure-cosmos`.'
) from exc
try:
item = self._container.read_item(item=self.session_id, partition_key=
self.user_id)
except CosmosHttpResponseError:
logger.info('no session found')
return
if 'messages' in item and len(item['messages']) > 0:
self.messages = messages_from_dict(item['messages'])
|
def load_messages(self) ->None:
    """Retrieve the messages from Cosmos.

    Reads the item keyed by ``session_id``/``user_id`` and, if it contains
    messages, deserializes them into ``self.messages``. A missing session is
    not an error: it is logged and the method returns with no change.
    """
    if not self._container:
        raise ValueError('Container not initialized')
    try:
        from azure.cosmos.exceptions import CosmosHttpResponseError
    except ImportError as exc:
        raise ImportError(
            'You must install the azure-cosmos package to use the CosmosDBChatMessageHistory.Please install it with `pip install azure-cosmos`.'
            ) from exc
    try:
        item = self._container.read_item(item=self.session_id,
            partition_key=self.user_id)
    except CosmosHttpResponseError:
        # No stored session for this id — leave current messages untouched.
        logger.info('no session found')
        return
    if 'messages' in item and len(item['messages']) > 0:
        self.messages = messages_from_dict(item['messages'])
|
Retrieve the messages from Cosmos
|
_extract_tokens_and_log_probs
|
tokens = []
log_probs = []
for gen in generations:
if gen.generation_info is None:
raise ValueError
tokens.extend(gen.generation_info['logprobs']['tokens'])
log_probs.extend(gen.generation_info['logprobs']['token_logprobs'])
return tokens, log_probs
|
def _extract_tokens_and_log_probs(self, generations: List[Generation]) ->Tuple[
    Sequence[str], Sequence[float]]:
    """Flatten tokens and their log-probabilities across generations.

    Args:
        generations: Generations whose ``generation_info`` must carry a
            ``logprobs`` mapping with ``tokens`` and ``token_logprobs``.

    Returns:
        A ``(tokens, log_probs)`` pair concatenated over all generations.

    Raises:
        ValueError: If a generation lacks ``generation_info`` (message added;
            the original raised a bare ``ValueError`` with no context).
    """
    tokens = []
    log_probs = []
    for gen in generations:
        if gen.generation_info is None:
            raise ValueError(
                'Generation has no generation_info; logprobs are required.')
        tokens.extend(gen.generation_info['logprobs']['tokens'])
        log_probs.extend(gen.generation_info['logprobs']['token_logprobs'])
    return tokens, log_probs
| null |
test_chat_google_genai_invoke_multimodal_too_many_messages
|
messages: list = [HumanMessage(content='Hi there'), AIMessage(content=
'Hi, how are you?'), HumanMessage(content=[{'type': 'text', 'text':
"I'm doing great! Guess what's in this picture!"}, {'type': 'image_url',
'image_url': 'data:image/png;base64,' + _B64_string}])]
llm = ChatGoogleGenerativeAI(model=_VISION_MODEL)
with pytest.raises(ChatGoogleGenerativeAIError):
llm.invoke(messages)
|
def test_chat_google_genai_invoke_multimodal_too_many_messages() ->None:
    """A multimodal history with prior turns must be rejected by the model."""
    multimodal_content = [{'type': 'text', 'text':
        "I'm doing great! Guess what's in this picture!"}, {'type':
        'image_url', 'image_url': 'data:image/png;base64,' + _B64_string}]
    messages: list = [HumanMessage(content='Hi there'), AIMessage(content=
        'Hi, how are you?'), HumanMessage(content=multimodal_content)]
    llm = ChatGoogleGenerativeAI(model=_VISION_MODEL)
    with pytest.raises(ChatGoogleGenerativeAIError):
        llm.invoke(messages)
| null |
_import_requests_tool_RequestsPutTool
|
from langchain_community.tools.requests.tool import RequestsPutTool
return RequestsPutTool
|
def _import_requests_tool_RequestsPutTool() ->Any:
    """Lazily import and return the RequestsPutTool class."""
    from langchain_community.tools.requests.tool import (
        RequestsPutTool)
    return RequestsPutTool
| null |
_agent_type
|
raise ValueError
|
@property
def _agent_type(self) ->str:
    """Serialization identifier; intentionally unsupported for this agent."""
    raise ValueError
| null |
on_tool_start
|
pass
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
    kwargs: Any) ->None:
    """Do nothing when a tool starts; hook kept for interface completeness."""
    pass
| null |
jinja2_formatter
|
"""Format a template using jinja2.
*Security warning*: As of LangChain 0.0.329, this method uses Jinja2's
SandboxedEnvironment by default. However, this sand-boxing should
be treated as a best-effort approach rather than a guarantee of security.
Do not accept jinja2 templates from untrusted sources as they may lead
to arbitrary Python code execution.
https://jinja.palletsprojects.com/en/3.1.x/sandbox/
"""
try:
from jinja2.sandbox import SandboxedEnvironment
except ImportError:
raise ImportError(
'jinja2 not installed, which is needed to use the jinja2_formatter. Please install it with `pip install jinja2`.Please be cautious when using jinja2 templates. Do not expand jinja2 templates using unverified or user-controlled inputs as that can result in arbitrary Python code execution.'
)
return SandboxedEnvironment().from_string(template).render(**kwargs)
|
def jinja2_formatter(template: str, **kwargs: Any) ->str:
    """Format a template using jinja2.
    *Security warning*: As of LangChain 0.0.329, this method uses Jinja2's
    SandboxedEnvironment by default. However, this sand-boxing should
    be treated as a best-effort approach rather than a guarantee of security.
    Do not accept jinja2 templates from untrusted sources as they may lead
    to arbitrary Python code execution.
    https://jinja.palletsprojects.com/en/3.1.x/sandbox/
    """
    try:
        from jinja2.sandbox import SandboxedEnvironment
    except ImportError:
        raise ImportError(
            'jinja2 not installed, which is needed to use the jinja2_formatter. Please install it with `pip install jinja2`.Please be cautious when using jinja2 templates. Do not expand jinja2 templates using unverified or user-controlled inputs as that can result in arbitrary Python code execution.'
            )
    # Render inside the sandbox so template expressions cannot reach
    # arbitrary Python attributes.
    sandbox = SandboxedEnvironment()
    compiled = sandbox.from_string(template)
    return compiled.render(**kwargs)
|
Format a template using jinja2.
*Security warning*: As of LangChain 0.0.329, this method uses Jinja2's
SandboxedEnvironment by default. However, this sand-boxing should
be treated as a best-effort approach rather than a guarantee of security.
Do not accept jinja2 templates from untrusted sources as they may lead
to arbitrary Python code execution.
https://jinja.palletsprojects.com/en/3.1.x/sandbox/
|
llm_chain
|
"""llm_chain is legacy name kept for backwards compatibility."""
return self.query_constructor
|
@property
def llm_chain(self) ->Runnable:
"""llm_chain is legacy name kept for backwards compatibility."""
return self.query_constructor
|
llm_chain is legacy name kept for backwards compatibility.
|
_batch
|
"""
splits Lists of text parts into batches of size max `self._batch_size`
When encoding vector database,
Args:
texts (List[str]): List of sentences
self._batch_size (int, optional): max batch size of one request.
Returns:
List[List[str]]: Batches of List of sentences
"""
if len(texts) == 1:
return [texts]
batches = []
for start_index in range(0, len(texts), self._batch_size):
batches.append(texts[start_index:start_index + self._batch_size])
return batches
|
def _batch(self, texts: List[str]) ->List[List[str]]:
    """
    splits Lists of text parts into batches of size max `self._batch_size`
    When encoding vector database,
    Args:
        texts (List[str]): List of sentences
        self._batch_size (int, optional): max batch size of one request.
    Returns:
        List[List[str]]: Batches of List of sentences
    """
    # The slicing loop already yields [texts] for a single element and []
    # for empty input, so the original len(texts) == 1 shortcut was
    # redundant and has been removed.
    return [texts[start:start + self._batch_size] for start in range(0,
        len(texts), self._batch_size)]
|
splits Lists of text parts into batches of size max `self._batch_size`
When encoding vector database,
Args:
texts (List[str]): List of sentences
self._batch_size (int, optional): max batch size of one request.
Returns:
List[List[str]]: Batches of List of sentences
|
_import_playwright_GetElementsTool
|
from langchain_community.tools.playwright import GetElementsTool
return GetElementsTool
|
def _import_playwright_GetElementsTool() ->Any:
    """Lazily import and return the GetElementsTool class."""
    from langchain_community.tools.playwright import (
        GetElementsTool)
    return GetElementsTool
| null |
_create_llm_result
|
"""Create the LLMResult from the choices and prompts."""
generations = []
for res in response:
results = res.get('results')
if results:
finish_reason = results[0].get('stop_reason')
gen = Generation(text=results[0].get('generated_text'),
generation_info={'finish_reason': finish_reason})
generations.append([gen])
final_token_usage = self._extract_token_usage(response)
llm_output = {'token_usage': final_token_usage, 'model_id': self.model_id,
'deployment_id': self.deployment_id}
return LLMResult(generations=generations, llm_output=llm_output)
|
def _create_llm_result(self, response: List[dict]) ->LLMResult:
    """Create the LLMResult from the choices and prompts."""
    generations = []
    for item in response:
        item_results = item.get('results')
        if not item_results:
            # Responses without results contribute no generation.
            continue
        first = item_results[0]
        generations.append([Generation(text=first.get('generated_text'),
            generation_info={'finish_reason': first.get('stop_reason')})])
    llm_output = {'token_usage': self._extract_token_usage(response),
        'model_id': self.model_id, 'deployment_id': self.deployment_id}
    return LLMResult(generations=generations, llm_output=llm_output)
|
Create the LLMResult from the choices and prompts.
|
test_anonymize_with_custom_operator
|
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioAnonymizer
custom_operator = {'PERSON': OperatorConfig('replace', {'new_value': 'NAME'})}
anonymizer = PresidioAnonymizer(operators=custom_operator)
text = 'Jane Doe was here.'
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == 'NAME was here.'
|
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_anonymize_with_custom_operator() ->None:
    """Test anonymize a name with a custom operator"""
    from presidio_anonymizer.entities import OperatorConfig
    from langchain_experimental.data_anonymizer import PresidioAnonymizer
    # Replace every detected PERSON entity with the literal string 'NAME'.
    operators = {'PERSON': OperatorConfig('replace', {'new_value': 'NAME'})}
    anonymizer = PresidioAnonymizer(operators=operators)
    result = anonymizer.anonymize('Jane Doe was here.')
    assert result == 'NAME was here.'
|
Test anonymize a name with a custom operator
|
_prepare_and_validate_batches
|
"""Prepares text batches with one-time validation of batch size.
Batch size varies between GCP regions and individual project quotas.
# Returns embeddings of the first text batch that went through,
# and text batches for the rest of the texts.
"""
from google.api_core.exceptions import InvalidArgument
batches = VertexAIEmbeddings._prepare_batches(texts, self.instance[
'batch_size'])
if len(batches[0]) <= self.instance['min_good_batch_size']:
return [], batches
with self.instance['lock']:
if self.instance['batch_size_validated']:
if len(batches[0]) <= self.instance['batch_size']:
return [], batches
else:
return [], VertexAIEmbeddings._prepare_batches(texts, self.
instance['batch_size'])
first_batch = batches[0]
first_result = []
had_failure = False
while True:
try:
first_result = self._get_embeddings_with_retry(first_batch,
embeddings_type)
break
except InvalidArgument:
had_failure = True
first_batch_len = len(first_batch)
if first_batch_len == self.instance['min_batch_size']:
raise
first_batch_len = max(self.instance['min_batch_size'], int(
first_batch_len / 2))
first_batch = first_batch[:first_batch_len]
first_batch_len = len(first_batch)
self.instance['min_good_batch_size'] = max(self.instance[
'min_good_batch_size'], first_batch_len)
if had_failure or first_batch_len == self.instance['max_batch_size']:
self.instance['batch_size'] = first_batch_len
self.instance['batch_size_validated'] = True
if first_batch_len != self.instance['max_batch_size']:
batches = VertexAIEmbeddings._prepare_batches(texts[
first_batch_len:], self.instance['batch_size'])
else:
batches = batches[1:]
return first_result, batches
|
def _prepare_and_validate_batches(self, texts: List[str], embeddings_type:
        Optional[str]=None) ->Tuple[List[List[float]], List[List[str]]]:
    """Prepares text batches with one-time validation of batch size.
    Batch size varies between GCP regions and individual project quotas.
    # Returns embeddings of the first text batch that went through,
    # and text batches for the rest of the texts.
    """
    # Imported lazily so the module loads without google-api-core installed.
    from google.api_core.exceptions import InvalidArgument
    batches = VertexAIEmbeddings._prepare_batches(texts, self.instance[
        'batch_size'])
    # Fast path: the first batch already fits within a size previously
    # accepted by the service, so no probing is required.
    if len(batches[0]) <= self.instance['min_good_batch_size']:
        return [], batches
    with self.instance['lock']:
        # Another thread may have completed validation while we waited
        # for the lock; if so, just (re-)split with the validated size.
        if self.instance['batch_size_validated']:
            if len(batches[0]) <= self.instance['batch_size']:
                return [], batches
            else:
                return [], VertexAIEmbeddings._prepare_batches(texts, self.
                    instance['batch_size'])
        first_batch = batches[0]
        first_result = []
        had_failure = False
        # Probe the service: send the first batch, halving its size on each
        # InvalidArgument until it is accepted (assumed to mean "batch too
        # large" — other causes surface via the re-raise at the floor).
        while True:
            try:
                first_result = self._get_embeddings_with_retry(first_batch,
                    embeddings_type)
                break
            except InvalidArgument:
                had_failure = True
                first_batch_len = len(first_batch)
                # Already at the minimum allowed size: the failure cannot be
                # fixed by shrinking further, so propagate it.
                if first_batch_len == self.instance['min_batch_size']:
                    raise
                first_batch_len = max(self.instance['min_batch_size'], int(
                    first_batch_len / 2))
                first_batch = first_batch[:first_batch_len]
        first_batch_len = len(first_batch)
        # Remember the largest size known to succeed (monotonically grows).
        self.instance['min_good_batch_size'] = max(self.instance[
            'min_good_batch_size'], first_batch_len)
        # Lock in the batch size only if we recovered from a failure or went
        # through at the maximum size; otherwise a larger batch might still
        # succeed on a later call, so validation stays open.
        if had_failure or first_batch_len == self.instance['max_batch_size']:
            self.instance['batch_size'] = first_batch_len
            self.instance['batch_size_validated'] = True
        if first_batch_len != self.instance['max_batch_size']:
            # The probe may have consumed fewer texts than batches[0] held;
            # re-batch everything after the consumed prefix.
            batches = VertexAIEmbeddings._prepare_batches(texts[
                first_batch_len:], self.instance['batch_size'])
        else:
            batches = batches[1:]
    # Embeddings for the probed first batch, plus the remaining text batches.
    return first_result, batches
|
Prepares text batches with one-time validation of batch size.
Batch size varies between GCP regions and individual project quotas.
# Returns embeddings of the first text batch that went through,
# and text batches for the rest of the texts.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.