method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_import_sql_database_tool_BaseSQLDatabaseTool
|
from langchain_community.tools.sql_database.tool import BaseSQLDatabaseTool
return BaseSQLDatabaseTool
|
def _import_sql_database_tool_BaseSQLDatabaseTool() ->Any:
from langchain_community.tools.sql_database.tool import BaseSQLDatabaseTool
return BaseSQLDatabaseTool
| null |
test_adding_document_already_exists
|
"""Test that error is raised if document id already exists."""
_dict = {'foo': Document(page_content='bar')}
docstore = InMemoryDocstore(_dict)
new_dict = {'foo': Document(page_content='foo')}
with pytest.raises(ValueError):
docstore.add(new_dict)
bar_output = docstore.search('foo')
assert isinstance(bar_output, Document)
assert bar_output.page_content == 'bar'
|
def test_adding_document_already_exists() ->None:
"""Test that error is raised if document id already exists."""
_dict = {'foo': Document(page_content='bar')}
docstore = InMemoryDocstore(_dict)
new_dict = {'foo': Document(page_content='foo')}
with pytest.raises(ValueError):
docstore.add(new_dict)
bar_output = docstore.search('foo')
assert isinstance(bar_output, Document)
assert bar_output.page_content == 'bar'
|
Test that error is raised if document id already exists.
|
test_ensemble_retriever_get_relevant_docs
|
doc_list = ['I like apples', 'I like oranges', 'Apples and oranges are fruits']
dummy_retriever = BM25Retriever.from_texts(doc_list)
dummy_retriever.k = 1
ensemble_retriever = EnsembleRetriever(retrievers=[dummy_retriever,
dummy_retriever])
docs = ensemble_retriever.get_relevant_documents('I like apples')
assert len(docs) == 1
|
@pytest.mark.requires('rank_bm25')
def test_ensemble_retriever_get_relevant_docs() ->None:
doc_list = ['I like apples', 'I like oranges',
'Apples and oranges are fruits']
dummy_retriever = BM25Retriever.from_texts(doc_list)
dummy_retriever.k = 1
ensemble_retriever = EnsembleRetriever(retrievers=[dummy_retriever,
dummy_retriever])
docs = ensemble_retriever.get_relevant_documents('I like apples')
assert len(docs) == 1
| null |
get_sql_model_class
|
"""Get the SQLAlchemy model class."""
raise NotImplementedError
|
@abstractmethod
def get_sql_model_class(self) ->Any:
"""Get the SQLAlchemy model class."""
raise NotImplementedError
|
Get the SQLAlchemy model class.
|
test_query_chain
|
"""
Test query chain correctly transforms
the LLM's text completion into a query-like object.
"""
query_chain = QueryChain.from_univariate_prompt(llm=self.fake_llm)
output = query_chain('how many pets does jan have? ')
expected_output = {'chain_answer': None, 'chain_data': QueryModel(
narrative_input='how many pets does jan have? ', llm_error_msg='',
expression="SELECT name, value FROM df WHERE name = 'jan'"),
'narrative_input': 'how many pets does jan have? '}
assert output == expected_output
|
def test_query_chain(self) ->None:
"""
Test query chain correctly transforms
the LLM's text completion into a query-like object.
"""
query_chain = QueryChain.from_univariate_prompt(llm=self.fake_llm)
output = query_chain('how many pets does jan have? ')
expected_output = {'chain_answer': None, 'chain_data': QueryModel(
narrative_input='how many pets does jan have? ', llm_error_msg='',
expression="SELECT name, value FROM df WHERE name = 'jan'"),
'narrative_input': 'how many pets does jan have? '}
assert output == expected_output
|
Test query chain correctly transforms
the LLM's text completion into a query-like object.
|
test_calls_convert_agent_action_to_messages
|
additional_kwargs1 = {'function_call': {'name': 'tool1', 'arguments': 'input1'}
}
message1 = AIMessage(content='', additional_kwargs=additional_kwargs1)
action1 = AgentActionMessageLog(tool='tool1', tool_input='input1', log=
'log1', message_log=[message1])
additional_kwargs2 = {'function_call': {'name': 'tool2', 'arguments': 'input2'}
}
message2 = AIMessage(content='', additional_kwargs=additional_kwargs2)
action2 = AgentActionMessageLog(tool='tool2', tool_input='input2', log=
'log2', message_log=[message2])
additional_kwargs3 = {'function_call': {'name': 'tool3', 'arguments': 'input3'}
}
message3 = AIMessage(content='', additional_kwargs=additional_kwargs3)
action3 = AgentActionMessageLog(tool='tool3', tool_input='input3', log=
'log3', message_log=[message3])
intermediate_steps = [(action1, 'observation1'), (action2, 'observation2'),
(action3, 'observation3')]
expected_messages = [message1, FunctionMessage(name='tool1', content=
'observation1'), message2, FunctionMessage(name='tool2', content=
'observation2'), message3, FunctionMessage(name='tool3', content=
'observation3')]
output = format_to_openai_function_messages(intermediate_steps)
assert output == expected_messages
|
def test_calls_convert_agent_action_to_messages() ->None:
additional_kwargs1 = {'function_call': {'name': 'tool1', 'arguments':
'input1'}}
message1 = AIMessage(content='', additional_kwargs=additional_kwargs1)
action1 = AgentActionMessageLog(tool='tool1', tool_input='input1', log=
'log1', message_log=[message1])
additional_kwargs2 = {'function_call': {'name': 'tool2', 'arguments':
'input2'}}
message2 = AIMessage(content='', additional_kwargs=additional_kwargs2)
action2 = AgentActionMessageLog(tool='tool2', tool_input='input2', log=
'log2', message_log=[message2])
additional_kwargs3 = {'function_call': {'name': 'tool3', 'arguments':
'input3'}}
message3 = AIMessage(content='', additional_kwargs=additional_kwargs3)
action3 = AgentActionMessageLog(tool='tool3', tool_input='input3', log=
'log3', message_log=[message3])
intermediate_steps = [(action1, 'observation1'), (action2,
'observation2'), (action3, 'observation3')]
expected_messages = [message1, FunctionMessage(name='tool1', content=
'observation1'), message2, FunctionMessage(name='tool2', content=
'observation2'), message3, FunctionMessage(name='tool3', content=
'observation3')]
output = format_to_openai_function_messages(intermediate_steps)
assert output == expected_messages
| null |
_llm_type
|
return 'orca-style'
|
@property
def _llm_type(self) ->str:
return 'orca-style'
| null |
_call
|
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.'
) from e
data: Dict[str, Any] = {'prompt': prompt, **self.params.dict() if self.
params else {}}
if (s := stop or (self.params.stop if self.params else None)):
data['stop'] = s
resp = mlflow.gateway.query(self.route, data=data)
return resp['candidates'][0]['text']
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.'
) from e
data: Dict[str, Any] = {'prompt': prompt, **self.params.dict() if self.
params else {}}
if (s := stop or (self.params.stop if self.params else None)):
data['stop'] = s
resp = mlflow.gateway.query(self.route, data=data)
return resp['candidates'][0]['text']
| null |
_import_zilliz
|
from langchain_community.vectorstores.zilliz import Zilliz
return Zilliz
|
def _import_zilliz() ->Any:
from langchain_community.vectorstores.zilliz import Zilliz
return Zilliz
| null |
_identifying_params
|
"""Get the identifying parameters."""
return self._default_params
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return self._default_params
|
Get the identifying parameters.
|
_get_embedding
|
np.random.seed(seed)
return list(np.random.normal(size=self.size))
|
def _get_embedding(self, seed: int) ->List[float]:
np.random.seed(seed)
return list(np.random.normal(size=self.size))
| null |
on_llm_start
|
self.on_llm_start_common()
|
def on_llm_start(self, *args: Any, **kwargs: Any) ->Any:
self.on_llm_start_common()
| null |
format_request_payload
|
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps({'input_data': {'input_string': [f'"{prompt}"'
]}, 'parameters': model_kwargs})
return str.encode(request_payload)
|
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes:
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps({'input_data': {'input_string': [
f'"{prompt}"']}, 'parameters': model_kwargs})
return str.encode(request_payload)
| null |
test_public_api
|
"""Test for regressions or changes in the public API."""
assert set(public_api) == set(_EXPECTED)
|
def test_public_api() ->None:
"""Test for regressions or changes in the public API."""
assert set(public_api) == set(_EXPECTED)
|
Test for regressions or changes in the public API.
|
_completion_with_retry
|
return _make_request(llm, **_kwargs)
|
@retry_decorator
def _completion_with_retry(**_kwargs: Any) ->Any:
return _make_request(llm, **_kwargs)
| null |
visit_comparison
|
field = f'metadata.{comparison.attribute}'
if comparison.comparator in [Comparator.LT, Comparator.LTE, Comparator.GT,
Comparator.GTE]:
return {'range': {field: {self._format_func(comparison.comparator):
comparison.value}}}
if comparison.comparator == Comparator.LIKE:
return {self._format_func(comparison.comparator): {field: {'value':
comparison.value}}}
field = f'{field}.keyword' if isinstance(comparison.value, str) else field
return {self._format_func(comparison.comparator): {field: comparison.value}}
|
def visit_comparison(self, comparison: Comparison) ->Dict:
field = f'metadata.{comparison.attribute}'
if comparison.comparator in [Comparator.LT, Comparator.LTE, Comparator.
GT, Comparator.GTE]:
return {'range': {field: {self._format_func(comparison.comparator):
comparison.value}}}
if comparison.comparator == Comparator.LIKE:
return {self._format_func(comparison.comparator): {field: {'value':
comparison.value}}}
field = f'{field}.keyword' if isinstance(comparison.value, str) else field
return {self._format_func(comparison.comparator): {field: comparison.value}
}
| null |
test_saving_loading_llm
|
"""Test saving/loading an AI21 LLM."""
llm = AI21(maxTokens=10)
llm.save(file_path=tmp_path / 'ai21.yaml')
loaded_llm = load_llm(tmp_path / 'ai21.yaml')
assert llm == loaded_llm
|
def test_saving_loading_llm(tmp_path: Path) ->None:
"""Test saving/loading an AI21 LLM."""
llm = AI21(maxTokens=10)
llm.save(file_path=tmp_path / 'ai21.yaml')
loaded_llm = load_llm(tmp_path / 'ai21.yaml')
assert llm == loaded_llm
|
Test saving/loading an AI21 LLM.
|
trim_query
|
"""Trim the query to only include Cypher keywords."""
keywords = ('CALL', 'CREATE', 'DELETE', 'DETACH', 'LIMIT', 'MATCH', 'MERGE',
'OPTIONAL', 'ORDER', 'REMOVE', 'RETURN', 'SET', 'SKIP', 'UNWIND',
'WITH', 'WHERE', '//')
lines = query.split('\n')
new_query = ''
for line in lines:
if line.strip().upper().startswith(keywords):
new_query += line + '\n'
return new_query
|
def trim_query(query: str) ->str:
"""Trim the query to only include Cypher keywords."""
keywords = ('CALL', 'CREATE', 'DELETE', 'DETACH', 'LIMIT', 'MATCH',
'MERGE', 'OPTIONAL', 'ORDER', 'REMOVE', 'RETURN', 'SET', 'SKIP',
'UNWIND', 'WITH', 'WHERE', '//')
lines = query.split('\n')
new_query = ''
for line in lines:
if line.strip().upper().startswith(keywords):
new_query += line + '\n'
return new_query
|
Trim the query to only include Cypher keywords.
|
_call
|
if self.sequential_responses:
response = self.queries[list(self.queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
else:
prompt = messages[0].content
return self.queries[prompt]
|
def _call(self, messages: List[BaseMessage], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
if self.sequential_responses:
response = self.queries[list(self.queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
else:
prompt = messages[0].content
return self.queries[prompt]
| null |
run
|
"""Get the current exchange rate for a specified currency pair."""
if to_currency not in self.standard_currencies:
from_currency, to_currency = to_currency, from_currency
data = self._get_exchange_rate(from_currency, to_currency)
return data['Realtime Currency Exchange Rate']
|
def run(self, from_currency: str, to_currency: str) ->str:
"""Get the current exchange rate for a specified currency pair."""
if to_currency not in self.standard_currencies:
from_currency, to_currency = to_currency, from_currency
data = self._get_exchange_rate(from_currency, to_currency)
return data['Realtime Currency Exchange Rate']
|
Get the current exchange rate for a specified currency pair.
|
get
|
"""Just return the specified output."""
return self.output
|
def get(self, url: str, **kwargs: Any) ->str:
"""Just return the specified output."""
return self.output
|
Just return the specified output.
|
_import_mosaicml
|
from langchain_community.llms.mosaicml import MosaicML
return MosaicML
|
def _import_mosaicml() ->Any:
from langchain_community.llms.mosaicml import MosaicML
return MosaicML
| null |
test_gradient_llm_sync
|
with patch.dict(sys.modules, {'gradientai': MockGradientaiPackage()}):
embedder = GradientEmbeddings(gradient_api_url=_GRADIENT_BASE_URL,
gradient_access_token=_GRADIENT_SECRET, gradient_workspace_id=
_GRADIENT_WORKSPACE_ID, model=_MODEL_ID)
assert embedder.gradient_access_token == _GRADIENT_SECRET
assert embedder.gradient_api_url == _GRADIENT_BASE_URL
assert embedder.gradient_workspace_id == _GRADIENT_WORKSPACE_ID
assert embedder.model == _MODEL_ID
response = embedder.embed_documents(_DOCUMENTS)
want = [[1.0, 0.0, 0.0], [1.0, 0.0, 0.1], [0.0, 0.9, 0.0], [1.0, 0.0,
0.1], [0.0, 0.9, 0.1]]
assert response == want
|
def test_gradient_llm_sync() ->None:
with patch.dict(sys.modules, {'gradientai': MockGradientaiPackage()}):
embedder = GradientEmbeddings(gradient_api_url=_GRADIENT_BASE_URL,
gradient_access_token=_GRADIENT_SECRET, gradient_workspace_id=
_GRADIENT_WORKSPACE_ID, model=_MODEL_ID)
assert embedder.gradient_access_token == _GRADIENT_SECRET
assert embedder.gradient_api_url == _GRADIENT_BASE_URL
assert embedder.gradient_workspace_id == _GRADIENT_WORKSPACE_ID
assert embedder.model == _MODEL_ID
response = embedder.embed_documents(_DOCUMENTS)
want = [[1.0, 0.0, 0.0], [1.0, 0.0, 0.1], [0.0, 0.9, 0.0], [1.0,
0.0, 0.1], [0.0, 0.9, 0.1]]
assert response == want
| null |
test_yaml_output_parser
|
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(pydantic_object
=TestModel)
model = yaml_parser.parse_folder(result)
print('parse_result:', result)
assert DEF_EXPECTED_RESULT == model
|
@pytest.mark.parametrize('result', [DEF_RESULT, DEF_RESULT_NO_BACKTICKS])
def test_yaml_output_parser(result: str) ->None:
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(pydantic_object
=TestModel)
model = yaml_parser.parse_folder(result)
print('parse_result:', result)
assert DEF_EXPECTED_RESULT == model
|
Test yamlOutputParser.
|
_prepare_metadata
|
"""
Prepare metadata for indexing in Redis by sanitizing its values.
- String, integer, and float values remain unchanged.
- None or empty values are replaced with empty strings.
- Lists/tuples of strings are joined into a single string with a comma separator.
Args:
metadata (Dict[str, Any]): A dictionary where keys are metadata
field names and values are the metadata values.
Returns:
Dict[str, Any]: A sanitized dictionary ready for indexing in Redis.
Raises:
ValueError: If any metadata value is not one of the known
types (string, int, float, or list of strings).
"""
def raise_error(key: str, value: Any) ->None:
raise ValueError(
f"Metadata value for key '{key}' must be a string, int, " +
f'float, or list of strings. Got {type(value).__name__}')
clean_meta: Dict[str, Union[str, float, int]] = {}
for key, value in metadata.items():
if value is None:
clean_meta[key] = ''
continue
if isinstance(value, (str, int, float)):
clean_meta[key] = value
elif isinstance(value, (list, tuple)):
if not value or isinstance(value[0], str):
clean_meta[key] = REDIS_TAG_SEPARATOR.join(value)
else:
raise_error(key, value)
else:
raise_error(key, value)
return clean_meta
|
def _prepare_metadata(metadata: Dict[str, Any]) ->Dict[str, Any]:
"""
Prepare metadata for indexing in Redis by sanitizing its values.
- String, integer, and float values remain unchanged.
- None or empty values are replaced with empty strings.
- Lists/tuples of strings are joined into a single string with a comma separator.
Args:
metadata (Dict[str, Any]): A dictionary where keys are metadata
field names and values are the metadata values.
Returns:
Dict[str, Any]: A sanitized dictionary ready for indexing in Redis.
Raises:
ValueError: If any metadata value is not one of the known
types (string, int, float, or list of strings).
"""
def raise_error(key: str, value: Any) ->None:
raise ValueError(
f"Metadata value for key '{key}' must be a string, int, " +
f'float, or list of strings. Got {type(value).__name__}')
clean_meta: Dict[str, Union[str, float, int]] = {}
for key, value in metadata.items():
if value is None:
clean_meta[key] = ''
continue
if isinstance(value, (str, int, float)):
clean_meta[key] = value
elif isinstance(value, (list, tuple)):
if not value or isinstance(value[0], str):
clean_meta[key] = REDIS_TAG_SEPARATOR.join(value)
else:
raise_error(key, value)
else:
raise_error(key, value)
return clean_meta
|
Prepare metadata for indexing in Redis by sanitizing its values.
- String, integer, and float values remain unchanged.
- None or empty values are replaced with empty strings.
- Lists/tuples of strings are joined into a single string with a comma separator.
Args:
metadata (Dict[str, Any]): A dictionary where keys are metadata
field names and values are the metadata values.
Returns:
Dict[str, Any]: A sanitized dictionary ready for indexing in Redis.
Raises:
ValueError: If any metadata value is not one of the known
types (string, int, float, or list of strings).
|
test_multi_input_errors
|
"""Test simple sequential errors if multiple input variables are expected."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=['baz'])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2])
|
def test_multi_input_errors() ->None:
"""Test simple sequential errors if multiple input variables are expected."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=[
'baz'])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2])
|
Test simple sequential errors if multiple input variables are expected.
|
_transform
|
final: Optional[Input] = None
for ichunk in input:
if final is None:
final = ichunk
else:
try:
final = final + ichunk
except TypeError:
final = ichunk
if inspect.isgeneratorfunction(self.func):
output: Optional[Output] = None
for chunk in call_func_with_variable_args(self.func, cast(Input, final),
config, run_manager, **kwargs):
yield chunk
if output is None:
output = chunk
else:
try:
output = output + chunk
except TypeError:
output = chunk
else:
output = call_func_with_variable_args(self.func, cast(Input, final),
config, run_manager, **kwargs)
if isinstance(output, Runnable):
recursion_limit = config['recursion_limit']
if recursion_limit <= 0:
raise RecursionError(
f'Recursion limit reached when invoking {self} with input {final}.'
)
for chunk in output.stream(final, patch_config(config, callbacks=
run_manager.get_child(), recursion_limit=recursion_limit - 1)):
yield chunk
elif not inspect.isgeneratorfunction(self.func):
yield cast(Output, output)
|
def _transform(self, input: Iterator[Input], run_manager:
CallbackManagerForChainRun, config: RunnableConfig, **kwargs: Any
) ->Iterator[Output]:
final: Optional[Input] = None
for ichunk in input:
if final is None:
final = ichunk
else:
try:
final = final + ichunk
except TypeError:
final = ichunk
if inspect.isgeneratorfunction(self.func):
output: Optional[Output] = None
for chunk in call_func_with_variable_args(self.func, cast(Input,
final), config, run_manager, **kwargs):
yield chunk
if output is None:
output = chunk
else:
try:
output = output + chunk
except TypeError:
output = chunk
else:
output = call_func_with_variable_args(self.func, cast(Input, final),
config, run_manager, **kwargs)
if isinstance(output, Runnable):
recursion_limit = config['recursion_limit']
if recursion_limit <= 0:
raise RecursionError(
f'Recursion limit reached when invoking {self} with input {final}.'
)
for chunk in output.stream(final, patch_config(config, callbacks=
run_manager.get_child(), recursion_limit=recursion_limit - 1)):
yield chunk
elif not inspect.isgeneratorfunction(self.func):
yield cast(Output, output)
| null |
seq_naive_rag
|
context = ['Hi there!', 'How are you?', "What's your name?"]
retriever = RunnableLambda(lambda x: context)
prompt = PromptTemplate.from_template('{context} {question}')
llm = FakeListLLM(responses=['hello'])
return Context.setter('input') | {'context': retriever | Context.setter(
'context'), 'question': RunnablePassthrough()
} | prompt | llm | StrOutputParser() | {'result': RunnablePassthrough(),
'context': Context.getter('context'), 'input': Context.getter('input')}
|
def seq_naive_rag() ->Runnable:
context = ['Hi there!', 'How are you?', "What's your name?"]
retriever = RunnableLambda(lambda x: context)
prompt = PromptTemplate.from_template('{context} {question}')
llm = FakeListLLM(responses=['hello'])
return Context.setter('input') | {'context': retriever | Context.setter
('context'), 'question': RunnablePassthrough()
} | prompt | llm | StrOutputParser() | {'result':
RunnablePassthrough(), 'context': Context.getter('context'),
'input': Context.getter('input')}
| null |
similarity_search_by_vector_with_relevance_scores
|
"""
Return docs most similar to embedding vector and similarity score.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
results = self.__query_collection(query_embeddings=embedding, n_results=k,
where=filter, where_document=where_document, **kwargs)
return _results_to_docs_and_scores(results)
|
def similarity_search_by_vector_with_relevance_scores(self, embedding: List
[float], k: int=DEFAULT_K, filter: Optional[Dict[str, str]]=None,
where_document: Optional[Dict[str, str]]=None, **kwargs: Any) ->List[Tuple
[Document, float]]:
"""
Return docs most similar to embedding vector and similarity score.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
results = self.__query_collection(query_embeddings=embedding, n_results
=k, where=filter, where_document=where_document, **kwargs)
return _results_to_docs_and_scores(results)
|
Return docs most similar to embedding vector and similarity score.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
|
delete
|
"""Delete the entities in the dataset.
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
**kwargs: Other keyword arguments that subclasses might use.
- filter (Optional[Dict[str, str]], optional): The filter to delete by.
- delete_all (Optional[bool], optional): Whether to drop the dataset.
Returns:
bool: Whether the delete operation was successful.
"""
filter = kwargs.get('filter')
delete_all = kwargs.get('delete_all')
self.vectorstore.delete(ids=ids, filter=filter, delete_all=delete_all)
return True
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->bool:
"""Delete the entities in the dataset.
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
**kwargs: Other keyword arguments that subclasses might use.
- filter (Optional[Dict[str, str]], optional): The filter to delete by.
- delete_all (Optional[bool], optional): Whether to drop the dataset.
Returns:
bool: Whether the delete operation was successful.
"""
filter = kwargs.get('filter')
delete_all = kwargs.get('delete_all')
self.vectorstore.delete(ids=ids, filter=filter, delete_all=delete_all)
return True
|
Delete the entities in the dataset.
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
**kwargs: Other keyword arguments that subclasses might use.
- filter (Optional[Dict[str, str]], optional): The filter to delete by.
- delete_all (Optional[bool], optional): Whether to drop the dataset.
Returns:
bool: Whether the delete operation was successful.
|
from_documents
|
"""Create an AwaDB vectorstore from a list of documents.
If a log_and_data_dir specified, the table will be persisted there.
Args:
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory to persist the table.
client (Optional[awadb.Client]): AwaDB client.
Any: Any possible parameters in the future
Returns:
AwaDB: AwaDB vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(texts=texts, embedding=embedding, metadatas=metadatas,
table_name=table_name, log_and_data_dir=log_and_data_dir, client=client)
|
@classmethod
def from_documents(cls: Type[AwaDB], documents: List[Document], embedding:
Optional[Embeddings]=None, table_name: str=_DEFAULT_TABLE_NAME,
log_and_data_dir: Optional[str]=None, client: Optional[awadb.Client]=
None, **kwargs: Any) ->AwaDB:
"""Create an AwaDB vectorstore from a list of documents.
If a log_and_data_dir specified, the table will be persisted there.
Args:
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory to persist the table.
client (Optional[awadb.Client]): AwaDB client.
Any: Any possible parameters in the future
Returns:
AwaDB: AwaDB vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(texts=texts, embedding=embedding, metadatas=
metadatas, table_name=table_name, log_and_data_dir=log_and_data_dir,
client=client)
|
Create an AwaDB vectorstore from a list of documents.
If a log_and_data_dir specified, the table will be persisted there.
Args:
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory to persist the table.
client (Optional[awadb.Client]): AwaDB client.
Any: Any possible parameters in the future
Returns:
AwaDB: AwaDB vectorstore.
|
test_resolve_criteria_enum
|
assert CriteriaEvalChain.resolve_criteria(criterion) == {criterion.value:
_SUPPORTED_CRITERIA[criterion]}
|
@pytest.mark.parametrize('criterion', list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) ->None:
assert CriteriaEvalChain.resolve_criteria(criterion) == {criterion.
value: _SUPPORTED_CRITERIA[criterion]}
| null |
_get_current_entities
|
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
|
def _get_current_entities(self, inputs: Dict[str, Any]) ->List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
|
Get the current entities in the conversation.
|
test_chroma_with_relevance_score
|
"""Test to make sure the relevance score is scaled to 0-1."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas,
collection_metadata={'hnsw:space': 'l2'})
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
1.0), (Document(page_content='bar', metadata={'page': '1'}), 0.8), (
Document(page_content='baz', metadata={'page': '2'}), 0.5)]
|
def test_chroma_with_relevance_score() ->None:
"""Test to make sure the relevance score is scaled to 0-1."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas,
collection_metadata={'hnsw:space': 'l2'})
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
1.0), (Document(page_content='bar', metadata={'page': '1'}), 0.8),
(Document(page_content='baz', metadata={'page': '2'}), 0.5)]
|
Test to make sure the relevance score is scaled to 0-1.
|
load
|
"""Load all documents."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""Load all documents."""
return list(self.lazy_load())
|
Load all documents.
|
clear
|
"""Clear memory contents."""
super().clear()
self.buffer = ''
|
def clear(self) ->None:
"""Clear memory contents."""
super().clear()
self.buffer = ''
|
Clear memory contents.
|
raise_deprecation
|
if 'llm' in values:
warnings.warn(
'Directly instantiating an LLMSummarizationCheckerChain with an llm is deprecated. Please instantiate with sequential_chain argument or using the from_llm class method.'
)
if 'sequential_chain' not in values and values['llm'] is not None:
values['sequential_chain'] = _load_sequential_chain(values['llm'],
values.get('create_assertions_prompt', CREATE_ASSERTIONS_PROMPT
), values.get('check_assertions_prompt',
CHECK_ASSERTIONS_PROMPT), values.get('revised_summary_prompt',
REVISED_SUMMARY_PROMPT), values.get('are_all_true_prompt',
ARE_ALL_TRUE_PROMPT), verbose=values.get('verbose', False))
return values
|
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) ->Dict:
if 'llm' in values:
warnings.warn(
'Directly instantiating an LLMSummarizationCheckerChain with an llm is deprecated. Please instantiate with sequential_chain argument or using the from_llm class method.'
)
if 'sequential_chain' not in values and values['llm'] is not None:
values['sequential_chain'] = _load_sequential_chain(values[
'llm'], values.get('create_assertions_prompt',
CREATE_ASSERTIONS_PROMPT), values.get(
'check_assertions_prompt', CHECK_ASSERTIONS_PROMPT), values
.get('revised_summary_prompt', REVISED_SUMMARY_PROMPT),
values.get('are_all_true_prompt', ARE_ALL_TRUE_PROMPT),
verbose=values.get('verbose', False))
return values
| null |
_load_prompt_from_file
|
"""Load prompt from file."""
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
if file_path.suffix == '.json':
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == '.yaml':
with open(file_path, 'r') as f:
config = yaml.safe_load(f)
elif file_path.suffix == '.py':
spec = importlib.util.spec_from_loader('prompt', loader=None, origin=
str(file_path))
if spec is None:
raise ValueError('could not load spec')
helper = importlib.util.module_from_spec(spec)
with open(file_path, 'rb') as f:
exec(f.read(), helper.__dict__)
if not isinstance(helper.PROMPT, BasePromptTemplate):
raise ValueError('Did not get object of type BasePromptTemplate.')
return helper.PROMPT
else:
raise ValueError(f'Got unsupported file type {file_path.suffix}')
return load_prompt_from_config(config)
|
def _load_prompt_from_file(file: Union[str, Path]) ->BasePromptTemplate:
    """Load a prompt template from a ``.json``, ``.yaml`` or ``.py`` file.

    JSON/YAML files are parsed into a config dict and handed to
    ``load_prompt_from_config``; a ``.py`` file is executed and must define
    a module-level ``PROMPT`` that is a ``BasePromptTemplate``.
    """
    file_path = Path(file) if isinstance(file, str) else file
    suffix = file_path.suffix
    if suffix == '.json':
        with open(file_path) as f:
            config = json.load(f)
    elif suffix == '.yaml':
        with open(file_path, 'r') as f:
            config = yaml.safe_load(f)
    elif suffix == '.py':
        spec = importlib.util.spec_from_loader('prompt', loader=None,
            origin=str(file_path))
        if spec is None:
            raise ValueError('could not load spec')
        helper = importlib.util.module_from_spec(spec)
        # NOTE: executes arbitrary code from the file; only use on trusted
        # prompt files.
        with open(file_path, 'rb') as f:
            exec(f.read(), helper.__dict__)
        if not isinstance(helper.PROMPT, BasePromptTemplate):
            raise ValueError('Did not get object of type BasePromptTemplate.')
        return helper.PROMPT
    else:
        raise ValueError(f'Got unsupported file type {suffix}')
    return load_prompt_from_config(config)
|
Load prompt from file.
|
test_api_key_masked_when_passed_from_env
|
monkeypatch.setenv('NEBULA_API_KEY', 'secret-api-key')
llm = Nebula()
print(llm.nebula_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_from_env(monkeypatch: MonkeyPatch,
    capsys: CaptureFixture) ->None:
    """An API key read from the environment must print as a masked string."""
    monkeypatch.setenv('NEBULA_API_KEY', 'secret-api-key')
    model = Nebula()
    print(model.nebula_api_key, end='')
    out = capsys.readouterr().out
    assert out == '**********'
| null |
test_tracer_llm_run_errors_no_start
|
"""Test tracer on an LLM run without a start."""
tracer = FakeTracer()
with pytest.raises(TracerException):
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid4())
|
@freeze_time('2023-01-01')
def test_tracer_llm_run_errors_no_start() ->None:
    """Ending an LLM run that was never started must raise TracerException."""
    tracer = FakeTracer()
    empty_result = LLMResult(generations=[[]])
    with pytest.raises(TracerException):
        tracer.on_llm_end(response=empty_result, run_id=uuid4())
|
Test tracer on an LLM run without a start.
|
__enter__
|
return self
|
def __enter__(self) ->'Tee[T]':
    """Enter the context manager, yielding this Tee instance unchanged."""
    return self
| null |
clear
|
"""Clear session memory from db"""
with self.Session() as session:
session.query(self.sql_model_class).filter(getattr(self.sql_model_class,
self.session_id_field_name) == self.session_id).delete()
session.commit()
|
def clear(self) ->None:
    """Clear session memory from db"""
    # Resolve the session-id column once, then delete every row for this
    # chat session within a single DB session/commit.
    session_column = getattr(self.sql_model_class, self.session_id_field_name)
    with self.Session() as session:
        session.query(self.sql_model_class).filter(session_column == self.
            session_id).delete()
        session.commit()
|
Clear session memory from db
|
_handle_status
|
if code >= 500:
raise Exception(f'DeepInfra Server: Error {code}')
elif code >= 400:
raise ValueError(f'DeepInfra received an invalid payload: {text}')
elif code != 200:
raise Exception(
f'DeepInfra returned an unexpected response with status {code}: {text}'
)
|
def _handle_status(self, code: int, text: Any) ->None:
    """Raise on any non-200 DeepInfra HTTP status; return None on success."""
    if code == 200:
        return
    if code >= 500:
        raise Exception(f'DeepInfra Server: Error {code}')
    if code >= 400:
        raise ValueError(f'DeepInfra received an invalid payload: {text}')
    # Anything else (1xx/3xx/other 2xx) is unexpected for this API.
    raise Exception(
        f'DeepInfra returned an unexpected response with status {code}: {text}'
        )
| null |
true
|
return True
|
def true(self) ->bool:
    """Unconditionally report ``True``."""
    return True
| null |
embed_query
|
"""Embed a query using a MiniMax embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = embed_with_retry(self, texts=[text], embed_type=self.
embed_type_query)
return embeddings[0]
|
def embed_query(self, text: str) ->List[float]:
    """Embed a query using a MiniMax embedding endpoint.
    Args:
        text: The text to embed.
    Returns:
        Embeddings for the text.
    """
    # The retry helper embeds a batch; we send a single-item batch and
    # unwrap the only result.
    return embed_with_retry(self, texts=[text], embed_type=self.
        embed_type_query)[0]
|
Embed a query using a MiniMax embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
test_anthropic_model_param
|
llm = ChatAnthropic(model='foo')
assert llm.model == 'foo'
|
@pytest.mark.requires('anthropic')
def test_anthropic_model_param() ->None:
    """The ``model`` constructor argument must be stored on the instance."""
    chat = ChatAnthropic(model='foo')
    assert chat.model == 'foo'
| null |
run
|
"""Run query through Golden Query API and return the JSON raw result."""
headers = {'apikey': self.golden_api_key or ''}
response = requests.post(f'{GOLDEN_BASE_URL}/api/v2/public/queries/', json=
{'prompt': query}, headers=headers, timeout=GOLDEN_TIMEOUT)
if response.status_code != 201:
return response.text
content = json.loads(response.content)
query_id = content['id']
response = requests.get(
f'{GOLDEN_BASE_URL}/api/v2/public/queries/{query_id}/results/?pageSize=10',
headers=headers, timeout=GOLDEN_TIMEOUT)
return response.text
|
def run(self, query: str) ->str:
    """Run query through Golden Query API and return the JSON raw result."""
    headers = {'apikey': self.golden_api_key or ''}
    # Submit the natural-language query; a 201 means it was accepted.
    create_resp = requests.post(f'{GOLDEN_BASE_URL}/api/v2/public/queries/',
        json={'prompt': query}, headers=headers, timeout=GOLDEN_TIMEOUT)
    if create_resp.status_code != 201:
        # On failure return the raw error body instead of raising.
        return create_resp.text
    query_id = json.loads(create_resp.content)['id']
    results_url = (
        f'{GOLDEN_BASE_URL}/api/v2/public/queries/{query_id}/results/?pageSize=10'
        )
    return requests.get(results_url, headers=headers, timeout=GOLDEN_TIMEOUT
        ).text
|
Run query through Golden Query API and return the JSON raw result.
|
delete_index
|
"""Deletes the index specified during instance construction if it exists"""
if self.index_exists():
self._collection.drop_index(self._index_name)
|
def delete_index(self) ->None:
    """Deletes the index specified during instance construction if it exists"""
    if not self.index_exists():
        return
    self._collection.drop_index(self._index_name)
|
Deletes the index specified during instance construction if it exists
|
get_session_history
|
if (user_id, conversation_id) not in store:
store[user_id, conversation_id] = ChatMessageHistory()
return store[user_id, conversation_id]
|
def get_session_history(user_id: str, conversation_id: str
    ) ->ChatMessageHistory:
    """Return the history for a user/conversation pair, creating it on first use."""
    key = user_id, conversation_id
    if key not in store:
        store[key] = ChatMessageHistory()
    return store[key]
| null |
test_default_base_prompt
|
"""Test that the default prompt is being inserted."""
tool = ZapierNLARunAction(action_id='test', zapier_description='test',
params_schema={'test': 'test'}, api_wrapper=ZapierNLAWrapper(
zapier_nla_api_key='test'))
assert tool.base_prompt == BASE_ZAPIER_TOOL_PROMPT
assert tool.description == BASE_ZAPIER_TOOL_PROMPT.format(zapier_description
='test', params=str(list({'test': 'test'}.keys())))
|
def test_default_base_prompt() ->None:
    """Test that the default prompt is being inserted."""
    params_schema = {'test': 'test'}
    tool = ZapierNLARunAction(action_id='test', zapier_description='test',
        params_schema=params_schema, api_wrapper=ZapierNLAWrapper(
        zapier_nla_api_key='test'))
    assert tool.base_prompt == BASE_ZAPIER_TOOL_PROMPT
    expected_description = BASE_ZAPIER_TOOL_PROMPT.format(zapier_description
        ='test', params=str(list(params_schema.keys())))
    assert tool.description == expected_description
|
Test that the default prompt is being inserted.
|
load
|
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding='utf8') as f:
d = json.load(f)
text = ''.join(concatenate_rows(message) for message in d['messages'] if
message.get('content') and isinstance(message['content'], str))
metadata = {'source': str(p)}
return [Document(page_content=text, metadata=metadata)]
|
def load(self) ->List[Document]:
    """Load documents."""
    path = Path(self.file_path)
    with open(path, encoding='utf8') as f:
        data = json.load(f)
    # Keep only messages whose 'content' is a non-empty string, then join
    # their formatted rows into a single document.
    rows = (m for m in data['messages'] if m.get('content') and isinstance(
        m['content'], str))
    text = ''.join(concatenate_rows(m) for m in rows)
    return [Document(page_content=text, metadata={'source': str(path)})]
|
Load documents.
|
embeddings
|
return self.embedding
|
@property
def embeddings(self) ->Embeddings:
    """Expose the wrapped embedding object."""
    return self.embedding
| null |
_import_llamacpp
|
from langchain_community.llms.llamacpp import LlamaCpp
return LlamaCpp
|
def _import_llamacpp() ->Any:
    """Lazily import and return the ``LlamaCpp`` LLM class."""
    from langchain_community.llms.llamacpp import LlamaCpp
    return LlamaCpp
| null |
from_connection_string
|
"""Creates an Instance of AzureCosmosDBVectorSearch from a Connection String
Args:
connection_string: The MongoDB vCore instance connection string
namespace: The namespace (database.collection)
embedding: The embedding utility
**kwargs: Dynamic keyword arguments
Returns:
an instance of the vector store
"""
try:
from pymongo import MongoClient
except ImportError:
raise ImportError(
'Could not import pymongo, please install it with `pip install pymongo`.'
)
client: MongoClient = MongoClient(connection_string)
db_name, collection_name = namespace.split('.')
collection = client[db_name][collection_name]
return cls(collection, embedding, **kwargs)
|
@classmethod
def from_connection_string(cls, connection_string: str, namespace: str,
    embedding: Embeddings, **kwargs: Any) ->AzureCosmosDBVectorSearch:
    """Creates an Instance of AzureCosmosDBVectorSearch from a Connection String
    Args:
        connection_string: The MongoDB vCore instance connection string
        namespace: The namespace (database.collection)
        embedding: The embedding utility
        **kwargs: Dynamic keyword arguments
    Returns:
        an instance of the vector store
    """
    try:
        from pymongo import MongoClient
    except ImportError:
        raise ImportError(
            'Could not import pymongo, please install it with `pip install pymongo`.'
            )
    mongo_client: MongoClient = MongoClient(connection_string)
    # namespace is "<database>.<collection>"
    db_name, collection_name = namespace.split('.')
    target_collection = mongo_client[db_name][collection_name]
    return cls(target_collection, embedding, **kwargs)
|
Creates an Instance of AzureCosmosDBVectorSearch from a Connection String
Args:
connection_string: The MongoDB vCore instance connection string
namespace: The namespace (database.collection)
embedding: The embedding utility
**kwargs: Dynamic keyword arguments
Returns:
an instance of the vector store
|
test_chat_prompt_template_from_messages_using_role_strings
|
"""Test creating a chat prompt template from role string messages."""
template = ChatPromptTemplate.from_messages([('system',
'You are a helpful AI bot. Your name is {name}.'), ('human',
'Hello, how are you doing?'), ('ai', "I'm doing well, thanks!"), (
'human', '{user_input}')])
messages = template.format_messages(name='Bob', user_input='What is your name?'
)
assert messages == [SystemMessage(content=
'You are a helpful AI bot. Your name is Bob.', additional_kwargs={}),
HumanMessage(content='Hello, how are you doing?', additional_kwargs={},
example=False), AIMessage(content="I'm doing well, thanks!",
additional_kwargs={}, example=False), HumanMessage(content=
'What is your name?', additional_kwargs={}, example=False)]
|
def test_chat_prompt_template_from_messages_using_role_strings() ->None:
    """Test creating a chat prompt template from role string messages."""
    template = ChatPromptTemplate.from_messages([('system',
        'You are a helpful AI bot. Your name is {name}.'), ('human',
        'Hello, how are you doing?'), ('ai', "I'm doing well, thanks!"), (
        'human', '{user_input}')])
    actual = template.format_messages(name='Bob', user_input=
        'What is your name?')
    expected = [SystemMessage(content=
        'You are a helpful AI bot. Your name is Bob.', additional_kwargs={}
        ), HumanMessage(content='Hello, how are you doing?',
        additional_kwargs={}, example=False), AIMessage(content=
        "I'm doing well, thanks!", additional_kwargs={}, example=False),
        HumanMessage(content='What is your name?', additional_kwargs={},
        example=False)]
    assert actual == expected
|
Test creating a chat prompt template from role string messages.
|
messages_from_dict
|
"""Convert a sequence of messages from dicts to Message objects.
Args:
messages: Sequence of messages (as dicts) to convert.
Returns:
List of messages (BaseMessages).
"""
return [_message_from_dict(m) for m in messages]
|
def messages_from_dict(messages: Sequence[dict]) ->List[BaseMessage]:
    """Convert a sequence of messages from dicts to Message objects.
    Args:
        messages: Sequence of messages (as dicts) to convert.
    Returns:
        List of messages (BaseMessages).
    """
    return list(map(_message_from_dict, messages))
|
Convert a sequence of messages from dicts to Message objects.
Args:
messages: Sequence of messages (as dicts) to convert.
Returns:
List of messages (BaseMessages).
|
_call_after_predict_before_llm
|
...
|
@abstractmethod
def _call_after_predict_before_llm(self, inputs: Dict[str, Any], event:
    TEvent, prediction: Any) ->Tuple[Dict[str, Any], TEvent]:
    """Hook invoked after the policy prediction, before the LLM call.

    Implementations return the (possibly updated) inputs and event.
    """
    ...
| null |
test_visit_comparison
|
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=4)
expected = '( foo < 4 )'
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison() ->None:
    """A less-than comparison must render as an infix ``<`` expression."""
    comparison = Comparison(comparator=Comparator.LT, attribute='foo', value=4)
    assert DEFAULT_TRANSLATOR.visit_comparison(comparison) == '( foo < 4 )'
| null |
validate_environment
|
"""Validate that api key exists in environment."""
ai21_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'ai21_api_key', 'AI21_API_KEY'))
values['ai21_api_key'] = ai21_api_key
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key exists in environment."""
    raw_key = get_from_dict_or_env(values, 'ai21_api_key', 'AI21_API_KEY')
    # Wrap as a SecretStr so the key is masked when printed/logged.
    values['ai21_api_key'] = convert_to_secret_str(raw_key)
    return values
|
Validate that api key exists in environment.
|
from_texts
|
azure_search = cls(azure_search_endpoint, azure_search_key, index_name,
embedding.embed_query)
azure_search.add_texts(texts, metadatas, **kwargs)
return azure_search
|
@classmethod
def from_texts(cls: Type[AzureSearch], texts: List[str], embedding:
    Embeddings, metadatas: Optional[List[dict]]=None, azure_search_endpoint:
    str='', azure_search_key: str='', index_name: str='langchain-index', **
    kwargs: Any) ->AzureSearch:
    """Build an AzureSearch store and index the given texts into it."""
    store = cls(azure_search_endpoint, azure_search_key, index_name,
        embedding.embed_query)
    store.add_texts(texts, metadatas, **kwargs)
    return store
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model_id': self.model_id, 'writer_org_id': self.writer_org_id},
**self._default_params}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    # Default params are merged last, matching the original dict-merge order.
    params: Dict[str, Any] = {'model_id': self.model_id, 'writer_org_id':
        self.writer_org_id}
    params.update(self._default_params)
    return params
|
Get the identifying parameters.
|
on_agent_action
|
"""Run on agent action."""
self.metrics['step'] += 1
self.metrics['tool_starts'] += 1
self.metrics['starts'] += 1
tool_starts = self.metrics['tool_starts']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input':
action.tool_input, 'log': action.log})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
self.records['on_agent_action_records'].append(resp)
self.records['action_records'].append(resp)
self.mlflg.jsonf(resp, f'agent_action_{tool_starts}')
|
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any:
    """Run on agent action."""
    # An agent action counts as a step, a tool start, and a generic start.
    for counter in ('step', 'tool_starts', 'starts'):
        self.metrics[counter] += 1
    resp: Dict[str, Any] = {'action': 'on_agent_action', 'tool': action.
        tool, 'tool_input': action.tool_input, 'log': action.log}
    resp.update(self.metrics)
    self.mlflg.metrics(self.metrics, step=self.metrics['step'])
    self.records['on_agent_action_records'].append(resp)
    self.records['action_records'].append(resp)
    self.mlflg.jsonf(resp, f"agent_action_{self.metrics['tool_starts']}")
|
Run on agent action.
|
on_llm_new_token
|
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({'action': 'on_llm_new_token', 'token': token})
resp.update(self.get_custom_callback_meta())
self.on_llm_token_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.run.log(resp)
|
def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
    """Run when LLM generates a new token."""
    self.step += 1
    self.llm_streams += 1
    payload = self._init_resp()
    payload.update({'action': 'on_llm_new_token', 'token': token})
    payload.update(self.get_custom_callback_meta())
    self.on_llm_token_records.append(payload)
    self.action_records.append(payload)
    # Per-token logging is opt-in; it can be very chatty.
    if self.stream_logs:
        self.run.log(payload)
|
Run when LLM generates a new token.
|
flush_tracker
|
"""Reset the steps and delete the temporary local directory."""
self._reset()
shutil.rmtree(self.temp_dir)
|
def flush_tracker(self) ->None:
    """Reset the steps and delete the temporary local directory."""
    self._reset()
    # Removes self.temp_dir recursively; presumably it is recreated by the
    # callback before further logging — confirm with the caller.
    shutil.rmtree(self.temp_dir)
|
Reset the steps and delete the temporary local directory.
|
test_markdown_code_splitter
|
splitter = RecursiveCharacterTextSplitter.from_language(Language.MARKDOWN,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
# Sample Document
## Section
This is the content of the section.
## Lists
- Item 1
- Item 2
- Item 3
### Horizontal lines
***********
____________
-------------------
#### Code blocks
```
This is a code block
# sample code
a = 1
b = 2
```
"""
chunks = splitter.split_text(code)
assert chunks == ['# Sample', 'Document', '## Section', 'This is the',
'content of the', 'section.', '## Lists', '- Item 1', '- Item 2',
'- Item 3', '### Horizontal', 'lines', '***********', '____________',
'---------------', '----', '#### Code', 'blocks', '```',
'This is a code', 'block', '# sample code', 'a = 1\nb = 2', '```']
code = """harry
***
babylon is"""
chunks = splitter.split_text(code)
assert chunks == ['harry', '***\nbabylon is']
|
def test_markdown_code_splitter() ->None:
    """Markdown-aware splitting honors headers, rules, and code fences."""
    splitter = RecursiveCharacterTextSplitter.from_language(Language.
        MARKDOWN, chunk_size=CHUNK_SIZE, chunk_overlap=0)
    code = """
# Sample Document
## Section
This is the content of the section.
## Lists
- Item 1
- Item 2
- Item 3
### Horizontal lines
***********
____________
-------------------
#### Code blocks
```
This is a code block
# sample code
a = 1
b = 2
```
"""
    chunks = splitter.split_text(code)
    # Chunks reflect CHUNK_SIZE-bounded splits along markdown separators.
    assert chunks == ['# Sample', 'Document', '## Section', 'This is the',
        'content of the', 'section.', '## Lists', '- Item 1', '- Item 2',
        '- Item 3', '### Horizontal', 'lines', '***********',
        '____________', '---------------', '----', '#### Code', 'blocks',
        '```', 'This is a code', 'block', '# sample code', 'a = 1\nb = 2',
        '```']
    # A horizontal rule splits before it, keeping the rule with what follows.
    code = 'harry\n***\nbabylon is'
    chunks = splitter.split_text(code)
    assert chunks == ['harry', '***\nbabylon is']
| null |
_get_prefixed_key
|
"""Get the key with the namespace prefix.
Args:
key (str): The original key.
Returns:
str: The key with the namespace prefix.
"""
delimiter = '/'
if self.namespace:
return f'{self.namespace}{delimiter}{key}'
return key
|
def _get_prefixed_key(self, key: str) ->str:
    """Get the key with the namespace prefix.
    Args:
        key (str): The original key.
    Returns:
        str: The key with the namespace prefix.
    """
    # An empty/None namespace means keys are stored unprefixed.
    if not self.namespace:
        return key
    return f'{self.namespace}/{key}'
|
Get the key with the namespace prefix.
Args:
key (str): The original key.
Returns:
str: The key with the namespace prefix.
|
_import_powerbi
|
from langchain_community.utilities.powerbi import PowerBIDataset
return PowerBIDataset
|
def _import_powerbi() ->Any:
    """Lazily import and return the ``PowerBIDataset`` utility class."""
    from langchain_community.utilities.powerbi import PowerBIDataset
    return PowerBIDataset
| null |
get_num_tokens
|
"""Return number of tokens."""
return len(text.split())
|
def get_num_tokens(self, text: str) ->int:
    """Return number of tokens, approximated as whitespace-separated words."""
    words = text.split()
    return len(words)
|
Return number of tokens.
|
_generate
|
"""Call out to an qianfan models endpoint for each generation with a prompt.
Args:
messages: The messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model("Tell me a joke.")
"""
if self.streaming:
completion = ''
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(message=lc_msg, generation_info=dict(finish_reason
='stop'))
return ChatResult(generations=[gen], llm_output={'token_usage': {},
'model_name': self.model})
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = self.client.do(**params)
lc_msg = _convert_dict_to_message(response_payload)
gen = ChatGeneration(message=lc_msg, generation_info={'finish_reason':
'stop', **response_payload.get('body', {})})
token_usage = response_payload.get('usage', {})
llm_output = {'token_usage': token_usage, 'model_name': self.model}
return ChatResult(generations=[gen], llm_output=llm_output)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Call out to an qianfan models endpoint for each generation with a prompt.
    Args:
        messages: The messages to pass into the model.
        stop: Optional list of stop words to use when generating.
    Returns:
        The string generated by the model.
    Example:
        .. code-block:: python
            response = qianfan_model("Tell me a joke.")
    """
    if self.streaming:
        # Streaming path: accumulate chunk texts into one AI message.
        # NOTE: token usage is not available when streaming, hence the
        # empty 'token_usage' dict below.
        completion = ''
        for chunk in self._stream(messages, stop, run_manager, **kwargs):
            completion += chunk.text
        lc_msg = AIMessage(content=completion, additional_kwargs={})
        gen = ChatGeneration(message=lc_msg, generation_info=dict(
            finish_reason='stop'))
        return ChatResult(generations=[gen], llm_output={'token_usage': {},
            'model_name': self.model})
    # Non-streaming path: single synchronous call to the qianfan client.
    params = self._convert_prompt_msg_params(messages, **kwargs)
    response_payload = self.client.do(**params)
    lc_msg = _convert_dict_to_message(response_payload)
    # Merge the raw response body into generation_info for callers that
    # need provider-specific fields.
    gen = ChatGeneration(message=lc_msg, generation_info={'finish_reason':
        'stop', **response_payload.get('body', {})})
    token_usage = response_payload.get('usage', {})
    llm_output = {'token_usage': token_usage, 'model_name': self.model}
    return ChatResult(generations=[gen], llm_output=llm_output)
|
Call out to an qianfan models endpoint for each generation with a prompt.
Args:
messages: The messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model("Tell me a joke.")
|
_chat_message_history
|
from cassandra.cluster import Cluster
keyspace = 'cmh_test_keyspace'
table_name = 'cmh_test_table'
if 'CASSANDRA_CONTACT_POINTS' in os.environ:
contact_points = os.environ['CONTACT_POINTS'].split(',')
cluster = Cluster(contact_points)
else:
cluster = Cluster()
session = cluster.connect()
session.execute(
f"CREATE KEYSPACE IF NOT EXISTS {keyspace} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}"
)
if drop:
session.execute(f'DROP TABLE IF EXISTS {keyspace}.{table_name}')
return CassandraChatMessageHistory(session_id=session_id, session=session,
keyspace=keyspace, table_name=table_name, **{} if ttl_seconds is None else
{'ttl_seconds': ttl_seconds})
|
def _chat_message_history(session_id: str='test-session', drop: bool=True,
    ttl_seconds: Optional[int]=None) ->CassandraChatMessageHistory:
    """Create a ``CassandraChatMessageHistory`` against a test keyspace.

    Args:
        session_id: chat-session identifier stored with each message.
        drop: when True, drop any pre-existing test table first.
        ttl_seconds: optional row TTL forwarded to the history object.

    Returns:
        A history object bound to the (possibly freshly dropped) test table.
    """
    from cassandra.cluster import Cluster
    keyspace = 'cmh_test_keyspace'
    table_name = 'cmh_test_table'
    if 'CASSANDRA_CONTACT_POINTS' in os.environ:
        # BUGFIX: previously this read os.environ['CONTACT_POINTS'] while
        # testing for 'CASSANDRA_CONTACT_POINTS', raising KeyError whenever
        # only CASSANDRA_CONTACT_POINTS was set. Read the tested variable.
        contact_points = os.environ['CASSANDRA_CONTACT_POINTS'].split(',')
        cluster = Cluster(contact_points)
    else:
        cluster = Cluster()
    session = cluster.connect()
    session.execute(
        f"CREATE KEYSPACE IF NOT EXISTS {keyspace} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}"
        )
    if drop:
        session.execute(f'DROP TABLE IF EXISTS {keyspace}.{table_name}')
    # Only pass ttl_seconds through when explicitly provided.
    return CassandraChatMessageHistory(session_id=session_id, session=
        session, keyspace=keyspace, table_name=table_name, **{} if
        ttl_seconds is None else {'ttl_seconds': ttl_seconds})
| null |
__init__
|
"""Initialize with LLMRails API."""
self._datastore_id = datastore_id or os.environ.get('LLM_RAILS_DATASTORE_ID')
self._api_key = api_key or os.environ.get('LLM_RAILS_API_KEY')
if self._api_key is None:
logging.warning("Can't find Rails credentials in environment.")
self._session = requests.Session()
self.datastore_id = datastore_id
self.base_url = 'https://api.llmrails.com/v1'
|
def __init__(self, datastore_id: Optional[str]=None, api_key: Optional[str]
    =None):
    """Initialize with LLMRails API.

    Args:
        datastore_id: target datastore; falls back to LLM_RAILS_DATASTORE_ID.
        api_key: API key; falls back to LLM_RAILS_API_KEY.
    """
    self._datastore_id = datastore_id or os.environ.get(
        'LLM_RAILS_DATASTORE_ID')
    self._api_key = api_key or os.environ.get('LLM_RAILS_API_KEY')
    if self._api_key is None:
        logging.warning("Can't find Rails credentials in environment.")
    self._session = requests.Session()
    # NOTE(review): unlike _datastore_id above, this public attribute does
    # NOT fall back to the environment variable, so it stays None when the
    # id came from the environment — confirm whether that is intended.
    self.datastore_id = datastore_id
    self.base_url = 'https://api.llmrails.com/v1'
|
Initialize with LLMRails API.
|
on_tool_error
|
"""Run when tool errors."""
self.step += 1
self.errors += 1
|
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when tool errors."""
    # Only counters are updated; the error object itself is not recorded.
    self.step += 1
    self.errors += 1
|
Run when tool errors.
|
test_gptcache_caching
|
"""Test gptcache default caching behavior."""
set_llm_cache(GPTCache(init_func))
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
_ = llm.generate(['foo', 'bar', 'foo'])
cache_output = get_llm_cache().lookup('foo', llm_string)
assert cache_output == [Generation(text='fizz')]
get_llm_cache().clear()
assert get_llm_cache().lookup('bar', llm_string) is None
|
@pytest.mark.skipif(not gptcache_installed, reason='gptcache not installed')
@pytest.mark.parametrize('init_func', [None, init_gptcache_map,
    init_gptcache_map_with_llm])
def test_gptcache_caching(init_func: Union[Callable[[Any, str], None],
    Callable[[Any], None], None]) ->None:
    """Test gptcache default caching behavior."""
    set_llm_cache(GPTCache(init_func))
    llm = FakeLLM()
    params = llm.dict()
    params['stop'] = None
    # The cache key includes the serialized, sorted LLM params.
    llm_string = str(sorted([(k, v) for k, v in params.items()]))
    get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
    _ = llm.generate(['foo', 'bar', 'foo'])
    # 'foo' was pre-seeded, so the cached generation must be returned.
    cache_output = get_llm_cache().lookup('foo', llm_string)
    assert cache_output == [Generation(text='fizz')]
    # After clearing, nothing (including 'bar') remains cached.
    get_llm_cache().clear()
    assert get_llm_cache().lookup('bar', llm_string) is None
|
Test gptcache default caching behavior.
|
test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb
|
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
model=MockEncoder())
str1 = '0'
str2 = '1'
str3 = '2'
encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1))
encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3))
ctx_str_1 = 'context1'
ctx_str_2 = 'context2'
encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword +
ctx_str_1))
encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword +
ctx_str_2))
named_actions = {'action1': rl_chain.Embed([{'a': str1, 'b': str1}, str2,
str3])}
context = {'context1': rl_chain.Embed(ctx_str_1), 'context2': rl_chain.
Embed(ctx_str_2)}
expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2}
0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1}
|action1 {encoded_str2}
|action1 {encoded_str3} """
selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0
)
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context, selected=selected)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(
    ) ->None:
    """Embedded actions/contexts with a full label render the expected VW text."""
    feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
        False, model=MockEncoder())
    str1 = '0'
    str2 = '1'
    str3 = '2'
    encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1))
    encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2))
    encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3))
    ctx_str_1 = 'context1'
    ctx_str_2 = 'context2'
    encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword +
        ctx_str_1))
    encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword +
        ctx_str_2))
    named_actions = {'action1': rl_chain.Embed([{'a': str1, 'b': str1},
        str2, str3])}
    context = {'context1': rl_chain.Embed(ctx_str_1), 'context2': rl_chain.
        Embed(ctx_str_2)}
    # Expected VW example: shared context line, then one line per action;
    # the selected action (index 0) carries the cost:probability label.
    expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2}
0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1} 
|action1 {encoded_str2} 
|action1 {encoded_str3} """
    selected = pick_best_chain.PickBestSelected(index=0, probability=1.0,
        score=0.0)
    event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
        named_actions, based_on=context, selected=selected)
    vw_ex_str = feature_embedder.format(event)
    assert vw_ex_str == expected
| null |
similarity_search_by_vector
|
"""Look up similar documents by embedding vector in Weaviate."""
vector = {'vector': embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get('where_filter'):
query_obj = query_obj.with_where(kwargs.get('where_filter'))
if kwargs.get('tenant'):
query_obj = query_obj.with_tenant(kwargs.get('tenant'))
if kwargs.get('additional'):
query_obj = query_obj.with_additional(kwargs.get('additional'))
result = query_obj.with_near_vector(vector).with_limit(k).do()
if 'errors' in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result['data']['Get'][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4, **
    kwargs: Any) ->List[Document]:
    """Look up similar documents by embedding vector in Weaviate."""
    query_obj = self._client.query.get(self._index_name, self._query_attrs)
    # Optional builder refinements, applied only when supplied by caller.
    if kwargs.get('where_filter'):
        query_obj = query_obj.with_where(kwargs.get('where_filter'))
    if kwargs.get('tenant'):
        query_obj = query_obj.with_tenant(kwargs.get('tenant'))
    if kwargs.get('additional'):
        query_obj = query_obj.with_additional(kwargs.get('additional'))
    result = query_obj.with_near_vector({'vector': embedding}).with_limit(k
        ).do()
    if 'errors' in result:
        raise ValueError(f"Error during query: {result['errors']}")
    documents = []
    for hit in result['data']['Get'][self._index_name]:
        # Pop the text field; the remaining attributes become metadata.
        page_content = hit.pop(self._text_key)
        documents.append(Document(page_content=page_content, metadata=hit))
    return documents
|
Look up similar documents by embedding vector in Weaviate.
|
from_document
|
"""Create a HashedDocument from a Document."""
return cls(uid=uid, page_content=document.page_content, metadata=document.
metadata)
|
@classmethod
def from_document(cls, document: Document, *, uid: Optional[str]=None
    ) ->_HashedDocument:
    """Create a HashedDocument from a Document."""
    content = document.page_content
    meta = document.metadata
    return cls(uid=uid, page_content=content, metadata=meta)
|
Create a HashedDocument from a Document.
|
add_message
|
"""Add a self-created message to the store"""
self.messages.append(message)
|
def add_message(self, message: BaseMessage) ->None:
    """Append *message* to the in-memory message list."""
    self.messages.append(message)
|
Add a self-created message to the store
|
_import_vald
|
from langchain_community.vectorstores.vald import Vald
return Vald
|
def _import_vald() ->Any:
    """Lazily import and return the ``Vald`` vector store class."""
    from langchain_community.vectorstores.vald import Vald
    return Vald
| null |
_raise_functions_not_supported
|
raise ValueError(
'Function messages are not supported by Databricks. Please create a feature request at https://github.com/mlflow/mlflow/issues.'
)
|
@staticmethod
def _raise_functions_not_supported() ->None:
    """Always raise: Databricks endpoints do not accept function messages."""
    raise ValueError(
        'Function messages are not supported by Databricks. Please create a feature request at https://github.com/mlflow/mlflow/issues.'
        )
| null |
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Collect unrecognized keyword arguments into ``model_kwargs``.

    Any key that does not match a declared field alias is moved into the
    ``model_kwargs`` dict, with a warning so the caller can confirm the
    transfer was intentional.

    Raises:
        ValueError: If a key appears both at the top level and inside
            ``model_kwargs``.
    """
    # Field aliases are the names callers may pass directly.
    all_required_field_names = {field.alias for field in cls.__fields__.
        values()}
    extra = values.get('model_kwargs', {})
    # Iterate over a snapshot because values is mutated via pop() below.
    for field_name in list(values):
        if field_name not in all_required_field_names:
            if field_name in extra:
                raise ValueError(f'Found {field_name} supplied twice.')
            logger.warning(
                f"""{field_name} was transferred to model_kwargs.
                Please confirm that {field_name} is what you intended."""
                )
            extra[field_name] = values.pop(field_name)
    values['model_kwargs'] = extra
    return values
Build extra kwargs from additional params that were passed in.
|
teardown_class
|
index_stats = cls.index.describe_index_stats()
for _namespace_name in index_stats['namespaces'].keys():
cls.index.delete(delete_all=True, namespace=_namespace_name)
reset_pinecone()
|
@classmethod
def teardown_class(cls) ->None:
    """Delete every vector in every namespace, then reset pinecone state."""
    stats = cls.index.describe_index_stats()
    # Namespaces must be cleared one at a time, each via its own delete call.
    for namespace_name in stats['namespaces']:
        cls.index.delete(delete_all=True, namespace=namespace_name)
    # reset_pinecone() — test helper; presumably reinitializes the client.
    reset_pinecone()
| null |
test_execute
|
mock_client.return_value = MagicMock()
huge_graph = HugeGraph(self.username, self.password, self.address, self.
port, self.graph)
query = 'g.V().limit(10)'
result = huge_graph.query(query)
self.assertIsInstance(result, MagicMock)
|
@patch('hugegraph.connection.PyHugeGraph')
def test_execute(self, mock_client: Any) ->None:
    """A query against a mocked client should surface the mock's result."""
    mock_client.return_value = MagicMock()
    graph = HugeGraph(self.username, self.password, self.address, self.
        port, self.graph)
    gremlin = 'g.V().limit(10)'
    outcome = graph.query(gremlin)
    self.assertIsInstance(outcome, MagicMock)
| null |
__init__
|
"""Initialize with a Chroma client."""
try:
import chromadb
import chromadb.config
except ImportError:
raise ImportError(
'Could not import chromadb python package. Please install it with `pip install chromadb`.'
)
if client is not None:
self._client_settings = client_settings
self._client = client
self._persist_directory = persist_directory
else:
if client_settings:
client_settings.persist_directory = (persist_directory or
client_settings.persist_directory)
if client_settings.persist_directory is not None:
major, minor, _ = chromadb.__version__.split('.')
if int(major) == 0 and int(minor) < 4:
client_settings.chroma_db_impl = 'duckdb+parquet'
_client_settings = client_settings
elif persist_directory:
major, minor, _ = chromadb.__version__.split('.')
if int(major) == 0 and int(minor) < 4:
_client_settings = chromadb.config.Settings(chroma_db_impl=
'duckdb+parquet')
else:
_client_settings = chromadb.config.Settings(is_persistent=True)
_client_settings.persist_directory = persist_directory
else:
_client_settings = chromadb.config.Settings()
self._client_settings = _client_settings
self._client = chromadb.Client(_client_settings)
self._persist_directory = (_client_settings.persist_directory or
persist_directory)
self._embedding_function = embedding_function
self._collection = self._client.get_or_create_collection(name=
collection_name, embedding_function=None, metadata=collection_metadata)
self.override_relevance_score_fn = relevance_score_fn
|
def __init__(self, collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME,
    embedding_function: Optional[Embeddings]=None, persist_directory:
    Optional[str]=None, client_settings: Optional[chromadb.config.Settings]
    =None, collection_metadata: Optional[Dict]=None, client: Optional[
    chromadb.Client]=None, relevance_score_fn: Optional[Callable[[float],
    float]]=None) ->None:
    """Initialize with a Chroma client.

    Args:
        collection_name: Name of the Chroma collection to create or reuse.
        embedding_function: Embeddings used by LangChain for texts/queries.
        persist_directory: Where Chroma should persist data, if anywhere.
        client_settings: Pre-built chromadb Settings; when given it takes
            precedence over ``persist_directory`` for configuring the client.
        collection_metadata: Metadata attached to the collection.
        client: Existing chromadb client to reuse; when given, the settings
            and persist directory are stored as passed, unmodified.
        relevance_score_fn: Optional override for relevance-score scaling.

    Raises:
        ImportError: If the chromadb package is not installed.
    """
    try:
        import chromadb
        import chromadb.config
    except ImportError:
        raise ImportError(
            'Could not import chromadb python package. Please install it with `pip install chromadb`.'
            )
    if client is not None:
        # Caller supplied a ready client: take everything as-is.
        self._client_settings = client_settings
        self._client = client
        self._persist_directory = persist_directory
    else:
        if client_settings:
            # Explicit persist_directory wins over the one in the settings.
            client_settings.persist_directory = (persist_directory or
                client_settings.persist_directory)
            if client_settings.persist_directory is not None:
                # chromadb < 0.4 needs the legacy duckdb+parquet backend
                # for persistence.
                major, minor, _ = chromadb.__version__.split('.')
                if int(major) == 0 and int(minor) < 4:
                    client_settings.chroma_db_impl = 'duckdb+parquet'
            _client_settings = client_settings
        elif persist_directory:
            # No settings given: build them from the persist directory,
            # again branching on the pre-/post-0.4 chromadb API.
            major, minor, _ = chromadb.__version__.split('.')
            if int(major) == 0 and int(minor) < 4:
                _client_settings = chromadb.config.Settings(chroma_db_impl=
                    'duckdb+parquet')
            else:
                _client_settings = chromadb.config.Settings(is_persistent=True)
            _client_settings.persist_directory = persist_directory
        else:
            # Fully in-memory default configuration.
            _client_settings = chromadb.config.Settings()
        self._client_settings = _client_settings
        self._client = chromadb.Client(_client_settings)
        self._persist_directory = (_client_settings.persist_directory or
            persist_directory)
    self._embedding_function = embedding_function
    # Chroma-side embedding hook stays None; embeddings are presumably
    # computed via self._embedding_function by the wrapper's own methods.
    self._collection = self._client.get_or_create_collection(name=
        collection_name, embedding_function=None, metadata=collection_metadata)
    self.override_relevance_score_fn = relevance_score_fn
|
Initialize with a Chroma client.
|
_llm_type
|
"""Return type of llm."""
return 'minimax'
|
@property
def _llm_type(self) ->str:
    """Identifier string for this model wrapper."""
    # Hard-coded: this wrapper always reports itself as minimax.
    return 'minimax'
|
Return type of llm.
|
test_exclude_types
|
structured_schema = {'node_props': {'Movie': [{'property': 'title', 'type':
'STRING'}], 'Actor': [{'property': 'name', 'type': 'STRING'}], 'Person':
[{'property': 'name', 'type': 'STRING'}]}, 'rel_props': {},
'relationships': [{'start': 'Actor', 'end': 'Movie', 'type': 'ACTED_IN'
}, {'start': 'Person', 'end': 'Movie', 'type': 'DIRECTED'}]}
exclude_types = ['Person', 'DIRECTED']
output = construct_schema(structured_schema, [], exclude_types)
expected_schema = """Node properties are the following:
Movie {title: STRING},Actor {name: STRING}
Relationship properties are the following:
The relationships are the following:
(:Actor)-[:ACTED_IN]->(:Movie)"""
assert output == expected_schema
|
def test_exclude_types() ->None:
    """construct_schema must drop excluded node labels and relationship types."""
    node_props = {'Movie': [{'property': 'title', 'type': 'STRING'}],
        'Actor': [{'property': 'name', 'type': 'STRING'}],
        'Person': [{'property': 'name', 'type': 'STRING'}]}
    relationships = [
        {'start': 'Actor', 'end': 'Movie', 'type': 'ACTED_IN'},
        {'start': 'Person', 'end': 'Movie', 'type': 'DIRECTED'},
    ]
    structured_schema = {'node_props': node_props, 'rel_props': {},
        'relationships': relationships}
    # Exclude one node label and one relationship type.
    output = construct_schema(structured_schema, [], ['Person', 'DIRECTED'])
    expected_schema = """Node properties are the following:
Movie {title: STRING},Actor {name: STRING}
Relationship properties are the following:
The relationships are the following:
(:Actor)-[:ACTED_IN]->(:Movie)"""
    assert output == expected_schema
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'output']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Namespace path identifying this object within langchain."""
    # Fixed namespace; split keeps the components in one readable literal.
    return 'langchain.schema.output'.split('.')
|
Get the namespace of the langchain object.
|
_is_url
|
try:
result = urlparse(s)
return all([result.scheme, result.netloc])
except Exception as e:
logger.debug(f'Unable to parse URL: {e}')
return False
|
def _is_url(s: str) ->bool:
    """Return True when *s* parses as a URL with both a scheme and a netloc."""
    try:
        parts = urlparse(s)
        return bool(parts.scheme) and bool(parts.netloc)
    except Exception as err:
        logger.debug(f'Unable to parse URL: {err}')
        return False
| null |
test_deep_stream_assign
|
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.'
) + '{question}'
llm = FakeStreamingListLLM(responses=['foo-lish'])
chain: Runnable = prompt | llm | {'str': StrOutputParser()}
stream = chain.stream({'question': 'What up'})
chunks = []
for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len('foo-lish')
assert add(chunks) == {'str': 'foo-lish'}
chain_with_assign = chain.assign(hello=itemgetter('str') | llm)
assert chain_with_assign.input_schema.schema() == {'title': 'PromptInput',
'type': 'object', 'properties': {'question': {'title': 'Question',
'type': 'string'}}}
assert chain_with_assign.output_schema.schema() == {'title':
'RunnableSequenceOutput', 'type': 'object', 'properties': {'str': {
'title': 'Str', 'type': 'string'}, 'hello': {'title': 'Hello', 'type':
'string'}}}
chunks = []
for chunk in chain_with_assign.stream({'question': 'What up'}):
chunks.append(chunk)
assert len(chunks) == len('foo-lish') * 2
assert chunks == [{'str': 'f'}, {'str': 'o'}, {'str': 'o'}, {'str': '-'}, {
'str': 'l'}, {'str': 'i'}, {'str': 's'}, {'str': 'h'}, {'hello': 'f'},
{'hello': 'o'}, {'hello': 'o'}, {'hello': '-'}, {'hello': 'l'}, {
'hello': 'i'}, {'hello': 's'}, {'hello': 'h'}]
assert add(chunks) == {'str': 'foo-lish', 'hello': 'foo-lish'}
assert chain_with_assign.invoke({'question': 'What up'}) == {'str':
'foo-lish', 'hello': 'foo-lish'}
chain_with_assign_shadow = chain.assign(str=lambda _: 'shadow', hello=
itemgetter('str') | llm)
assert chain_with_assign_shadow.input_schema.schema() == {'title':
'PromptInput', 'type': 'object', 'properties': {'question': {'title':
'Question', 'type': 'string'}}}
assert chain_with_assign_shadow.output_schema.schema() == {'title':
'RunnableSequenceOutput', 'type': 'object', 'properties': {'str': {
'title': 'Str'}, 'hello': {'title': 'Hello', 'type': 'string'}}}
chunks = []
for chunk in chain_with_assign_shadow.stream({'question': 'What up'}):
chunks.append(chunk)
assert len(chunks) == len('foo-lish') + 1
assert add(chunks) == {'str': 'shadow', 'hello': 'foo-lish'}
assert chain_with_assign_shadow.invoke({'question': 'What up'}) == {'str':
'shadow', 'hello': 'foo-lish'}
|
def test_deep_stream_assign() ->None:
    """Streaming through .assign() should emit both base and assigned keys.

    Covers three cases: a plain prompt|llm|parser chain, the same chain
    with an extra assigned key, and an assign that shadows an existing key.
    """
    prompt = SystemMessagePromptTemplate.from_template(
        'You are a nice assistant.') + '{question}'
    llm = FakeStreamingListLLM(responses=['foo-lish'])
    chain: Runnable = prompt | llm | {'str': StrOutputParser()}
    stream = chain.stream({'question': 'What up'})
    chunks = []
    for chunk in stream:
        chunks.append(chunk)
    # One chunk per character of the fake streamed response.
    assert len(chunks) == len('foo-lish')
    assert add(chunks) == {'str': 'foo-lish'}
    chain_with_assign = chain.assign(hello=itemgetter('str') | llm)
    # Input schema is unchanged by assign; output schema gains 'hello'.
    assert chain_with_assign.input_schema.schema() == {'title':
        'PromptInput', 'type': 'object', 'properties': {'question': {
        'title': 'Question', 'type': 'string'}}}
    assert chain_with_assign.output_schema.schema() == {'title':
        'RunnableSequenceOutput', 'type': 'object', 'properties': {'str': {
        'title': 'Str', 'type': 'string'}, 'hello': {'title': 'Hello',
        'type': 'string'}}}
    chunks = []
    for chunk in chain_with_assign.stream({'question': 'What up'}):
        chunks.append(chunk)
    # The assigned key streams after the base key: 8 chunks for each.
    assert len(chunks) == len('foo-lish') * 2
    assert chunks == [{'str': 'f'}, {'str': 'o'}, {'str': 'o'}, {'str': '-'
        }, {'str': 'l'}, {'str': 'i'}, {'str': 's'}, {'str': 'h'}, {'hello':
        'f'}, {'hello': 'o'}, {'hello': 'o'}, {'hello': '-'}, {'hello': 'l'
        }, {'hello': 'i'}, {'hello': 's'}, {'hello': 'h'}]
    assert add(chunks) == {'str': 'foo-lish', 'hello': 'foo-lish'}
    assert chain_with_assign.invoke({'question': 'What up'}) == {'str':
        'foo-lish', 'hello': 'foo-lish'}
    # Shadowing 'str' with a non-streaming lambda replaces its value.
    chain_with_assign_shadow = chain.assign(str=lambda _: 'shadow', hello=
        itemgetter('str') | llm)
    assert chain_with_assign_shadow.input_schema.schema() == {'title':
        'PromptInput', 'type': 'object', 'properties': {'question': {
        'title': 'Question', 'type': 'string'}}}
    assert chain_with_assign_shadow.output_schema.schema() == {'title':
        'RunnableSequenceOutput', 'type': 'object', 'properties': {'str': {
        'title': 'Str'}, 'hello': {'title': 'Hello', 'type': 'string'}}}
    chunks = []
    for chunk in chain_with_assign_shadow.stream({'question': 'What up'}):
        chunks.append(chunk)
    # The non-streaming shadow contributes a single chunk for 'str'.
    assert len(chunks) == len('foo-lish') + 1
    assert add(chunks) == {'str': 'shadow', 'hello': 'foo-lish'}
    assert chain_with_assign_shadow.invoke({'question': 'What up'}) == {'str':
        'shadow', 'hello': 'foo-lish'}
_extract_images_from_page
|
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ''
images = []
for img in page.images:
if img['stream']['Filter'].name in _PDF_FILTER_WITHOUT_LOSS:
images.append(np.frombuffer(img['stream'].get_data(), dtype=np.
uint8).reshape(img['stream']['Height'], img['stream']['Width'], -1)
)
elif img['stream']['Filter'].name in _PDF_FILTER_WITH_LOSS:
images.append(img['stream'].get_data())
else:
warnings.warn('Unknown PDF Filter!')
return extract_from_images_with_rapidocr(images)
|
def _extract_images_from_page(self, page: pdfplumber.page.Page) ->str:
    """Pull raster images out of a pdfplumber page and OCR them.

    Returns the OCR text for all extracted images, or '' when image
    extraction is disabled on this loader.
    """
    if not self.extract_images:
        return ''
    images = []
    for img in page.images:
        stream = img['stream']
        filter_name = stream['Filter'].name
        if filter_name in _PDF_FILTER_WITHOUT_LOSS:
            # Lossless streams hold raw pixels; rebuild the array shape
            # from the stream's Height/Width entries.
            images.append(np.frombuffer(stream.get_data(), dtype=np.uint8
                ).reshape(stream['Height'], stream['Width'], -1))
        elif filter_name in _PDF_FILTER_WITH_LOSS:
            # Lossy streams (e.g. JPEG) are passed through as raw bytes.
            images.append(stream.get_data())
        else:
            warnings.warn('Unknown PDF Filter!')
    return extract_from_images_with_rapidocr(images)
|
Extract images from page and get the text with RapidOCR.
|
embed_query
|
"""Return simple embeddings."""
return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
|
def embed_query(self, text: str) ->List[float]:
    """Return a simple fake embedding: all ones with a trailing zero."""
    leading = [1.0] * (ADA_TOKEN_COUNT - 1)
    return leading + [0.0]
|
Return simple embeddings.
|
test__validate_example_inputs_for_language_model
|
mock_ = mock.MagicMock()
mock_.inputs = inputs
_validate_example_inputs_for_language_model(mock_, None)
|
@pytest.mark.parametrize('inputs', _VALID_PROMPTS)
def test__validate_example_inputs_for_language_model(inputs: Dict[str, Any]
    ) ->None:
    """Valid prompt-shaped inputs must pass validation without raising."""
    example = mock.MagicMock()
    example.inputs = inputs
    _validate_example_inputs_for_language_model(example, None)
| null |
validate_search_type
|
"""Validate search type."""
if 'search_type' in values:
search_type = values['search_type']
if search_type not in ('similarity', 'mmr'):
raise ValueError(f'search_type of {search_type} not allowed.')
return values
|
@root_validator()
def validate_search_type(cls, values: Dict) ->Dict:
    """Reject any search_type other than 'similarity' or 'mmr'."""
    # Only validate when the key is actually present.
    if 'search_type' not in values:
        return values
    chosen = values['search_type']
    if chosen not in ('similarity', 'mmr'):
        raise ValueError(f'search_type of {chosen} not allowed.')
    return values
|
Validate search type.
|
_invocation_params
|
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {}
if not is_openai_v1():
openai_creds.update({'api_key': self.openai_api_key, 'api_base': self.
openai_api_base, 'organization': self.openai_organization})
if self.openai_proxy:
import openai
openai.proxy = {'http': self.openai_proxy, 'https': self.openai_proxy}
return {**openai_creds, **self._default_params}
|
@property
def _invocation_params(self) ->Dict[str, Any]:
    """Assemble the keyword arguments used when invoking the model."""
    creds: Dict[str, Any] = {}
    if not is_openai_v1():
        # Pre-v1 SDKs take credentials per call rather than via a client.
        creds['api_key'] = self.openai_api_key
        creds['api_base'] = self.openai_api_base
        creds['organization'] = self.openai_organization
    if self.openai_proxy:
        import openai
        # Module-level proxy setting, as required by the legacy SDK.
        openai.proxy = {'http': self.openai_proxy, 'https': self.openai_proxy}
    return {**creds, **self._default_params}
|
Get the parameters used to invoke the model.
|
test_xinference_embedding_documents
|
"""Test xinference embeddings for documents."""
from xinference.client import RESTfulClient
endpoint, _ = setup
client = RESTfulClient(endpoint)
model_uid = client.launch_model(model_name='vicuna-v1.3',
model_size_in_billions=7, model_format='ggmlv3', quantization='q4_0')
xinference = XinferenceEmbeddings(server_url=endpoint, model_uid=model_uid)
documents = ['foo bar', 'bar foo']
output = xinference.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 4096
|
def test_xinference_embedding_documents(setup: Tuple[str, str]) ->None:
    """Embedding two documents should yield two 4096-dim vectors."""
    from xinference.client import RESTfulClient
    endpoint, _ = setup
    launched_uid = RESTfulClient(endpoint).launch_model(model_name=
        'vicuna-v1.3', model_size_in_billions=7, model_format='ggmlv3',
        quantization='q4_0')
    embedder = XinferenceEmbeddings(server_url=endpoint, model_uid=
        launched_uid)
    vectors = embedder.embed_documents(['foo bar', 'bar foo'])
    assert len(vectors) == 2
    assert len(vectors[0]) == 4096
|
Test xinference embeddings for documents.
|
similarity_search
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
documents = [d[0] for d in docs_and_scores]
return documents
|
def similarity_search(self, query: str, k: int=4, filter: Optional[dict]=
    None, **kwargs: Any) ->List[Document]:
    """Return docs most similar to query.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter: Optional filter forwarded to the scored search.

    Returns:
        List of Documents most similar to the query.
    """
    # Delegate to the scored variant and strip the scores.
    scored = self.similarity_search_with_score(query, k, filter=filter)
    return [doc for doc, _ in scored]
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
|
test_maximal_marginal_relevance_query_dim
|
query_embedding = np.random.random(size=5)
query_embedding_2d = query_embedding.reshape((1, 5))
embedding_list = np.random.random(size=(4, 5)).tolist()
first = maximal_marginal_relevance(query_embedding, embedding_list)
second = maximal_marginal_relevance(query_embedding_2d, embedding_list)
assert first == second
|
def test_maximal_marginal_relevance_query_dim() ->None:
    """MMR must treat 1-D and (1, n) query embeddings identically."""
    query_1d = np.random.random(size=5)
    query_2d = query_1d.reshape((1, 5))
    candidates = np.random.random(size=(4, 5)).tolist()
    result_1d = maximal_marginal_relevance(query_1d, candidates)
    result_2d = maximal_marginal_relevance(query_2d, candidates)
    assert result_1d == result_2d
| null |
_combine_llm_outputs
|
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
continue
token_usage = output['token_usage']
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {'token_usage': overall_token_usage, 'model_name': self.model}
|
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) ->dict:
    """Merge per-generation token_usage dicts into a single aggregate."""
    totals: dict = {}
    for entry in llm_outputs:
        if entry is None:
            continue
        # Sum usage counters key-by-key across all outputs.
        for key, count in entry['token_usage'].items():
            if key in totals:
                totals[key] += count
            else:
                totals[key] = count
    return {'token_usage': totals, 'model_name': self.model}
| null |
from_texts
|
instance = LanceDB(connection, embedding, vector_key, id_key, text_key)
instance.add_texts(texts, metadatas=metadatas, **kwargs)
return instance
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, connection: Any=None, vector_key: Optional[
    str]='vector', id_key: Optional[str]='id', text_key: Optional[str]=
    'text', **kwargs: Any) ->LanceDB:
    """Create a LanceDB store and seed it with the given texts."""
    # NOTE(review): instantiates LanceDB directly rather than cls, so
    # subclasses would not get their own type — confirm this is intended.
    store = LanceDB(connection, embedding, vector_key, id_key, text_key)
    store.add_texts(texts, metadatas=metadatas, **kwargs)
    return store
| null |
remove_node
|
"""Remove a node from the graph and all edges connected to it."""
self.nodes.pop(node.id)
self.edges = [edge for edge in self.edges if edge.source != node.id and
edge.target != node.id]
|
def remove_node(self, node: Node) ->None:
    """Remove a node from the graph and all edges connected to it."""
    self.nodes.pop(node.id)
    # Keep only edges that neither start nor end at the removed node.
    surviving = []
    for edge in self.edges:
        if edge.source != node.id and edge.target != node.id:
            surviving.append(edge)
    self.edges = surviving
|
Remove a node from the graph and all edges connected to it.
|
_list_arg_to_length
|
if not arg:
return [None] * num
elif len(arg) == 1:
return arg * num
elif len(arg) == num:
return arg
else:
raise ValueError(f'Argument must be of length 1 or {num}')
|
def _list_arg_to_length(arg: Optional[List[str]], num: int) ->Sequence[Optional
    [str]]:
    """Normalize an optional list argument to exactly ``num`` entries.

    None/empty becomes ``num`` Nones; a single element is repeated ``num``
    times; a list already of length ``num`` passes through unchanged.

    Raises:
        ValueError: If the list has any other length.
    """
    if not arg:
        return [None] * num
    if len(arg) == 1:
        return arg * num
    if len(arg) == num:
        return arg
    raise ValueError(f'Argument must be of length 1 or {num}')
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.