method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_create_message_dicts
|
params = self._client_params
if stop is not None:
if 'stop' in params:
raise ValueError('`stop` found in both the input and default params.')
params['stop'] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
|
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional
[List[str]]) ->Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if 'stop' in params:
raise ValueError(
'`stop` found in both the input and default params.')
params['stop'] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
| null |
test_structured_tool_types_parsed
|
"""Test the non-primitive types are correctly passed to structured tools."""
class SomeEnum(Enum):
A = 'a'
B = 'b'
class SomeBaseModel(BaseModel):
foo: str
@tool
def structured_tool(some_enum: SomeEnum, some_base_model: SomeBaseModel
) ->dict:
"""Return the arguments directly."""
return {'some_enum': some_enum, 'some_base_model': some_base_model}
assert isinstance(structured_tool, StructuredTool)
args = {'some_enum': SomeEnum.A.value, 'some_base_model': SomeBaseModel(foo
='bar').dict()}
result = structured_tool.run(json.loads(json.dumps(args)))
expected = {'some_enum': SomeEnum.A, 'some_base_model': SomeBaseModel(foo=
'bar')}
assert result == expected
|
def test_structured_tool_types_parsed() ->None:
"""Test the non-primitive types are correctly passed to structured tools."""
class SomeEnum(Enum):
A = 'a'
B = 'b'
class SomeBaseModel(BaseModel):
foo: str
@tool
def structured_tool(some_enum: SomeEnum, some_base_model: SomeBaseModel
) ->dict:
"""Return the arguments directly."""
return {'some_enum': some_enum, 'some_base_model': some_base_model}
assert isinstance(structured_tool, StructuredTool)
args = {'some_enum': SomeEnum.A.value, 'some_base_model': SomeBaseModel
(foo='bar').dict()}
result = structured_tool.run(json.loads(json.dumps(args)))
expected = {'some_enum': SomeEnum.A, 'some_base_model': SomeBaseModel(
foo='bar')}
assert result == expected
|
Test the non-primitive types are correctly passed to structured tools.
|
input_keys
|
"""Input keys this chain returns."""
return self.input_variables
|
@property
def input_keys(self) ->List[str]:
"""Input keys this chain returns."""
return self.input_variables
|
Input keys this chain returns.
|
_can_use_selection_scorer
|
"""
Returns whether the chain can use the selection scorer to score responses or not.
"""
return self.selection_scorer is not None and self.selection_scorer_activated
|
def _can_use_selection_scorer(self) ->bool:
"""
Returns whether the chain can use the selection scorer to score responses or not.
"""
return (self.selection_scorer is not None and self.
selection_scorer_activated)
|
Returns whether the chain can use the selection scorer to score responses or not.
|
__init__
|
"""Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
metadata (Optional[Dict[str, Any]]): The metadata.
inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
self.metadata = metadata or {}
self.inheritable_metadata = inheritable_metadata or {}
|
def __init__(self, *, run_id: UUID, handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler], parent_run_id:
Optional[UUID]=None, tags: Optional[List[str]]=None, inheritable_tags:
Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None,
inheritable_metadata: Optional[Dict[str, Any]]=None) ->None:
"""Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
metadata (Optional[Dict[str, Any]]): The metadata.
inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
self.metadata = metadata or {}
self.inheritable_metadata = inheritable_metadata or {}
|
Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
metadata (Optional[Dict[str, Any]]): The metadata.
inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
|
test_person_with_kwargs
|
person = Person(secret='hello')
assert dumps(person, separators=(',', ':')) == snapshot
|
def test_person_with_kwargs(snapshot: Any) ->None:
person = Person(secret='hello')
assert dumps(person, separators=(',', ':')) == snapshot
| null |
similarity_search
|
"""Run similarity search using Clarifai.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(query, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
"""Run similarity search using Clarifai.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(query, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
Run similarity search using Clarifai.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
|
create_table
|
"""Create a new table."""
if self.awadb_client is None:
return False
ret = self.awadb_client.Create(table_name)
if ret:
self.using_table_name = table_name
return ret
|
def create_table(self, table_name: str, **kwargs: Any) ->bool:
"""Create a new table."""
if self.awadb_client is None:
return False
ret = self.awadb_client.Create(table_name)
if ret:
self.using_table_name = table_name
return ret
|
Create a new table.
|
_import_ainetwork_owner
|
from langchain_community.tools.ainetwork.owner import AINOwnerOps
return AINOwnerOps
|
def _import_ainetwork_owner() ->Any:
from langchain_community.tools.ainetwork.owner import AINOwnerOps
return AINOwnerOps
| null |
__init__
|
super().__init__(pydantic_object=LineList)
|
def __init__(self) ->None:
super().__init__(pydantic_object=LineList)
| null |
test_create_alibabacloud_opensearch
|
opensearch = create_alibabacloud_opensearch()
time.sleep(1)
output = opensearch.similarity_search('foo', k=10)
assert len(output) == 3
|
def test_create_alibabacloud_opensearch() ->None:
opensearch = create_alibabacloud_opensearch()
time.sleep(1)
output = opensearch.similarity_search('foo', k=10)
assert len(output) == 3
| null |
test_default_regex_matching
|
prediction = 'Mindy is the CTO'
reference = '^Mindy.*CTO$'
result = regex_match_string_evaluator.evaluate_strings(prediction=
prediction, reference=reference)
assert result['score'] == 1.0
reference = '^Mike.*CEO$'
result = regex_match_string_evaluator.evaluate_strings(prediction=
prediction, reference=reference)
assert result['score'] == 0.0
|
def test_default_regex_matching(regex_match_string_evaluator:
RegexMatchStringEvaluator) ->None:
prediction = 'Mindy is the CTO'
reference = '^Mindy.*CTO$'
result = regex_match_string_evaluator.evaluate_strings(prediction=
prediction, reference=reference)
assert result['score'] == 1.0
reference = '^Mike.*CEO$'
result = regex_match_string_evaluator.evaluate_strings(prediction=
prediction, reference=reference)
assert result['score'] == 0.0
| null |
_run
|
"""Use the tool."""
from langchain.chains.retrieval_qa.base import RetrievalQA
chain = RetrievalQA.from_chain_type(self.llm, retriever=self.vectorstore.
as_retriever())
return chain.run(query, callbacks=run_manager.get_child() if run_manager else
None)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
from langchain.chains.retrieval_qa.base import RetrievalQA
chain = RetrievalQA.from_chain_type(self.llm, retriever=self.
vectorstore.as_retriever())
return chain.run(query, callbacks=run_manager.get_child() if
run_manager else None)
|
Use the tool.
|
test_prompt
|
messages = [SystemMessage(content='sys-msg'), HumanMessage(content=
'usr-msg-1'), AIMessage(content='ai-msg-1'), HumanMessage(content=
'usr-msg-2')]
actual = model.predict_messages(messages).content
expected = """### System:
sys-msg
### User:
usr-msg-1
### Assistant:
ai-msg-1
### User:
usr-msg-2
"""
assert actual == expected
|
def test_prompt(model: Orca) ->None:
messages = [SystemMessage(content='sys-msg'), HumanMessage(content=
'usr-msg-1'), AIMessage(content='ai-msg-1'), HumanMessage(content=
'usr-msg-2')]
actual = model.predict_messages(messages).content
expected = """### System:
sys-msg
### User:
usr-msg-1
### Assistant:
ai-msg-1
### User:
usr-msg-2
"""
assert actual == expected
| null |
_arg
|
self.write(t.arg)
if t.annotation:
self.write(': ')
self.dispatch(t.annotation)
|
def _arg(self, t):
self.write(t.arg)
if t.annotation:
self.write(': ')
self.dispatch(t.annotation)
| null |
invoke
|
if isinstance(input, BaseMessage):
return self._call_with_config(lambda inner_input: self.parse_result([
ChatGeneration(message=inner_input)]), input, config, run_type='parser'
)
else:
return self._call_with_config(lambda inner_input: self.parse_result([
Generation(text=inner_input)]), input, config, run_type='parser')
|
def invoke(self, input: Union[str, BaseMessage], config: Optional[
RunnableConfig]=None) ->T:
if isinstance(input, BaseMessage):
return self._call_with_config(lambda inner_input: self.parse_result
([ChatGeneration(message=inner_input)]), input, config,
run_type='parser')
else:
return self._call_with_config(lambda inner_input: self.parse_result
([Generation(text=inner_input)]), input, config, run_type='parser')
| null |
mset
|
"""Set the values for the given keys.
Args:
key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs.
Returns:
None
"""
for key, value in key_value_pairs:
self.store[key] = value
|
def mset(self, key_value_pairs: Sequence[Tuple[str, V]]) ->None:
"""Set the values for the given keys.
Args:
key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs.
Returns:
None
"""
for key, value in key_value_pairs:
self.store[key] = value
|
Set the values for the given keys.
Args:
key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs.
Returns:
None
|
test_load_uses_page_content_column_to_create_document_text
|
import xorbits.pandas as pd
data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date': [
'2022-01-01', '2022-01-02']}
sample_data_frame = pd.DataFrame(data)
sample_data_frame = sample_data_frame.rename(columns={'text':
'dummy_test_column'})
loader = XorbitsLoader(sample_data_frame, page_content_column=
'dummy_test_column')
docs = loader.load()
assert docs[0].page_content == 'Hello'
assert docs[1].page_content == 'World'
|
@pytest.mark.skipif(not xorbits_installed, reason='xorbits not installed')
def test_load_uses_page_content_column_to_create_document_text() ->None:
import xorbits.pandas as pd
data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date':
['2022-01-01', '2022-01-02']}
sample_data_frame = pd.DataFrame(data)
sample_data_frame = sample_data_frame.rename(columns={'text':
'dummy_test_column'})
loader = XorbitsLoader(sample_data_frame, page_content_column=
'dummy_test_column')
docs = loader.load()
assert docs[0].page_content == 'Hello'
assert docs[1].page_content == 'World'
| null |
_load_single_chat_session
|
"""
Convert an individual LangSmith LLM run to a ChatSession.
:param llm_run: The LLM run object.
:return: A chat session representing the run's data.
"""
chat_session = LangSmithRunChatLoader._get_messages_from_llm_run(llm_run)
functions = LangSmithRunChatLoader._get_functions_from_llm_run(llm_run)
if functions:
chat_session['functions'] = functions
return chat_session
|
def _load_single_chat_session(self, llm_run: 'Run') ->ChatSession:
"""
Convert an individual LangSmith LLM run to a ChatSession.
:param llm_run: The LLM run object.
:return: A chat session representing the run's data.
"""
chat_session = LangSmithRunChatLoader._get_messages_from_llm_run(llm_run)
functions = LangSmithRunChatLoader._get_functions_from_llm_run(llm_run)
if functions:
chat_session['functions'] = functions
return chat_session
|
Convert an individual LangSmith LLM run to a ChatSession.
:param llm_run: The LLM run object.
:return: A chat session representing the run's data.
|
test_from_documents
|
input_docs = [Document(page_content='I have a pen.'), Document(page_content
='Do you have a pen?'), Document(page_content='I have a bag.')]
bm25_retriever = BM25Retriever.from_documents(documents=input_docs)
assert len(bm25_retriever.docs) == 3
assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
|
@pytest.mark.requires('rank_bm25')
def test_from_documents() ->None:
input_docs = [Document(page_content='I have a pen.'), Document(
page_content='Do you have a pen?'), Document(page_content=
'I have a bag.')]
bm25_retriever = BM25Retriever.from_documents(documents=input_docs)
assert len(bm25_retriever.docs) == 3
assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
| null |
on_text
|
"""Do nothing"""
pass
|
def on_text(self, text: str, **kwargs: Any) ->None:
"""Do nothing"""
pass
|
Do nothing
|
test_usearch_from_documents
|
"""Test from_documents constructor."""
texts = ['foo', 'bar', 'baz']
docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts]
docsearch = USearch.from_documents(docs, FakeEmbeddings())
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'a': 'b'})]
|
def test_usearch_from_documents() ->None:
"""Test from_documents constructor."""
texts = ['foo', 'bar', 'baz']
docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts]
docsearch = USearch.from_documents(docs, FakeEmbeddings())
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'a': 'b'})]
|
Test from_documents constructor.
|
test_yellowbrick_with_score
|
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _yellowbrick_vector_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
distances = [o[1] for o in output]
docsearch.drop(YELLOWBRICK_TABLE)
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
assert distances[0] > distances[1] > distances[2]
|
@pytest.mark.requires('yb-vss')
def test_yellowbrick_with_score() ->None:
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _yellowbrick_vector_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
distances = [o[1] for o in output]
docsearch.drop(YELLOWBRICK_TABLE)
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
assert distances[0] > distances[1] > distances[2]
|
Test end to end construction and search with scores and IDs.
|
with_config
|
"""
Bind config to a Runnable, returning a new Runnable.
"""
return RunnableBinding(bound=self, config=cast(RunnableConfig, {**config or
{}, **kwargs}), kwargs={})
|
def with_config(self, config: Optional[RunnableConfig]=None, **kwargs: Any
) ->Runnable[Input, Output]:
"""
Bind config to a Runnable, returning a new Runnable.
"""
return RunnableBinding(bound=self, config=cast(RunnableConfig, {**
config or {}, **kwargs}), kwargs={})
|
Bind config to a Runnable, returning a new Runnable.
|
_call
|
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Not supported by this interface (pass in init method)
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
'NLPCloud only supports a single stop sequence per generation.Pass in a list of length 1.'
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
params = {**self._default_params, **kwargs}
response = self.client.generation(prompt, end_sequence=end_sequence, **params)
return response['generated_text']
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Not supported by this interface (pass in init method)
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
'NLPCloud only supports a single stop sequence per generation.Pass in a list of length 1.'
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
params = {**self._default_params, **kwargs}
response = self.client.generation(prompt, end_sequence=end_sequence, **
params)
return response['generated_text']
|
Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Not supported by this interface (pass in init method)
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
|
validate_environment
|
"""Validate that access token exists in environment."""
values['access_token'] = get_from_dict_or_env(values, 'access_token',
'GITHUB_PERSONAL_ACCESS_TOKEN')
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that access token exists in environment."""
values['access_token'] = get_from_dict_or_env(values, 'access_token',
'GITHUB_PERSONAL_ACCESS_TOKEN')
return values
|
Validate that access token exists in environment.
|
_call
|
"""Simpler interface."""
|
@abstractmethod
def _call(self, messages: List[BaseMessage], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Simpler interface."""
|
Simpler interface.
|
_generate
|
if self.streaming:
stream_iter = self._stream(messages=messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
if self.streaming:
stream_iter = self._stream(messages=messages, stop=stop,
run_manager=run_manager, **kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
| null |
load
|
"""Load data into Document objects.
Returns:
List of Documents.
"""
if self.load_all_recursively:
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
relative_paths = self._get_paths(soup_info)
documents = []
for path in relative_paths:
url = self.base_url + path
print(f'Fetching documents from {url}')
soup_info = self._scrape(url)
with contextlib.suppress(ValueError):
documents.extend(self._get_documents(soup_info))
return documents
else:
print(f'Fetching documents from {self.web_path}')
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
return self._get_documents(soup_info)
|
def load(self) ->List[Document]:
"""Load data into Document objects.
Returns:
List of Documents.
"""
if self.load_all_recursively:
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
relative_paths = self._get_paths(soup_info)
documents = []
for path in relative_paths:
url = self.base_url + path
print(f'Fetching documents from {url}')
soup_info = self._scrape(url)
with contextlib.suppress(ValueError):
documents.extend(self._get_documents(soup_info))
return documents
else:
print(f'Fetching documents from {self.web_path}')
soup_info = self.scrape()
self.folder_path = self._get_folder_path(soup_info)
return self._get_documents(soup_info)
|
Load data into Document objects.
Returns:
List of Documents.
|
test_unstructured_org_mode_loader
|
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DIRECTORY, 'README.org')
loader = UnstructuredOrgModeLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
|
def test_unstructured_org_mode_loader() ->None:
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DIRECTORY, 'README.org')
loader = UnstructuredOrgModeLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
|
Test unstructured loader.
|
embed_documents
|
"""Call out to Jina's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._embed(texts)
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Call out to Jina's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._embed(texts)
|
Call out to Jina's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
default_databricks_vector_search
|
return DatabricksVectorSearch(index, embedding=DEFAULT_EMBEDDING_MODEL,
text_column=DEFAULT_TEXT_COLUMN)
|
def default_databricks_vector_search(index: MagicMock
) ->DatabricksVectorSearch:
return DatabricksVectorSearch(index, embedding=DEFAULT_EMBEDDING_MODEL,
text_column=DEFAULT_TEXT_COLUMN)
| null |
refresh_schema
|
"""Refreshes the graph schema information."""
pass
|
def refresh_schema(self) ->None:
"""Refreshes the graph schema information."""
pass
|
Refreshes the graph schema information.
|
format_response_payload
|
"""Formats response"""
return json.loads(output)['output']
|
def format_response_payload(self, output: bytes) ->str:
"""Formats response"""
return json.loads(output)['output']
|
Formats response
|
_search
|
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f'Error in search request: {response}')
return json.loads(response.text)['value']
|
def _search(self, query: str) ->List[dict]:
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f'Error in search request: {response}')
return json.loads(response.text)['value']
| null |
_create_connection_alias
|
"""Create the connection to the Milvus server."""
from pymilvus import MilvusException, connections
host: str = connection_args.get('host', None)
port: Union[str, int] = connection_args.get('port', None)
address: str = connection_args.get('address', None)
uri: str = connection_args.get('uri', None)
user = connection_args.get('user', None)
if host is not None and port is not None:
given_address = str(host) + ':' + str(port)
elif uri is not None:
given_address = uri.split('https://')[1]
elif address is not None:
given_address = address
else:
given_address = None
logger.debug('Missing standard address type for reuse attempt')
if user is not None:
tmp_user = user
else:
tmp_user = ''
if given_address is not None:
for con in connections.list_connections():
addr = connections.get_connection_addr(con[0])
if con[1] and 'address' in addr and addr['address'
] == given_address and 'user' in addr and addr['user'] == tmp_user:
logger.debug('Using previous connection: %s', con[0])
return con[0]
alias = uuid4().hex
try:
connections.connect(alias=alias, **connection_args)
logger.debug('Created new connection using: %s', alias)
return alias
except MilvusException as e:
logger.error('Failed to create new connection using: %s', alias)
raise e
|
def _create_connection_alias(self, connection_args: dict) ->str:
"""Create the connection to the Milvus server."""
from pymilvus import MilvusException, connections
host: str = connection_args.get('host', None)
port: Union[str, int] = connection_args.get('port', None)
address: str = connection_args.get('address', None)
uri: str = connection_args.get('uri', None)
user = connection_args.get('user', None)
if host is not None and port is not None:
given_address = str(host) + ':' + str(port)
elif uri is not None:
given_address = uri.split('https://')[1]
elif address is not None:
given_address = address
else:
given_address = None
logger.debug('Missing standard address type for reuse attempt')
if user is not None:
tmp_user = user
else:
tmp_user = ''
if given_address is not None:
for con in connections.list_connections():
addr = connections.get_connection_addr(con[0])
if con[1] and 'address' in addr and addr['address'
] == given_address and 'user' in addr and addr['user'
] == tmp_user:
logger.debug('Using previous connection: %s', con[0])
return con[0]
alias = uuid4().hex
try:
connections.connect(alias=alias, **connection_args)
logger.debug('Created new connection using: %s', alias)
return alias
except MilvusException as e:
logger.error('Failed to create new connection using: %s', alias)
raise e
|
Create the connection to the Milvus server.
|
__ror__
|
if isinstance(other, RunnableSequence):
return RunnableSequence(other.first, *other.middle, other.last, self.
first, *self.middle, self.last, name=other.name or self.name)
else:
return RunnableSequence(coerce_to_runnable(other), self.first, *self.
middle, self.last, name=self.name)
|
def __ror__(self, other: Union[Runnable[Other, Any], Callable[[Other], Any],
Callable[[Iterator[Other]], Iterator[Any]], Mapping[str, Union[Runnable
[Other, Any], Callable[[Other], Any], Any]]]) ->RunnableSerializable[
Other, Output]:
if isinstance(other, RunnableSequence):
return RunnableSequence(other.first, *other.middle, other.last,
self.first, *self.middle, self.last, name=other.name or self.name)
else:
return RunnableSequence(coerce_to_runnable(other), self.first, *
self.middle, self.last, name=self.name)
| null |
embed_with_retry
|
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) ->Any:
response = requests.post(**kwargs)
return _check_response(response.json())
return _embed_with_retry(**kwargs)
|
def embed_with_retry(embeddings: VoyageEmbeddings, **kwargs: Any) ->Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) ->Any:
response = requests.post(**kwargs)
return _check_response(response.json())
return _embed_with_retry(**kwargs)
|
Use tenacity to retry the embedding call.
|
test_self_hosted_huggingface_pipeline_summarization
|
"""Test valid call to self-hosted HuggingFace summarization model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(model_id='facebook/bart-large-cnn', task=
'summarization', hardware=gpu, model_reqs=model_reqs)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_self_hosted_huggingface_pipeline_summarization() ->None:
"""Test valid call to self-hosted HuggingFace summarization model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(model_id='facebook/bart-large-cnn', task
='summarization', hardware=gpu, model_reqs=model_reqs)
output = llm('Say foo:')
assert isinstance(output, str)
|
Test valid call to self-hosted HuggingFace summarization model.
|
_invoke
|
for attempt in self._sync_retrying(reraise=True):
with attempt:
result = super().invoke(input, self._patch_config(config,
run_manager, attempt.retry_state), **kwargs)
if attempt.retry_state.outcome and not attempt.retry_state.outcome.failed:
attempt.retry_state.set_result(result)
return result
|
def _invoke(self, input: Input, run_manager: 'CallbackManagerForChainRun',
config: RunnableConfig, **kwargs: Any) ->Output:
for attempt in self._sync_retrying(reraise=True):
with attempt:
result = super().invoke(input, self._patch_config(config,
run_manager, attempt.retry_state), **kwargs)
if (attempt.retry_state.outcome and not attempt.retry_state.outcome
.failed):
attempt.retry_state.set_result(result)
return result
| null |
_import_awadb
|
from langchain_community.vectorstores.awadb import AwaDB
return AwaDB
|
def _import_awadb() ->Any:
from langchain_community.vectorstores.awadb import AwaDB
return AwaDB
| null |
get_tools
|
docs = retriever.get_relevant_documents(query)
return [ALL_TOOLS[d.metadata['index']] for d in docs]
|
def get_tools(query: str) ->List[Tool]:
docs = retriever.get_relevant_documents(query)
return [ALL_TOOLS[d.metadata['index']] for d in docs]
| null |
test_chat_google_genai_system_message
|
model = ChatGoogleGenerativeAI(model=_MODEL,
convert_system_message_to_human=True)
text_question1, text_answer1 = 'How much is 2+2?', '4'
text_question2 = 'How much is 3+3?'
system_message = SystemMessage(content=
"You're supposed to answer math questions.")
message1 = HumanMessage(content=text_question1)
message2 = AIMessage(content=text_answer1)
message3 = HumanMessage(content=text_question2)
response = model([system_message, message1, message2, message3])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
|
def test_chat_google_genai_system_message() ->None:
model = ChatGoogleGenerativeAI(model=_MODEL,
convert_system_message_to_human=True)
text_question1, text_answer1 = 'How much is 2+2?', '4'
text_question2 = 'How much is 3+3?'
system_message = SystemMessage(content=
"You're supposed to answer math questions.")
message1 = HumanMessage(content=text_question1)
message2 = AIMessage(content=text_answer1)
message3 = HumanMessage(content=text_question2)
response = model([system_message, message1, message2, message3])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
| null |
_sanitize_title
|
sanitized_title = re.sub('\\s', ' ', title)
sanitized_title = re.sub('(?u)[^- \\w.]', '', sanitized_title)
if len(sanitized_title) > _MAXIMUM_TITLE_LENGTH:
sanitized_title = sanitized_title[:_MAXIMUM_TITLE_LENGTH]
return sanitized_title
|
@staticmethod
def _sanitize_title(title: str) ->str:
    """Collapse whitespace, strip disallowed characters, and cap the length."""
    collapsed = re.sub('\\s', ' ', title)
    cleaned = re.sub('(?u)[^- \\w.]', '', collapsed)
    # Slicing is a no-op when the string is already within the limit.
    return cleaned[:_MAXIMUM_TITLE_LENGTH]
| null |
embed_documents
|
"""Call out to Clarifai's embedding models.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
from clarifai.client.input import Inputs
from clarifai.client.model import Model
except ImportError:
raise ImportError(
'Could not import clarifai python package. Please install it with `pip install clarifai`.'
)
if self.pat is not None:
pat = self.pat
if self.model_url is not None:
_model_init = Model(url=self.model_url, pat=pat)
else:
_model_init = Model(model_id=self.model_id, user_id=self.user_id,
app_id=self.app_id, pat=pat)
input_obj = Inputs(pat=pat)
batch_size = 32
embeddings = []
try:
for i in range(0, len(texts), batch_size):
batch = texts[i:i + batch_size]
input_batch = [input_obj.get_text_input(input_id=str(id), raw_text=
inp) for id, inp in enumerate(batch)]
predict_response = _model_init.predict(input_batch)
embeddings.extend([list(output.data.embeddings[0].vector) for
output in predict_response.outputs])
except Exception as e:
logger.error(f'Predict failed, exception: {e}')
return embeddings
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Call out to Clarifai's embedding models.

    Args:
        texts: The list of texts to embed.

    Returns:
        List of embeddings, one for each text. May be shorter than ``texts``
        if a batch fails, since prediction errors are logged and swallowed
        rather than raised (best-effort behavior preserved from before).
    """
    try:
        from clarifai.client.input import Inputs
        from clarifai.client.model import Model
    except ImportError:
        raise ImportError(
            'Could not import clarifai python package. Please install it with `pip install clarifai`.'
            )
    # Bug fix: `pat` was previously bound only when self.pat was not None,
    # so a missing PAT raised NameError below instead of letting the
    # Clarifai client fall back to its own auth resolution.
    pat = self.pat
    if self.model_url is not None:
        _model_init = Model(url=self.model_url, pat=pat)
    else:
        _model_init = Model(model_id=self.model_id, user_id=self.user_id,
            app_id=self.app_id, pat=pat)
    input_obj = Inputs(pat=pat)
    batch_size = 32
    embeddings = []
    try:
        for i in range(0, len(texts), batch_size):
            batch = texts[i:i + batch_size]
            # `idx` instead of `id` to avoid shadowing the builtin.
            input_batch = [input_obj.get_text_input(input_id=str(idx),
                raw_text=inp) for idx, inp in enumerate(batch)]
            predict_response = _model_init.predict(input_batch)
            embeddings.extend([list(output.data.embeddings[0].vector) for
                output in predict_response.outputs])
    except Exception as e:
        # Best-effort: log and return whatever was embedded so far.
        logger.error(f'Predict failed, exception: {e}')
    return embeddings
|
Call out to Clarifai's embedding models.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
save
|
"""Save the chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
if self.memory is not None:
raise ValueError('Saving of memory is not yet supported.')
chain_dict = self.dict()
if '_type' not in chain_dict:
raise NotImplementedError(f'Chain {self} does not support saving.')
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
if save_path.suffix == '.json':
with open(file_path, 'w') as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == '.yaml':
with open(file_path, 'w') as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f'{save_path} must be json or yaml')
|
def save(self, file_path: Union[Path, str]) ->None:
    """Save the chain.

    Expects `Chain._chain_type` property to be implemented and for memory to
    be null.

    Args:
        file_path: Path to file to save the chain to.

    Example:
        .. code-block:: python

            chain.save(file_path="path/chain.yaml")
    """
    if self.memory is not None:
        raise ValueError('Saving of memory is not yet supported.')
    chain_dict = self.dict()
    if '_type' not in chain_dict:
        raise NotImplementedError(f'Chain {self} does not support saving.')
    save_path = Path(file_path) if isinstance(file_path, str) else file_path
    save_path.parent.mkdir(parents=True, exist_ok=True)
    suffix = save_path.suffix
    if suffix == '.json':
        with open(file_path, 'w') as f:
            json.dump(chain_dict, f, indent=4)
    elif suffix == '.yaml':
        with open(file_path, 'w') as f:
            yaml.dump(chain_dict, f, default_flow_style=False)
    else:
        raise ValueError(f'{save_path} must be json or yaml')
|
Save the chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
|
on_agent_finish
|
"""Run when agent ends running."""
aim = import_aim()
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = {'action': 'on_agent_finish'}
resp.update(self.get_custom_callback_meta())
finish_res = deepcopy(finish)
text = """OUTPUT:
{}
LOG:
{}""".format(finish_res.return_values['output'],
finish_res.log)
self._run.track(aim.Text(text), name='on_agent_finish', context=resp)
|
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None:
    """Run when agent ends running."""
    aim = import_aim()
    # Bump the step and end counters for this callback event.
    for counter in ('step', 'agent_ends', 'ends'):
        setattr(self, counter, getattr(self, counter) + 1)
    resp = {'action': 'on_agent_finish', **self.get_custom_callback_meta()}
    # Deep-copy so tracking never mutates the caller's AgentFinish.
    snapshot = deepcopy(finish)
    text = 'OUTPUT:\n{}\n\nLOG:\n{}'.format(snapshot.return_values[
        'output'], snapshot.log)
    self._run.track(aim.Text(text), name='on_agent_finish', context=resp)
|
Run when agent ends running.
|
max_marginal_relevance_search
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
fields: Other fields to get from elasticsearch source. These fields
will be added to the document metadata.
Returns:
List[Document]: A list of Documents selected by maximal marginal relevance.
"""
if self.embedding is None:
raise ValueError('You must provide an embedding function to perform MMR')
remove_vector_query_field_from_metadata = True
if fields is None:
fields = [self.vector_query_field]
elif self.vector_query_field not in fields:
fields.append(self.vector_query_field)
else:
remove_vector_query_field_from_metadata = False
query_embedding = self.embedding.embed_query(query)
got_docs = self._search(query_vector=query_embedding, k=fetch_k, fields=
fields, **kwargs)
got_embeddings = [doc.metadata[self.vector_query_field] for doc, _ in got_docs]
selected_indices = maximal_marginal_relevance(np.array(query_embedding),
got_embeddings, lambda_mult=lambda_mult, k=k)
selected_docs = [got_docs[i][0] for i in selected_indices]
if remove_vector_query_field_from_metadata:
for doc in selected_docs:
del doc.metadata[self.vector_query_field]
return selected_docs
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, fields: Optional[List[str]]=None, **kwargs: Any
    ) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.

    Args:
        query (str): Text to look up documents similar to.
        k (int): Number of Documents to return. Defaults to 4.
        fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult (float): Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
        fields: Other fields to get from elasticsearch source. These fields
            will be added to the document metadata.

    Returns:
        List[Document]: A list of Documents selected by maximal marginal relevance.
    """
    if self.embedding is None:
        raise ValueError(
            'You must provide an embedding function to perform MMR')
    # The stored vector must be fetched so MMR can re-rank locally. It is
    # stripped from metadata afterwards unless the caller explicitly asked
    # for the vector field themselves (the final `else` branch).
    remove_vector_query_field_from_metadata = True
    if fields is None:
        fields = [self.vector_query_field]
    elif self.vector_query_field not in fields:
        fields.append(self.vector_query_field)
    else:
        remove_vector_query_field_from_metadata = False
    query_embedding = self.embedding.embed_query(query)
    # Over-fetch fetch_k candidates; MMR then picks k diverse ones.
    got_docs = self._search(query_vector=query_embedding, k=fetch_k, fields
        =fields, **kwargs)
    got_embeddings = [doc.metadata[self.vector_query_field] for doc, _ in
        got_docs]
    selected_indices = maximal_marginal_relevance(np.array(query_embedding),
        got_embeddings, lambda_mult=lambda_mult, k=k)
    selected_docs = [got_docs[i][0] for i in selected_indices]
    if remove_vector_query_field_from_metadata:
        # NOTE: mutates the selected Document objects in place.
        for doc in selected_docs:
            del doc.metadata[self.vector_query_field]
    return selected_docs
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
fields: Other fields to get from elasticsearch source. These fields
will be added to the document metadata.
Returns:
List[Document]: A list of Documents selected by maximal marginal relevance.
|
test_loading_few_shot_prompt_when_examples_in_config
|
"""Test loading few shot prompt when the examples are in the config."""
with change_directory(EXAMPLE_DIR):
prompt = load_prompt('few_shot_prompt_examples_in.json')
expected_prompt = FewShotPromptTemplate(input_variables=['adjective'],
prefix='Write antonyms for the following words.', example_prompt=
PromptTemplate(input_variables=['input', 'output'], template=
"""Input: {input}
Output: {output}"""), examples=[{'input': 'happy',
'output': 'sad'}, {'input': 'tall', 'output': 'short'}], suffix=
"""Input: {adjective}
Output:""")
assert prompt == expected_prompt
|
def test_loading_few_shot_prompt_when_examples_in_config() ->None:
    """Test loading few shot prompt when the examples are in the config."""
    # The JSON config embeds the examples inline (rather than referencing an
    # external examples file); loading must reproduce this exact template.
    with change_directory(EXAMPLE_DIR):
        prompt = load_prompt('few_shot_prompt_examples_in.json')
        expected_prompt = FewShotPromptTemplate(input_variables=[
            'adjective'], prefix='Write antonyms for the following words.',
            example_prompt=PromptTemplate(input_variables=['input',
            'output'], template="""Input: {input}
Output: {output}"""),
            examples=[{'input': 'happy', 'output': 'sad'}, {'input': 'tall',
            'output': 'short'}], suffix="""Input: {adjective}
Output:""")
        assert prompt == expected_prompt
|
Test loading few shot prompt when the examples are in the config.
|
_get_twilio
|
return Tool(name='Text-Message', description=
'Useful for when you need to send a text message to a provided phone number.'
, func=TwilioAPIWrapper(**kwargs).run)
|
def _get_twilio(**kwargs: Any) ->BaseTool:
    """Build the Twilio text-message tool; kwargs configure the wrapper."""
    wrapper = TwilioAPIWrapper(**kwargs)
    return Tool(name='Text-Message', description=
        'Useful for when you need to send a text message to a provided phone number.'
        , func=wrapper.run)
| null |
_google_search_results
|
cse = self.search_engine.cse()
if self.siterestrict:
cse = cse.siterestrict()
res = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute()
return res.get('items', [])
|
def _google_search_results(self, search_term: str, **kwargs: Any) ->List[dict]:
cse = self.search_engine.cse()
if self.siterestrict:
cse = cse.siterestrict()
res = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute()
return res.get('items', [])
| null |
get
|
"""Gets the collection.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by.
E.g. `{"color" : "red", "price": 4.20}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from.
Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents.
E.g. `{$contains: "hello"}`. Optional.
include: A list of what to include in the results.
Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
Ids are always included.
Defaults to `["metadatas", "documents"]`. Optional.
"""
kwargs = {'ids': ids, 'where': where, 'limit': limit, 'offset': offset,
'where_document': where_document}
if include is not None:
kwargs['include'] = include
return self._collection.get(**kwargs)
|
def get(self, ids: Optional[OneOrMany[ID]]=None, where: Optional[Where]=
    None, limit: Optional[int]=None, offset: Optional[int]=None,
    where_document: Optional[WhereDocument]=None, include: Optional[List[
    str]]=None) ->Dict[str, Any]:
    """Gets the collection.

    Args:
        ids: The ids of the embeddings to get. Optional.
        where: A Where type dict used to filter results by.
            E.g. `{"color" : "red", "price": 4.20}`. Optional.
        limit: The number of documents to return. Optional.
        offset: The offset to start returning results from.
            Useful for paging results with limit. Optional.
        where_document: A WhereDocument type dict used to filter by the
            documents. E.g. `{$contains: "hello"}`. Optional.
        include: A list of what to include in the results.
            Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
            Ids are always included.
            Defaults to `["metadatas", "documents"]`. Optional.
    """
    query_kwargs: Dict[str, Any] = dict(ids=ids, where=where, limit=limit,
        offset=offset, where_document=where_document)
    # Only forward `include` when the caller set it, so the underlying
    # collection applies its own default otherwise.
    if include is not None:
        query_kwargs['include'] = include
    return self._collection.get(**query_kwargs)
|
Gets the collection.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by.
E.g. `{"color" : "red", "price": 4.20}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from.
Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents.
E.g. `{$contains: "hello"}`. Optional.
include: A list of what to include in the results.
Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
Ids are always included.
Defaults to `["metadatas", "documents"]`. Optional.
|
get_num_tokens
|
"""Return number of tokens."""
return len(text.split())
|
def get_num_tokens(self, text: str) ->int:
    """Count tokens by splitting *text* on whitespace."""
    words = text.split()
    return len(words)
|
Return number of tokens.
|
clear
|
"""Clear session memory from Neo4j"""
query = (
f'MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 UNWIND nodes(p) as node DETACH DELETE node;'
)
self._driver.execute_query(query, {'session_id': self._session_id}).summary
|
def clear(self) ->None:
    """Clear session memory from Neo4j"""
    # Matches the session node, walks the longest NEXT-chain that ends at
    # its LAST_MESSAGE, and detach-deletes every message node on that path.
    query = (
        f'MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 UNWIND nodes(p) as node DETACH DELETE node;'
        )
    # Accessing `.summary` forces the result to be consumed eagerly.
    self._driver.execute_query(query, {'session_id': self._session_id}).summary
|
Clear session memory from Neo4j
|
__init__
|
super().__init__()
self.operator = operator or eq
|
def __init__(self, operator: Optional[Callable]=None, **kwargs: Any) ->None:
    """Initialize with an optional comparison callable.

    Args:
        operator: binary comparison callable; falls back to ``eq``
            (presumably ``operator.eq`` — verify the import) when omitted.
        kwargs: accepted for signature compatibility but ignored here.
    """
    super().__init__()
    # `or` means None (or any falsy callable) falls back to equality.
    self.operator = operator or eq
| null |
clear
|
"""Empty the collection of all its stored entries."""
self._drop_collection()
self._provision_collection()
return None
|
def clear(self) ->None:
    """Empty the collection by dropping and re-provisioning it."""
    self._drop_collection()
    self._provision_collection()
|
Empty the collection of all its stored entries.
|
create_data_generation_chain
|
"""Creates a chain that generates synthetic sentences with
provided fields.
Args:
llm: The language model to use.
prompt: Prompt to feed the language model with.
If not provided, the default one will be used.
"""
prompt = prompt or SENTENCE_PROMPT
return LLMChain(llm=llm, prompt=prompt)
|
def create_data_generation_chain(llm: BaseLanguageModel, prompt: Optional[
    PromptTemplate]=None) ->Chain:
    """Creates a chain that generates synthetic sentences with
    provided fields.

    Args:
        llm: The language model to use.
        prompt: Prompt to feed the language model with.
            If not provided, the default one will be used.
    """
    return LLMChain(llm=llm, prompt=prompt or SENTENCE_PROMPT)
|
Creates a chain that generates synthetic sentences with
provided fields.
Args:
llm: The language model to use.
prompt: Prompt to feed the language model with.
If not provided, the default one will be used.
|
run
|
"""Search Reddit and return posts as a single string."""
results: List[Dict] = self.results(query=query, sort=sort, time_filter=
time_filter, subreddit=subreddit, limit=limit)
if len(results) > 0:
output: List[str] = [f'Searching r/{subreddit} found {len(results)} posts:'
]
for r in results:
category = 'N/A' if r['post_category'] is None else r['post_category']
p = f"""Post Title: '{r['post_title']}'
User: {r['post_author']}
Subreddit: {r['post_subreddit']}:
Text body: {r['post_text']}
Post URL: {r['post_url']}
Post Category: {category}.
Score: {r['post_score']}
"""
output.append(p)
return '\n'.join(output)
else:
return f'Searching r/{subreddit} did not find any posts:'
|
def run(self, query: str, sort: str, time_filter: str, subreddit: str,
    limit: int) ->str:
    """Search Reddit and return posts as a single string.

    Args:
        query: search terms forwarded to `self.results`.
        sort: sort order forwarded to `self.results`.
        time_filter: time-window filter forwarded to `self.results`.
        subreddit: subreddit name to search within.
        limit: maximum number of posts to fetch.

    Returns:
        A newline-joined, human-readable summary of the matching posts, or a
        "did not find any posts" message when nothing matched.
    """
    results: List[Dict] = self.results(query=query, sort=sort, time_filter=
        time_filter, subreddit=subreddit, limit=limit)
    if len(results) > 0:
        output: List[str] = [
            f'Searching r/{subreddit} found {len(results)} posts:']
        for r in results:
            # post_category may be None in the raw result dict.
            category = 'N/A' if r['post_category'] is None else r[
                'post_category']
            p = f"""Post Title: '{r['post_title']}'
User: {r['post_author']}
Subreddit: {r['post_subreddit']}:
Text body: {r['post_text']}
Post URL: {r['post_url']}
Post Category: {category}.
Score: {r['post_score']}
"""
            output.append(p)
        return '\n'.join(output)
    else:
        return f'Searching r/{subreddit} did not find any posts:'
|
Search Reddit and return posts as a single string.
|
_llm_type
|
"""Return type of llm."""
return 'fake_list'
|
@property
def _llm_type(self) ->str:
    """Return the LLM type identifier for this fake list model."""
    return 'fake_list'
|
Return type of llm.
|
test_confluence_loader_load_data_by_space_id
|
mock_confluence.get_all_pages_from_space.return_value = [self.
_get_mock_page('123'), self._get_mock_page('456')]
mock_confluence.get_all_restrictions_for_content.side_effect = [self.
_get_mock_page_restrictions('123'), self._get_mock_page_restrictions('456')
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == 'Content 123'
assert documents[1].page_content == 'Content 456'
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
|
def test_confluence_loader_load_data_by_space_id(self, mock_confluence:
    MagicMock) ->None:
    """Loading by space key fetches pages once and builds two Documents."""
    mock_confluence.get_all_pages_from_space.return_value = [self.
        _get_mock_page('123'), self._get_mock_page('456')]
    mock_confluence.get_all_restrictions_for_content.side_effect = [self.
        _get_mock_page_restrictions('123'), self.
        _get_mock_page_restrictions('456')]
    loader = self._get_mock_confluence_loader(mock_confluence)
    docs = loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
    assert mock_confluence.get_all_pages_from_space.call_count == 1
    assert len(docs) == 2
    assert all(isinstance(doc, Document) for doc in docs)
    assert [doc.page_content for doc in docs] == ['Content 123', 'Content 456'
        ]
    # None of the other retrieval paths should have been exercised.
    for unused in (mock_confluence.get_page_by_id, mock_confluence.
        get_all_pages_by_label, mock_confluence.cql, mock_confluence.
        get_page_child_by_type):
        assert unused.call_count == 0
| null |
append
|
"""Append message to the end of the chat template.
Args:
message: representation of a message to append.
"""
self.messages.append(_convert_to_message(message))
|
def append(self, message: MessageLikeRepresentation) ->None:
    """Append message to the end of the chat template.

    Args:
        message: representation of a message to append. It is normalized
            via `_convert_to_message` before being stored.
    """
    self.messages.append(_convert_to_message(message))
|
Append message to the end of the chat template.
Args:
message: representation of a message to append.
|
_import_eleven_labs_text2speech
|
from langchain_community.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
return ElevenLabsText2SpeechTool
|
def _import_eleven_labs_text2speech() ->Any:
    """Lazily import and return the ElevenLabs text-to-speech tool class."""
    from langchain_community.tools.eleven_labs import text2speech
    return text2speech.ElevenLabsText2SpeechTool
| null |
similarity_search_with_score
|
"""Perform a search on a query string and return results with score."""
embedding = self.embedding_func.embed_query(query)
res = self.similarity_search_with_score_by_vector(embedding=embedding, k=k,
param=param, expr=expr, timeout=timeout, **kwargs)
return res
|
def similarity_search_with_score(self, query: str, k: int=4, param:
    Optional[dict]=None, expr: Optional[str]=None, timeout: Optional[int]=
    None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Embed the query string and delegate to the by-vector scored search."""
    query_vector = self.embedding_func.embed_query(query)
    return self.similarity_search_with_score_by_vector(embedding=
        query_vector, k=k, param=param, expr=expr, timeout=timeout, **kwargs)
|
Perform a search on a query string and return results with score.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'pipeline']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    # NOTE(review): presumably the id prefix used when (de)serializing this
    # class — confirm against the Serializable base class.
    return ['langchain', 'prompts', 'pipeline']
|
Get the namespace of the langchain object.
|
_get_tools_requests_patch
|
return RequestsPatchTool(requests_wrapper=TextRequestsWrapper())
|
def _get_tools_requests_patch() ->BaseTool:
    """Build a PATCH-request tool backed by a plain text requests wrapper."""
    wrapper = TextRequestsWrapper()
    return RequestsPatchTool(requests_wrapper=wrapper)
| null |
test_delete_dataset_by_ids
|
"""Test delete dataset."""
id = deeplake_datastore.vectorstore.dataset.id.data()['value'][0]
deeplake_datastore.delete(ids=[id])
assert deeplake_datastore.similarity_search('foo', k=1, filter={'metadata':
{'page': '0'}}) == []
assert len(deeplake_datastore.vectorstore) == 2
deeplake_datastore.delete_dataset()
|
def test_delete_dataset_by_ids(deeplake_datastore: DeepLake) ->None:
    """Test delete dataset."""
    doc_id = deeplake_datastore.vectorstore.dataset.id.data()['value'][0]
    deeplake_datastore.delete(ids=[doc_id])
    hits = deeplake_datastore.similarity_search('foo', k=1, filter={
        'metadata': {'page': '0'}})
    assert hits == []
    assert len(deeplake_datastore.vectorstore) == 2
    deeplake_datastore.delete_dataset()
|
Test delete dataset.
|
requires_reference
|
return False
|
@property
def requires_reference(self) ->bool:
    """Whether a reference answer is required; always False for this evaluator."""
    return False
| null |
get_model_list
|
"""Get a list of models loaded in the triton server."""
res = self.client.get_model_repository_index(as_json=True)
return [model['name'] for model in res['models']]
|
def get_model_list(self) ->List[str]:
    """Get a list of models loaded in the triton server."""
    index = self.client.get_model_repository_index(as_json=True)
    return [entry['name'] for entry in index['models']]
|
Get a list of models loaded in the triton server.
|
truncate_word
|
"""
Truncate a string to a certain number of words, based on the max string
length.
"""
if not isinstance(content, str) or length <= 0:
return content
if len(content) <= length:
return content
return content[:length - len(suffix)].rsplit(' ', 1)[0] + suffix
|
def truncate_word(content: Any, *, length: int, suffix: str='...') ->str:
    """
    Truncate a string to at most `length` characters, cutting at the last
    word boundary that fits and appending `suffix`.

    Non-string content and non-positive lengths are returned unchanged.
    """
    if not isinstance(content, str) or length <= 0:
        return content
    if len(content) <= length:
        return content
    # Clamp at zero so a suffix longer than `length` cannot produce a
    # negative slice index (which previously truncated from the wrong end
    # and returned a string longer than `length`).
    keep = max(length - len(suffix), 0)
    return content[:keep].rsplit(' ', 1)[0] + suffix
|
Truncate a string to a certain number of words, based on the max string
length.
|
clear
|
"""Clear cache."""
from gptcache import Cache
for gptcache_instance in self.gptcache_dict.values():
gptcache_instance = cast(Cache, gptcache_instance)
gptcache_instance.flush()
self.gptcache_dict.clear()
|
def clear(self, **kwargs: Any) ->None:
    """Clear cache."""
    from gptcache import Cache
    # Flush every per-LLM gptcache instance, then forget them all.
    for instance in self.gptcache_dict.values():
        cast(Cache, instance).flush()
    self.gptcache_dict.clear()
|
Clear cache.
|
input_keys
|
"""Expect input key.
:meta private:
"""
return [self.question_key]
|
@property
def input_keys(self) ->List[str]:
    """Expect input key.

    Returns the single configured question key.

    :meta private:
    """
    return [self.question_key]
|
Expect input key.
:meta private:
|
parse_result
|
if len(result) != 1:
raise OutputParserException(
f'Expected exactly one result, but got {len(result)}')
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
'This output parser can only be used with a chat generation.')
message = generation.message
try:
function_call = message.additional_kwargs['function_call']
except KeyError as exc:
if partial:
return None
else:
raise OutputParserException(f'Could not parse function call: {exc}')
try:
if partial:
try:
if self.args_only:
return parse_partial_json(function_call['arguments'],
strict=self.strict)
else:
return {**function_call, 'arguments': parse_partial_json(
function_call['arguments'], strict=self.strict)}
except json.JSONDecodeError:
return None
elif self.args_only:
try:
return json.loads(function_call['arguments'], strict=self.strict)
except (json.JSONDecodeError, TypeError) as exc:
raise OutputParserException(
f'Could not parse function call data: {exc}')
else:
try:
return {**function_call, 'arguments': json.loads(function_call[
'arguments'], strict=self.strict)}
except (json.JSONDecodeError, TypeError) as exc:
raise OutputParserException(
f'Could not parse function call data: {exc}')
except KeyError:
return None
|
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
    """Parse an OpenAI function-call payload out of a single chat generation.

    Args:
        result: must contain exactly one `ChatGeneration`.
        partial: when True, missing or malformed JSON yields None instead of
            raising, and partially streamed JSON is parsed best-effort.

    Returns:
        The decoded arguments only (when `self.args_only`), or the full
        function-call dict with its `arguments` decoded; None in partial
        mode when the payload is absent or incomplete.

    Raises:
        OutputParserException: on cardinality/type errors, or on JSON decode
            failures when not in partial mode.
    """
    if len(result) != 1:
        raise OutputParserException(
            f'Expected exactly one result, but got {len(result)}')
    generation = result[0]
    if not isinstance(generation, ChatGeneration):
        raise OutputParserException(
            'This output parser can only be used with a chat generation.')
    message = generation.message
    try:
        function_call = message.additional_kwargs['function_call']
    except KeyError as exc:
        # No function call in the message: tolerated while streaming.
        if partial:
            return None
        else:
            raise OutputParserException(f'Could not parse function call: {exc}'
                )
    try:
        if partial:
            # Streaming path: best-effort parse, never raise on bad JSON.
            try:
                if self.args_only:
                    return parse_partial_json(function_call['arguments'],
                        strict=self.strict)
                else:
                    return {**function_call, 'arguments':
                        parse_partial_json(function_call['arguments'],
                        strict=self.strict)}
            except json.JSONDecodeError:
                return None
        elif self.args_only:
            try:
                return json.loads(function_call['arguments'], strict=self.
                    strict)
            except (json.JSONDecodeError, TypeError) as exc:
                raise OutputParserException(
                    f'Could not parse function call data: {exc}')
        else:
            try:
                return {**function_call, 'arguments': json.loads(
                    function_call['arguments'], strict=self.strict)}
            except (json.JSONDecodeError, TypeError) as exc:
                raise OutputParserException(
                    f'Could not parse function call data: {exc}')
    except KeyError:
        # function_call dict lacked an 'arguments' key entirely.
        return None
| null |
_import_lancedb
|
from langchain_community.vectorstores.lancedb import LanceDB
return LanceDB
|
def _import_lancedb() ->Any:
    """Lazily import and return the LanceDB vector store class."""
    from langchain_community.vectorstores import lancedb
    return lancedb.LanceDB
| null |
predict
|
...
|
@abstractmethod
def predict(self, event: TEvent) ->Any:
    """Produce a prediction for *event*; concrete subclasses must implement."""
    ...
| null |
test_xml_output_parser
|
"""Test XMLOutputParser."""
xml_parser = XMLOutputParser()
xml_result = xml_parser.parse_folder(result)
assert DEF_RESULT_EXPECTED == xml_result
assert list(xml_parser.transform(iter(result))) == [{'foo': [{'bar': [{
'baz': None}]}]}, {'foo': [{'bar': [{'baz': 'slim.shady'}]}]}, {'foo':
[{'baz': 'tag'}]}]
|
@pytest.mark.parametrize('result', [DEF_RESULT_ENCODING,
    DEF_RESULT_ENCODING[DEF_RESULT_ENCODING.find('\n'):],
    f"""
```xml
{DEF_RESULT_ENCODING}
```
""",
    f"""
Some random text
```xml
{DEF_RESULT_ENCODING}
```
More random text
"""
    ])
def test_xml_output_parser(result: str) ->None:
    """Test XMLOutputParser."""
    xml_parser = XMLOutputParser()
    # Fixed: `parse` is the public BaseOutputParser entry point;
    # `parse_folder` does not exist on XMLOutputParser.
    xml_result = xml_parser.parse(result)
    assert DEF_RESULT_EXPECTED == xml_result
    assert list(xml_parser.transform(iter(result))) == [{'foo': [{'bar': [{
        'baz': None}]}]}, {'foo': [{'bar': [{'baz': 'slim.shady'}]}]}, {
        'foo': [{'baz': 'tag'}]}]
|
Test XMLOutputParser.
|
test_api_key_masked_when_passed_via_constructor
|
llm = Predibase(predibase_api_key='secret-api-key')
print(llm.predibase_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """Printing the secret API key must emit only the mask string."""
    llm = Predibase(predibase_api_key='secret-api-key')
    print(llm.predibase_api_key, end='')
    assert capsys.readouterr().out == '**********'
| null |
test_multiple_messages
|
"""Tests multiple messages works."""
chat = ChatZhipuAI()
message = HumanMessage(content='Hi, how are you.')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
|
def test_multiple_messages() ->None:
    """Tests multiple messages works."""
    chat = ChatZhipuAI()
    prompt = HumanMessage(content='Hi, how are you.')
    response = chat.generate([[prompt], [prompt]])
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    # Each input message list should yield exactly one chat generation
    # whose text mirrors the underlying message content.
    for batch in response.generations:
        assert len(batch) == 1
        for generation in batch:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content
|
Tests multiple messages works.
|
_get_human_tool
|
return HumanInputRun(**kwargs)
|
def _get_human_tool(**kwargs: Any) ->BaseTool:
    """Build a human-in-the-loop input tool, forwarding kwargs to HumanInputRun."""
    return HumanInputRun(**kwargs)
| null |
_run
|
"""Use the tool."""
return self.api_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the tool: forward *query* to the API wrapper and return its text."""
    # run_manager is accepted for the BaseTool interface but unused here.
    return self.api_wrapper.run(query)
|
Use the tool.
|
test_clear_messages
|
file_chat_message_history.add_user_message('Hello!')
file_chat_message_history.add_ai_message('Hi there!')
file_chat_message_history.clear()
messages = file_chat_message_history.messages
assert len(messages) == 0
|
def test_clear_messages(file_chat_message_history: FileChatMessageHistory
    ) ->None:
    """After clear(), the history must contain no messages."""
    file_chat_message_history.add_user_message('Hello!')
    file_chat_message_history.add_ai_message('Hi there!')
    file_chat_message_history.clear()
    assert len(file_chat_message_history.messages) == 0
| null |
invoke
|
key = input['key']
actual_input = input['input']
if key not in self.runnables:
raise ValueError(f"No runnable associated with key '{key}'")
runnable = self.runnables[key]
return runnable.invoke(actual_input, config)
|
def invoke(self, input: RouterInput, config: Optional[RunnableConfig]=None
    ) ->Output:
    """Dispatch *input* to the runnable registered under ``input['key']``.

    Raises:
        ValueError: if no runnable is registered for the key.
    """
    key = input['key']
    payload = input['input']
    if key not in self.runnables:
        raise ValueError(f"No runnable associated with key '{key}'")
    return self.runnables[key].invoke(payload, config)
| null |
is_lc_serializable
|
"""Return whether this class is serializable."""
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return whether this class is serializable."""
    # Unconditionally opts in to LangChain serialization.
    return True
|
Return whether this class is serializable.
|
_import_render
|
from langchain_community.tools.convert_to_openai import format_tool_to_openai_function
return format_tool_to_openai_function
|
def _import_render() ->Any:
    """Lazily import and return ``format_tool_to_openai_function``.

    The function-local import defers loading ``langchain_community`` until
    the attribute is actually requested.
    """
    from langchain_community.tools.convert_to_openai import format_tool_to_openai_function
    return format_tool_to_openai_function
| null |
similarity_search_with_score
|
"""Perform a similarity search with Yellowbrick
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of (Document, similarity)
"""
embedding = self._embedding.embed_query(query)
documents = self.similarity_search_with_score_by_vector(embedding=embedding,
k=k)
return documents
|
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
    ) ->List[Tuple[Document, float]]:
    """Perform a similarity search with Yellowbrick
    Args:
        query (str): query string
        k (int, optional): Top K neighbors to retrieve. Defaults to 4.
        NOTE: Please do not let end-user fill this and always be aware
        of SQL injection.
    Returns:
        List[Document]: List of (Document, similarity)
    """
    # Embed the query, then delegate to the vector-based scored search.
    embedding = self._embedding.embed_query(query)
    documents = self.similarity_search_with_score_by_vector(embedding=
        embedding, k=k)
    return documents
|
Perform a similarity search with Yellowbrick
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document]: List of (Document, similarity)
|
__init__
|
"""Initialize with bucket and key name.
:param bucket: The name of the S3 bucket.
:param key: The key of the S3 object.
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:param verify: Whether or not to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
uses. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type boto_config: botocore.client.Config
:param boto_config: Advanced boto3 client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
"""
super().__init__()
self.bucket = bucket
self.key = key
self.region_name = region_name
self.api_version = api_version
self.use_ssl = use_ssl
self.verify = verify
self.endpoint_url = endpoint_url
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
self.boto_config = boto_config
|
def __init__(self, bucket: str, key: str, *, region_name: Optional[str]=
    None, api_version: Optional[str]=None, use_ssl: Optional[bool]=True,
    verify: Union[str, bool, None]=None, endpoint_url: Optional[str]=None,
    aws_access_key_id: Optional[str]=None, aws_secret_access_key: Optional[
    str]=None, aws_session_token: Optional[str]=None, boto_config: Optional
    [botocore.client.Config]=None):
    """Initialize with bucket and key name.
    :param bucket: The name of the S3 bucket.
    :param key: The key of the S3 object.
    :param region_name: The name of the region associated with the client.
        A client is associated with a single region.
    :param api_version: The API version to use.  By default, botocore will
        use the latest API version when creating a client.  You only need
        to specify this parameter if you want to use a previous API version
        of the client.
    :param use_ssl: Whether or not to use SSL.  By default, SSL is used.
        Note that not all services support non-ssl connections.
    :param verify: Whether or not to verify SSL certificates.
        By default SSL certificates are verified.  You can provide the
        following values:
        * False - do not validate SSL certificates.  SSL will still be
            used (unless use_ssl is False), but SSL certificates
            will not be verified.
        * path/to/cert/bundle.pem - A filename of the CA cert bundle to
            uses.  You can specify this argument if you want to use a
            different CA cert bundle than the one used by botocore.
    :param endpoint_url: The complete URL to use for the constructed
        client.  Normally, botocore will automatically construct the
        appropriate URL to use when communicating with a service.  You can
        specify a complete URL (including the "http/https" scheme) to
        override this behavior.  If this value is provided, then
        ``use_ssl`` is ignored.
    :param aws_access_key_id: The access key to use when creating
        the client.  This is entirely optional, and if not provided,
        the credentials configured for the session will automatically
        be used.  You only need to provide this argument if you want
        to override the credentials used for this specific client.
    :param aws_secret_access_key: The secret key to use when creating
        the client.  Same semantics as aws_access_key_id above.
    :param aws_session_token: The session token to use when creating
        the client.  Same semantics as aws_access_key_id above.
    :type boto_config: botocore.client.Config
    :param boto_config: Advanced boto3 client configuration options. If a value
        is specified in the client config, its value will take precedence
        over environment variables and configuration values, but not over
        a value passed explicitly to the method. If a default config
        object is set on the session, the config object used when creating
        the client will be the result of calling ``merge()`` on the
        default config with the config provided to this call.
    """
    super().__init__()
    # Configuration is stored verbatim; presumably the boto3 client is
    # built lazily from these attributes elsewhere — confirm in the loader.
    self.bucket = bucket
    self.key = key
    self.region_name = region_name
    self.api_version = api_version
    self.use_ssl = use_ssl
    self.verify = verify
    self.endpoint_url = endpoint_url
    self.aws_access_key_id = aws_access_key_id
    self.aws_secret_access_key = aws_secret_access_key
    self.aws_session_token = aws_session_token
    self.boto_config = boto_config
|
Initialize with bucket and key name.
:param bucket: The name of the S3 bucket.
:param key: The key of the S3 object.
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:param verify: Whether or not to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
uses. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type boto_config: botocore.client.Config
:param boto_config: Advanced boto3 client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
|
load_prompt
|
"""Unified method for loading a prompt from LangChainHub or local fs."""
if (hub_result := try_load_from_hub(path, _load_prompt_from_file, 'prompts',
{'py', 'json', 'yaml'})):
return hub_result
else:
return _load_prompt_from_file(path)
|
def load_prompt(path: Union[str, Path]) ->BasePromptTemplate:
    """Unified method for loading a prompt from LangChainHub or local fs."""
    hub_result = try_load_from_hub(path, _load_prompt_from_file, 'prompts',
        {'py', 'json', 'yaml'})
    if hub_result:
        return hub_result
    # Not a hub path (or hub lookup returned nothing): load from local fs.
    return _load_prompt_from_file(path)
|
Unified method for loading a prompt from LangChainHub or local fs.
|
structured_tool_input
|
"""Return the arguments directly."""
return f'{arg1}, {arg2}, {opt_arg}'
|
@tool(infer_schema=False)
def structured_tool_input(arg1: int, arg2: Union[float, datetime], opt_arg:
    Optional[dict]=None) ->str:
    """Return the arguments directly."""
    # NOTE(review): infer_schema=False presumably skips args-schema inference
    # for this tool — confirm against the `tool` decorator's docs.
    return f'{arg1}, {arg2}, {opt_arg}'
|
Return the arguments directly.
|
on_agent_action
|
if self.__has_valid_config is False:
return
try:
name = action.tool
input = _parse_input(action.tool_input)
self.__track_event('tool', 'start', run_id=str(run_id), parent_run_id=
str(parent_run_id) if parent_run_id else None, name=name, input=
input, app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_agent_action: {e}')
|
def on_agent_action(self, action: AgentAction, *, run_id: UUID,
    parent_run_id: Union[UUID, None]=None, **kwargs: Any) ->Any:
    """Report a tool 'start' event for an agent action to LLMonitor.

    No-op when the handler configuration is invalid; any tracking failure
    is logged rather than propagated so callbacks never break the run.
    """
    if self.__has_valid_config is False:
        return
    try:
        tool_name = action.tool
        # Renamed from `input` to avoid shadowing the builtin.
        tool_input = _parse_input(action.tool_input)
        self.__track_event('tool', 'start', run_id=str(run_id),
            parent_run_id=str(parent_run_id) if parent_run_id else None,
            name=tool_name, input=tool_input, app_id=self.__app_id)
    except Exception as e:
        logger.error(f'[LLMonitor] An error occurred in on_agent_action: {e}')
| null |
test_visit_operation
|
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz')])
expected = {'$and': [{'foo': {'$lt': 2}}, {'bar': {'$eq': 'baz'}}]}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
|
def test_visit_operation() ->None:
    """An AND of two comparisons translates to a `$and` filter."""
    lt_clause = Comparison(comparator=Comparator.LT, attribute='foo', value=2)
    eq_clause = Comparison(comparator=Comparator.EQ, attribute='bar',
        value='baz')
    operation = Operation(operator=Operator.AND, arguments=[lt_clause,
        eq_clause])
    translated = DEFAULT_TRANSLATOR.visit_operation(operation)
    assert translated == {'$and': [{'foo': {'$lt': 2}}, {'bar': {'$eq':
        'baz'}}]}
| null |
_parse_string_eval_output
|
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Any: The parsed output.
"""
reasoning = text.strip()
parsed_scores = _get_score(reasoning)
if parsed_scores is None:
value, score = None, None
else:
value, score = parsed_scores
return {'reasoning': reasoning, 'value': value, 'score': score}
|
def _parse_string_eval_output(text: str) ->dict:
    """Parse the output text.
    Args:
        text (str): The output text to parse.
    Returns:
        Any: The parsed output.
    """
    reasoning = text.strip()
    parsed = _get_score(reasoning)
    # _get_score returns None when no score can be extracted.
    value, score = (None, None) if parsed is None else parsed
    return {'reasoning': reasoning, 'value': value, 'score': score}
|
Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Any: The parsed output.
|
_create_chat_result
|
generations = []
for res in response['choices']:
message = convert_dict_to_message(res['message'])
gen = ChatGeneration(message=message, generation_info=dict(
finish_reason=res.get('finish_reason')))
generations.append(gen)
token_usage = response.get('usage', {})
llm_output = {'token_usage': token_usage, 'model_name': self.model}
return ChatResult(generations=generations, llm_output=llm_output)
|
def _create_chat_result(self, response: Mapping[str, Any]) ->ChatResult:
    """Build a ChatResult from a raw chat-completion response mapping."""
    generations = [ChatGeneration(message=convert_dict_to_message(choice[
        'message']), generation_info={'finish_reason': choice.get(
        'finish_reason')}) for choice in response['choices']]
    llm_output = {'token_usage': response.get('usage', {}), 'model_name':
        self.model}
    return ChatResult(generations=generations, llm_output=llm_output)
| null |
sync_call_fallback
|
"""
Decorator to call the synchronous method of the class if the async method is not
implemented. This decorator might be only used for the methods that are defined
as async in the class.
"""
@functools.wraps(method)
async def wrapper(self: Any, *args: Any, **kwargs: Any) ->Any:
try:
return await method(self, *args, **kwargs)
except NotImplementedError:
return await run_in_executor(None, getattr(self, method.__name__[1:
]), *args, **kwargs)
return wrapper
|
def sync_call_fallback(method: Callable) ->Callable:
    """
    Decorator for async methods: if the wrapped async method raises
    NotImplementedError, fall back to the synchronous counterpart (same name
    minus the leading underscore) run in an executor.
    """

    @functools.wraps(method)
    async def wrapper(self: Any, *args: Any, **kwargs: Any) ->Any:
        try:
            result = await method(self, *args, **kwargs)
        except NotImplementedError:
            # Strip the leading '_' to find the sync method, e.g. _foo -> foo.
            sync_method = getattr(self, method.__name__[1:])
            result = await run_in_executor(None, sync_method, *args, **kwargs)
        return result
    return wrapper
|
Decorator to call the synchronous method of the class if the async method is not
implemented. This decorator might be only used for the methods that are defined
as async in the class.
|
from_embeddings
|
"""Construct FAISS wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the FAISS database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = zip(texts, text_embeddings)
faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=
ids, **kwargs)
|
@classmethod
def from_embeddings(cls, text_embeddings: Iterable[Tuple[str, List[float]]],
    embedding: Embeddings, metadatas: Optional[Iterable[dict]]=None, ids:
    Optional[List[str]]=None, **kwargs: Any) ->FAISS:
    """Construct FAISS wrapper from raw documents.
    This is a user friendly interface that:
        1. Embeds documents.
        2. Creates an in memory docstore
        3. Initializes the FAISS database
    This is intended to be a quick way to get started.
    Example:
        .. code-block:: python
            from langchain_community.vectorstores import FAISS
            from langchain_community.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            text_embeddings = embeddings.embed_documents(texts)
            text_embedding_pairs = zip(texts, text_embeddings)
            faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
    """
    # Split the (text, vector) pairs into the parallel lists __from expects.
    texts = [t[0] for t in text_embeddings]
    embeddings = [t[1] for t in text_embeddings]
    return cls.__from(texts, embeddings, embedding, metadatas=metadatas,
        ids=ids, **kwargs)
|
Construct FAISS wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the FAISS database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = zip(texts, text_embeddings)
faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
|
validate_variable_names
|
"""Validate variable names do not include restricted names."""
if 'stop' in values['input_variables']:
raise ValueError(
"Cannot have an input variable named 'stop', as it is used internally, please rename."
)
if 'stop' in values['partial_variables']:
raise ValueError(
"Cannot have an partial variable named 'stop', as it is used internally, please rename."
)
overall = set(values['input_variables']).intersection(values[
'partial_variables'])
if overall:
raise ValueError(
f'Found overlapping input and partial variables: {overall}')
return values
|
@root_validator()
def validate_variable_names(cls, values: Dict) ->Dict:
    """Validate variable names do not include restricted names.

    'stop' is reserved for internal use, and no name may appear in both
    ``input_variables`` and ``partial_variables``.
    """
    if 'stop' in values['input_variables']:
        raise ValueError(
            "Cannot have an input variable named 'stop', as it is used internally, please rename."
            )
    if 'stop' in values['partial_variables']:
        raise ValueError(
            "Cannot have an partial variable named 'stop', as it is used internally, please rename."
            )
    # Reject any name present in both input and partial variable sets.
    overall = set(values['input_variables']).intersection(values[
        'partial_variables'])
    if overall:
        raise ValueError(
            f'Found overlapping input and partial variables: {overall}')
    return values
|
Validate variable names do not include restricted names.
|
test_hologres_with_filter_match
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(texts=texts, table_name='test_table_filter',
embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas,
connection_string=CONNECTION_STRING, pre_delete_table=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'0'})
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)]
|
def test_hologres_with_filter_match() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    # One metadata dict per text, with the page index as a string.
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    docsearch = Hologres.from_texts(texts=texts, table_name=
        'test_table_filter', embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas, connection_string=CONNECTION_STRING,
        pre_delete_table=True)
    # The page filter should narrow results to the exact match at distance 0.
    output = docsearch.similarity_search_with_score('foo', k=1, filter={
        'page': '0'})
    assert output == [(Document(page_content='foo', metadata={'page': '0'}),
        0.0)]
|
Test end to end construction and search.
|
create_index
|
"""
Create an index of embeddings for a list of contexts.
Args:
contexts: List of contexts to embed.
embeddings: Embeddings model to use.
Returns:
Index of embeddings.
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
|
def create_index(contexts: List[str], embeddings: Embeddings) ->np.ndarray:
    """
    Create an index of embeddings for a list of contexts.
    Args:
        contexts: List of contexts to embed.
        embeddings: Embeddings model to use.
    Returns:
        Index of embeddings.
    """
    # Embed contexts concurrently; map() preserves input order.
    with concurrent.futures.ThreadPoolExecutor() as pool:
        vectors = list(pool.map(embeddings.embed_query, contexts))
    return np.array(vectors)
|
Create an index of embeddings for a list of contexts.
Args:
contexts: List of contexts to embed.
embeddings: Embeddings model to use.
Returns:
Index of embeddings.
|
on_retriever_end
|
self.on_retriever_end_common()
|
def on_retriever_end(self, *args: Any, **kwargs: Any) ->Any:
    """Delegate to the shared retriever-end handler; all args are ignored."""
    handler = self.on_retriever_end_common
    handler()
| null |
test_singlestoredb_filter_metadata_5
|
"""Test complex metadata path"""
table_name = 'test_singlestoredb_filter_metadata_5'
drop(table_name)
docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget',
'subfield': {'subfield': {'idx': i, 'other_idx': i + 1}}}) for i, t in
enumerate(texts)]
docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('foo', k=1, filter={'category':
'budget', 'subfield': {'subfield': {'idx': 2}}})
assert output == [Document(page_content='baz', metadata={'index': 2,
'category': 'budget', 'subfield': {'subfield': {'idx': 2, 'other_idx':
3}}})]
drop(table_name)
|
@pytest.mark.skipif(not singlestoredb_installed, reason=
    'singlestoredb not installed')
def test_singlestoredb_filter_metadata_5(texts: List[str]) ->None:
    """Test complex metadata path"""
    table_name = 'test_singlestoredb_filter_metadata_5'
    drop(table_name)
    # Doubly-nested metadata to exercise deep-path filtering.
    docs = [Document(page_content=t, metadata={'index': i, 'category':
        'budget', 'subfield': {'subfield': {'idx': i, 'other_idx': i + 1}}}
        ) for i, t in enumerate(texts)]
    docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
        table_name, host=TEST_SINGLESTOREDB_URL)
    # A partial nested filter (only 'idx') should still match the document.
    output = docsearch.similarity_search('foo', k=1, filter={'category':
        'budget', 'subfield': {'subfield': {'idx': 2}}})
    assert output == [Document(page_content='baz', metadata={'index': 2,
        'category': 'budget', 'subfield': {'subfield': {'idx': 2,
        'other_idx': 3}}})]
    drop(table_name)
|
Test complex metadata path
|
_make_session
|
"""Create a session and close it after use."""
if isinstance(self.session_factory, async_sessionmaker):
raise AssertionError('This method is not supported for async engines.')
session = self.session_factory()
try:
yield session
finally:
session.close()
|
@contextlib.contextmanager
def _make_session(self) ->Generator[Session, None, None]:
    """Create a session and close it after use.

    Yields:
        Session: a fresh session from ``self.session_factory``.
    Raises:
        AssertionError: if the factory is an ``async_sessionmaker`` —
            this sync context manager cannot drive an async engine.
    """
    if isinstance(self.session_factory, async_sessionmaker):
        raise AssertionError('This method is not supported for async engines.')
    session = self.session_factory()
    try:
        yield session
    finally:
        # Close even if the caller's with-block raised.
        session.close()
|
Create a session and close it after use.
|
main
|
"""Generate the api_reference.rst file for each package."""
for dir in os.listdir(ROOT_DIR / 'libs'):
if dir in ('cli', 'partners'):
continue
else:
_build_rst_file(package_name=dir)
for dir in os.listdir(ROOT_DIR / 'libs' / 'partners'):
_build_rst_file(package_name=dir)
|
def main() ->None:
    """Generate the api_reference.rst file for each package.

    Builds one RST file per package under ``libs/`` (skipping ``cli`` and the
    ``partners`` container directory itself), then one per package under
    ``libs/partners``.
    """
    # Renamed loop variable from `dir`, which shadowed the builtin.
    for package_name in os.listdir(ROOT_DIR / 'libs'):
        if package_name in ('cli', 'partners'):
            continue
        _build_rst_file(package_name=package_name)
    for package_name in os.listdir(ROOT_DIR / 'libs' / 'partners'):
        _build_rst_file(package_name=package_name)
|
Generate the api_reference.rst file for each package.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.