method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
mdelete
|
"""Delete the given keys and their associated values.
Args:
keys (Sequence[str]): A sequence of keys to delete.
"""
for key in keys:
if key in self.store:
del self.store[key]
|
def mdelete(self, keys: Sequence[str]) ->None:
"""Delete the given keys and their associated values.
Args:
keys (Sequence[str]): A sequence of keys to delete.
"""
for key in keys:
if key in self.store:
del self.store[key]
|
Delete the given keys and their associated values.
Args:
keys (Sequence[str]): A sequence of keys to delete.
|
test_huggingface_embedding_documents
|
"""Test huggingface embeddings."""
documents = ['foo bar']
embedding = HuggingFaceEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
|
def test_huggingface_embedding_documents() ->None:
"""Test huggingface embeddings."""
documents = ['foo bar']
embedding = HuggingFaceEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
|
Test huggingface embeddings.
|
test_trajectory_eval_chain
|
llm = _FakeTrajectoryChatModel(queries={'a': """Trajectory good
Score: 5""",
'b': """Trajectory not good
Score: 1"""}, sequential_responses=True)
chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo])
res = chain.evaluate_agent_trajectory(input='What is your favorite food?',
agent_trajectory=intermediate_steps, prediction='I like pie.')
assert res['score'] == 1.0
res = chain.evaluate_agent_trajectory(input='What is your favorite food?',
agent_trajectory=intermediate_steps, prediction='I like pie.',
reference='Paris')
assert res['score'] == 0.0
|
def test_trajectory_eval_chain(intermediate_steps: List[Tuple[AgentAction,
str]]) ->None:
llm = _FakeTrajectoryChatModel(queries={'a':
'Trajectory good\nScore: 5', 'b':
"""Trajectory not good
Score: 1"""}, sequential_responses=True)
chain = TrajectoryEvalChain.from_llm(llm=llm, agent_tools=[foo])
res = chain.evaluate_agent_trajectory(input=
'What is your favorite food?', agent_trajectory=intermediate_steps,
prediction='I like pie.')
assert res['score'] == 1.0
res = chain.evaluate_agent_trajectory(input=
'What is your favorite food?', agent_trajectory=intermediate_steps,
prediction='I like pie.', reference='Paris')
assert res['score'] == 0.0
| null |
_generate
|
"""Call the IBM watsonx.ai inference endpoint which then generate the response.
Args:
prompts: List of strings (prompts) to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The full LLMResult output.
Example:
.. code-block:: python
response = watsonx_llm.generate(["What is a molecule"])
"""
if stop:
if self.params:
self.params.update({'stop_sequences': stop})
else:
self.params = {'stop_sequences': stop}
should_stream = stream if stream is not None else self.streaming
if should_stream:
if len(prompts) > 1:
raise ValueError(
f'WatsonxLLM currently only supports single prompt, got {prompts}')
generation = GenerationChunk(text='')
stream_iter = self._stream(prompts[0], stop=stop, run_manager=
run_manager, **kwargs)
for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
if isinstance(generation.generation_info, dict):
llm_output = generation.generation_info.pop('llm_output')
return LLMResult(generations=[[generation]], llm_output=llm_output)
return LLMResult(generations=[[generation]])
else:
response = self.watsonx_model.generate(prompt=prompts, params=self.params)
return self._create_llm_result(response)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, stream: Optional[
bool]=None, **kwargs: Any) ->LLMResult:
"""Call the IBM watsonx.ai inference endpoint which then generate the response.
Args:
prompts: List of strings (prompts) to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The full LLMResult output.
Example:
.. code-block:: python
response = watsonx_llm.generate(["What is a molecule"])
"""
if stop:
if self.params:
self.params.update({'stop_sequences': stop})
else:
self.params = {'stop_sequences': stop}
should_stream = stream if stream is not None else self.streaming
if should_stream:
if len(prompts) > 1:
raise ValueError(
f'WatsonxLLM currently only supports single prompt, got {prompts}'
)
generation = GenerationChunk(text='')
stream_iter = self._stream(prompts[0], stop=stop, run_manager=
run_manager, **kwargs)
for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
if isinstance(generation.generation_info, dict):
llm_output = generation.generation_info.pop('llm_output')
return LLMResult(generations=[[generation]], llm_output=llm_output)
return LLMResult(generations=[[generation]])
else:
response = self.watsonx_model.generate(prompt=prompts, params=self.
params)
return self._create_llm_result(response)
|
Call the IBM watsonx.ai inference endpoint which then generate the response.
Args:
prompts: List of strings (prompts) to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Optional callback manager.
Returns:
The full LLMResult output.
Example:
.. code-block:: python
response = watsonx_llm.generate(["What is a molecule"])
|
test_missing_content_formatter
|
"""Test AzureML LLM without a content_formatter attribute"""
with pytest.raises(AttributeError):
llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv(
'OSS_ENDPOINT_API_KEY'), endpoint_url=os.getenv('OSS_ENDPOINT_URL'),
deployment_name=os.getenv('OSS_DEPLOYMENT_NAME'))
llm('Foo')
|
def test_missing_content_formatter() ->None:
"""Test AzureML LLM without a content_formatter attribute"""
with pytest.raises(AttributeError):
llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv(
'OSS_ENDPOINT_API_KEY'), endpoint_url=os.getenv(
'OSS_ENDPOINT_URL'), deployment_name=os.getenv(
'OSS_DEPLOYMENT_NAME'))
llm('Foo')
|
Test AzureML LLM without a content_formatter attribute
|
_take_next_step
|
return self._consume_next_step([a for a in self._iter_next_step(
name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)])
|
def _take_next_step(self, name_to_tool_map: Dict[str, BaseTool],
color_mapping: Dict[str, str], inputs: Dict[str, str],
intermediate_steps: List[Tuple[AgentAction, str]], run_manager:
Optional[CallbackManagerForChainRun]=None) ->Union[AgentFinish, List[
Tuple[AgentAction, str]]]:
return self._consume_next_step([a for a in self._iter_next_step(
name_to_tool_map, color_mapping, inputs, intermediate_steps,
run_manager)])
| null |
test_clickhouse
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
config = ClickhouseSettings()
config.table = 'test_clickhouse'
docsearch = Clickhouse.from_texts(texts, FakeEmbeddings(), config=config)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'_dummy': 0})]
docsearch.drop()
|
def test_clickhouse() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
config = ClickhouseSettings()
config.table = 'test_clickhouse'
docsearch = Clickhouse.from_texts(texts, FakeEmbeddings(), config=config)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'_dummy': 0})]
docsearch.drop()
|
Test end to end construction and search.
|
_import_momento_vector_index
|
from langchain_community.vectorstores.momento_vector_index import MomentoVectorIndex
return MomentoVectorIndex
|
def _import_momento_vector_index() ->Any:
from langchain_community.vectorstores.momento_vector_index import MomentoVectorIndex
return MomentoVectorIndex
| null |
_import_alibaba_cloud_open_search
|
from langchain_community.vectorstores.alibabacloud_opensearch import AlibabaCloudOpenSearch
return AlibabaCloudOpenSearch
|
def _import_alibaba_cloud_open_search() ->Any:
from langchain_community.vectorstores.alibabacloud_opensearch import AlibabaCloudOpenSearch
return AlibabaCloudOpenSearch
| null |
__set__
|
if instance is not None:
emit_warning()
return super().__set__(instance, value)
|
def __set__(self, instance, value):
if instance is not None:
emit_warning()
return super().__set__(instance, value)
| null |
test_pdf_pagesplitter
|
"""Test splitting with page numbers included."""
script_dir = os.path.dirname(__file__)
loader = PyPDFLoader(os.path.join(script_dir, 'examples/hello.pdf'))
docs = loader.load()
assert 'page' in docs[0].metadata
assert 'source' in docs[0].metadata
faiss_index = FAISS.from_documents(docs, OpenAIEmbeddings())
docs = faiss_index.similarity_search('Complete this sentence: Hello', k=1)
assert 'Hello world' in docs[0].page_content
|
def test_pdf_pagesplitter() ->None:
"""Test splitting with page numbers included."""
script_dir = os.path.dirname(__file__)
loader = PyPDFLoader(os.path.join(script_dir, 'examples/hello.pdf'))
docs = loader.load()
assert 'page' in docs[0].metadata
assert 'source' in docs[0].metadata
faiss_index = FAISS.from_documents(docs, OpenAIEmbeddings())
docs = faiss_index.similarity_search('Complete this sentence: Hello', k=1)
assert 'Hello world' in docs[0].page_content
|
Test splitting with page numbers included.
|
flatten_dict
|
"""Flattens a nested dictionary into a flat dictionary.
Parameters:
nested_dict (dict): The nested dictionary to flatten.
parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Returns:
(dict): A flat dictionary.
"""
flat_dict = {k: v for k, v in _flatten_dict(nested_dict, parent_key, sep)}
return flat_dict
|
def flatten_dict(nested_dict: Dict[str, Any], parent_key: str='', sep: str='_'
) ->Dict[str, Any]:
"""Flattens a nested dictionary into a flat dictionary.
Parameters:
nested_dict (dict): The nested dictionary to flatten.
parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Returns:
(dict): A flat dictionary.
"""
flat_dict = {k: v for k, v in _flatten_dict(nested_dict, parent_key, sep)}
return flat_dict
|
Flattens a nested dictionary into a flat dictionary.
Parameters:
nested_dict (dict): The nested dictionary to flatten.
parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Returns:
(dict): A flat dictionary.
|
_import_azure_openai
|
from langchain_community.llms.openai import AzureOpenAI
return AzureOpenAI
|
def _import_azure_openai() ->Any:
from langchain_community.llms.openai import AzureOpenAI
return AzureOpenAI
| null |
_get_triples
|
triple_query = """
MATCH (a)-[e:`{e_label}`]->(b)
WITH a,e,b LIMIT 3000
RETURN DISTINCT labels(a) AS from, type(e) AS edge, labels(b) AS to
LIMIT 10
"""
triple_template = '(:`{a}`)-[:`{e}`]->(:`{b}`)'
triple_schema = []
for label in e_labels:
q = triple_query.format(e_label=label)
data = self.query(q)
for d in data['results']:
triple = triple_template.format(a=d['from'][0], e=d['edge'], b=d[
'to'][0])
triple_schema.append(triple)
return triple_schema
|
def _get_triples(self, e_labels: List[str]) ->List[str]:
triple_query = """
MATCH (a)-[e:`{e_label}`]->(b)
WITH a,e,b LIMIT 3000
RETURN DISTINCT labels(a) AS from, type(e) AS edge, labels(b) AS to
LIMIT 10
"""
triple_template = '(:`{a}`)-[:`{e}`]->(:`{b}`)'
triple_schema = []
for label in e_labels:
q = triple_query.format(e_label=label)
data = self.query(q)
for d in data['results']:
triple = triple_template.format(a=d['from'][0], e=d['edge'], b=
d['to'][0])
triple_schema.append(triple)
return triple_schema
| null |
embed_query
|
"""Embed a query using GPT4All.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
|
def embed_query(self, text: str) ->List[float]:
"""Embed a query using GPT4All.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
|
Embed a query using GPT4All.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
get
|
"""Return docs according ids.
Args:
ids: The ids of the embedding vectors.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter: Filter by any metadata of the document.
not_include_fields: Not pack the specified fields of each document.
limit: The number of documents to return. Defaults to 5. Optional.
Returns:
Documents which satisfy the input conditions.
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
docs_detail = self.awadb_client.Get(ids=ids, text_in_page_content=
text_in_page_content, meta_filter=meta_filter, not_include_fields=
not_include_fields, limit=limit)
results: Dict[str, Document] = {}
for doc_detail in docs_detail:
content = ''
meta_info = {}
for field in doc_detail:
if field == 'embedding_text':
content = doc_detail[field]
continue
elif field == 'text_embedding' or field == '_id':
continue
meta_info[field] = doc_detail[field]
doc = Document(page_content=content, metadata=meta_info)
results[doc_detail['_id']] = doc
return results
|
def get(self, ids: Optional[List[str]]=None, text_in_page_content: Optional
[str]=None, meta_filter: Optional[dict]=None, not_include_fields:
Optional[Set[str]]=None, limit: Optional[int]=None, **kwargs: Any) ->Dict[
str, Document]:
"""Return docs according ids.
Args:
ids: The ids of the embedding vectors.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter: Filter by any metadata of the document.
not_include_fields: Not pack the specified fields of each document.
limit: The number of documents to return. Defaults to 5. Optional.
Returns:
Documents which satisfy the input conditions.
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
docs_detail = self.awadb_client.Get(ids=ids, text_in_page_content=
text_in_page_content, meta_filter=meta_filter, not_include_fields=
not_include_fields, limit=limit)
results: Dict[str, Document] = {}
for doc_detail in docs_detail:
content = ''
meta_info = {}
for field in doc_detail:
if field == 'embedding_text':
content = doc_detail[field]
continue
elif field == 'text_embedding' or field == '_id':
continue
meta_info[field] = doc_detail[field]
doc = Document(page_content=content, metadata=meta_info)
results[doc_detail['_id']] = doc
return results
|
Return docs according ids.
Args:
ids: The ids of the embedding vectors.
text_in_page_content: Filter by the text in page_content of Document.
meta_filter: Filter by any metadata of the document.
not_include_fields: Not pack the specified fields of each document.
limit: The number of documents to return. Defaults to 5. Optional.
Returns:
Documents which satisfy the input conditions.
|
get_by_name
|
return session.query(cls).filter(cls.name == name).first()
|
@classmethod
def get_by_name(cls, session: Session, name: str) ->Optional['CollectionStore'
]:
return session.query(cls).filter(cls.name == name).first()
| null |
test_language_loader_for_javascript_with_parser_threshold
|
"""Test JavaScript loader with parser enabled and below threshold."""
file_path = Path(__file__).parent.parent.parent / 'examples'
loader = GenericLoader.from_filesystem(file_path, glob='hello_world.js',
parser=LanguageParser(language='js', parser_threshold=1000))
docs = loader.load()
assert len(docs) == 1
|
def test_language_loader_for_javascript_with_parser_threshold() ->None:
"""Test JavaScript loader with parser enabled and below threshold."""
file_path = Path(__file__).parent.parent.parent / 'examples'
loader = GenericLoader.from_filesystem(file_path, glob='hello_world.js',
parser=LanguageParser(language='js', parser_threshold=1000))
docs = loader.load()
assert len(docs) == 1
|
Test JavaScript loader with parser enabled and below threshold.
|
has_history
|
return len(glob.glob(str(self.folder / 'model-????????-??????.vw'))) > 0
|
def has_history(self) ->bool:
return len(glob.glob(str(self.folder / 'model-????????-??????.vw'))) > 0
| null |
lazy_load
|
"""Lazy load docs from each individual loader."""
for loader in self.loaders:
try:
data = loader.lazy_load()
except NotImplementedError:
data = loader.load()
for document in data:
yield document
|
def lazy_load(self) ->Iterator[Document]:
"""Lazy load docs from each individual loader."""
for loader in self.loaders:
try:
data = loader.lazy_load()
except NotImplementedError:
data = loader.load()
for document in data:
yield document
|
Lazy load docs from each individual loader.
|
__init__
|
"""Initialize with a file path. This should start with '/tmp/airbyte_local/'."""
self.file_path = file_path
"""Path to the directory containing the json files."""
|
def __init__(self, file_path: str):
"""Initialize with a file path. This should start with '/tmp/airbyte_local/'."""
self.file_path = file_path
"""Path to the directory containing the json files."""
|
Initialize with a file path. This should start with '/tmp/airbyte_local/'.
|
setup_class
|
if not os.getenv('OPENAI_API_KEY'):
raise ValueError('OPENAI_API_KEY environment variable is not set')
|
@classmethod
def setup_class(cls) ->None:
if not os.getenv('OPENAI_API_KEY'):
raise ValueError('OPENAI_API_KEY environment variable is not set')
| null |
_deidentify_with_replace
|
"""Uses the Data Loss Prevention API to deidentify sensitive data in a
string by replacing matched input values with the info type.
Args:
project: The Google Cloud project id to use as a parent resource.
input_str: The string to deidentify (will be treated as text).
info_types: A list of strings representing info types to look for.
Returns:
str: The input string after it has been deidentified.
"""
dlp = dlp_v2.DlpServiceClient()
parent = f'projects/{project}/locations/global'
if info_types is None:
info_types = ['PHONE_NUMBER', 'EMAIL_ADDRESS', 'CREDIT_CARD_NUMBER']
inspect_config = {'info_types': [{'name': info_type} for info_type in
info_types]}
deidentify_config = {'info_type_transformations': {'transformations': [{
'primitive_transformation': {'replace_with_info_type_config': {}}}]}}
item = {'value': input_str}
response = dlp.deidentify_content(request={'parent': parent,
'deidentify_config': deidentify_config, 'inspect_config':
inspect_config, 'item': item})
return response.item.value
|
def _deidentify_with_replace(input_str: str, info_types: List[str], project:
str) ->str:
"""Uses the Data Loss Prevention API to deidentify sensitive data in a
string by replacing matched input values with the info type.
Args:
project: The Google Cloud project id to use as a parent resource.
input_str: The string to deidentify (will be treated as text).
info_types: A list of strings representing info types to look for.
Returns:
str: The input string after it has been deidentified.
"""
dlp = dlp_v2.DlpServiceClient()
parent = f'projects/{project}/locations/global'
if info_types is None:
info_types = ['PHONE_NUMBER', 'EMAIL_ADDRESS', 'CREDIT_CARD_NUMBER']
inspect_config = {'info_types': [{'name': info_type} for info_type in
info_types]}
deidentify_config = {'info_type_transformations': {'transformations': [
{'primitive_transformation': {'replace_with_info_type_config': {}}}]}}
item = {'value': input_str}
response = dlp.deidentify_content(request={'parent': parent,
'deidentify_config': deidentify_config, 'inspect_config':
inspect_config, 'item': item})
return response.item.value
|
Uses the Data Loss Prevention API to deidentify sensitive data in a
string by replacing matched input values with the info type.
Args:
project: The Google Cloud project id to use as a parent resource.
input_str: The string to deidentify (will be treated as text).
info_types: A list of strings representing info types to look for.
Returns:
str: The input string after it has been deidentified.
|
_validate_uri
|
if self.target_uri == 'databricks':
return
if urlparse(self.target_uri).scheme != 'databricks':
raise ValueError(
'Invalid target URI. The target URI must be a valid databricks URI.')
|
def _validate_uri(self) ->None:
if self.target_uri == 'databricks':
return
if urlparse(self.target_uri).scheme != 'databricks':
raise ValueError(
'Invalid target URI. The target URI must be a valid databricks URI.'
)
| null |
test_get_input_schema_input_messages
|
class RunnableWithChatHistoryInput(BaseModel):
__root__: Sequence[BaseMessage]
runnable = RunnableLambda(lambda messages: {'output': [AIMessage(content=
'you said: ' + '\n'.join([str(m.content) for m in messages if
isinstance(m, HumanMessage)]))]})
get_session_history = _get_get_session_history()
with_history = RunnableWithMessageHistory(runnable, get_session_history,
output_messages_key='output')
assert with_history.get_input_schema().schema(
) == RunnableWithChatHistoryInput.schema()
|
def test_get_input_schema_input_messages() ->None:
class RunnableWithChatHistoryInput(BaseModel):
__root__: Sequence[BaseMessage]
runnable = RunnableLambda(lambda messages: {'output': [AIMessage(
content='you said: ' + '\n'.join([str(m.content) for m in messages if
isinstance(m, HumanMessage)]))]})
get_session_history = _get_get_session_history()
with_history = RunnableWithMessageHistory(runnable, get_session_history,
output_messages_key='output')
assert with_history.get_input_schema().schema(
) == RunnableWithChatHistoryInput.schema()
| null |
update
|
"""Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: if provided, updates should only happen if the
updated_at field is at least this time.
Raises:
ValueError: If the length of keys doesn't match the length of group_ids.
"""
|
@abstractmethod
def update(self, keys: Sequence[str], *, group_ids: Optional[Sequence[
Optional[str]]]=None, time_at_least: Optional[float]=None) ->None:
"""Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: if provided, updates should only happen if the
updated_at field is at least this time.
Raises:
ValueError: If the length of keys doesn't match the length of group_ids.
"""
|
Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: if provided, updates should only happen if the
updated_at field is at least this time.
Raises:
ValueError: If the length of keys doesn't match the length of group_ids.
|
from_template_file
|
"""Create a class from a template file.
Args:
template_file: path to a template file. String or Path.
input_variables: list of input variables.
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
"""
prompt = PromptTemplate.from_file(template_file, input_variables)
return cls(prompt=prompt, **kwargs)
|
@classmethod
def from_template_file(cls: Type[MessagePromptTemplateT], template_file:
Union[str, Path], input_variables: List[str], **kwargs: Any
) ->MessagePromptTemplateT:
"""Create a class from a template file.
Args:
template_file: path to a template file. String or Path.
input_variables: list of input variables.
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
"""
prompt = PromptTemplate.from_file(template_file, input_variables)
return cls(prompt=prompt, **kwargs)
|
Create a class from a template file.
Args:
template_file: path to a template file. String or Path.
input_variables: list of input variables.
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
|
observation_prefix
|
"""Prefix to append the observation with."""
return 'Observation: '
|
@property
def observation_prefix(self) ->str:
"""Prefix to append the observation with."""
return 'Observation: '
|
Prefix to append the observation with.
|
__init__
|
"""Initialize with Rockset client.
Args:
client: Rockset client object
collection: Rockset collection to insert docs / query
embeddings: Langchain Embeddings object to use to generate
embedding for given text.
text_key: column in Rockset collection to use to store the text
embedding_key: column in Rockset collection to use to store the embedding.
Note: We must apply `VECTOR_ENFORCE()` on this column via
Rockset ingest transformation.
"""
try:
from rockset import RocksetClient
except ImportError:
raise ImportError(
'Could not import rockset client python package. Please install it with `pip install rockset`.'
)
if not isinstance(client, RocksetClient):
raise ValueError(
f'client should be an instance of rockset.RocksetClient, got {type(client)}'
)
self._client = client
self._collection_name = collection_name
self._embeddings = embeddings
self._text_key = text_key
self._embedding_key = embedding_key
self._workspace = workspace
try:
self._client.set_application('langchain')
except AttributeError:
pass
|
def __init__(self, client: Any, embeddings: Embeddings, collection_name:
str, text_key: str, embedding_key: str, workspace: str='commons'):
"""Initialize with Rockset client.
Args:
client: Rockset client object
collection: Rockset collection to insert docs / query
embeddings: Langchain Embeddings object to use to generate
embedding for given text.
text_key: column in Rockset collection to use to store the text
embedding_key: column in Rockset collection to use to store the embedding.
Note: We must apply `VECTOR_ENFORCE()` on this column via
Rockset ingest transformation.
"""
try:
from rockset import RocksetClient
except ImportError:
raise ImportError(
'Could not import rockset client python package. Please install it with `pip install rockset`.'
)
if not isinstance(client, RocksetClient):
raise ValueError(
f'client should be an instance of rockset.RocksetClient, got {type(client)}'
)
self._client = client
self._collection_name = collection_name
self._embeddings = embeddings
self._text_key = text_key
self._embedding_key = embedding_key
self._workspace = workspace
try:
self._client.set_application('langchain')
except AttributeError:
pass
|
Initialize with Rockset client.
Args:
client: Rockset client object
collection: Rockset collection to insert docs / query
embeddings: Langchain Embeddings object to use to generate
embedding for given text.
text_key: column in Rockset collection to use to store the text
embedding_key: column in Rockset collection to use to store the embedding.
Note: We must apply `VECTOR_ENFORCE()` on this column via
Rockset ingest transformation.
|
embed_query
|
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
|
def embed_query(self, text: str) ->List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
|
Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
test_delete_dataset_by_filter
|
"""Test delete dataset."""
deeplake_datastore.delete(filter={'metadata': {'page': '1'}})
assert deeplake_datastore.similarity_search('bar', k=1, filter={'metadata':
{'page': '1'}}) == []
assert len(deeplake_datastore.vectorstore.dataset) == 2
deeplake_datastore.delete_dataset()
|
def test_delete_dataset_by_filter(deeplake_datastore: DeepLake) ->None:
"""Test delete dataset."""
deeplake_datastore.delete(filter={'metadata': {'page': '1'}})
assert deeplake_datastore.similarity_search('bar', k=1, filter={
'metadata': {'page': '1'}}) == []
assert len(deeplake_datastore.vectorstore.dataset) == 2
deeplake_datastore.delete_dataset()
|
Test delete dataset.
|
from_tiktoken_encoder
|
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ImportError(
'Could not import tiktoken python package. This is needed in order to calculate max_tokens_for_prompt. Please install it with `pip install tiktoken`.'
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str) ->int:
return len(enc.encode(text, allowed_special=allowed_special,
disallowed_special=disallowed_special))
if issubclass(cls, TokenTextSplitter):
extra_kwargs = {'encoding_name': encoding_name, 'model_name':
model_name, 'allowed_special': allowed_special,
'disallowed_special': disallowed_special}
kwargs = {**kwargs, **extra_kwargs}
return cls(length_function=_tiktoken_encoder, **kwargs)
|
@classmethod
def from_tiktoken_encoder(cls: Type[TS], encoding_name: str='gpt2',
model_name: Optional[str]=None, allowed_special: Union[Literal['all'],
AbstractSet[str]]=set(), disallowed_special: Union[Literal['all'],
Collection[str]]='all', **kwargs: Any) ->TS:
"""Text splitter that uses tiktoken encoder to count length."""
try:
import tiktoken
except ImportError:
raise ImportError(
'Could not import tiktoken python package. This is needed in order to calculate max_tokens_for_prompt. Please install it with `pip install tiktoken`.'
)
if model_name is not None:
enc = tiktoken.encoding_for_model(model_name)
else:
enc = tiktoken.get_encoding(encoding_name)
def _tiktoken_encoder(text: str) ->int:
return len(enc.encode(text, allowed_special=allowed_special,
disallowed_special=disallowed_special))
if issubclass(cls, TokenTextSplitter):
extra_kwargs = {'encoding_name': encoding_name, 'model_name':
model_name, 'allowed_special': allowed_special,
'disallowed_special': disallowed_special}
kwargs = {**kwargs, **extra_kwargs}
return cls(length_function=_tiktoken_encoder, **kwargs)
|
Text splitter that uses tiktoken encoder to count length.
|
__init__
|
"""
Arguments:
project_id: Google Cloud Project ID.
location: (Optional) Translate model location.
model_id: (Optional) Translate model ID to use.
glossary_id: (Optional) Translate glossary ID to use.
api_endpoint: (Optional) Regional endpoint to use.
"""
try:
from google.api_core.client_options import ClientOptions
from google.cloud import translate
except ImportError as exc:
raise ImportError(
'Install Google Cloud Translate to use this parser.(pip install google-cloud-translate)'
) from exc
self.project_id = project_id
self.location = location
self.model_id = model_id
self.glossary_id = glossary_id
self._client = translate.TranslationServiceClient(client_info=
get_client_info('translate'), client_options=ClientOptions(api_endpoint
=api_endpoint) if api_endpoint else None)
self._parent_path = self._client.common_location_path(project_id, location)
self._model_path = (f'{self._parent_path}/models/{model_id}' if model_id else
None)
self._glossary_path = self._client.glossary_path(project_id, location,
glossary_id) if glossary_id else None
|
def __init__(self, project_id: str, *, location: str='global', model_id:
    Optional[str]=None, glossary_id: Optional[str]=None, api_endpoint:
    Optional[str]=None) ->None:
    """Create a Google Cloud Translate parser.

    Arguments:
        project_id: Google Cloud Project ID.
        location: (Optional) Translate model location.
        model_id: (Optional) Translate model ID to use.
        glossary_id: (Optional) Translate glossary ID to use.
        api_endpoint: (Optional) Regional endpoint to use.
    """
    try:
        from google.api_core.client_options import ClientOptions
        from google.cloud import translate
    except ImportError as exc:
        raise ImportError(
            'Install Google Cloud Translate to use this parser.(pip install google-cloud-translate)'
            ) from exc
    self.project_id = project_id
    self.location = location
    self.model_id = model_id
    self.glossary_id = glossary_id
    # Only pin a regional endpoint when one was supplied.
    client_options = ClientOptions(api_endpoint=api_endpoint
        ) if api_endpoint else None
    self._client = translate.TranslationServiceClient(client_info=
        get_client_info('translate'), client_options=client_options)
    self._parent_path = self._client.common_location_path(project_id, location)
    if model_id:
        self._model_path = f'{self._parent_path}/models/{model_id}'
    else:
        self._model_path = None
    if glossary_id:
        self._glossary_path = self._client.glossary_path(project_id,
            location, glossary_id)
    else:
        self._glossary_path = None
|
Arguments:
project_id: Google Cloud Project ID.
location: (Optional) Translate model location.
model_id: (Optional) Translate model ID to use.
glossary_id: (Optional) Translate glossary ID to use.
api_endpoint: (Optional) Regional endpoint to use.
|
test_unstructured_api_file_loader_io_multiple_files
|
"""Test unstructured loader."""
file_paths = [os.path.join(EXAMPLE_DOCS_DIRECTORY,
'layout-parser-paper.pdf'), os.path.join(EXAMPLE_DOCS_DIRECTORY,
'whatsapp_chat.txt')]
with ExitStack() as stack:
files = [stack.enter_context(open(file_path, 'rb')) for file_path in
file_paths]
loader = UnstructuredAPIFileIOLoader(file=files, api_key='FAKE_API_KEY',
strategy='fast', mode='elements', file_filenames=file_paths)
docs = loader.load()
assert len(docs) > 1
|
def test_unstructured_api_file_loader_io_multiple_files() ->None:
    """Test unstructured loader."""
    names = ['layout-parser-paper.pdf', 'whatsapp_chat.txt']
    file_paths = [os.path.join(EXAMPLE_DOCS_DIRECTORY, name) for name in names]
    with ExitStack() as stack:
        # Keep every file handle open for the duration of the load.
        files = [stack.enter_context(open(path, 'rb')) for path in file_paths]
        loader = UnstructuredAPIFileIOLoader(file=files, api_key=
            'FAKE_API_KEY', strategy='fast', mode='elements',
            file_filenames=file_paths)
        docs = loader.load()
        assert len(docs) > 1
|
Test unstructured loader.
|
test_not_an_ai
|
parser = OpenAIFunctionsAgentOutputParser()
err = f'Expected an AI message got {str(SystemMessage)}'
with pytest.raises(TypeError, match=err):
parser.invoke(SystemMessage(content='x'))
|
def test_not_an_ai() ->None:
    """The parser must reject any message that is not an AI message."""
    parser = OpenAIFunctionsAgentOutputParser()
    expected_error = f'Expected an AI message got {str(SystemMessage)}'
    with pytest.raises(TypeError, match=expected_error):
        parser.invoke(SystemMessage(content='x'))
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    # Serialized objects from this module are filed under langchain.prompts.chat.
    namespace = ['langchain', 'prompts', 'chat']
    return namespace
|
Get the namespace of the langchain object.
|
embed_query
|
"""Embed a query using the Llama model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(text)
return list(map(float, embedding))
|
def embed_query(self, text: str) ->List[float]:
    """Embed a single query string using the Llama model.

    Args:
        text: The text to embed.

    Returns:
        The embedding for the text as a list of floats.
    """
    raw_embedding = self.client.embed(text)
    return [float(component) for component in raw_embedding]
|
Embed a query using the Llama model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
evaluate_run
|
"""Evaluate an example."""
try:
result = self({'run': run, 'example': example}, include_run_info=True)
return self._prepare_evaluator_output(result)
except Exception as e:
return EvaluationResult(key=self.string_evaluator.evaluation_name,
comment=f'Error evaluating run {run.id}: {e}')
|
def evaluate_run(self, run: Run, example: Optional[Example]=None
    ) ->EvaluationResult:
    """Evaluate an example."""
    try:
        chain_output = self({'run': run, 'example': example},
            include_run_info=True)
        return self._prepare_evaluator_output(chain_output)
    except Exception as e:
        # Never raise out of an evaluation; report the failure as a result.
        return EvaluationResult(key=self.string_evaluator.evaluation_name,
            comment=f'Error evaluating run {run.id}: {e}')
|
Evaluate an example.
|
clear
|
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
|
def clear(self) ->None:
    """Clear context from this session for every memory."""
    # Each constituent memory handles its own reset.
    for sub_memory in self.memories:
        sub_memory.clear()
|
Clear context from this session for every memory.
|
__init__
|
super().__init__(handlers, inheritable_handlers, parent_run_id, **kwargs)
self.parent_run_manager = parent_run_manager
self.ended = False
|
def __init__(self, handlers: List[BaseCallbackHandler],
    inheritable_handlers: Optional[List[BaseCallbackHandler]]=None,
    parent_run_id: Optional[UUID]=None, *, parent_run_manager:
    CallbackManagerForChainRun, **kwargs: Any) ->None:
    """Initialize the child run manager and remember its parent.

    Args:
        handlers: Callback handlers attached to this run.
        inheritable_handlers: Handlers propagated to child runs.
        parent_run_id: ID of the parent run, if any.
        parent_run_manager: Manager of the enclosing chain run (keyword-only).
        **kwargs: Forwarded to the base run manager.
    """
    super().__init__(handlers, inheritable_handlers, parent_run_id, **kwargs)
    self.parent_run_manager = parent_run_manager
    # Tracks whether this child run has been marked as finished.
    self.ended = False
| null |
_import_self_hosted
|
from langchain_community.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
|
def _import_self_hosted() ->Any:
    """Import ``SelfHostedPipeline`` on demand (deferred module import)."""
    from langchain_community.llms.self_hosted import SelfHostedPipeline
    return SelfHostedPipeline
| null |
on_tool_start
|
if self._should_check(serialized) and not self._approve(input_str):
raise HumanRejectedException(
f'Inputs {input_str} to tool {serialized} were rejected.')
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, *,
    run_id: UUID, parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
    """Abort the tool call unless it is exempt from checks or approved."""
    needs_approval = self._should_check(serialized)
    if needs_approval and not self._approve(input_str):
        raise HumanRejectedException(
            f'Inputs {input_str} to tool {serialized} were rejected.')
| null |
_import_steam_webapi
|
from langchain_community.utilities.steam import SteamWebAPIWrapper
return SteamWebAPIWrapper
|
def _import_steam_webapi() ->Any:
    """Import ``SteamWebAPIWrapper`` on demand (deferred module import)."""
    from langchain_community.utilities.steam import SteamWebAPIWrapper
    return SteamWebAPIWrapper
| null |
get_num_tokens
|
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
|
def get_num_tokens(self, text: str) ->int:
    """Count tokens in *text*.

    Delegates to the Anthropic-specific counter when the model is an
    Anthropic model; otherwise uses the base class implementation.
    """
    if self._model_is_anthropic:
        return get_num_tokens_anthropic(text)
    else:
        return super().get_num_tokens(text)
| null |
query
|
"""Query Kùzu database"""
params_list = []
for param_name in params:
params_list.append([param_name, params[param_name]])
result = self.conn.execute(query, params_list)
column_names = result.get_column_names()
return_list = []
while result.has_next():
row = result.get_next()
return_list.append(dict(zip(column_names, row)))
return return_list
|
def query(self, query: str, params: dict={}) ->List[Dict[str, Any]]:
"""Query Kùzu database"""
params_list = []
for param_name in params:
params_list.append([param_name, params[param_name]])
result = self.conn.execute(query, params_list)
column_names = result.get_column_names()
return_list = []
while result.has_next():
row = result.get_next()
return_list.append(dict(zip(column_names, row)))
return return_list
|
Query Kùzu database
|
next_thought
|
response_text = self.predict_and_parse(problem_description=
problem_description, thoughts=thoughts_path, **kwargs)
return response_text if isinstance(response_text, str) else ''
|
def next_thought(self, problem_description: str, thoughts_path: Tuple[str,
    ...]=(), **kwargs: Any) ->str:
    """Produce the next thought given the problem and the thoughts so far."""
    parsed = self.predict_and_parse(problem_description=
        problem_description, thoughts=thoughts_path, **kwargs)
    if isinstance(parsed, str):
        return parsed
    # Non-string parses are treated as "no thought".
    return ''
| null |
on_retriever_error_common
|
self.errors += 1
self.retriever_errors += 1
|
def on_retriever_error_common(self) ->None:
    """Bump the shared error counters for a retriever failure."""
    self.errors += 1
    self.retriever_errors += 1
| null |
_import_edenai_EdenaiTool
|
from langchain_community.tools.edenai import EdenaiTool
return EdenaiTool
|
def _import_edenai_EdenaiTool() ->Any:
    """Import ``EdenaiTool`` on demand (deferred module import)."""
    from langchain_community.tools.edenai import EdenaiTool
    return EdenaiTool
| null |
_create_table
|
"""Create table if it doesn't exist."""
conn = self.connection_pool.connect()
try:
cur = conn.cursor()
try:
cur.execute(
"""CREATE TABLE IF NOT EXISTS {}
({} TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci,
{} BLOB, {} JSON);"""
.format(self.table_name, self.content_field, self.vector_field,
self.metadata_field))
finally:
cur.close()
finally:
conn.close()
|
def _create_table(self: SingleStoreDB) ->None:
    """Create table if it doesn't exist."""
    connection = self.connection_pool.connect()
    try:
        cursor = connection.cursor()
        try:
            ddl = """CREATE TABLE IF NOT EXISTS {}
                ({} TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci,
                {} BLOB, {} JSON);""".format(self.table_name, self.
                content_field, self.vector_field, self.metadata_field)
            cursor.execute(ddl)
        finally:
            # Always release the cursor, even if the DDL fails.
            cursor.close()
    finally:
        # Return the pooled connection in every case.
        connection.close()
|
Create table if it doesn't exist.
|
_import_self_hosted
|
from langchain_community.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
|
def _import_self_hosted() ->Any:
    """Import ``SelfHostedPipeline`` on demand (deferred module import)."""
    from langchain_community.llms.self_hosted import SelfHostedPipeline
    return SelfHostedPipeline
| null |
test_myscale_with_metadatas_with_relevance_scores
|
"""Test end to end construction and scored search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
config = MyScaleSettings()
config.table = 'test_myscale_with_metadatas_with_relevance_scores'
docsearch = MyScale.from_texts(texts=texts, embedding=FakeEmbeddings(),
metadatas=metadatas, config=config)
output = docsearch.similarity_search_with_relevance_scores('foo', k=1)
assert output[0][0] == Document(page_content='foo', metadata={'page': '0'})
docsearch.drop()
|
def test_myscale_with_metadatas_with_relevance_scores() ->None:
    """Test end to end construction and scored search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(index)} for index in range(len(texts))]
    config = MyScaleSettings()
    config.table = 'test_myscale_with_metadatas_with_relevance_scores'
    docsearch = MyScale.from_texts(texts=texts, embedding=FakeEmbeddings(),
        metadatas=metadatas, config=config)
    results = docsearch.similarity_search_with_relevance_scores('foo', k=1)
    top_document = results[0][0]
    assert top_document == Document(page_content='foo', metadata={'page': '0'})
    docsearch.drop()
|
Test end to end construction and scored search.
|
test_load_returns_no_result
|
"""Test that gives no result."""
api_client = PubMedLoader(query='1605.08386WWW')
docs = api_client.load()
assert len(docs) == 0
|
def test_load_returns_no_result() ->None:
    """Test that gives no result."""
    loader = PubMedLoader(query='1605.08386WWW')
    documents = loader.load()
    assert len(documents) == 0
|
Test that gives no result.
|
test_include_types2
|
structured_schema = {'node_props': {'Movie': [{'property': 'title', 'type':
'STRING'}], 'Actor': [{'property': 'name', 'type': 'STRING'}], 'Person':
[{'property': 'name', 'type': 'STRING'}]}, 'rel_props': {},
'relationships': [{'start': 'Actor', 'end': 'Movie', 'type': 'ACTED_IN'
}, {'start': 'Person', 'end': 'Movie', 'type': 'DIRECTED'}]}
include_types = ['Movie', 'Actor']
output = construct_schema(structured_schema, include_types, [])
expected_schema = """Node properties are the following:
Movie {title: STRING},Actor {name: STRING}
Relationship properties are the following:
The relationships are the following:
"""
assert output == expected_schema
|
def test_include_types2() ->None:
    """Only the included node types and their relationships survive filtering."""
    structured_schema = {'node_props': {'Movie': [{'property': 'title',
        'type': 'STRING'}], 'Actor': [{'property': 'name', 'type': 'STRING'
        }], 'Person': [{'property': 'name', 'type': 'STRING'}]},
        'rel_props': {}, 'relationships': [{'start': 'Actor', 'end':
        'Movie', 'type': 'ACTED_IN'}, {'start': 'Person', 'end': 'Movie',
        'type': 'DIRECTED'}]}
    included = ['Movie', 'Actor']
    actual = construct_schema(structured_schema, included, [])
    expected_schema = """Node properties are the following:
Movie {title: STRING},Actor {name: STRING}
Relationship properties are the following:
The relationships are the following:
    """
    assert actual == expected_schema
| null |
_simple_recursion
|
if x < 10:
return RunnableLambda(lambda *args: _simple_recursion(x + 1))
else:
return x
|
def _simple_recursion(x: int) ->Union[int, Runnable]:
    """Return x once it reaches 10; otherwise a runnable for the next step."""
    if x >= 10:
        return x
    return RunnableLambda(lambda *args: _simple_recursion(x + 1))
| null |
_parse_stream
|
for line in rbody:
_line = _parse_stream_helper(line)
if _line is not None:
yield _line
|
def _parse_stream(rbody: Iterator[bytes]) ->Iterator[str]:
    """Yield each line accepted by the stream helper, skipping None results."""
    for raw_line in rbody:
        parsed = _parse_stream_helper(raw_line)
        if parsed is None:
            continue
        yield parsed
| null |
set_weights
|
if not values.get('weights'):
n_retrievers = len(values['retrievers'])
values['weights'] = [1 / n_retrievers] * n_retrievers
return values
|
@root_validator(pre=True)
def set_weights(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Default to equal weights across retrievers when none were provided."""
    if values.get('weights'):
        return values
    retriever_count = len(values['retrievers'])
    values['weights'] = [1 / retriever_count] * retriever_count
    return values
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'base']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    # Serialized objects from this module are filed under langchain.prompts.base.
    namespace = ['langchain', 'prompts', 'base']
    return namespace
|
Get the namespace of the langchain object.
|
assert_docs
|
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {'title', 'summary', 'source'}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
|
def assert_docs(docs: List[Document], all_meta: bool=False) ->None:
    """Check each document has content and the expected metadata keys.

    With ``all_meta`` set, every document must carry extra metadata beyond
    the core keys; otherwise it must carry exactly the core keys.
    """
    required_keys = {'title', 'summary', 'source'}
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        meta_keys = set(doc.metadata)
        assert meta_keys.issuperset(required_keys)
        if all_meta:
            assert len(meta_keys) > len(required_keys)
        else:
            assert len(meta_keys) == len(required_keys)
| null |
from_texts
|
"""Create an AwaDB vectorstore from a raw documents.
Args:
texts (List[str]): List of texts to add to the table.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory of logging and persistence.
client (Optional[awadb.Client]): AwaDB client
Returns:
AwaDB: AwaDB vectorstore.
"""
awadb_client = cls(table_name=table_name, embedding=embedding,
log_and_data_dir=log_and_data_dir, client=client)
awadb_client.add_texts(texts=texts, metadatas=metadatas)
return awadb_client
|
@classmethod
def from_texts(cls: Type[AwaDB], texts: List[str], embedding: Optional[
    Embeddings]=None, metadatas: Optional[List[dict]]=None, table_name: str
    =_DEFAULT_TABLE_NAME, log_and_data_dir: Optional[str]=None, client:
    Optional[awadb.Client]=None, **kwargs: Any) ->AwaDB:
    """Build an AwaDB vectorstore from raw documents.

    Args:
        texts (List[str]): List of texts to add to the table.
        embedding (Optional[Embeddings]): Embedding function. Defaults to None.
        metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
        table_name (str): Name of the table to create.
        log_and_data_dir (Optional[str]): Directory of logging and persistence.
        client (Optional[awadb.Client]): AwaDB client
    Returns:
        AwaDB: AwaDB vectorstore.
    """
    store = cls(table_name=table_name, embedding=embedding,
        log_and_data_dir=log_and_data_dir, client=client)
    store.add_texts(texts=texts, metadatas=metadatas)
    return store
|
Create an AwaDB vectorstore from a raw documents.
Args:
texts (List[str]): List of texts to add to the table.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory of logging and persistence.
client (Optional[awadb.Client]): AwaDB client
Returns:
AwaDB: AwaDB vectorstore.
|
clear
|
"""Delete all entities from store."""
pass
|
@abstractmethod
def clear(self) ->None:
    """Delete all entities from store."""
    # Concrete subclasses must provide the actual wipe logic.
    pass
|
Delete all entities from store.
|
_import_cassandra
|
from langchain_community.vectorstores.cassandra import Cassandra
return Cassandra
|
def _import_cassandra() ->Any:
    """Import ``Cassandra`` on demand (deferred module import)."""
    from langchain_community.vectorstores.cassandra import Cassandra
    return Cassandra
| null |
visit_FunctionDef
|
visitor = NonLocals()
visitor.visit(node)
self.nonlocals.update(visitor.loads - visitor.stores)
|
def visit_FunctionDef(self, node: ast.FunctionDef) ->Any:
    """Collect names that a nested function loads but never stores."""
    nested_scope = NonLocals()
    nested_scope.visit(node)
    self.nonlocals.update(nested_scope.loads - nested_scope.stores)
| null |
_AsyncFunctionDef
|
self.__FunctionDef_helper(t, 'async def')
|
def _AsyncFunctionDef(self, t):
    # Same layout as a regular function, just emitted with the 'async def'
    # keyword. NOTE: `self.__FunctionDef_helper` relies on class-private
    # name mangling in the enclosing class.
    self.__FunctionDef_helper(t, 'async def')
| null |
test_openai_invoke
|
"""Test invoke tokens from AzureChatOpenAI."""
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str)
|
@pytest.mark.scheduled
def test_openai_invoke(llm: AzureChatOpenAI) ->None:
    """Test invoke tokens from AzureChatOpenAI."""
    response = llm.invoke("I'm Pickle Rick", config={'tags': ['foo']})
    assert isinstance(response.content, str)
|
Test invoke tokens from AzureChatOpenAI.
|
_run
|
"""Get the schema for tables in a comma-separated list."""
return self.db.get_table_info_no_throw([t.strip() for t in table_names.
split(',')])
|
def _run(self, table_names: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Get the schema for tables in a comma-separated list."""
    requested_tables = [name.strip() for name in table_names.split(',')]
    return self.db.get_table_info_no_throw(requested_tables)
|
Get the schema for tables in a comma-separated list.
|
_call
|
"""Queries the LLM endpoint with the given prompt and stop sequence."""
request: Dict[str, Any] = {'prompt': prompt}
if self._client.llm:
request.update(self._llm_params)
request.update(self.model_kwargs or self.extra_params)
request.update(kwargs)
if stop:
request['stop'] = stop
if self.transform_input_fn:
request = self.transform_input_fn(**request)
return self._client.post(request, transform_output_fn=self.transform_output_fn)
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Queries the LLM endpoint with the given prompt and stop sequence."""
    payload: Dict[str, Any] = {'prompt': prompt}
    # Layer parameters in order of increasing specificity.
    if self._client.llm:
        payload.update(self._llm_params)
    payload.update(self.model_kwargs or self.extra_params)
    payload.update(kwargs)
    if stop:
        payload['stop'] = stop
    if self.transform_input_fn:
        payload = self.transform_input_fn(**payload)
    return self._client.post(payload, transform_output_fn=self.
        transform_output_fn)
|
Queries the LLM endpoint with the given prompt and stop sequence.
|
run
|
"""Run a GraphQL query and get the results."""
result = self._execute_query(query)
return json.dumps(result, indent=2)
|
def run(self, query: str) ->str:
    """Run a GraphQL query and get the results."""
    query_result = self._execute_query(query)
    # Pretty-print so the result is readable by downstream consumers.
    return json.dumps(query_result, indent=2)
|
Run a GraphQL query and get the results.
|
__init__
|
"""Initialize ZepMemory.
Args:
session_id (str): Identifies your user or a user's session
url (str, optional): Your Zep server's URL. Defaults to
"http://localhost:8000".
api_key (Optional[str], optional): Your Zep API key. Defaults to None.
output_key (Optional[str], optional): The key to use for the output message.
Defaults to None.
input_key (Optional[str], optional): The key to use for the input message.
Defaults to None.
return_messages (bool, optional): Does your prompt template expect a string
or a list of Messages? Defaults to False
i.e. return a string.
human_prefix (str, optional): The prefix to use for human messages.
Defaults to "Human".
ai_prefix (str, optional): The prefix to use for AI messages.
Defaults to "AI".
memory_key (str, optional): The key to use for the memory.
Defaults to "history".
Ensure that this matches the key used in
chain's prompt template.
"""
chat_message_history = ZepChatMessageHistory(session_id=session_id, url=url,
api_key=api_key)
super().__init__(chat_memory=chat_message_history, output_key=output_key,
input_key=input_key, return_messages=return_messages, human_prefix=
human_prefix, ai_prefix=ai_prefix, memory_key=memory_key)
|
def __init__(self, session_id: str, url: str='http://localhost:8000',
    api_key: Optional[str]=None, output_key: Optional[str]=None, input_key:
    Optional[str]=None, return_messages: bool=False, human_prefix: str=
    'Human', ai_prefix: str='AI', memory_key: str='history'):
    """Initialize ZepMemory.
    Args:
        session_id (str): Identifies your user or a user's session
        url (str, optional): Your Zep server's URL. Defaults to
                             "http://localhost:8000".
        api_key (Optional[str], optional): Your Zep API key. Defaults to None.
        output_key (Optional[str], optional): The key to use for the output message.
                                              Defaults to None.
        input_key (Optional[str], optional): The key to use for the input message.
                                             Defaults to None.
        return_messages (bool, optional): Does your prompt template expect a string
                                          or a list of Messages? Defaults to False
                                          i.e. return a string.
        human_prefix (str, optional): The prefix to use for human messages.
                                      Defaults to "Human".
        ai_prefix (str, optional): The prefix to use for AI messages.
                                   Defaults to "AI".
        memory_key (str, optional): The key to use for the memory.
                                    Defaults to "history".
                                    Ensure that this matches the key used in
                                    chain's prompt template.
    """
    # Message persistence is delegated entirely to the Zep server.
    chat_message_history = ZepChatMessageHistory(session_id=session_id, url
        =url, api_key=api_key)
    super().__init__(chat_memory=chat_message_history, output_key=
        output_key, input_key=input_key, return_messages=return_messages,
        human_prefix=human_prefix, ai_prefix=ai_prefix, memory_key=memory_key)
|
Initialize ZepMemory.
Args:
session_id (str): Identifies your user or a user's session
url (str, optional): Your Zep server's URL. Defaults to
"http://localhost:8000".
api_key (Optional[str], optional): Your Zep API key. Defaults to None.
output_key (Optional[str], optional): The key to use for the output message.
Defaults to None.
input_key (Optional[str], optional): The key to use for the input message.
Defaults to None.
return_messages (bool, optional): Does your prompt template expect a string
or a list of Messages? Defaults to False
i.e. return a string.
human_prefix (str, optional): The prefix to use for human messages.
Defaults to "Human".
ai_prefix (str, optional): The prefix to use for AI messages.
Defaults to "AI".
memory_key (str, optional): The key to use for the memory.
Defaults to "history".
Ensure that this matches the key used in
chain's prompt template.
|
test_get_open_issues
|
"""Basic test to fetch issues"""
issues = api_client.get_issues()
assert len(issues) != 0
|
def test_get_open_issues(api_client: GitHubAPIWrapper) ->None:
    """Basic test to fetch issues"""
    fetched_issues = api_client.get_issues()
    assert len(fetched_issues) > 0
|
Basic test to fetch issues
|
postprocess
|
"""Parses a response from the AI Foundation Model Function API.
Strongly assumes that the API will return a single response.
"""
msg_list = self._process_response(response)
msg, is_stopped = self._aggregate_msgs(msg_list)
msg, is_stopped = self._early_stop_msg(msg, is_stopped, stop=stop)
return msg, is_stopped
|
def postprocess(self, response: Union[str, Response], stop: Optional[
    Sequence[str]]=None) ->Tuple[dict, bool]:
    """Parses a response from the AI Foundation Model Function API.
    Strongly assumes that the API will return a single response.
    """
    message_list = self._process_response(response)
    aggregated, stopped = self._aggregate_msgs(message_list)
    final_msg, stopped = self._early_stop_msg(aggregated, stopped, stop=stop)
    return final_msg, stopped
|
Parses a response from the AI Foundation Model Function API.
Strongly assumes that the API will return a single response.
|
_collapse
|
result_docs = docs
length_func = self.combine_documents_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
def _collapse_docs_func(docs: List[Document], **kwargs: Any) ->str:
return self._collapse_chain.run(input_documents=docs, callbacks=
callbacks, **kwargs)
_token_max = token_max or self.token_max
while num_tokens is not None and num_tokens > _token_max:
new_result_doc_list = split_list_of_docs(result_docs, length_func,
_token_max, **kwargs)
result_docs = []
for docs in new_result_doc_list:
new_doc = collapse_docs(docs, _collapse_docs_func, **kwargs)
result_docs.append(new_doc)
num_tokens = length_func(result_docs, **kwargs)
return result_docs, {}
|
def _collapse(self, docs: List[Document], token_max: Optional[int]=None,
    callbacks: Callbacks=None, **kwargs: Any) ->Tuple[List[Document], dict]:
    """Collapse documents until their combined prompt length fits the budget.

    Args:
        docs: Documents to (possibly) collapse.
        token_max: Token budget; falls back to ``self.token_max`` when None.
        callbacks: Callbacks forwarded to the collapse chain.
        **kwargs: Extra values forwarded to the length function and chains.

    Returns:
        A tuple of the collapsed documents and an empty extra-info dict.
    """
    result_docs = docs
    length_func = self.combine_documents_chain.prompt_length
    num_tokens = length_func(result_docs, **kwargs)
    def _collapse_docs_func(docs: List[Document], **kwargs: Any) ->str:
        # Merge one group of documents through the collapse chain.
        return self._collapse_chain.run(input_documents=docs, callbacks=
            callbacks, **kwargs)
    _token_max = token_max or self.token_max
    # Repeat until within budget: split into budget-sized groups, then
    # collapse each group into a single document.
    while num_tokens is not None and num_tokens > _token_max:
        new_result_doc_list = split_list_of_docs(result_docs, length_func,
            _token_max, **kwargs)
        result_docs = []
        for docs in new_result_doc_list:
            new_doc = collapse_docs(docs, _collapse_docs_func, **kwargs)
            result_docs.append(new_doc)
        num_tokens = length_func(result_docs, **kwargs)
    return result_docs, {}
| null |
on_chain_end
|
"""Run when chain ends running."""
aim = import_aim()
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = {'action': 'on_chain_end'}
resp.update(self.get_custom_callback_meta())
outputs_res = deepcopy(outputs)
self._run.track(aim.Text(outputs_res['output']), name='on_chain_end',
context=resp)
|
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None:
    """Run when chain ends running."""
    aim = import_aim()
    self.step += 1
    self.chain_ends += 1
    self.ends += 1
    tracking_context = {'action': 'on_chain_end'}
    tracking_context.update(self.get_custom_callback_meta())
    # Deep-copy so tracking cannot be affected by later mutation of outputs.
    outputs_snapshot = deepcopy(outputs)
    self._run.track(aim.Text(outputs_snapshot['output']), name=
        'on_chain_end', context=tracking_context)
|
Run when chain ends running.
|
_pushFile
|
with open(content_path, 'rb') as source_file:
response = requests.post(self._config['BACKEND'] + '/processing/upload',
headers={'content-type': mimetypes.guess_type(content_path)[0] or
'application/octet-stream', 'x-stf-nuakey': 'Bearer ' + self.
_config['NUA_KEY']}, data=source_file.read())
if response.status_code != 200:
logger.info(
f'Error uploading {content_path}: {response.status_code} {response.text}'
)
return ''
else:
field = {'filefield': {'file': f'{response.text}'},
'processing_options': {'ml_text': self._config['enable_ml']}}
return self._pushField(id, field)
|
def _pushFile(self, id: str, content_path: str) ->str:
    """Upload a local file for processing, then push the resulting field."""
    content_type = mimetypes.guess_type(content_path)[0
        ] or 'application/octet-stream'
    headers = {'content-type': content_type, 'x-stf-nuakey': 'Bearer ' +
        self._config['NUA_KEY']}
    with open(content_path, 'rb') as source_file:
        response = requests.post(self._config['BACKEND'] +
            '/processing/upload', headers=headers, data=source_file.read())
    if response.status_code != 200:
        logger.info(
            f'Error uploading {content_path}: {response.status_code} {response.text}'
            )
        return ''
    field = {'filefield': {'file': f'{response.text}'},
        'processing_options': {'ml_text': self._config['enable_ml']}}
    return self._pushField(id, field)
| null |
_import_mosaicml
|
from langchain_community.llms.mosaicml import MosaicML
return MosaicML
|
def _import_mosaicml() ->Any:
    """Import ``MosaicML`` on demand (deferred module import)."""
    from langchain_community.llms.mosaicml import MosaicML
    return MosaicML
| null |
_run_coros
|
if hasattr(asyncio, 'Runner'):
with asyncio.Runner() as runner:
for coro in coros:
runner.run(coro)
while (pending := asyncio.all_tasks(runner.get_loop())):
runner.run(asyncio.wait(pending))
else:
for coro in coros:
asyncio.run(coro)
|
def _run_coros(coros: List[Coroutine[Any, Any, Any]]) ->None:
    """Run coroutines to completion, draining tasks they schedule (3.11+)."""
    if not hasattr(asyncio, 'Runner'):
        # Pre-3.11 fallback: each coroutine gets its own event loop.
        for coro in coros:
            asyncio.run(coro)
        return
    with asyncio.Runner() as runner:
        for coro in coros:
            runner.run(coro)
        # Keep running until every task scheduled on the loop has finished.
        while (pending := asyncio.all_tasks(runner.get_loop())):
            runner.run(asyncio.wait(pending))
| null |
similarity_search_with_score
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self._embed_query(query)
docs = self.similarity_search_with_score_by_vector(embedding=embedding, k=k,
filter=filter, predicates=predicates, **kwargs)
return docs
|
def similarity_search_with_score(self, query: str, k: int=4, filter:
    Optional[Union[dict, list]]=None, predicates: Optional[Predicates]=None,
    **kwargs: Any) ->List[Tuple[Document, float]]:
    """Return docs most similar to query.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
    Returns:
        List of Documents most similar to the query and score for each
    """
    query_vector = self._embed_query(query)
    return self.similarity_search_with_score_by_vector(embedding=
        query_vector, k=k, filter=filter, predicates=predicates, **kwargs)
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
|
test_public_api
|
"""Hard-code public API to help determine if we have broken it."""
assert sorted(__all__) == ['Blob', 'BlobLoader', 'FileSystemBlobLoader',
'YoutubeAudioLoader']
|
def test_public_api() ->None:
    """Hard-code public API to help determine if we have broken it."""
    expected_api = ['Blob', 'BlobLoader', 'FileSystemBlobLoader',
        'YoutubeAudioLoader']
    assert sorted(__all__) == expected_api
|
Hard-code public API to help determine if we have broken it.
|
test_follow_up_custom
|
"""Test follow up parsing for custom followups."""
parser = SelfAskOutputParser(followups=('Now:',))
_input = 'Now: what is two + 2'
output = parser.invoke(_input)
expected_output = AgentAction(tool='Intermediate Answer', tool_input=
'what is two + 2', log=_input)
assert output == expected_output
|
def test_follow_up_custom() ->None:
    """Test follow up parsing for custom followups."""
    parser = SelfAskOutputParser(followups=('Now:',))
    text = 'Now: what is two + 2'
    expected_action = AgentAction(tool='Intermediate Answer', tool_input=
        'what is two + 2', log=text)
    assert parser.invoke(text) == expected_action
|
Test follow up parsing for custom followups.
|
_parse_front_matter
|
"""Parse front matter metadata from the content and return it as a dict."""
if not self.collect_metadata:
return {}
match = self.FRONT_MATTER_REGEX.search(content)
front_matter = {}
if match:
lines = match.group(1).split('\n')
for line in lines:
if ':' in line:
key, value = line.split(':', 1)
front_matter[key.strip()] = value.strip()
else:
continue
return front_matter
|
def _parse_front_matter(self, content: str) ->dict:
    """Parse front matter metadata from the content and return it as a dict."""
    if not self.collect_metadata:
        return {}
    front_matter: dict = {}
    match = self.FRONT_MATTER_REGEX.search(content)
    if match:
        for line in match.group(1).split('\n'):
            # Lines without a colon carry no key/value pair.
            if ':' not in line:
                continue
            key, value = line.split(':', 1)
            front_matter[key.strip()] = value.strip()
    return front_matter
|
Parse front matter metadata from the content and return it as a dict.
|
test_from_texts_with_scores
|
"""Test end to end construction and search with scores and IDs."""
texts = ['apple', 'orange', 'hammer']
metadatas = [{'page': f'{i}'} for i in range(len(texts))]
vector_store.add_texts(texts, metadatas)
wait()
search_results = vector_store.similarity_search_with_score('apple', k=3)
docs = [o[0] for o in search_results]
scores = [o[1] for o in search_results]
assert docs == [Document(page_content='apple', metadata={'page': '0'}),
Document(page_content='orange', metadata={'page': '1'}), Document(
page_content='hammer', metadata={'page': '2'})]
assert scores[0] > scores[1] > scores[2]
|
def test_from_texts_with_scores(vector_store: MomentoVectorIndex) ->None:
    """Test end to end construction and search with scores and IDs."""
    texts = ['apple', 'orange', 'hammer']
    vector_store.add_texts(texts, [{'page': f'{i}'} for i in range(len(
        texts))])
    wait()
    results = vector_store.similarity_search_with_score('apple', k=3)
    docs = [doc for doc, _ in results]
    scores = [score for _, score in results]
    # Documents come back in relevance order matching the insertion order.
    assert docs == [Document(page_content=text, metadata={'page': str(i)}) for
        i, text in enumerate(texts)]
    # Scores must be strictly decreasing with relevance.
    assert scores[0] > scores[1] > scores[2]
|
Test end to end construction and search with scores and IDs.
|
zep_vectorstore
|
mock_document_client = mocker.patch('zep_python.document.client.DocumentClient'
, autospec=True)
mock_document_client.get_collection.return_value = mock_collection
mock_client = mocker.patch('zep_python.ZepClient', autospec=True)
mock_client.return_value.document = mock_document_client
vs = ZepVectorStore(mock_collection_config.name, 'http://localhost:8080',
api_key='test', config=mock_collection_config)
return vs
|
@pytest.fixture
@pytest.mark.requires('zep_python')
def zep_vectorstore(mocker: MockerFixture, mock_collection:
    'DocumentCollection', mock_collection_config: CollectionConfig
    ) ->ZepVectorStore:
    """Fixture: a ZepVectorStore wired to fully mocked zep clients.

    NOTE(review): pytest marks applied to fixtures have no effect -- confirm
    whether the ``requires`` gate is enforced on the consuming tests instead.
    """
    doc_client = mocker.patch('zep_python.document.client.DocumentClient',
        autospec=True)
    doc_client.get_collection.return_value = mock_collection
    zep_client = mocker.patch('zep_python.ZepClient', autospec=True)
    zep_client.return_value.document = doc_client
    return ZepVectorStore(mock_collection_config.name,
        'http://localhost:8080', api_key='test', config=mock_collection_config)
| null |
from_llm
|
chain = _load_sequential_chain(llm, create_assertions_prompt,
check_assertions_prompt, revised_summary_prompt, are_all_true_prompt,
verbose=verbose)
return cls(sequential_chain=chain, verbose=verbose, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, create_assertions_prompt:
    PromptTemplate=CREATE_ASSERTIONS_PROMPT, check_assertions_prompt:
    PromptTemplate=CHECK_ASSERTIONS_PROMPT, revised_summary_prompt:
    PromptTemplate=REVISED_SUMMARY_PROMPT, are_all_true_prompt:
    PromptTemplate=ARE_ALL_TRUE_PROMPT, verbose: bool=False, **kwargs: Any
    ) ->LLMSummarizationCheckerChain:
    """Construct the checker chain from an LLM and its four stage prompts."""
    sequential = _load_sequential_chain(llm, create_assertions_prompt,
        check_assertions_prompt, revised_summary_prompt,
        are_all_true_prompt, verbose=verbose)
    return cls(sequential_chain=sequential, verbose=verbose, **kwargs)
| null |
output_keys
|
"""Return output key.
:meta private:
"""
return self.output_variables
|
@property
def output_keys(self) ->List[str]:
    """Return output key.

    Exposes ``output_variables`` directly as this chain's output keys.

    :meta private:
    """
    return self.output_variables
|
Return output key.
:meta private:
|
_import_ai21
|
from langchain_community.llms.ai21 import AI21
return AI21
|
def _import_ai21() ->Any:
    """Lazily import and return the ``AI21`` LLM class."""
    from langchain_community.llms.ai21 import AI21
    return AI21
| null |
get_embeddings_model
|
assert slug == _MODEL_ID
return GradientEmbeddingsModel()
|
def get_embeddings_model(self, slug: str) ->GradientEmbeddingsModel:
    """Return a stub embeddings model for the expected model id.

    Test double: only ``_MODEL_ID`` is accepted; any other slug fails the
    assertion (note asserts are stripped under ``python -O``).
    """
    assert slug == _MODEL_ID
    return GradientEmbeddingsModel()
| null |
_prepare_output
|
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
if 'score' in parsed and self.normalize_by is not None:
parsed['score'] = parsed['score'] / self.normalize_by
return parsed
|
def _prepare_output(self, result: dict) ->dict:
    """Prepare the output."""
    output = result[self.output_key]
    # Propagate run metadata into the parsed payload when present.
    if RUN_KEY in result:
        output[RUN_KEY] = result[RUN_KEY]
    # Optionally rescale the score into the normalized range.
    if self.normalize_by is not None and 'score' in output:
        output['score'] /= self.normalize_by
    return output
|
Prepare the output.
|
test_memory_with_message_store
|
"""Test the memory with a message store."""
message_history = _chat_message_history()
memory = ConversationBufferMemory(memory_key='baz', chat_memory=
message_history, return_messages=True)
assert memory.chat_memory.messages == []
memory.chat_memory.add_ai_message('This is me, the AI')
memory.chat_memory.add_user_message('This is me, the human')
messages = memory.chat_memory.messages
expected = [AIMessage(content='This is me, the AI'), HumanMessage(content=
'This is me, the human')]
assert messages == expected
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
|
def test_memory_with_message_store() ->None:
    """Test the memory with a message store."""
    history = _chat_message_history()
    memory = ConversationBufferMemory(memory_key='baz', chat_memory=history,
        return_messages=True)
    # Starts empty, records both speakers in order, and clears back to empty.
    assert memory.chat_memory.messages == []
    memory.chat_memory.add_ai_message('This is me, the AI')
    memory.chat_memory.add_user_message('This is me, the human')
    assert memory.chat_memory.messages == [AIMessage(content=
        'This is me, the AI'), HumanMessage(content='This is me, the human')]
    memory.chat_memory.clear()
    assert memory.chat_memory.messages == []
|
Test the memory with a message store.
|
llm_with_multi_fallbacks
|
error_llm = FakeListLLM(responses=['foo'], i=1)
error_llm_2 = FakeListLLM(responses=['baz'], i=1)
pass_llm = FakeListLLM(responses=['bar'])
return error_llm.with_fallbacks([error_llm_2, pass_llm])
|
@pytest.fixture()
def llm_with_multi_fallbacks() ->RunnableWithFallbacks:
    """Fixture: a failing LLM chained to one failing and one passing fallback."""
    primary = FakeListLLM(responses=['foo'], i=1)
    failing_fallback = FakeListLLM(responses=['baz'], i=1)
    passing_fallback = FakeListLLM(responses=['bar'])
    return primary.with_fallbacks([failing_fallback, passing_fallback])
| null |
_is_valid_url
|
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
|
@staticmethod
def _is_valid_url(url: str) ->bool:
    """Check if the url is valid."""
    parts = urlparse(url)
    # A valid URL needs both a scheme (http/https/...) and a network location.
    return bool(parts.scheme) and bool(parts.netloc)
|
Check if the url is valid.
|
_remove_front_matter
|
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub('', content)
|
def _remove_front_matter(self, content: str) ->str:
    """Remove front matter metadata from the given content."""
    if self.collect_metadata:
        # Strip the leading front-matter block, leaving the body untouched.
        return self.FRONT_MATTER_REGEX.sub('', content)
    return content
|
Remove front matter metadata from the given content.
|
accepts_config
|
"""Check if a callable accepts a config argument."""
try:
return signature(callable).parameters.get('config') is not None
except ValueError:
return False
|
def accepts_config(callable: Callable[..., Any]) ->bool:
    """Check if a callable accepts a config argument."""
    try:
        params = signature(callable).parameters
    except ValueError:
        # Some builtins/extension callables have no introspectable signature.
        return False
    return 'config' in params
|
Check if a callable accepts a config argument.
|
test_api_key_is_string
|
llm = StochasticAI(stochasticai_api_key='secret-api-key')
assert isinstance(llm.stochasticai_api_key, SecretStr)
|
def test_api_key_is_string() ->None:
    """The API key passed as a plain string is stored as a ``SecretStr``."""
    model = StochasticAI(stochasticai_api_key='secret-api-key')
    assert isinstance(model.stochasticai_api_key, SecretStr)
| null |
test_get_access_code_url
|
assert isinstance(ClickupAPIWrapper.get_access_code_url('oauth_client_id',
'oauth_client_secret'), str)
|
def test_get_access_code_url() ->None:
    """The OAuth access-code URL helper returns a string."""
    url = ClickupAPIWrapper.get_access_code_url('oauth_client_id',
        'oauth_client_secret')
    assert isinstance(url, str)
| null |
test_qdrant_embedding_interface
|
"""Test Qdrant may accept different types for embeddings."""
from qdrant_client import QdrantClient
client = QdrantClient(':memory:')
collection_name = uuid.uuid4().hex
Qdrant(client, collection_name, embeddings=embeddings, embedding_function=
embedding_function)
|
@pytest.mark.parametrize(['embeddings', 'embedding_function'], [(
    ConsistentFakeEmbeddings(), None), (ConsistentFakeEmbeddings().
    embed_query, None), (None, ConsistentFakeEmbeddings().embed_query)])
def test_qdrant_embedding_interface(embeddings: Optional[Embeddings],
    embedding_function: Optional[Callable]) ->None:
    """Test Qdrant may accept different types for embeddings."""
    from qdrant_client import QdrantClient
    client = QdrantClient(':memory:')
    collection_name = uuid.uuid4().hex
    # Construction must not raise for any supported combination of an
    # Embeddings object vs. a bare embedding callable.
    Qdrant(client, collection_name, embeddings=embeddings,
        embedding_function=embedding_function)
|
Test Qdrant may accept different types for embeddings.
|
test_duckdb_loader_page_content_columns
|
"""Test DuckDB loader."""
loader = DuckDBLoader('SELECT 1 AS a, 2 AS b UNION SELECT 3 AS a, 4 AS b',
page_content_columns=['a'])
docs = loader.load()
assert len(docs) == 2
assert docs[0].page_content == 'a: 1'
assert docs[0].metadata == {}
assert docs[1].page_content == 'a: 3'
assert docs[1].metadata == {}
|
@unittest.skipIf(not duckdb_installed, 'duckdb not installed')
def test_duckdb_loader_page_content_columns() ->None:
    """Test DuckDB loader."""
    loader = DuckDBLoader('SELECT 1 AS a, 2 AS b UNION SELECT 3 AS a, 4 AS b',
        page_content_columns=['a'])
    docs = loader.load()
    # Only column ``a`` appears in page content; ``b`` is dropped entirely.
    expected = [('a: 1', {}), ('a: 3', {})]
    assert len(docs) == len(expected)
    for doc, (content, metadata) in zip(docs, expected):
        assert doc.page_content == content
        assert doc.metadata == metadata
|
Test DuckDB loader.
|
test_openai_invoke
|
"""Test streaming tokens from AzureOpenAI."""
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result, str)
|
@pytest.mark.scheduled
def test_openai_invoke(llm: AzureOpenAI) ->None:
    """Test that invoking AzureOpenAI returns a string."""
    result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
    assert isinstance(result, str)
|
Test streaming tokens from AzureOpenAI.
|
test_latex_code_splitter
|
splitter = RecursiveCharacterTextSplitter.from_language(Language.LATEX,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
Hi Harrison!
\\chapter{1}
"""
chunks = splitter.split_text(code)
assert chunks == ['Hi Harrison!', '\\chapter{1}']
|
def test_latex_code_splitter() ->None:
    """Split a small LaTeX snippet with the language-aware splitter."""
    code = '\nHi Harrison!\n\\chapter{1}\n'
    splitter = RecursiveCharacterTextSplitter.from_language(Language.LATEX,
        chunk_size=CHUNK_SIZE, chunk_overlap=0)
    # The \chapter command is a LaTeX separator, so it starts a new chunk.
    assert splitter.split_text(code) == ['Hi Harrison!', '\\chapter{1}']
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's ``__all__`` must match the expected public API exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
_get_task_executor
|
if cls.task_executor is None:
cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism)
return cls.task_executor
|
@classmethod
def _get_task_executor(cls, request_parallelism: int=5) ->Executor:
    """Lazily create and return a shared class-level thread pool.

    ``request_parallelism`` only takes effect on the first call; later calls
    return the already-created executor unchanged.

    NOTE(review): the lazy init is not lock-guarded, so concurrent first calls
    could create two executors -- confirm callers are single-threaded.
    """
    if cls.task_executor is None:
        cls.task_executor = ThreadPoolExecutor(max_workers=request_parallelism)
    return cls.task_executor
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['gradient_access_token'] = get_from_dict_or_env(values,
'gradient_access_token', 'GRADIENT_ACCESS_TOKEN')
values['gradient_workspace_id'] = get_from_dict_or_env(values,
'gradient_workspace_id', 'GRADIENT_WORKSPACE_ID')
if values['gradient_access_token'] is None or len(values[
'gradient_access_token']) < 10:
raise ValueError('env variable `GRADIENT_ACCESS_TOKEN` must be set')
if values['gradient_workspace_id'] is None or len(values[
'gradient_access_token']) < 3:
raise ValueError('env variable `GRADIENT_WORKSPACE_ID` must be set')
if values['model_kwargs']:
kw = values['model_kwargs']
if not 0 <= kw.get('temperature', 0.5) <= 1:
raise ValueError('`temperature` must be in the range [0.0, 1.0]')
if not 0 <= kw.get('top_p', 0.5) <= 1:
raise ValueError('`top_p` must be in the range [0.0, 1.0]')
if 0 >= kw.get('top_k', 0.5):
raise ValueError('`top_k` must be positive')
if 0 >= kw.get('max_generated_token_count', 1):
raise ValueError('`max_generated_token_count` must be positive')
values['gradient_api_url'] = get_from_dict_or_env(values,
'gradient_api_url', 'GRADIENT_API_URL')
try:
import gradientai
except ImportError:
logging.warning(
'DeprecationWarning: `GradientLLM` will use `pip install gradientai` in future releases of langchain.'
)
except Exception:
pass
return values
|
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves ``gradient_access_token``, ``gradient_workspace_id`` and
    ``gradient_api_url`` from the values dict or environment, sanity-checks
    their lengths, and bounds-checks any sampling parameters supplied in
    ``model_kwargs``. Raises ``ValueError`` on any invalid value.
    """
    values['gradient_access_token'] = get_from_dict_or_env(values,
        'gradient_access_token', 'GRADIENT_ACCESS_TOKEN')
    values['gradient_workspace_id'] = get_from_dict_or_env(values,
        'gradient_workspace_id', 'GRADIENT_WORKSPACE_ID')
    if values['gradient_access_token'] is None or len(values[
        'gradient_access_token']) < 10:
        raise ValueError('env variable `GRADIENT_ACCESS_TOKEN` must be set')
    # BUG FIX: the length check below previously measured
    # gradient_access_token, so an empty/short workspace id slipped through.
    if values['gradient_workspace_id'] is None or len(values[
        'gradient_workspace_id']) < 3:
        raise ValueError('env variable `GRADIENT_WORKSPACE_ID` must be set')
    if values['model_kwargs']:
        kw = values['model_kwargs']
        if not 0 <= kw.get('temperature', 0.5) <= 1:
            raise ValueError('`temperature` must be in the range [0.0, 1.0]')
        if not 0 <= kw.get('top_p', 0.5) <= 1:
            raise ValueError('`top_p` must be in the range [0.0, 1.0]')
        if 0 >= kw.get('top_k', 0.5):
            raise ValueError('`top_k` must be positive')
        if 0 >= kw.get('max_generated_token_count', 1):
            raise ValueError('`max_generated_token_count` must be positive')
    values['gradient_api_url'] = get_from_dict_or_env(values,
        'gradient_api_url', 'GRADIENT_API_URL')
    try:
        import gradientai  # noqa: F401  -- presence check only
    except ImportError:
        logging.warning(
            'DeprecationWarning: `GradientLLM` will use `pip install gradientai` in future releases of langchain.'
            )
    except Exception:
        pass
    return values
|
Validate that api key and python package exists in environment.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.