method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_check_parser
|
"""Check that parser is valid for bs4."""
valid_parsers = ['html.parser', 'lxml', 'xml', 'lxml-xml', 'html5lib']
if parser not in valid_parsers:
raise ValueError('`parser` must be one of ' + ', '.join(valid_parsers) +
'.')
|
@staticmethod
def _check_parser(parser: str) ->None:
"""Check that parser is valid for bs4."""
valid_parsers = ['html.parser', 'lxml', 'xml', 'lxml-xml', 'html5lib']
if parser not in valid_parsers:
raise ValueError('`parser` must be one of ' + ', '.join(
valid_parsers) + '.')
|
Check that parser is valid for bs4.
|
test_given_engine_args_are_provided_then_they_should_be_used
|
"""When engine arguments are provided then they must be used to create the underlying engine."""
engine_args = {'pool_size': 5, 'max_overflow': 10, 'pool_recycle': -1,
'pool_use_lifo': False, 'pool_pre_ping': False, 'pool_timeout': 30}
pgvector.PGVector(connection_string=_CONNECTION_STRING, embedding_function=
_EMBEDDING_FUNCTION, engine_args=engine_args)
create_engine.assert_called_with(url=_CONNECTION_STRING, **engine_args)
|
@pytest.mark.requires('pgvector')
@mock.patch('sqlalchemy.create_engine')
def test_given_engine_args_are_provided_then_they_should_be_used(create_engine:
Mock) ->None:
"""When engine arguments are provided then they must be used to create the underlying engine."""
engine_args = {'pool_size': 5, 'max_overflow': 10, 'pool_recycle': -1,
'pool_use_lifo': False, 'pool_pre_ping': False, 'pool_timeout': 30}
pgvector.PGVector(connection_string=_CONNECTION_STRING,
embedding_function=_EMBEDDING_FUNCTION, engine_args=engine_args)
create_engine.assert_called_with(url=_CONNECTION_STRING, **engine_args)
|
When engine arguments are provided then they must be used to create the underlying engine.
|
_import_gmail_GmailSearch
|
from langchain_community.tools.gmail import GmailSearch
return GmailSearch
|
def _import_gmail_GmailSearch() ->Any:
from langchain_community.tools.gmail import GmailSearch
return GmailSearch
| null |
test_from_texts_with_metadatas_delete_multiple
|
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = AzureCosmosDBVectorSearch.from_texts(texts,
azure_openai_embeddings, metadatas=metadatas, collection=collection,
index_name=INDEX_NAME)
vectorstore.create_index(num_lists, dimensions, similarity_algorithm)
sleep(2)
output = vectorstore.similarity_search('Sandwich', k=5)
first_document_id_object = output[0].metadata['_id']
first_document_id = str(first_document_id_object)
output[1].metadata['_id']
second_document_id = output[1].metadata['_id']
output[2].metadata['_id']
third_document_id = output[2].metadata['_id']
document_ids = [first_document_id, second_document_id, third_document_id]
vectorstore.delete(document_ids)
sleep(2)
output_2 = vectorstore.similarity_search('Sandwich', k=5)
assert output
assert output_2
assert len(output) == 4
assert len(output_2) == 1
vectorstore.delete_index()
|
def test_from_texts_with_metadatas_delete_multiple(self,
azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None:
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = AzureCosmosDBVectorSearch.from_texts(texts,
azure_openai_embeddings, metadatas=metadatas, collection=collection,
index_name=INDEX_NAME)
vectorstore.create_index(num_lists, dimensions, similarity_algorithm)
sleep(2)
output = vectorstore.similarity_search('Sandwich', k=5)
first_document_id_object = output[0].metadata['_id']
first_document_id = str(first_document_id_object)
output[1].metadata['_id']
second_document_id = output[1].metadata['_id']
output[2].metadata['_id']
third_document_id = output[2].metadata['_id']
document_ids = [first_document_id, second_document_id, third_document_id]
vectorstore.delete(document_ids)
sleep(2)
output_2 = vectorstore.similarity_search('Sandwich', k=5)
assert output
assert output_2
assert len(output) == 4
assert len(output_2) == 1
vectorstore.delete_index()
| null |
format_response_payload
|
return json.loads(output)[0]['generated_text']
|
def format_response_payload(self, output: bytes) ->str:
return json.loads(output)[0]['generated_text']
| null |
get_llm_cache
|
"""Get the value of the `llm_cache` global setting."""
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing llm_cache from langchain root module is no longer supported'
)
old_llm_cache = langchain.llm_cache
global _llm_cache
return _llm_cache or old_llm_cache
|
def get_llm_cache() ->'BaseCache':
"""Get the value of the `llm_cache` global setting."""
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing llm_cache from langchain root module is no longer supported'
)
old_llm_cache = langchain.llm_cache
global _llm_cache
return _llm_cache or old_llm_cache
|
Get the value of the `llm_cache` global setting.
|
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids of the added texts.
"""
embeddings = self._embedding.embed_documents(list(texts))
with Session(self._engine) as _session:
results: List[str] = []
for text, embedding, metadata in zip(texts, embeddings, metadatas or [
dict()] * len(list(texts))):
t = insert(self._table).values(text=text, meta=metadata, embedding=
embedding)
id = _session.execute(t).inserted_primary_key[0]
results.append(str(id))
_session.commit()
return results
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids of the added texts.
"""
embeddings = self._embedding.embed_documents(list(texts))
with Session(self._engine) as _session:
results: List[str] = []
for text, embedding, metadata in zip(texts, embeddings, metadatas or
[dict()] * len(list(texts))):
t = insert(self._table).values(text=text, meta=metadata,
embedding=embedding)
id = _session.execute(t).inserted_primary_key[0]
results.append(str(id))
_session.commit()
return results
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids of the added texts.
|
metadata_func
|
metadata['name'] = record.get('name')
metadata['summary'] = record.get('summary')
metadata['url'] = record.get('url')
metadata['category'] = record.get('category')
metadata['updated_at'] = record.get('updated_at')
return metadata
|
def metadata_func(record: dict, metadata: dict) ->dict:
metadata['name'] = record.get('name')
metadata['summary'] = record.get('summary')
metadata['url'] = record.get('url')
metadata['category'] = record.get('category')
metadata['updated_at'] = record.get('updated_at')
return metadata
| null |
_import_ifttt
|
from langchain_community.tools.ifttt import IFTTTWebhook
return IFTTTWebhook
|
def _import_ifttt() ->Any:
from langchain_community.tools.ifttt import IFTTTWebhook
return IFTTTWebhook
| null |
test_with_config_with_config
|
llm = FakeListLLM(responses=["i'm a textbot"])
assert dumpd(llm.with_config({'metadata': {'a': 'b'}}).with_config(tags=[
'a-tag'])) == dumpd(llm.with_config({'metadata': {'a': 'b'}, 'tags': [
'a-tag']}))
|
def test_with_config_with_config() ->None:
llm = FakeListLLM(responses=["i'm a textbot"])
assert dumpd(llm.with_config({'metadata': {'a': 'b'}}).with_config(tags
=['a-tag'])) == dumpd(llm.with_config({'metadata': {'a': 'b'},
'tags': ['a-tag']}))
| null |
test_get_input_variables
|
prompt_a = PromptTemplate.from_template('{foo}')
prompt_b = PromptTemplate.from_template('{bar}')
pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_b,
pipeline_prompts=[('bar', prompt_a)])
assert pipeline_prompt.input_variables == ['foo']
|
def test_get_input_variables() ->None:
prompt_a = PromptTemplate.from_template('{foo}')
prompt_b = PromptTemplate.from_template('{bar}')
pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_b,
pipeline_prompts=[('bar', prompt_a)])
assert pipeline_prompt.input_variables == ['foo']
| null |
_pull_queue
|
try:
from nucliadb_protos.writer_pb2 import BrokerMessage
except ImportError as e:
raise ImportError(
'nucliadb-protos is not installed. Run `pip install nucliadb-protos` to install.'
) from e
try:
from google.protobuf.json_format import MessageToJson
except ImportError as e:
raise ImportError(
'Unable to import google.protobuf, please install with `pip install protobuf`.'
) from e
res = requests.get(self._config['BACKEND'] + '/processing/pull', headers={
'x-stf-nuakey': 'Bearer ' + self._config['NUA_KEY']}).json()
if res['status'] == 'empty':
logger.info('Queue empty')
elif res['status'] == 'ok':
payload = res['payload']
pb = BrokerMessage()
pb.ParseFromString(base64.b64decode(payload))
uuid = pb.uuid
logger.info(f'Pulled {uuid} from queue')
matching_id = self._find_matching_id(uuid)
if not matching_id:
logger.info(f'No matching id for {uuid}')
else:
self._results[matching_id]['status'] = 'done'
data = MessageToJson(pb, preserving_proto_field_name=True,
including_default_value_fields=True)
self._results[matching_id]['data'] = data
|
def _pull_queue(self) ->None:
try:
from nucliadb_protos.writer_pb2 import BrokerMessage
except ImportError as e:
raise ImportError(
'nucliadb-protos is not installed. Run `pip install nucliadb-protos` to install.'
) from e
try:
from google.protobuf.json_format import MessageToJson
except ImportError as e:
raise ImportError(
'Unable to import google.protobuf, please install with `pip install protobuf`.'
) from e
res = requests.get(self._config['BACKEND'] + '/processing/pull',
headers={'x-stf-nuakey': 'Bearer ' + self._config['NUA_KEY']}).json()
if res['status'] == 'empty':
logger.info('Queue empty')
elif res['status'] == 'ok':
payload = res['payload']
pb = BrokerMessage()
pb.ParseFromString(base64.b64decode(payload))
uuid = pb.uuid
logger.info(f'Pulled {uuid} from queue')
matching_id = self._find_matching_id(uuid)
if not matching_id:
logger.info(f'No matching id for {uuid}')
else:
self._results[matching_id]['status'] = 'done'
data = MessageToJson(pb, preserving_proto_field_name=True,
including_default_value_fields=True)
self._results[matching_id]['data'] = data
| null |
_check_response
|
if any(len(d['embedding']) == 1 for d in response['data']):
import openai
raise openai.error.APIError('LocalAI API returned an empty embedding')
return response
|
def _check_response(response: dict) ->dict:
if any(len(d['embedding']) == 1 for d in response['data']):
import openai
raise openai.error.APIError('LocalAI API returned an empty embedding')
return response
| null |
__init__
|
super().__init__(persist_path)
self.pd = guard_import('pandas')
self.pa = guard_import('pyarrow')
self.pq = guard_import('pyarrow.parquet')
|
def __init__(self, persist_path: str) ->None:
super().__init__(persist_path)
self.pd = guard_import('pandas')
self.pa = guard_import('pyarrow')
self.pq = guard_import('pyarrow.parquet')
| null |
_call
|
plan = self.planner.plan(inputs, callbacks=run_manager.get_child() if
run_manager else None)
if run_manager:
run_manager.on_text(str(plan), verbose=self.verbose)
for step in plan.steps:
_new_inputs = {'previous_steps': self.step_container, 'current_step':
step, 'objective': inputs[self.input_key]}
new_inputs = {**_new_inputs, **inputs}
response = self.executor.step(new_inputs, callbacks=run_manager.
get_child() if run_manager else None)
if run_manager:
run_manager.on_text(f'*****\n\nStep: {step.value}', verbose=self.
verbose)
run_manager.on_text(f'\n\nResponse: {response.response}', verbose=
self.verbose)
self.step_container.add_step(step, response)
return {self.output_key: self.step_container.get_final_response()}
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, Any]:
plan = self.planner.plan(inputs, callbacks=run_manager.get_child() if
run_manager else None)
if run_manager:
run_manager.on_text(str(plan), verbose=self.verbose)
for step in plan.steps:
_new_inputs = {'previous_steps': self.step_container,
'current_step': step, 'objective': inputs[self.input_key]}
new_inputs = {**_new_inputs, **inputs}
response = self.executor.step(new_inputs, callbacks=run_manager.
get_child() if run_manager else None)
if run_manager:
run_manager.on_text(f'*****\n\nStep: {step.value}', verbose=
self.verbose)
run_manager.on_text(f'\n\nResponse: {response.response}',
verbose=self.verbose)
self.step_container.add_step(step, response)
return {self.output_key: self.step_container.get_final_response()}
| null |
_dump_as_bytes
|
"""Return a bytes representation of a document."""
return dumps(obj).encode('utf-8')
|
def _dump_as_bytes(obj: Serializable) ->bytes:
"""Return a bytes representation of a document."""
return dumps(obj).encode('utf-8')
|
Return a bytes representation of a document.
|
_get_edenai
|
headers = {'accept': 'application/json', 'authorization':
f'Bearer {self.edenai_api_key}', 'User-Agent': self.get_user_agent()}
response = requests.get(url, headers=headers)
self._raise_on_error(response)
return response
|
def _get_edenai(self, url: str) ->requests.Response:
headers = {'accept': 'application/json', 'authorization':
f'Bearer {self.edenai_api_key}', 'User-Agent': self.get_user_agent()}
response = requests.get(url, headers=headers)
self._raise_on_error(response)
return response
| null |
_import_manifest
|
from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper
|
def _import_manifest() ->Any:
from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper
| null |
_get_relevant_documents
|
return self.load_docs(query=query)
|
def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
return self.load_docs(query=query)
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat']
|
Get the namespace of the langchain object.
|
get_title
|
return self.DocumentTitle.Text
|
def get_title(self) ->str:
return self.DocumentTitle.Text
| null |
__init__
|
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
|
def __init__(self) ->None:
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
|
Initialize with empty cache.
|
test_critique
|
response = 'Test Critique'
llm = FakeListLLM(responses=[response])
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2)
prompt_value, _ = chain.prep_prompts({'product': 'socks'})
chain.history.question = prompt_value.to_string()
chain.history.ideas = ['Test Idea 1', 'Test Idea 2']
result = chain._critique()
assert result == response
|
def test_critique() ->None:
response = 'Test Critique'
llm = FakeListLLM(responses=[response])
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2)
prompt_value, _ = chain.prep_prompts({'product': 'socks'})
chain.history.question = prompt_value.to_string()
chain.history.ideas = ['Test Idea 1', 'Test Idea 2']
result = chain._critique()
assert result == response
| null |
embeddings
|
return self._embedding
|
@property
def embeddings(self) ->Embeddings:
return self._embedding
| null |
config_schema
|
"""The type of config this runnable accepts specified as a pydantic model.
To mark a field as configurable, see the `configurable_fields`
and `configurable_alternatives` methods.
Args:
include: A list of fields to include in the config schema.
Returns:
A pydantic model that can be used to validate config.
"""
include = include or []
config_specs = self.config_specs
configurable = create_model('Configurable', **{spec.id: (spec.annotation,
Field(spec.default, title=spec.name, description=spec.description)) for
spec in config_specs}, __config__=_SchemaConfig) if config_specs else None
return create_model(self.get_name('Config'), __config__=_SchemaConfig, **{
'configurable': (configurable, None)} if configurable else {}, **{
field_name: (field_type, None) for field_name, field_type in
RunnableConfig.__annotations__.items() if field_name in [i for i in
include if i != 'configurable']})
|
def config_schema(self, *, include: Optional[Sequence[str]]=None) ->Type[
BaseModel]:
"""The type of config this runnable accepts specified as a pydantic model.
To mark a field as configurable, see the `configurable_fields`
and `configurable_alternatives` methods.
Args:
include: A list of fields to include in the config schema.
Returns:
A pydantic model that can be used to validate config.
"""
include = include or []
config_specs = self.config_specs
configurable = create_model('Configurable', **{spec.id: (spec.
annotation, Field(spec.default, title=spec.name, description=spec.
description)) for spec in config_specs}, __config__=_SchemaConfig
) if config_specs else None
return create_model(self.get_name('Config'), __config__=_SchemaConfig,
**{'configurable': (configurable, None)} if configurable else {},
**{field_name: (field_type, None) for field_name, field_type in
RunnableConfig.__annotations__.items() if field_name in [i for i in
include if i != 'configurable']})
|
The type of config this runnable accepts specified as a pydantic model.
To mark a field as configurable, see the `configurable_fields`
and `configurable_alternatives` methods.
Args:
include: A list of fields to include in the config schema.
Returns:
A pydantic model that can be used to validate config.
|
validate_environment
|
"""Validate that api key exists in environment."""
values['edenai_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'edenai_api_key', 'EDENAI_API_KEY'))
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key exists in environment."""
values['edenai_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'edenai_api_key', 'EDENAI_API_KEY'))
return values
|
Validate that api key exists in environment.
|
update
|
"""Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: if provided, updates should only happen if the
updated_at field is at least this time.
Raises:
ValueError: If the length of keys doesn't match the length of group_ids.
"""
|
@abstractmethod
def update(self, keys: Sequence[str], *, group_ids: Optional[Sequence[
Optional[str]]]=None, time_at_least: Optional[float]=None) ->None:
"""Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: if provided, updates should only happen if the
updated_at field is at least this time.
Raises:
ValueError: If the length of keys doesn't match the length of group_ids.
"""
|
Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: if provided, updates should only happen if the
updated_at field is at least this time.
Raises:
ValueError: If the length of keys doesn't match the length of group_ids.
|
get_buffer_string
|
"""Convert sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to contents of HumanMessages.
ai_prefix: THe prefix to prepend to contents of AIMessages.
Returns:
A single string concatenation of all input messages.
Example:
.. code-block:: python
from langchain_core import AIMessage, HumanMessage
messages = [
HumanMessage(content="Hi, how are you?"),
AIMessage(content="Good, how are you?"),
]
get_buffer_string(messages)
# -> "Human: Hi, how are you?
AI: Good, how are you?"
"""
string_messages = []
for m in messages:
if isinstance(m, HumanMessage):
role = human_prefix
elif isinstance(m, AIMessage):
role = ai_prefix
elif isinstance(m, SystemMessage):
role = 'System'
elif isinstance(m, FunctionMessage):
role = 'Function'
elif isinstance(m, ToolMessage):
role = 'Tool'
elif isinstance(m, ChatMessage):
role = m.role
else:
raise ValueError(f'Got unsupported message type: {m}')
message = f'{role}: {m.content}'
if isinstance(m, AIMessage) and 'function_call' in m.additional_kwargs:
message += f"{m.additional_kwargs['function_call']}"
string_messages.append(message)
return '\n'.join(string_messages)
|
def get_buffer_string(messages: Sequence[BaseMessage], human_prefix: str=
'Human', ai_prefix: str='AI') ->str:
"""Convert sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to contents of HumanMessages.
ai_prefix: THe prefix to prepend to contents of AIMessages.
Returns:
A single string concatenation of all input messages.
Example:
.. code-block:: python
from langchain_core import AIMessage, HumanMessage
messages = [
HumanMessage(content="Hi, how are you?"),
AIMessage(content="Good, how are you?"),
]
get_buffer_string(messages)
# -> "Human: Hi, how are you?
AI: Good, how are you?"
"""
string_messages = []
for m in messages:
if isinstance(m, HumanMessage):
role = human_prefix
elif isinstance(m, AIMessage):
role = ai_prefix
elif isinstance(m, SystemMessage):
role = 'System'
elif isinstance(m, FunctionMessage):
role = 'Function'
elif isinstance(m, ToolMessage):
role = 'Tool'
elif isinstance(m, ChatMessage):
role = m.role
else:
raise ValueError(f'Got unsupported message type: {m}')
message = f'{role}: {m.content}'
if isinstance(m, AIMessage) and 'function_call' in m.additional_kwargs:
message += f"{m.additional_kwargs['function_call']}"
string_messages.append(message)
return '\n'.join(string_messages)
|
Convert sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to contents of HumanMessages.
ai_prefix: THe prefix to prepend to contents of AIMessages.
Returns:
A single string concatenation of all input messages.
Example:
.. code-block:: python
from langchain_core import AIMessage, HumanMessage
messages = [
HumanMessage(content="Hi, how are you?"),
AIMessage(content="Good, how are you?"),
]
get_buffer_string(messages)
# -> "Human: Hi, how are you?
AI: Good, how are you?"
|
from_document
|
"""Create a DocumentWithState from a Document."""
if isinstance(doc, cls):
return doc
return cls(page_content=doc.page_content, metadata=doc.metadata)
|
@classmethod
def from_document(cls, doc: Document) ->'_DocumentWithState':
"""Create a DocumentWithState from a Document."""
if isinstance(doc, cls):
return doc
return cls(page_content=doc.page_content, metadata=doc.metadata)
|
Create a DocumentWithState from a Document.
|
get_default_output_parser
|
return XMLAgentOutputParser()
|
@staticmethod
def get_default_output_parser() ->XMLAgentOutputParser:
return XMLAgentOutputParser()
| null |
check_rellm_installation
|
import_rellm()
return values
|
@root_validator
def check_rellm_installation(cls, values: dict) ->dict:
import_rellm()
return values
| null |
_get_embeddings
|
embeddings: List[List[float]] = []
if batch_size is None:
batch_size = self.batch_size
if self.show_progress_bar:
try:
from tqdm.auto import tqdm
except ImportError as e:
raise ImportError(
'Must have tqdm installed if `show_progress_bar` is set to True. Please install with `pip install tqdm`.'
) from e
_iter = tqdm(range(0, len(texts), batch_size))
else:
_iter = range(0, len(texts), batch_size)
if input_type and input_type not in ['query', 'document']:
raise ValueError(
f"input_type {input_type} is invalid. Options: None, 'query', 'document'."
)
for i in _iter:
response = embed_with_retry(self, **self._invocation_params(input=texts
[i:i + batch_size], input_type=input_type))
embeddings.extend(r['embedding'] for r in response['data'])
return embeddings
|
def _get_embeddings(self, texts: List[str], batch_size: Optional[int]=None,
input_type: Optional[str]=None) ->List[List[float]]:
embeddings: List[List[float]] = []
if batch_size is None:
batch_size = self.batch_size
if self.show_progress_bar:
try:
from tqdm.auto import tqdm
except ImportError as e:
raise ImportError(
'Must have tqdm installed if `show_progress_bar` is set to True. Please install with `pip install tqdm`.'
) from e
_iter = tqdm(range(0, len(texts), batch_size))
else:
_iter = range(0, len(texts), batch_size)
if input_type and input_type not in ['query', 'document']:
raise ValueError(
f"input_type {input_type} is invalid. Options: None, 'query', 'document'."
)
for i in _iter:
response = embed_with_retry(self, **self._invocation_params(input=
texts[i:i + batch_size], input_type=input_type))
embeddings.extend(r['embedding'] for r in response['data'])
return embeddings
| null |
generate_dialogue_response
|
"""React to a given observation."""
call_to_action_template = """What would {agent_name} say? To end the conversation, write: GOODBYE: "what to say". Otherwise to continue the conversation, write: SAY: "what to say next"
"""
full_result = self._generate_reaction(observation, call_to_action_template,
now=now)
result = full_result.strip().split('\n')[0]
if 'GOODBYE:' in result:
farewell = self._clean_response(result.split('GOODBYE:')[-1])
self.memory.save_context({}, {self.memory.add_memory_key:
f'{self.name} observed {observation} and said {farewell}', self.
memory.now_key: now})
return False, f'{self.name} said {farewell}'
if 'SAY:' in result:
response_text = self._clean_response(result.split('SAY:')[-1])
self.memory.save_context({}, {self.memory.add_memory_key:
f'{self.name} observed {observation} and said {response_text}',
self.memory.now_key: now})
return True, f'{self.name} said {response_text}'
else:
return False, result
|
def generate_dialogue_response(self, observation: str, now: Optional[
datetime]=None) ->Tuple[bool, str]:
"""React to a given observation."""
call_to_action_template = """What would {agent_name} say? To end the conversation, write: GOODBYE: "what to say". Otherwise to continue the conversation, write: SAY: "what to say next"
"""
full_result = self._generate_reaction(observation,
call_to_action_template, now=now)
result = full_result.strip().split('\n')[0]
if 'GOODBYE:' in result:
farewell = self._clean_response(result.split('GOODBYE:')[-1])
self.memory.save_context({}, {self.memory.add_memory_key:
f'{self.name} observed {observation} and said {farewell}', self
.memory.now_key: now})
return False, f'{self.name} said {farewell}'
if 'SAY:' in result:
response_text = self._clean_response(result.split('SAY:')[-1])
self.memory.save_context({}, {self.memory.add_memory_key:
f'{self.name} observed {observation} and said {response_text}',
self.memory.now_key: now})
return True, f'{self.name} said {response_text}'
else:
return False, result
|
React to a given observation.
|
test_llm
|
llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.
CRITERIA])
with pytest.raises(ValueError, match='Must specify reference_key'):
run_on_dataset(dataset_name=kv_dataset_name, llm_or_chain_factory=llm,
evaluation=eval_config, client=client)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.
CRITERIA], reference_key='some_output')
with pytest.raises(InputFormatError, match=
'Example inputs do not match language model'):
run_on_dataset(dataset_name=kv_dataset_name, llm_or_chain_factory=llm,
evaluation=eval_config, client=client)
def input_mapper(d: dict) ->str:
return d['some_input']
run_on_dataset(client=client, dataset_name=kv_dataset_name,
llm_or_chain_factory=llm, evaluation=eval_config, input_mapper=
input_mapper, project_name=eval_project_name, tags=['shouldpass'])
_check_all_feedback_passed(eval_project_name, client)
|
def test_llm(kv_dataset_name: str, eval_project_name: str, client: Client
) ->None:
llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType
.CRITERIA])
with pytest.raises(ValueError, match='Must specify reference_key'):
run_on_dataset(dataset_name=kv_dataset_name, llm_or_chain_factory=
llm, evaluation=eval_config, client=client)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType
.CRITERIA], reference_key='some_output')
with pytest.raises(InputFormatError, match=
'Example inputs do not match language model'):
run_on_dataset(dataset_name=kv_dataset_name, llm_or_chain_factory=
llm, evaluation=eval_config, client=client)
def input_mapper(d: dict) ->str:
return d['some_input']
run_on_dataset(client=client, dataset_name=kv_dataset_name,
llm_or_chain_factory=llm, evaluation=eval_config, input_mapper=
input_mapper, project_name=eval_project_name, tags=['shouldpass'])
_check_all_feedback_passed(eval_project_name, client)
| null |
_identifying_params
|
"""Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client._config())
model_name = self._client._metadata()['model_name']
model_id = self._client._metadata()['model_id']
else:
if self._runner is None:
raise ValueError('Runner must be initialized.')
model_name = self.model_name
model_id = self.model_id
try:
self.llm_kwargs.update(json.loads(self._runner.identifying_params[
'configuration']))
except (TypeError, json.JSONDecodeError):
pass
return IdentifyingParams(server_url=self.server_url, server_type=self.
server_type, embedded=self.embedded, llm_kwargs=self.llm_kwargs,
model_name=model_name, model_id=model_id)
|
@property
def _identifying_params(self) ->IdentifyingParams:
"""Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client._config())
model_name = self._client._metadata()['model_name']
model_id = self._client._metadata()['model_id']
else:
if self._runner is None:
raise ValueError('Runner must be initialized.')
model_name = self.model_name
model_id = self.model_id
try:
self.llm_kwargs.update(json.loads(self._runner.
identifying_params['configuration']))
except (TypeError, json.JSONDecodeError):
pass
return IdentifyingParams(server_url=self.server_url, server_type=self.
server_type, embedded=self.embedded, llm_kwargs=self.llm_kwargs,
model_name=model_name, model_id=model_id)
|
Get the identifying parameters.
|
_process_name
|
preprocessed = name.replace('_', '-').lower()
if preprocessed.startswith('langchain-'):
preprocessed = preprocessed[len('langchain-'):]
if not re.match('^[a-z][a-z0-9-]*$', preprocessed):
raise ValueError(
'Name should only contain lowercase letters (a-z), numbers, and hyphens, and start with a letter.'
)
if preprocessed.endswith('-'):
raise ValueError('Name should not end with `-`.')
if preprocessed.find('--') != -1:
raise ValueError('Name should not contain consecutive hyphens.')
return Replacements({'__package_name__': f'langchain-{preprocessed}',
'__module_name__': 'langchain_' + preprocessed.replace('-', '_'),
'__ModuleName__': preprocessed.title().replace('-', ''),
'__package_name_short__': preprocessed})
|
def _process_name(name: str):
preprocessed = name.replace('_', '-').lower()
if preprocessed.startswith('langchain-'):
preprocessed = preprocessed[len('langchain-'):]
if not re.match('^[a-z][a-z0-9-]*$', preprocessed):
raise ValueError(
'Name should only contain lowercase letters (a-z), numbers, and hyphens, and start with a letter.'
)
if preprocessed.endswith('-'):
raise ValueError('Name should not end with `-`.')
if preprocessed.find('--') != -1:
raise ValueError('Name should not contain consecutive hyphens.')
return Replacements({'__package_name__': f'langchain-{preprocessed}',
'__module_name__': 'langchain_' + preprocessed.replace('-', '_'),
'__ModuleName__': preprocessed.title().replace('-', ''),
'__package_name_short__': preprocessed})
| null |
test_redis_new_vector
|
"""Test adding a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
docsearch.add_texts(['foo'])
output = docsearch.similarity_search('foo', k=2, return_metadata=False)
assert output == TEST_RESULT
assert drop(docsearch.index_name)
|
def test_redis_new_vector(texts: List[str]) ->None:
    """Adding a new document to an existing index should make it searchable."""
    store = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
    store.add_texts(['foo'])
    results = store.similarity_search('foo', k=2, return_metadata=False)
    assert results == TEST_RESULT
    assert drop(store.index_name)
|
Test adding a new document
|
_call
|
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
event: TEvent = self._call_before_predict(inputs=inputs)
prediction = self.active_policy.predict(event=event)
if self.metrics:
self.metrics.on_decision()
next_chain_inputs, event = self._call_after_predict_before_llm(inputs=
inputs, event=event, prediction=prediction)
t = self.llm_chain.run(**next_chain_inputs, callbacks=_run_manager.get_child())
_run_manager.on_text(t, color='green', verbose=self.verbose)
t = t.strip()
if self.verbose:
_run_manager.on_text('\nCode: ', verbose=self.verbose)
output = t
_run_manager.on_text('\nAnswer: ', verbose=self.verbose)
_run_manager.on_text(output, color='yellow', verbose=self.verbose)
next_chain_inputs, event = self._call_after_llm_before_scoring(llm_response
=output, event=event)
score = None
try:
if self._can_use_selection_scorer():
score = self.selection_scorer.score_response(inputs=
next_chain_inputs, llm_response=output, event=event)
except Exception as e:
logger.info(
f'The selection scorer was not able to score, and the chain was not able to adjust to this response, error: {e}'
)
if self.metrics and score is not None:
self.metrics.on_feedback(score)
event = self._call_after_scoring_before_learning(score=score, event=event)
self.active_policy.learn(event=event)
self.active_policy.log(event=event)
return {self.output_key: {'response': output, 'selection_metadata': event}}
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Run one predict -> LLM -> score -> learn cycle.

    Args:
        inputs: Raw chain inputs used to build the event.
        run_manager: Optional callback manager; a no-op manager is used
            when none is supplied.

    Returns:
        Dict keyed by ``self.output_key`` containing the LLM response and
        the selection-metadata event.
    """
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    # Build an event from the inputs and ask the active policy to predict.
    event: TEvent = self._call_before_predict(inputs=inputs)
    prediction = self.active_policy.predict(event=event)
    if self.metrics:
        self.metrics.on_decision()
    next_chain_inputs, event = self._call_after_predict_before_llm(inputs=
        inputs, event=event, prediction=prediction)
    t = self.llm_chain.run(**next_chain_inputs, callbacks=_run_manager.
        get_child())
    _run_manager.on_text(t, color='green', verbose=self.verbose)
    t = t.strip()
    # NOTE(review): 'Code:'/'Answer:' labels are display-only; the returned
    # output is the raw stripped LLM text.
    if self.verbose:
        _run_manager.on_text('\nCode: ', verbose=self.verbose)
    output = t
    _run_manager.on_text('\nAnswer: ', verbose=self.verbose)
    _run_manager.on_text(output, color='yellow', verbose=self.verbose)
    next_chain_inputs, event = self._call_after_llm_before_scoring(llm_response
        =output, event=event)
    score = None
    try:
        # Scoring is best-effort: a scorer failure must not abort the chain.
        if self._can_use_selection_scorer():
            score = self.selection_scorer.score_response(inputs=
                next_chain_inputs, llm_response=output, event=event)
    except Exception as e:
        logger.info(
            f'The selection scorer was not able to score, and the chain was not able to adjust to this response, error: {e}'
            )
    if self.metrics and score is not None:
        self.metrics.on_feedback(score)
    # The policy learns from the (possibly unscored) event, then logs it.
    event = self._call_after_scoring_before_learning(score=score, event=event)
    self.active_policy.learn(event=event)
    self.active_policy.log(event=event)
    return {self.output_key: {'response': output, 'selection_metadata': event}}
| null |
_import_momento_vector_index
|
from langchain_community.vectorstores.momento_vector_index import MomentoVectorIndex
return MomentoVectorIndex
|
def _import_momento_vector_index() ->Any:
    """Lazily import and return the ``MomentoVectorIndex`` vector store class."""
    from langchain_community.vectorstores.momento_vector_index import (
        MomentoVectorIndex,
    )
    return MomentoVectorIndex
| null |
patch_config
|
"""Patch a config with new values.
Args:
    config (Optional[RunnableConfig]): The config to patch.
callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.
Defaults to None.
recursion_limit (Optional[int], optional): The recursion limit to set.
Defaults to None.
max_concurrency (Optional[int], optional): The max concurrency to set.
Defaults to None.
run_name (Optional[str], optional): The run name to set. Defaults to None.
configurable (Optional[Dict[str, Any]], optional): The configurable to set.
Defaults to None.
Returns:
RunnableConfig: The patched config.
"""
config = ensure_config(config)
if callbacks is not None:
config['callbacks'] = callbacks
if 'run_name' in config:
del config['run_name']
if recursion_limit is not None:
config['recursion_limit'] = recursion_limit
if max_concurrency is not None:
config['max_concurrency'] = max_concurrency
if run_name is not None:
config['run_name'] = run_name
if configurable is not None:
config['configurable'] = {**config.get('configurable', {}), **configurable}
return config
|
def patch_config(config: Optional[RunnableConfig], *, callbacks: Optional[
    BaseCallbackManager]=None, recursion_limit: Optional[int]=None,
    max_concurrency: Optional[int]=None, run_name: Optional[str]=None,
    configurable: Optional[Dict[str, Any]]=None) ->RunnableConfig:
    """Patch a config with new values.

    Args:
        config (Optional[RunnableConfig]): The config to patch.
        callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.
            Defaults to None.
        recursion_limit (Optional[int], optional): The recursion limit to set.
            Defaults to None.
        max_concurrency (Optional[int], optional): The max concurrency to set.
            Defaults to None.
        run_name (Optional[str], optional): The run name to set. Defaults to None.
        configurable (Optional[Dict[str, Any]], optional): The configurable to set.
            Defaults to None.

    Returns:
        RunnableConfig: The patched config.
    """
    config = ensure_config(config)
    if callbacks is not None:
        # Replacing callbacks starts a new run context, so an inherited
        # run_name belongs to the previous run and is dropped here.
        config['callbacks'] = callbacks
        if 'run_name' in config:
            del config['run_name']
    if recursion_limit is not None:
        config['recursion_limit'] = recursion_limit
    if max_concurrency is not None:
        config['max_concurrency'] = max_concurrency
    if run_name is not None:
        config['run_name'] = run_name
    if configurable is not None:
        # Merge: newly supplied keys override any existing configurable keys.
        config['configurable'] = {**config.get('configurable', {}), **
            configurable}
    return config
|
Patch a config with new values.
Args:
config (Optional[RunnableConfig]): The config to patch.
callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.
Defaults to None.
recursion_limit (Optional[int], optional): The recursion limit to set.
Defaults to None.
max_concurrency (Optional[int], optional): The max concurrency to set.
Defaults to None.
run_name (Optional[str], optional): The run name to set. Defaults to None.
configurable (Optional[Dict[str, Any]], optional): The configurable to set.
Defaults to None.
Returns:
RunnableConfig: The patched config.
|
split_documents
|
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(texts, metadatas=metadatas)
|
def split_documents(self, documents: Iterable[Document]) ->List[Document]:
    """Split documents into chunked documents.

    Extracts page content and metadata in a single pass (so a generator
    input is consumed only once), then delegates to ``create_documents``.
    """
    extracted = [(document.page_content, document.metadata) for document in
        documents]
    page_contents = [content for content, _ in extracted]
    metadata_list = [metadata for _, metadata in extracted]
    return self.create_documents(page_contents, metadatas=metadata_list)
|
Split documents.
|
_import_vllm
|
from langchain_community.llms.vllm import VLLM
return VLLM
|
def _import_vllm() ->Any:
    """Lazily import and return the ``VLLM`` LLM class."""
    from langchain_community.llms.vllm import (
        VLLM,
    )
    return VLLM
| null |
test_init_fail_embedding_dim_mismatch
|
index = mock_index(index_details)
with pytest.raises(ValueError) as ex:
DatabricksVectorSearch(index, text_column=DEFAULT_TEXT_COLUMN,
embedding=FakeEmbeddingsWithDimension(DEFAULT_VECTOR_DIMENSION + 1))
assert f"embedding model's dimension '{DEFAULT_VECTOR_DIMENSION + 1}' does not match with the index's dimension '{DEFAULT_VECTOR_DIMENSION}'" in str(
ex.value)
|
@pytest.mark.requires('databricks', 'databricks.vector_search')
@pytest.mark.parametrize('index_details', [
    DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX])
def test_init_fail_embedding_dim_mismatch(index_details: dict) ->None:
    """Construction must fail when the embedding dimension disagrees with the index."""
    mismatched_dim = DEFAULT_VECTOR_DIMENSION + 1
    vs_index = mock_index(index_details)
    with pytest.raises(ValueError) as exc_info:
        DatabricksVectorSearch(vs_index, text_column=DEFAULT_TEXT_COLUMN,
            embedding=FakeEmbeddingsWithDimension(mismatched_dim))
    expected = (
        f"embedding model's dimension '{mismatched_dim}' does not match with the index's dimension '{DEFAULT_VECTOR_DIMENSION}'"
        )
    assert expected in str(exc_info.value)
| null |
clear
|
"""Clear memory contents."""
|
@abstractmethod
def clear(self) ->None:
    """Clear memory contents.

    Implementations must remove all stored memory state.
    """
|
Clear memory contents.
|
on_llm_start
|
"""Run when LLM starts."""
self.metrics['step'] += 1
self.metrics['llm_starts'] += 1
self.metrics['starts'] += 1
llm_starts = self.metrics['llm_starts']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_llm_start'})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
for idx, prompt in enumerate(prompts):
prompt_resp = deepcopy(resp)
prompt_resp['prompt'] = prompt
self.records['on_llm_start_records'].append(prompt_resp)
self.records['action_records'].append(prompt_resp)
self.mlflg.jsonf(prompt_resp, f'llm_start_{llm_starts}_prompt_{idx}')
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
    kwargs: Any) ->None:
    """Record metrics and per-prompt records when an LLM run starts."""
    # Bump all run counters together; llm_starts is read after the bump.
    for counter_name in ('step', 'llm_starts', 'starts'):
        self.metrics[counter_name] += 1
    start_count = self.metrics['llm_starts']
    base_record: Dict[str, Any] = {'action': 'on_llm_start'}
    base_record.update(flatten_dict(serialized))
    base_record.update(self.metrics)
    self.mlflg.metrics(self.metrics, step=self.metrics['step'])
    for prompt_index, prompt_text in enumerate(prompts):
        record = deepcopy(base_record)
        record['prompt'] = prompt_text
        self.records['on_llm_start_records'].append(record)
        self.records['action_records'].append(record)
        self.mlflg.jsonf(record,
            f'llm_start_{start_count}_prompt_{prompt_index}')
|
Run when LLM starts.
|
available_models
|
"""Map the available models that can be invoked."""
return self.client.available_models
|
@property
def available_models(self) ->dict:
    """Map the available models that can be invoked.

    Delegates directly to the wrapped client's ``available_models`` mapping.
    """
    return self.client.available_models
|
Map the available models that can be invoked.
|
test_add_texts
|
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayInMemorySearch.from_params(FakeEmbeddings())
assert isinstance(docsearch, DocArrayInMemorySearch)
assert docsearch.doc_index.num_docs() == 0
docsearch.add_texts(texts=texts)
assert docsearch.doc_index.num_docs() == 3
|
def test_add_texts(texts: List[str], tmp_path: Path) ->None:
    """Adding texts to an empty in-memory index should grow its document count."""
    store = DocArrayInMemorySearch.from_params(FakeEmbeddings())
    assert isinstance(store, DocArrayInMemorySearch)
    assert store.doc_index.num_docs() == 0
    store.add_texts(texts=texts)
    assert store.doc_index.num_docs() == 3
|
Test end to end construction and simple similarity search.
|
_get_prompt_input_key
|
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
|
def _get_prompt_input_key(self, inputs: Dict[str, Any]) ->str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
|
Get the input key for the prompt.
|
import_google
|
"""Import google libraries.
Returns:
Tuple[Request, Credentials]: Request and Credentials classes.
"""
try:
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
except ImportError:
raise ImportError(
'You need to install google-auth-httplib2 to use this toolkit. Try running pip install --upgrade google-auth-httplib2'
)
return Request, Credentials
|
def import_google() ->Tuple[Request, Credentials]:
    """Import google auth libraries.

    Returns:
        Tuple[Request, Credentials]: Request and Credentials classes.

    Raises:
        ImportError: If google-auth-httplib2 is not installed.
    """
    try:
        from google.auth.transport.requests import Request
        from google.oauth2.credentials import Credentials
    except ImportError:
        raise ImportError(
            'You need to install google-auth-httplib2 to use this toolkit. Try running pip install --upgrade google-auth-httplib2'
            )
    else:
        return Request, Credentials
|
Import google libraries.
Returns:
Tuple[Request, Credentials]: Request and Credentials classes.
|
test_model_garden
|
"""In order to run this test, you should provide endpoint names.
Example:
export FALCON_ENDPOINT_ID=...
export LLAMA_ENDPOINT_ID=...
export PROJECT=...
"""
endpoint_id = os.environ[endpoint_os_variable_name]
project = os.environ['PROJECT']
location = 'europe-west4'
llm = VertexAIModelGarden(endpoint_id=endpoint_id, project=project,
result_arg=result_arg, location=location)
output = llm('What is the meaning of life?')
assert isinstance(output, str)
assert llm._llm_type == 'vertexai_model_garden'
|
@pytest.mark.parametrize('endpoint_os_variable_name,result_arg', [(
    'FALCON_ENDPOINT_ID', 'generated_text'), ('LLAMA_ENDPOINT_ID', None)])
def test_model_garden(endpoint_os_variable_name: str, result_arg: Optional[str]
    ) ->None:
    """In order to run this test, you should provide endpoint names.
    Example:
    export FALCON_ENDPOINT_ID=...
    export LLAMA_ENDPOINT_ID=...
    export PROJECT=...
    """
    llm = VertexAIModelGarden(endpoint_id=os.environ[
        endpoint_os_variable_name], project=os.environ['PROJECT'],
        result_arg=result_arg, location='europe-west4')
    response = llm('What is the meaning of life?')
    assert isinstance(response, str)
    assert llm._llm_type == 'vertexai_model_garden'
|
In order to run this test, you should provide endpoint names.
Example:
export FALCON_ENDPOINT_ID=...
export LLAMA_ENDPOINT_ID=...
export PROJECT=...
|
_convert_schema
|
props = {k: {'title': k, **v} for k, v in schema['properties'].items()}
return {'type': 'object', 'properties': props, 'required': schema.get(
'required', [])}
|
def _convert_schema(schema: dict) ->dict:
props = {k: {'title': k, **v} for k, v in schema['properties'].items()}
return {'type': 'object', 'properties': props, 'required': schema.get(
'required', [])}
| null |
from_filesystem
|
"""Create a concurrent generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
exclude: A list of patterns to exclude from the loader.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
num_workers: Max number of concurrent workers to use.
parser_kwargs: Keyword arguments to pass to the parser.
"""
blob_loader = FileSystemBlobLoader(path, glob=glob, exclude=exclude,
suffixes=suffixes, show_progress=show_progress)
if isinstance(parser, str):
if parser == 'default' and cls.get_parser != GenericLoader.get_parser:
blob_parser = cls.get_parser(**parser_kwargs or {})
else:
blob_parser = get_parser(parser)
else:
blob_parser = parser
return cls(blob_loader, blob_parser, num_workers=num_workers)
|
@classmethod
def from_filesystem(cls, path: _PathLike, *, glob: str='**/[!.]*', exclude:
    Sequence[str]=(), suffixes: Optional[Sequence[str]]=None, show_progress:
    bool=False, parser: Union[DEFAULT, BaseBlobParser]='default',
    num_workers: int=4, parser_kwargs: Optional[dict]=None) ->ConcurrentLoader:
    """Create a concurrent generic document loader using a filesystem blob loader.

    Args:
        path: The path to the directory to load documents from.
        glob: The glob pattern to use to find documents.
        suffixes: The suffixes to use to filter documents. If None, all files
            matching the glob will be loaded.
        exclude: A list of patterns to exclude from the loader.
        show_progress: Whether to show a progress bar or not (requires tqdm).
            Proxies to the file system loader.
        parser: A blob parser which knows how to parse blobs into documents
        num_workers: Max number of concurrent workers to use.
        parser_kwargs: Keyword arguments to pass to the parser.
    """
    file_loader = FileSystemBlobLoader(path, glob=glob, exclude=exclude,
        suffixes=suffixes, show_progress=show_progress)
    if not isinstance(parser, str):
        selected_parser = parser
    elif parser == 'default' and cls.get_parser != GenericLoader.get_parser:
        # A subclass overriding ``get_parser`` supplies its own default.
        selected_parser = cls.get_parser(**parser_kwargs or {})
    else:
        selected_parser = get_parser(parser)
    return cls(file_loader, selected_parser, num_workers=num_workers)
|
Create a concurrent generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
exclude: A list of patterns to exclude from the loader.
show_progress: Whether to show a progress bar or not (requires tqdm).
Proxies to the file system loader.
parser: A blob parser which knows how to parse blobs into documents
num_workers: Max number of concurrent workers to use.
parser_kwargs: Keyword arguments to pass to the parser.
|
test_against_pal_chain_doc
|
"""
Test CPAL chain against the first example in the PAL chain notebook doc:
https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/chains/additional/pal.ipynb
"""
narrative_input = (
'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?'
)
llm = OpenAI(temperature=0, max_tokens=512)
cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
answer = cpal_chain.run(narrative_input)
"""
>>> story._outcome_table
name code value depends_on
0 cindy pass 4.0 []
1 marcia marcia.value = cindy.value + 2 6.0 [cindy]
2 jan jan.value = marcia.value * 3 18.0 [marcia]
"""
self.assertEqual(answer, 28.0)
|
def test_against_pal_chain_doc(self) ->None:
    """
    Test CPAL chain against the first example in the PAL chain notebook doc:
    https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/chains/additional/pal.ipynb
    """
    narrative_input = (
        'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?'
        )
    # temperature=0 keeps the LLM output as deterministic as possible.
    llm = OpenAI(temperature=0, max_tokens=512)
    cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
    answer = cpal_chain.run(narrative_input)
    # The string literal below documents the expected intermediate
    # outcome table; it is not executed.
    """
    >>> story._outcome_table
    name code value depends_on
    0 cindy pass 4.0 []
    1 marcia marcia.value = cindy.value + 2 6.0 [cindy]
    2 jan jan.value = marcia.value * 3 18.0 [marcia]
    """
    self.assertEqual(answer, 28.0)
|
Test CPAL chain against the first example in the PAL chain notebook doc:
https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/chains/additional/pal.ipynb
|
load
|
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Eagerly load all documents by draining the lazy iterator."""
    documents = list(self.lazy_load())
    return documents
| null |
__getattr__
|
if name == 'AlphaVantageAPIWrapper':
return _import_alpha_vantage()
elif name == 'ApifyWrapper':
return _import_apify()
elif name == 'ArceeWrapper':
return _import_arcee()
elif name == 'ArxivAPIWrapper':
return _import_arxiv()
elif name == 'LambdaWrapper':
return _import_awslambda()
elif name == 'BibtexparserWrapper':
return _import_bibtex()
elif name == 'BingSearchAPIWrapper':
return _import_bing_search()
elif name == 'BraveSearchWrapper':
return _import_brave_search()
elif name == 'DuckDuckGoSearchAPIWrapper':
return _import_duckduckgo_search()
elif name == 'GoogleLensAPIWrapper':
return _import_google_lens()
elif name == 'GoldenQueryAPIWrapper':
return _import_golden_query()
elif name == 'GoogleJobsAPIWrapper':
return _import_google_jobs()
elif name == 'GoogleScholarAPIWrapper':
return _import_google_scholar()
elif name == 'GoogleFinanceAPIWrapper':
return _import_google_finance()
elif name == 'GoogleTrendsAPIWrapper':
return _import_google_trends()
elif name == 'GooglePlacesAPIWrapper':
return _import_google_places_api()
elif name == 'GoogleSearchAPIWrapper':
return _import_google_search()
elif name == 'GoogleSerperAPIWrapper':
return _import_google_serper()
elif name == 'GraphQLAPIWrapper':
return _import_graphql()
elif name == 'JiraAPIWrapper':
return _import_jira()
elif name == 'MaxComputeAPIWrapper':
return _import_max_compute()
elif name == 'MerriamWebsterAPIWrapper':
return _import_merriam_webster()
elif name == 'MetaphorSearchAPIWrapper':
return _import_metaphor_search()
elif name == 'NasaAPIWrapper':
return _import_nasa()
elif name == 'OpenWeatherMapAPIWrapper':
return _import_openweathermap()
elif name == 'OutlineAPIWrapper':
return _import_outline()
elif name == 'Portkey':
return _import_portkey()
elif name == 'PowerBIDataset':
return _import_powerbi()
elif name == 'PubMedAPIWrapper':
return _import_pubmed()
elif name == 'PythonREPL':
return _import_python()
elif name == 'SceneXplainAPIWrapper':
return _import_scenexplain()
elif name == 'SearchApiAPIWrapper':
return _import_searchapi()
elif name == 'SearxSearchWrapper':
return _import_searx_search()
elif name == 'SerpAPIWrapper':
return _import_serpapi()
elif name == 'SparkSQL':
return _import_spark_sql()
elif name == 'StackExchangeAPIWrapper':
return _import_stackexchange()
elif name == 'SQLDatabase':
return _import_sql_database()
elif name == 'SteamWebAPIWrapper':
return _import_steam_webapi()
elif name == 'TensorflowDatasets':
return _import_tensorflow_datasets()
elif name == 'TwilioAPIWrapper':
return _import_twilio()
elif name == 'WikipediaAPIWrapper':
return _import_wikipedia()
elif name == 'WolframAlphaAPIWrapper':
return _import_wolfram_alpha()
elif name == 'ZapierNLAWrapper':
return _import_zapier()
else:
raise AttributeError(f'Could not find: {name}')
|
def __getattr__(name: str) ->Any:
    """Lazily resolve a utility wrapper by attribute name.

    Dispatches through a name -> importer mapping instead of an elif chain;
    the matched importer performs the actual (deferred) import.
    """
    importers = {
        'AlphaVantageAPIWrapper': _import_alpha_vantage,
        'ApifyWrapper': _import_apify,
        'ArceeWrapper': _import_arcee,
        'ArxivAPIWrapper': _import_arxiv,
        'LambdaWrapper': _import_awslambda,
        'BibtexparserWrapper': _import_bibtex,
        'BingSearchAPIWrapper': _import_bing_search,
        'BraveSearchWrapper': _import_brave_search,
        'DuckDuckGoSearchAPIWrapper': _import_duckduckgo_search,
        'GoogleLensAPIWrapper': _import_google_lens,
        'GoldenQueryAPIWrapper': _import_golden_query,
        'GoogleJobsAPIWrapper': _import_google_jobs,
        'GoogleScholarAPIWrapper': _import_google_scholar,
        'GoogleFinanceAPIWrapper': _import_google_finance,
        'GoogleTrendsAPIWrapper': _import_google_trends,
        'GooglePlacesAPIWrapper': _import_google_places_api,
        'GoogleSearchAPIWrapper': _import_google_search,
        'GoogleSerperAPIWrapper': _import_google_serper,
        'GraphQLAPIWrapper': _import_graphql,
        'JiraAPIWrapper': _import_jira,
        'MaxComputeAPIWrapper': _import_max_compute,
        'MerriamWebsterAPIWrapper': _import_merriam_webster,
        'MetaphorSearchAPIWrapper': _import_metaphor_search,
        'NasaAPIWrapper': _import_nasa,
        'OpenWeatherMapAPIWrapper': _import_openweathermap,
        'OutlineAPIWrapper': _import_outline,
        'Portkey': _import_portkey,
        'PowerBIDataset': _import_powerbi,
        'PubMedAPIWrapper': _import_pubmed,
        'PythonREPL': _import_python,
        'SceneXplainAPIWrapper': _import_scenexplain,
        'SearchApiAPIWrapper': _import_searchapi,
        'SearxSearchWrapper': _import_searx_search,
        'SerpAPIWrapper': _import_serpapi,
        'SparkSQL': _import_spark_sql,
        'StackExchangeAPIWrapper': _import_stackexchange,
        'SQLDatabase': _import_sql_database,
        'SteamWebAPIWrapper': _import_steam_webapi,
        'TensorflowDatasets': _import_tensorflow_datasets,
        'TwilioAPIWrapper': _import_twilio,
        'WikipediaAPIWrapper': _import_wikipedia,
        'WolframAlphaAPIWrapper': _import_wolfram_alpha,
        'ZapierNLAWrapper': _import_zapier,
    }
    if name not in importers:
        raise AttributeError(f'Could not find: {name}')
    return importers[name]()
| null |
test_sqlitevss
|
"""Test end to end construction and search."""
docsearch = _sqlite_vss_from_texts()
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={})]
|
@pytest.mark.requires('sqlite-vss')
def test_sqlitevss() ->None:
    """A freshly built store should return the exact matching document."""
    store = _sqlite_vss_from_texts()
    results = store.similarity_search('foo', k=1)
    assert results == [Document(page_content='foo', metadata={})]
|
Test end to end construction and search.
|
test_cassandra_no_drop
|
"""Test end to end construction and re-opening the same index."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vectorstore_from_texts(texts, metadatas=metadatas)
del docsearch
texts2 = ['foo2', 'bar2', 'baz2']
docsearch = _vectorstore_from_texts(texts2, metadatas=metadatas, drop=False)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6
|
def test_cassandra_no_drop() ->None:
    """Re-opening an existing index with drop=False should keep prior documents."""
    first_batch = ['foo', 'bar', 'baz']
    metadatas = [{'page': index} for index in range(len(first_batch))]
    store = _vectorstore_from_texts(first_batch, metadatas=metadatas)
    del store
    second_batch = ['foo2', 'bar2', 'baz2']
    store = _vectorstore_from_texts(second_batch, metadatas=metadatas,
        drop=False)
    # Both batches (3 + 3 documents) should now be searchable.
    matches = store.similarity_search('foo', k=10)
    assert len(matches) == 6
|
Test end to end construction and re-opening the same index.
|
test_loadnotebook_eachnotehasexpectedcontentwithleadingandtrailingremoved
|
documents = EverNoteLoader(self.example_notebook_path(
'sample_notebook.enex'), False).load()
content_note1 = documents[0].page_content
assert content_note1 == 'abc'
content_note2 = documents[1].page_content
assert content_note2 == '**Jan - March 2022**'
|
def test_loadnotebook_eachnotehasexpectedcontentwithleadingandtrailingremoved(
    self) ->None:
    """Each note's content should appear with surrounding whitespace removed."""
    notebook = self.example_notebook_path('sample_notebook.enex')
    loaded = EverNoteLoader(notebook, False).load()
    assert loaded[0].page_content == 'abc'
    assert loaded[1].page_content == '**Jan - March 2022**'
| null |
validate_environment
|
"""Validate that we have all required info to access Clarifai
platform and python package exists in environment."""
values['pat'] = get_from_dict_or_env(values, 'pat', 'CLARIFAI_PAT')
user_id = values.get('user_id')
app_id = values.get('app_id')
model_id = values.get('model_id')
model_url = values.get('model_url')
if model_url is not None and model_id is not None:
raise ValueError('Please provide either model_url or model_id, not both.')
if model_url is None and model_id is None:
raise ValueError('Please provide one of model_url or model_id.')
if model_url is None and model_id is not None:
if user_id is None or app_id is None:
raise ValueError('Please provide a user_id and app_id.')
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate Clarifai credentials and model-identification fields.

    Requires a PAT (from values or the CLARIFAI_PAT env var) and exactly
    one of ``model_url`` / ``model_id``; a bare ``model_id`` additionally
    needs ``user_id`` and ``app_id``.
    """
    values['pat'] = get_from_dict_or_env(values, 'pat', 'CLARIFAI_PAT')
    user_id = values.get('user_id')
    app_id = values.get('app_id')
    model_id = values.get('model_id')
    model_url = values.get('model_url')
    has_url = model_url is not None
    has_id = model_id is not None
    if has_url and has_id:
        raise ValueError(
            'Please provide either model_url or model_id, not both.')
    if not has_url and not has_id:
        raise ValueError('Please provide one of model_url or model_id.')
    if not has_url and has_id and (user_id is None or app_id is None):
        raise ValueError('Please provide a user_id and app_id.')
    return values
|
Validate that we have all required info to access Clarifai
platform and python package exists in environment.
|
_kwargs_post_request
|
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
return dict(url=f'{self.gradient_api_url}/models/{self.model_id}/complete',
headers={'authorization': f'Bearer {self.gradient_access_token}',
'x-gradient-workspace-id': f'{self.gradient_workspace_id}', 'accept':
'application/json', 'content-type': 'application/json'}, json=dict(
query=prompt, maxGeneratedTokenCount=_params.get(
'max_generated_token_count', None), temperature=_params.get(
'temperature', None), topK=_params.get('top_k', None), topP=_params.get
('top_p', None)))
|
def _kwargs_post_request(self, prompt: str, kwargs: Mapping[str, Any]
) ->Mapping[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
return dict(url=
f'{self.gradient_api_url}/models/{self.model_id}/complete', headers
={'authorization': f'Bearer {self.gradient_access_token}',
'x-gradient-workspace-id': f'{self.gradient_workspace_id}',
'accept': 'application/json', 'content-type': 'application/json'},
json=dict(query=prompt, maxGeneratedTokenCount=_params.get(
'max_generated_token_count', None), temperature=_params.get(
'temperature', None), topK=_params.get('top_k', None), topP=_params
.get('top_p', None)))
|
Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_
|
test_whatsapp_chat_loader
|
chat_path = pathlib.Path(__file__).parent / 'data' / 'whatsapp_chat.txt'
loader = whatsapp.WhatsAppChatLoader(str(chat_path))
chat_sessions = list(utils.map_ai_messages(loader.lazy_load(), sender=
'Dr. Feather'))
assert chat_sessions, 'Chat sessions should not be empty'
assert chat_sessions[0]['messages'], 'Chat messages should not be empty'
assert 'I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest. Such a magnificent creature!' in chat_sessions[
0]['messages'][0].content, 'Chat content mismatch'
|
def test_whatsapp_chat_loader() ->None:
    """Smoke-test WhatsAppChatLoader against the bundled fixture transcript."""
    chat_path = pathlib.Path(__file__).parent / 'data' / 'whatsapp_chat.txt'
    loader = whatsapp.WhatsAppChatLoader(str(chat_path))
    sessions = list(utils.map_ai_messages(loader.lazy_load(), sender=
        'Dr. Feather'))
    assert sessions, 'Chat sessions should not be empty'
    first_messages = sessions[0]['messages']
    assert first_messages, 'Chat messages should not be empty'
    snippet = 'I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest. Such a magnificent creature!'
    assert snippet in first_messages[0].content, 'Chat content mismatch'
| null |
__init__
|
super().__init__()
self.metadata_key = metadata_key
|
def __init__(self, metadata_key: str='metadata') ->None:
    """Initialize the handler, remembering which metadata key to read."""
    super().__init__()
    self.metadata_key = metadata_key
| null |
concatenate_cells
|
"""Combine cells information in a readable format ready to be used.
Args:
cell: A dictionary
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maximum length of the output to be displayed.
traceback: Whether to return a traceback of the error.
Returns:
A string with the cell information.
"""
cell_type = cell['cell_type']
source = cell['source']
output = cell['outputs']
if include_outputs and cell_type == 'code' and output:
if 'ename' in output[0].keys():
error_name = output[0]['ename']
error_value = output[0]['evalue']
if traceback:
traceback = output[0]['traceback']
return f"""'{cell_type}' cell: '{source}'
, gives error '{error_name}', with description '{error_value}'
and traceback '{traceback}'
"""
else:
return f"""'{cell_type}' cell: '{source}'
, gives error '{error_name}',with description '{error_value}'
"""
elif output[0]['output_type'] == 'stream':
output = output[0]['text']
min_output = min(max_output_length, len(output))
return (
f"'{cell_type}' cell: '{source}'\n with output: '{output[:min_output]}'\n\n"
)
else:
return f"'{cell_type}' cell: '{source}'\n\n"
return ''
|
def concatenate_cells(cell: dict, include_outputs: bool, max_output_length:
    int, traceback: bool) ->str:
    """Combine cells information in a readable format ready to be used.
    Args:
        cell: A dictionary
        include_outputs: Whether to include the outputs of the cell.
        max_output_length: Maximum length of the output to be displayed.
        traceback: Whether to return a traceback of the error.
    Returns:
        A string with the cell information.
    """
    cell_type = cell['cell_type']
    source = cell['source']
    outputs = cell['outputs']
    # Only code cells with captured outputs get anything rendered at all.
    if not (include_outputs and cell_type == 'code' and outputs):
        return ''
    first_output = outputs[0]
    if 'ename' in first_output:
        # Error output: report the exception, optionally with its traceback.
        error_name = first_output['ename']
        error_value = first_output['evalue']
        if traceback:
            error_traceback = first_output['traceback']
            return f"""'{cell_type}' cell: '{source}'
, gives error '{error_name}', with description '{error_value}'
and traceback '{error_traceback}'
"""
        return f"""'{cell_type}' cell: '{source}'
, gives error '{error_name}',with description '{error_value}'
"""
    if first_output['output_type'] == 'stream':
        # Stream output: include at most max_output_length characters.
        stream_text = first_output['text']
        clipped = stream_text[:min(max_output_length, len(stream_text))]
        return (
            f"'{cell_type}' cell: '{source}'\n with output: '{clipped}'\n\n")
    # Other output types (e.g. display_data): emit the source only.
    return f"'{cell_type}' cell: '{source}'\n\n"
|
Combine cells information in a readable format ready to be used.
Args:
cell: A dictionary
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maximum length of the output to be displayed.
traceback: Whether to return a traceback of the error.
Returns:
A string with the cell information.
|
test_load_converts_dataframe_columns_to_document_metadata
|
import xorbits.pandas as pd
data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date': [
'2022-01-01', '2022-01-02']}
loader = XorbitsLoader(pd.DataFrame(data))
docs = loader.load()
expected = {'author': ['Alice', 'Bob'], 'date': ['2022-01-01', '2022-01-02']}
for i, doc in enumerate(docs):
assert doc.metadata['author'] == expected['author'][i]
assert doc.metadata['date'] == expected['date'][i]
|
@pytest.mark.skipif(not xorbits_installed, reason='xorbits not installed')
def test_load_converts_dataframe_columns_to_document_metadata() ->None:
    """Non-text columns of the frame should surface as document metadata."""
    import xorbits.pandas as pd
    frame = pd.DataFrame({'text': ['Hello', 'World'], 'author': ['Alice',
        'Bob'], 'date': ['2022-01-01', '2022-01-02']})
    docs = XorbitsLoader(frame).load()
    expected_meta = {'author': ['Alice', 'Bob'], 'date': ['2022-01-01',
        '2022-01-02']}
    for index, doc in enumerate(docs):
        assert doc.metadata['author'] == expected_meta['author'][index]
        assert doc.metadata['date'] == expected_meta['date'][index]
| null |
wait_for_all_tracers
|
"""Wait for all tracers to finish."""
global _TRACERS
for tracer in list(_TRACERS):
if tracer is not None:
tracer.wait_for_futures()
|
def wait_for_all_tracers() ->None:
    """Wait for all tracers to finish."""
    global _TRACERS
    # Iterate over a snapshot so concurrent registry changes are safe.
    for registered in list(_TRACERS):
        if registered is not None:
            registered.wait_for_futures()
|
Wait for all tracers to finish.
|
init_gptcache_map
|
i = getattr(init_gptcache_map, '_i', 0)
cache_path = f'data_map_{i}.txt'
if os.path.isfile(cache_path):
os.remove(cache_path)
cache_obj.init(pre_embedding_func=get_prompt, data_manager=get_data_manager
(data_path=cache_path))
init_gptcache_map._i = i + 1
|
def init_gptcache_map(cache_obj: Any) ->None:
    """Initialize ``cache_obj`` with a fresh on-disk data map.

    A counter stored as an attribute on the function itself gives each
    call its own cache file, removing any stale file first.
    """
    call_index = getattr(init_gptcache_map, '_i', 0)
    cache_path = f'data_map_{call_index}.txt'
    if os.path.isfile(cache_path):
        os.remove(cache_path)
    cache_obj.init(pre_embedding_func=get_prompt, data_manager=
        get_data_manager(data_path=cache_path))
    init_gptcache_map._i = call_index + 1
| null |
serialize_inputs
|
if 'prompts' in inputs:
input_ = '\n\n'.join(inputs['prompts'])
elif 'prompt' in inputs:
input_ = inputs['prompt']
elif 'messages' in inputs:
input_ = self.serialize_chat_messages(inputs['messages'])
else:
raise ValueError('LLM Run must have either messages or prompts as inputs.')
return input_
|
def serialize_inputs(self, inputs: Dict) ->str:
    """Flatten an LLM run's inputs (prompts, prompt, or messages) to text.

    Raises:
        ValueError: if none of the recognized keys is present.
    """
    if 'prompts' in inputs:
        return '\n\n'.join(inputs['prompts'])
    if 'prompt' in inputs:
        return inputs['prompt']
    if 'messages' in inputs:
        return self.serialize_chat_messages(inputs['messages'])
    raise ValueError('LLM Run must have either messages or prompts as inputs.')
| null |
test_parse_nested_operation
|
op = 'and(or(eq("a", "b"), eq("a", "c"), eq("a", "d")), not(eq("z", "foo")))'
eq1 = Comparison(comparator=Comparator.EQ, attribute='a', value='b')
eq2 = Comparison(comparator=Comparator.EQ, attribute='a', value='c')
eq3 = Comparison(comparator=Comparator.EQ, attribute='a', value='d')
eq4 = Comparison(comparator=Comparator.EQ, attribute='z', value='foo')
_not = Operation(operator=Operator.NOT, arguments=[eq4])
_or = Operation(operator=Operator.OR, arguments=[eq1, eq2, eq3])
expected = Operation(operator=Operator.AND, arguments=[_or, _not])
actual = DEFAULT_PARSER.parse_folder(op)
assert expected == actual
|
def test_parse_nested_operation() ->None:
    """Nested and/or/not expressions should parse into the matching tree."""
    op = (
        'and(or(eq("a", "b"), eq("a", "c"), eq("a", "d")), not(eq("z", "foo")))'
        )
    equalities = [Comparison(comparator=Comparator.EQ, attribute='a',
        value=v) for v in ('b', 'c', 'd')]
    negated = Operation(operator=Operator.NOT, arguments=[Comparison(
        comparator=Comparator.EQ, attribute='z', value='foo')])
    disjunction = Operation(operator=Operator.OR, arguments=equalities)
    expected = Operation(operator=Operator.AND, arguments=[disjunction,
        negated])
    assert DEFAULT_PARSER.parse_folder(op) == expected
| null |
_import_yandex_gpt
|
from langchain_community.llms.yandex import YandexGPT
return YandexGPT
|
def _import_yandex_gpt() ->Any:
    """Deferred import hook returning the ``YandexGPT`` LLM class."""
    from langchain_community.llms import yandex
    return yandex.YandexGPT
| null |
test_cloudflare_workersai_stream
|
response_body = ['data: {"response": "Hello"}', 'data: [DONE]']
responses.add(responses.POST,
'https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8'
, body='\n'.join(response_body), status=200)
llm = CloudflareWorkersAI(account_id='my_account_id', api_token=
'my_api_token', model='@cf/meta/llama-2-7b-chat-int8', streaming=True)
outputs = []
for chunk in llm.stream('Say Hello'):
outputs.append(chunk)
assert ''.join(outputs) == 'Hello'
|
@responses.activate
def test_cloudflare_workersai_stream() ->None:
    """Streaming chunks from the mocked endpoint should join to 'Hello'."""
    sse_lines = ['data: {"response": "Hello"}', 'data: [DONE]']
    responses.add(responses.POST,
        'https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8'
        , body='\n'.join(sse_lines), status=200)
    llm = CloudflareWorkersAI(account_id='my_account_id', api_token=
        'my_api_token', model='@cf/meta/llama-2-7b-chat-int8', streaming=True)
    collected = [chunk for chunk in llm.stream('Say Hello')]
    assert ''.join(collected) == 'Hello'
| null |
max_marginal_relevance_search_with_score
|
"""Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_with_score_by_vector(embedding=
embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter,
**kwargs)
return docs
|
def max_marginal_relevance_search_with_score(self, query: str, k: int=4,
    fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[dict]=None,
    **kwargs: Any) ->List[Tuple[Document, float]]:
    """Return docs and scores chosen by maximal marginal relevance (MMR).

    MMR balances similarity to the query against diversity among the
    selected documents.

    Args:
        query (str): Text to look up documents similar to.
        k (int): Number of Documents to return. Defaults to 4.
        fetch_k (int): Number of candidates fetched for the MMR algorithm.
            Defaults to 20.
        lambda_mult (float): Number between 0 and 1 controlling diversity;
            0 maximizes diversity, 1 minimizes it. Defaults to 0.5.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to
            None.

    Returns:
        List[Tuple[Document, float]]: Documents selected by MMR together
            with their scores.
    """
    query_embedding = self.embedding_function.embed_query(query)
    # All the heavy lifting happens in the by-vector variant.
    return self.max_marginal_relevance_search_with_score_by_vector(embedding
        =query_embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult,
        filter=filter, **kwargs)
|
Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
|
format_log_to_messages
|
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(content=template_tool_response.format(
observation=observation))
thoughts.append(human_message)
return thoughts
|
def format_log_to_messages(intermediate_steps: List[Tuple[AgentAction, str]
    ], template_tool_response: str='{observation}') ->List[BaseMessage]:
    """Construct the scratchpad that lets the agent continue its thought process."""
    scratchpad: List[BaseMessage] = []
    for action, observation in intermediate_steps:
        # Each step becomes an AI message (the action taken) followed by a
        # human message carrying the tool's observation.
        scratchpad.append(AIMessage(content=action.log))
        scratchpad.append(HumanMessage(content=template_tool_response.
            format(observation=observation)))
    return scratchpad
|
Construct the scratchpad that lets the agent continue its thought process.
|
get_allowed_tools
|
return None
|
def get_allowed_tools(self) ->Optional[List[str]]:
    """Return ``None``, meaning no restriction on which tools may be used."""
    return None
| null |
test_jinachat_api_key_is_secret_string
|
llm = JinaChat(jinachat_api_key='secret-api-key')
assert isinstance(llm.jinachat_api_key, SecretStr)
|
def test_jinachat_api_key_is_secret_string() ->None:
    """The API key must be wrapped in a SecretStr, never stored raw."""
    chat = JinaChat(jinachat_api_key='secret-api-key')
    assert isinstance(chat.jinachat_api_key, SecretStr)
| null |
test_visit_comparison_eq
|
comp = Comparison(comparator=Comparator.EQ, attribute='qty', value=10)
expected = {'qty': {'$eq': 10}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison_eq() ->None:
    """EQ comparisons should translate to a Mongo-style ``$eq`` clause."""
    comparison = Comparison(comparator=Comparator.EQ, attribute='qty',
        value=10)
    assert DEFAULT_TRANSLATOR.visit_comparison(comparison) == {'qty': {
        '$eq': 10}}
| null |
_identifying_params
|
return {}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """This model exposes no identifying parameters."""
    return {}
| null |
ngram_fuzzy_matching_strategy
|
"""
N-gram fuzzy matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones.
It uses fuzzy matching to find the position of the anonymized entity in the text.
It generates n-grams of the same length as the anonymized entity from the text and
uses fuzzy matching to find the position of the anonymized entity in the text.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
fuzzy_threshold: fuzzy matching threshold
use_variable_length: whether to use (n-1, n, n+1)-grams or just n-grams
"""
def generate_ngrams(words_list: List[str], n: int) ->list:
"""Generate n-grams from a list of words"""
return [' '.join(words_list[i:i + n]) for i in range(len(words_list) -
(n - 1))]
try:
from fuzzywuzzy import fuzz
except ImportError as e:
raise ImportError(
'Could not import fuzzywuzzy, please install with `pip install fuzzywuzzy`.'
) from e
text_words = text.split()
replacements = []
matched_indices: List[int] = []
for entity_type in deanonymizer_mapping:
for anonymized, original in deanonymizer_mapping[entity_type].items():
anonymized_words = anonymized.split()
if use_variable_length:
gram_lengths = [len(anonymized_words) - 1, len(anonymized_words
), len(anonymized_words) + 1]
else:
gram_lengths = [len(anonymized_words)]
for n in gram_lengths:
if n > 0:
segments = generate_ngrams(text_words, n)
for i, segment in enumerate(segments):
if fuzz.ratio(anonymized.lower(), segment.lower()
) > fuzzy_threshold and i not in matched_indices:
replacements.append((i, n, original))
matched_indices.extend(range(i, i + n))
replacements.sort(key=lambda x: x[0], reverse=True)
for start, length, replacement in replacements:
text_words[start:start + length] = replacement.split()
return ' '.join(text_words)
|
def ngram_fuzzy_matching_strategy(text: str, deanonymizer_mapping:
    MappingDataType, fuzzy_threshold: int=85, use_variable_length: bool=True
    ) ->str:
    """
    N-gram fuzzy matching strategy for deanonymization.
    It replaces all the anonymized entities with the original ones.
    It uses fuzzy matching to find the position of the anonymized entity in the text.
    It generates n-grams of the same length as the anonymized entity from the text and
    uses fuzzy matching to find the position of the anonymized entity in the text.
    Args:
        text: text to deanonymize
        deanonymizer_mapping: mapping between anonymized entities and original ones
        fuzzy_threshold: fuzzy matching threshold (0-100, exclusive lower bound)
        use_variable_length: whether to use (n-1, n, n+1)-grams or just n-grams
    Returns:
        The text with every matched anonymized span replaced by its original.
    """
    def generate_ngrams(words_list: List[str], n: int) ->list:
        """Generate n-grams from a list of words"""
        return [' '.join(words_list[i:i + n]) for i in range(len(words_list
            ) - (n - 1))]
    try:
        from fuzzywuzzy import fuzz
    except ImportError as e:
        raise ImportError(
            'Could not import fuzzywuzzy, please install with `pip install fuzzywuzzy`.'
            ) from e
    text_words = text.split()
    # Collected as (start_word_index, window_length, original_text) triples.
    replacements = []
    # Word positions already claimed by an accepted match.
    matched_indices: List[int] = []
    for entity_type in deanonymizer_mapping:
        for anonymized, original in deanonymizer_mapping[entity_type].items():
            anonymized_words = anonymized.split()
            if use_variable_length:
                # Tolerate off-by-one word counts in the candidate window.
                gram_lengths = [len(anonymized_words) - 1, len(
                    anonymized_words), len(anonymized_words) + 1]
            else:
                gram_lengths = [len(anonymized_words)]
            for n in gram_lengths:
                if n > 0:
                    segments = generate_ngrams(text_words, n)
                    for i, segment in enumerate(segments):
                        # NOTE(review): only the window *start* index is
                        # checked against prior matches; a window that
                        # overlaps a match but starts elsewhere can still
                        # be accepted — confirm this is intended.
                        if fuzz.ratio(anonymized.lower(), segment.lower()
                            ) > fuzzy_threshold and i not in matched_indices:
                            replacements.append((i, n, original))
                            matched_indices.extend(range(i, i + n))
    # Splice right-to-left so earlier indices stay valid even when the
    # replacement has a different word count than the matched window.
    replacements.sort(key=lambda x: x[0], reverse=True)
    for start, length, replacement in replacements:
        text_words[start:start + length] = replacement.split()
    return ' '.join(text_words)
|
N-gram fuzzy matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones.
It uses fuzzy matching to find the position of the anonymized entity in the text.
It generates n-grams of the same length as the anonymized entity from the text and
uses fuzzy matching to find the position of the anonymized entity in the text.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
fuzzy_threshold: fuzzy matching threshold
use_variable_length: whether to use (n-1, n, n+1)-grams or just n-grams
|
create_table_if_not_exists
|
"""
Helper function: create table if not exists
"""
from psycopg2 import sql
cursor = self._connection.cursor()
cursor.execute(sql.SQL(
'CREATE TABLE IF NOT EXISTS {} ( id UUID, embedding_id INTEGER, text VARCHAR(60000), metadata VARCHAR(1024), embedding FLOAT)'
).format(sql.Identifier(self._table)))
self._connection.commit()
cursor.close()
|
def create_table_if_not_exists(self) ->None:
    """Create the backing table for this store if it does not already exist."""
    from psycopg2 import sql
    # sql.Identifier quotes the table name safely.
    ddl = sql.SQL(
        'CREATE TABLE IF NOT EXISTS {} ( id UUID, embedding_id INTEGER, text VARCHAR(60000), metadata VARCHAR(1024), embedding FLOAT)'
        ).format(sql.Identifier(self._table))
    cursor = self._connection.cursor()
    cursor.execute(ddl)
    self._connection.commit()
    cursor.close()
|
Helper function: create table if not exists
|
list_as_str
|
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = self.list()
return json.dumps(actions)
|
def list_as_str(self) ->str:
    """Same as ``list``, but JSON-stringified for inserting back into an LLM."""
    return json.dumps(self.list())
|
Same as list, but returns a stringified version of the JSON for
inserting back into an LLM.
|
check_queries_required
|
if values.get('sequential_response') and not queries:
raise ValueError(
'queries is required when sequential_response is set to True')
return queries
|
@validator('queries', always=True)
def check_queries_required(cls, queries: Optional[Mapping], values: Mapping
    [str, Any]) ->Optional[Mapping]:
    """Require ``queries`` whenever ``sequential_response`` is enabled."""
    sequential = values.get('sequential_response')
    if sequential and not queries:
        raise ValueError(
            'queries is required when sequential_response is set to True')
    return queries
| null |
_get_relevant_documents
|
assert isinstance(self, FakeRetrieverV2)
assert run_manager is not None
assert isinstance(run_manager, CallbackManagerForRetrieverRun)
if self.throw_error:
raise ValueError('Test error')
return [Document(page_content=query)]
|
def _get_relevant_documents(self, query: str, *, run_manager: Optional[
    CallbackManagerForRetrieverRun]=None) ->List[Document]:
    """Echo the query back as a single document, verifying callback plumbing."""
    assert isinstance(self, FakeRetrieverV2)
    assert run_manager is not None and isinstance(run_manager,
        CallbackManagerForRetrieverRun)
    if self.throw_error:
        raise ValueError('Test error')
    return [Document(page_content=query)]
| null |
reciprocal_rank_fusion
|
fused_scores = {}
for docs in results:
for rank, doc in enumerate(docs):
doc_str = dumps(doc)
if doc_str not in fused_scores:
fused_scores[doc_str] = 0
fused_scores[doc_str] += 1 / (rank + k)
reranked_results = [(loads(doc), score) for doc, score in sorted(
fused_scores.items(), key=lambda x: x[1], reverse=True)]
return reranked_results
|
def reciprocal_rank_fusion(results: list[list], k=60):
    """Fuse multiple ranked document lists via reciprocal-rank scoring.

    Each document contributes ``1 / (rank + k)`` per list it appears in;
    the fused list is sorted by total score, highest first.
    """
    fused_scores: dict = {}
    for ranking in results:
        for rank, doc in enumerate(ranking):
            key = dumps(doc)
            fused_scores[key] = fused_scores.get(key, 0) + 1 / (rank + k)
    ordered = sorted(fused_scores.items(), key=lambda item: item[1],
        reverse=True)
    return [(loads(serialized), score) for serialized, score in ordered]
| null |
on_chat_model_start
|
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as an LLM run.
"""
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
handle_event(self.handlers, 'on_chat_model_start', 'ignore_chat_model',
serialized, [message_list], run_id=run_id_, parent_run_id=self.
parent_run_id, tags=self.tags, metadata=self.metadata, **kwargs)
managers.append(CallbackManagerForLLMRun(run_id=run_id_, handlers=self.
handlers, inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=
self.inheritable_tags, metadata=self.metadata, inheritable_metadata
=self.inheritable_metadata))
return managers
|
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[
    List[BaseMessage]], **kwargs: Any) ->List[CallbackManagerForLLMRun]:
    """Run when LLM starts running.

    Args:
        serialized (Dict[str, Any]): The serialized LLM.
        messages (List[List[BaseMessage]]): The list of messages.

    Returns:
        List[CallbackManagerForLLMRun]: A callback manager for each
            list of messages as an LLM run.
    """
    run_managers = []
    for message_list in messages:
        # Each message list gets its own run id: notify handlers first,
        # then hand back a manager scoped to that run.
        child_run_id = uuid.uuid4()
        handle_event(self.handlers, 'on_chat_model_start',
            'ignore_chat_model', serialized, [message_list], run_id=
            child_run_id, parent_run_id=self.parent_run_id, tags=self.tags,
            metadata=self.metadata, **kwargs)
        run_managers.append(CallbackManagerForLLMRun(run_id=child_run_id,
            handlers=self.handlers, inheritable_handlers=self.
            inheritable_handlers, parent_run_id=self.parent_run_id, tags=
            self.tags, inheritable_tags=self.inheritable_tags, metadata=
            self.metadata, inheritable_metadata=self.inheritable_metadata))
    return run_managers
|
Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as an LLM run.
|
get_lc_namespace
|
"""Get the namespace of the langchain object.
For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is ["langchain", "llms", "openai"]
"""
return cls.__module__.split('.')
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object.

    For example, if the class is `langchain.llms.openai.OpenAI`, then the
    namespace is ["langchain", "llms", "openai"].
    """
    module_path = cls.__module__
    return module_path.split('.')
|
Get the namespace of the langchain object.
For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is ["langchain", "llms", "openai"]
|
flush_tracker
|
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None
"""
pd = import_pandas()
clearml = import_clearml()
self.logger.report_table('Action Records', name, table_plot=pd.DataFrame(
self.action_records))
session_analysis_df = self._create_session_analysis_df()
self.logger.report_table('Session Analysis', name, table_plot=
session_analysis_df)
if self.stream_logs:
self.logger.report_text({'action_records': pd.DataFrame(self.
action_records), 'session_analysis': session_analysis_df})
if langchain_asset:
langchain_asset_path = Path(self.temp_dir.name, 'model.json')
try:
langchain_asset.save(langchain_asset_path)
output_model = clearml.OutputModel(task=self.task, config_text=
load_json(langchain_asset_path))
output_model.update_weights(weights_filename=str(
langchain_asset_path), auto_delete_file=False, target_filename=name
)
except ValueError:
langchain_asset.save_agent(langchain_asset_path)
output_model = clearml.OutputModel(task=self.task, config_text=
load_json(langchain_asset_path))
output_model.update_weights(weights_filename=str(
langchain_asset_path), auto_delete_file=False, target_filename=name
)
except NotImplementedError as e:
print('Could not save model.')
print(repr(e))
pass
self.task.flush(wait_for_uploads=True)
self.temp_dir.cleanup()
self.temp_dir = tempfile.TemporaryDirectory()
self.reset_callback_meta()
if finish:
self.task.close()
|
def flush_tracker(self, name: Optional[str]=None, langchain_asset: Any=None,
    finish: bool=False) ->None:
    """Flush the tracker and setup the session.
    Everything after this will be a new table.
    Args:
        name: Name of the performed session so far so it is identifiable
        langchain_asset: The langchain asset to save.
        finish: Whether to finish the run.
    Returns:
        None
    """
    pd = import_pandas()
    clearml = import_clearml()
    # Push the accumulated action records and session analysis to ClearML.
    self.logger.report_table('Action Records', name, table_plot=pd.
        DataFrame(self.action_records))
    session_analysis_df = self._create_session_analysis_df()
    self.logger.report_table('Session Analysis', name, table_plot=
        session_analysis_df)
    if self.stream_logs:
        self.logger.report_text({'action_records': pd.DataFrame(self.
            action_records), 'session_analysis': session_analysis_df})
    if langchain_asset:
        # Serialize the asset to a temp file, then register it as a ClearML
        # OutputModel with the serialized config as weights.
        langchain_asset_path = Path(self.temp_dir.name, 'model.json')
        try:
            langchain_asset.save(langchain_asset_path)
            output_model = clearml.OutputModel(task=self.task, config_text=
                load_json(langchain_asset_path))
            output_model.update_weights(weights_filename=str(
                langchain_asset_path), auto_delete_file=False,
                target_filename=name)
        except ValueError:
            # Agents raise ValueError on .save(); fall back to save_agent().
            langchain_asset.save_agent(langchain_asset_path)
            output_model = clearml.OutputModel(task=self.task, config_text=
                load_json(langchain_asset_path))
            output_model.update_weights(weights_filename=str(
                langchain_asset_path), auto_delete_file=False,
                target_filename=name)
        except NotImplementedError as e:
            # Best-effort: some assets cannot be serialized at all.
            print('Could not save model.')
            print(repr(e))
            pass
    # Flush uploads, recycle the temp dir, and reset per-session counters.
    self.task.flush(wait_for_uploads=True)
    self.temp_dir.cleanup()
    self.temp_dir = tempfile.TemporaryDirectory()
    self.reset_callback_meta()
    if finish:
        self.task.close()
|
Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None
|
_import_bittensor
|
from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
|
def _import_bittensor() ->Any:
    """Deferred import hook returning the ``NIBittensorLLM`` class."""
    from langchain_community.llms import bittensor
    return bittensor.NIBittensorLLM
| null |
test_simple_context_str_w_emb
|
str1 = 'test'
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{'a_namespace': encoded_str1}]
assert base.embed(base.Embed(str1), MockEncoder(), 'a_namespace') == expected
expected_embed_and_keep = [{'a_namespace': str1 + ' ' + encoded_str1}]
assert base.embed(base.EmbedAndKeep(str1), MockEncoder(), 'a_namespace'
) == expected_embed_and_keep
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_simple_context_str_w_emb() ->None:
    """A plain string should embed (and optionally keep) as expected."""
    raw = 'test'
    encoded = base.stringify_embedding(list(encoded_keyword + raw))
    assert base.embed(base.Embed(raw), MockEncoder(), 'a_namespace') == [{
        'a_namespace': encoded}]
    assert base.embed(base.EmbedAndKeep(raw), MockEncoder(), 'a_namespace'
        ) == [{'a_namespace': raw + ' ' + encoded}]
| null |
_create_retry_decorator
|
import openai
min_seconds = 1
max_seconds = 60
return retry(reraise=True, stop=stop_after_attempt(self.max_retries), wait=
wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry
=retry_if_exception_type(openai.error.Timeout) |
retry_if_exception_type(openai.error.APIError) |
retry_if_exception_type(openai.error.APIConnectionError) |
retry_if_exception_type(openai.error.RateLimitError) |
retry_if_exception_type(openai.error.ServiceUnavailableError),
before_sleep=before_sleep_log(logger, logging.WARNING))
|
def _create_retry_decorator(self) ->Callable[[Any], Any]:
    """Build a tenacity retry decorator for transient OpenAI errors."""
    import openai
    min_seconds = 1
    max_seconds = 60
    # Retry only on errors that are plausibly transient.
    transient_errors = (retry_if_exception_type(openai.error.Timeout) |
        retry_if_exception_type(openai.error.APIError) |
        retry_if_exception_type(openai.error.APIConnectionError) |
        retry_if_exception_type(openai.error.RateLimitError) |
        retry_if_exception_type(openai.error.ServiceUnavailableError))
    return retry(reraise=True, stop=stop_after_attempt(self.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=
        max_seconds), retry=transient_errors, before_sleep=
        before_sleep_log(logger, logging.WARNING))
| null |
test_visit_operation
|
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.LT, attribute='abc', value=1)])
expected = "( ( doc.foo < 2 ) and ( doc.bar = 'baz' ) and ( doc.abc < 1 ) )"
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
|
def test_visit_operation() ->None:
    """AND over three comparisons renders as a parenthesized conjunction."""
    comparisons = [Comparison(comparator=Comparator.LT, attribute='foo',
        value=2), Comparison(comparator=Comparator.EQ, attribute='bar',
        value='baz'), Comparison(comparator=Comparator.LT, attribute='abc',
        value=1)]
    conjunction = Operation(operator=Operator.AND, arguments=comparisons)
    expected = (
        "( ( doc.foo < 2 ) and ( doc.bar = 'baz' ) and ( doc.abc < 1 ) )")
    assert DEFAULT_TRANSLATOR.visit_operation(conjunction) == expected
| null |
_build_rst_file
|
"""Create a rst file for building of documentation.
Args:
package_name: Can be either "langchain" or "core" or "experimental".
"""
package_dir = _package_dir(package_name)
package_members = _load_package_modules(package_dir)
package_version = _get_package_version(package_dir)
with open(_out_file_path(package_name), 'w') as f:
f.write(_doc_first_line(package_name) + _construct_doc(
_package_namespace(package_name), package_members, package_version))
|
def _build_rst_file(package_name: str='langchain') ->None:
    """Create a rst file for building of documentation.

    Args:
        package_name: Can be either "langchain" or "core" or "experimental".
    """
    package_dir = _package_dir(package_name)
    members = _load_package_modules(package_dir)
    version = _get_package_version(package_dir)
    doc_body = _doc_first_line(package_name) + _construct_doc(
        _package_namespace(package_name), members, version)
    with open(_out_file_path(package_name), 'w') as f:
        f.write(doc_body)
|
Create a rst file for building of documentation.
Args:
package_name: Can be either "langchain" or "core" or "experimental".
|
_build_condition
|
from qdrant_client.http import models as rest
out = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f'{key}.{_key}', value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict):
out.extend(self._build_condition(f'{key}[]', _value))
else:
out.extend(self._build_condition(f'{key}', _value))
else:
out.append(rest.FieldCondition(key=f'{self.metadata_payload_key}.{key}',
match=rest.MatchValue(value=value)))
return out
|
def _build_condition(self, key: str, value: Any) ->List[rest.FieldCondition]:
    """Recursively translate one metadata filter entry into Qdrant conditions.

    Nested dicts are flattened into dotted keys (``a.b``); lists of dicts
    use the ``key[]`` array syntax while lists of scalars expand to one
    condition per element; scalars become exact-match conditions against
    the metadata payload.
    """
    from qdrant_client.http import models as rest
    conditions = []
    if isinstance(value, dict):
        for nested_key, nested_value in value.items():
            conditions.extend(self._build_condition(f'{key}.{nested_key}',
                nested_value))
    elif isinstance(value, list):
        for element in value:
            # Dict elements address nested objects inside an array payload.
            suffix = '[]' if isinstance(element, dict) else ''
            conditions.extend(self._build_condition(f'{key}{suffix}', element))
    else:
        conditions.append(rest.FieldCondition(key=
            f'{self.metadata_payload_key}.{key}', match=rest.MatchValue(
            value=value)))
    return conditions
| null |
retriever
|
return PubMedRetriever()
|
@pytest.fixture
def retriever() ->PubMedRetriever:
    """Provide a fresh PubMedRetriever instance for each test."""
    return PubMedRetriever()
| null |
__init__
|
super().__init__()
|
def __init__(self, **kwargs: Any) ->None:
    """Initialize the instance.

    NOTE(review): ``**kwargs`` are accepted but deliberately NOT forwarded
    to ``super().__init__()`` — presumably for call-site compatibility;
    TODO confirm this is intentional rather than an oversight.
    """
    super().__init__()
| null |
visit_structured_query
|
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
    str, dict]:
    """Translate a structured query into a (query string, search kwargs) pair.

    The filter, when present, is rendered by dispatching back into this
    visitor via ``accept``.
    """
    search_kwargs = ({} if structured_query.filter is None else {'filter':
        structured_query.filter.accept(self)})
    return structured_query.query, search_kwargs
| null |
get_input_schema
|
return self.runnable.get_input_schema(config)
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Delegate input-schema resolution to the wrapped runnable."""
    delegate = self.runnable
    return delegate.get_input_schema(config)
| null |
test_aviary_call
|
"""Test valid call to Anyscale."""
llm = Aviary()
output = llm('Say bar:')
print(f"""llm answer:
{output}""")
assert isinstance(output, str)
|
def test_aviary_call() ->None:
    """Test valid call to Anyscale."""
    model = Aviary()
    answer = model('Say bar:')
    print(f'llm answer:\n{answer}')
    assert isinstance(answer, str)
|
Test valid call to Anyscale.
|
_Return
|
self.fill('return')
if t.value:
self.write(' ')
self.dispatch(t.value)
|
def _Return(self, t):
self.fill('return')
if t.value:
self.write(' ')
self.dispatch(t.value)
| null |
_import_edenai_EdenAiParsingInvoiceTool
|
from langchain_community.tools.edenai import EdenAiParsingInvoiceTool
return EdenAiParsingInvoiceTool
|
def _import_edenai_EdenAiParsingInvoiceTool() ->Any:
    """Lazily import and return the ``EdenAiParsingInvoiceTool`` class."""
    from langchain_community.tools.edenai import (
        EdenAiParsingInvoiceTool as tool_cls,
    )
    return tool_cls
| null |
similarity_search
|
"""Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
"""
res = self.similarity_search_with_score(query=query, k=k, **kwargs)
return [doc for doc, _ in res]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Search for similar documents to the query string.

    Args:
        query (str): The query string to search for.
        k (int, optional): The number of results to return. Defaults to 4.
    Returns:
        List[Document]: A list of documents that are similar to the query.
    """
    scored = self.similarity_search_with_score(query=query, k=k, **kwargs)
    return [document for document, _score in scored]
|
Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
|
_generate_docs_object
|
page_offset = []
for page in result.pages:
page_offset.append(page.spans[0]['offset'])
for para in result.paragraphs:
yield Document(page_content=para.content, metadata={'role': para.role,
'page': para.bounding_regions[0].page_number, 'bounding_box': para.
bounding_regions[0].polygon, 'type': 'paragraph'})
for table in result.tables:
yield Document(page_content=table.cells, metadata={'footnote': table.
footnotes, 'caption': table.caption, 'page': para.bounding_regions[
0].page_number, 'bounding_box': para.bounding_regions[0].polygon,
'row_count': table.row_count, 'column_count': table.column_count,
'type': 'table'})
|
def _generate_docs_object(self, result: Any) ->Iterator[Document]:
    """Yield one Document per paragraph and per table of an analysis result.

    Args:
        result: An Azure Document Intelligence analyze result — assumed to
            expose ``paragraphs`` and ``tables`` collections whose items
            carry ``bounding_regions`` (TODO confirm against the SDK
            version in use).

    Yields:
        Document: paragraph documents first (role, page, bounding box),
        then table documents (footnotes, caption, page, bounding box,
        row/column counts).
    """
    for para in result.paragraphs:
        yield Document(page_content=para.content, metadata={'role': para.
            role, 'page': para.bounding_regions[0].page_number,
            'bounding_box': para.bounding_regions[0].polygon, 'type':
            'paragraph'})
    for table in result.tables:
        # Bug fix: use the table's own bounding region; the original read
        # the stale `para` loop variable, mislabeling every table's page
        # and crashing (NameError) when there were no paragraphs.
        # NOTE(review): page_content=table.cells passes a non-string
        # (list of cells); Document.page_content is normally text —
        # left as-is to preserve downstream behavior, but worth fixing.
        yield Document(page_content=table.cells, metadata={'footnote':
            table.footnotes, 'caption': table.caption, 'page': table.
            bounding_regions[0].page_number, 'bounding_box': table.
            bounding_regions[0].polygon, 'row_count': table.row_count,
            'column_count': table.column_count, 'type': 'table'})
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.