method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_op_require_direct_access_index
|
"""
Raise ValueError if the operation is not supported for direct-access index."""
if not self._is_direct_access_index():
raise ValueError(f'`{op_name}` is only supported for direct-access index.')
|
def _op_require_direct_access_index(self, op_name: str) ->None:
"""
Raise ValueError if the operation is not supported for direct-access index."""
if not self._is_direct_access_index():
raise ValueError(
f'`{op_name}` is only supported for direct-access index.')
|
Raise ValueError if the operation is not supported for direct-access index.
|
_load_pal_chain
|
from langchain_experimental.pal_chain import PALChain
if 'llm_chain' in config:
llm_chain_config = config.pop('llm_chain')
llm_chain = load_chain_from_config(llm_chain_config)
elif 'llm_chain_path' in config:
llm_chain = load_chain(config.pop('llm_chain_path'))
else:
raise ValueError('One of `llm_chain` or `llm_chain_path` must be present.')
return PALChain(llm_chain=llm_chain, **config)
|
def _load_pal_chain(config: dict, **kwargs: Any) ->Any:
from langchain_experimental.pal_chain import PALChain
if 'llm_chain' in config:
llm_chain_config = config.pop('llm_chain')
llm_chain = load_chain_from_config(llm_chain_config)
elif 'llm_chain_path' in config:
llm_chain = load_chain(config.pop('llm_chain_path'))
else:
raise ValueError(
'One of `llm_chain` or `llm_chain_path` must be present.')
return PALChain(llm_chain=llm_chain, **config)
| null |
download_documents
|
"""Query the Brave search engine and return the results as a list of Documents.
Args:
query: The query to search for.
Returns: The results as a list of Documents.
"""
results = self._search_request(query)
return [Document(page_content=item.get('description'), metadata={'title':
item.get('title'), 'link': item.get('url')}) for item in results]
|
def download_documents(self, query: str) ->List[Document]:
"""Query the Brave search engine and return the results as a list of Documents.
Args:
query: The query to search for.
Returns: The results as a list of Documents.
"""
results = self._search_request(query)
return [Document(page_content=item.get('description'), metadata={
'title': item.get('title'), 'link': item.get('url')}) for item in
results]
|
Query the Brave search engine and return the results as a list of Documents.
Args:
query: The query to search for.
Returns: The results as a list of Documents.
|
output_keys
|
"""Expect output key.
:meta private:
"""
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
"""Expect output key.
:meta private:
"""
return [self.output_key]
|
Expect output key.
:meta private:
|
esprima_installed
|
try:
import esprima
return True
except Exception as e:
print(f'esprima not installed, skipping test {e}')
return False
|
def esprima_installed() ->bool:
try:
import esprima
return True
except Exception as e:
print(f'esprima not installed, skipping test {e}')
return False
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
get_additional_metadata
|
"""Document additional metadata dict.
This returns any extra metadata except these:
* result_id
* document_id
* source
* title
* excerpt
* document_attributes
"""
return {}
|
def get_additional_metadata(self) ->dict:
"""Document additional metadata dict.
This returns any extra metadata except these:
* result_id
* document_id
* source
* title
* excerpt
* document_attributes
"""
return {}
|
Document additional metadata dict.
This returns any extra metadata except these:
* result_id
* document_id
* source
* title
* excerpt
* document_attributes
|
__init__
|
self.nonlocals: Set[str] = set()
|
def __init__(self) ->None:
self.nonlocals: Set[str] = set()
| null |
__init__
|
"""Initialize with a path.
Args:
path: The path to load the notebook from.
include_outputs: Whether to include the outputs of the cell.
Defaults to False.
max_output_length: Maximum length of the output to be displayed.
Defaults to 10.
remove_newline: Whether to remove newlines from the notebook.
Defaults to False.
traceback: Whether to return a traceback of the error.
Defaults to False.
"""
self.file_path = path
self.include_outputs = include_outputs
self.max_output_length = max_output_length
self.remove_newline = remove_newline
self.traceback = traceback
|
def __init__(self, path: str, include_outputs: bool=False,
max_output_length: int=10, remove_newline: bool=False, traceback: bool=
False):
"""Initialize with a path.
Args:
path: The path to load the notebook from.
include_outputs: Whether to include the outputs of the cell.
Defaults to False.
max_output_length: Maximum length of the output to be displayed.
Defaults to 10.
remove_newline: Whether to remove newlines from the notebook.
Defaults to False.
traceback: Whether to return a traceback of the error.
Defaults to False.
"""
self.file_path = path
self.include_outputs = include_outputs
self.max_output_length = max_output_length
self.remove_newline = remove_newline
self.traceback = traceback
|
Initialize with a path.
Args:
path: The path to load the notebook from.
include_outputs: Whether to include the outputs of the cell.
Defaults to False.
max_output_length: Maximum length of the output to be displayed.
Defaults to 10.
remove_newline: Whether to remove newlines from the notebook.
Defaults to False.
traceback: Whether to return a traceback of the error.
Defaults to False.
|
test_custom_formatter
|
"""Test ability to create a custom content formatter."""
class CustomFormatter(ContentFormatterBase):
content_type = 'application/json'
accepts = 'application/json'
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes:
input_str = json.dumps({'inputs': [prompt], 'parameters':
model_kwargs, 'options': {'use_cache': False, 'wait_for_model':
True}})
return input_str.encode('utf-8')
def format_response_payload(self, output: bytes) ->str:
response_json = json.loads(output)
return response_json[0]['summary_text']
llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv(
'BART_ENDPOINT_API_KEY'), endpoint_url=os.getenv('BART_ENDPOINT_URL'),
deployment_name=os.getenv('BART_DEPLOYMENT_NAME'), content_formatter=
CustomFormatter())
output = llm('Foo')
assert isinstance(output, str)
|
def test_custom_formatter() ->None:
"""Test ability to create a custom content formatter."""
class CustomFormatter(ContentFormatterBase):
content_type = 'application/json'
accepts = 'application/json'
def format_request_payload(self, prompt: str, model_kwargs: Dict
) ->bytes:
input_str = json.dumps({'inputs': [prompt], 'parameters':
model_kwargs, 'options': {'use_cache': False,
'wait_for_model': True}})
return input_str.encode('utf-8')
def format_response_payload(self, output: bytes) ->str:
response_json = json.loads(output)
return response_json[0]['summary_text']
llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv(
'BART_ENDPOINT_API_KEY'), endpoint_url=os.getenv(
'BART_ENDPOINT_URL'), deployment_name=os.getenv(
'BART_DEPLOYMENT_NAME'), content_formatter=CustomFormatter())
output = llm('Foo')
assert isinstance(output, str)
|
Test ability to create a custom content formatter.
|
_has_assistant_message
|
"""Check if chat session has an assistant message."""
return any([isinstance(m, AIMessage) for m in session['messages']])
|
def _has_assistant_message(session: ChatSession) ->bool:
"""Check if chat session has an assistant message."""
return any([isinstance(m, AIMessage) for m in session['messages']])
|
Check if chat session has an assistant message.
|
_docarray_to_langchain_doc
|
"""
Convert a DocArray document (which also might be a dict)
to a langchain document format.
DocArray document can contain arbitrary fields, so the mapping is done
in the following way:
page_content <-> content_field
metadata <-> all other fields excluding
tensors and embeddings (so float, int, string)
Args:
doc: DocArray document
Returns:
Document in langchain format
Raises:
ValueError: If the document doesn't contain the content field
"""
fields = doc.keys() if isinstance(doc, dict) else doc.__fields__
if self.content_field not in fields:
raise ValueError(
f'Document does not contain the content field - {self.content_field}.')
lc_doc = Document(page_content=doc[self.content_field] if isinstance(doc,
dict) else getattr(doc, self.content_field))
for name in fields:
value = doc[name] if isinstance(doc, dict) else getattr(doc, name)
if isinstance(value, (str, int, float, bool)
) and name != self.content_field:
lc_doc.metadata[name] = value
return lc_doc
|
def _docarray_to_langchain_doc(self, doc: Union[Dict[str, Any], Any]
) ->Document:
"""
Convert a DocArray document (which also might be a dict)
to a langchain document format.
DocArray document can contain arbitrary fields, so the mapping is done
in the following way:
page_content <-> content_field
metadata <-> all other fields excluding
tensors and embeddings (so float, int, string)
Args:
doc: DocArray document
Returns:
Document in langchain format
Raises:
ValueError: If the document doesn't contain the content field
"""
fields = doc.keys() if isinstance(doc, dict) else doc.__fields__
if self.content_field not in fields:
raise ValueError(
f'Document does not contain the content field - {self.content_field}.'
)
lc_doc = Document(page_content=doc[self.content_field] if isinstance(
doc, dict) else getattr(doc, self.content_field))
for name in fields:
value = doc[name] if isinstance(doc, dict) else getattr(doc, name)
if isinstance(value, (str, int, float, bool)
) and name != self.content_field:
lc_doc.metadata[name] = value
return lc_doc
|
Convert a DocArray document (which also might be a dict)
to a langchain document format.
DocArray document can contain arbitrary fields, so the mapping is done
in the following way:
page_content <-> content_field
metadata <-> all other fields excluding
tensors and embeddings (so float, int, string)
Args:
doc: DocArray document
Returns:
Document in langchain format
Raises:
ValueError: If the document doesn't contain the content field
|
test_redis_model_creation
|
redis_model = RedisModel(text=[TextFieldSchema(name='content')], tag=[
TagFieldSchema(name='tag')], numeric=[NumericFieldSchema(name='numeric'
)], vector=[FlatVectorField(name='flat_vector', dims=128, algorithm=
'FLAT')])
assert redis_model.text[0].name == 'content'
assert redis_model.tag[0].name == 'tag'
assert redis_model.numeric[0].name == 'numeric'
assert redis_model.vector[0].name == 'flat_vector'
with pytest.raises(ValueError):
_ = redis_model.content_vector
|
def test_redis_model_creation() ->None:
redis_model = RedisModel(text=[TextFieldSchema(name='content')], tag=[
TagFieldSchema(name='tag')], numeric=[NumericFieldSchema(name=
'numeric')], vector=[FlatVectorField(name='flat_vector', dims=128,
algorithm='FLAT')])
assert redis_model.text[0].name == 'content'
assert redis_model.tag[0].name == 'tag'
assert redis_model.numeric[0].name == 'numeric'
assert redis_model.vector[0].name == 'flat_vector'
with pytest.raises(ValueError):
_ = redis_model.content_vector
| null |
test_multiple_msg
|
msgs = [self.human_msg, self.ai_msg, self.sys_msg, self.func_msg, self.
tool_msg, self.chat_msg]
expected_output = '\n'.join(['Human: human', 'AI: ai', 'System: system',
'Function: function', 'Tool: tool', 'Chat: chat'])
self.assertEqual(get_buffer_string(msgs), expected_output)
|
def test_multiple_msg(self) ->None:
msgs = [self.human_msg, self.ai_msg, self.sys_msg, self.func_msg, self.
tool_msg, self.chat_msg]
expected_output = '\n'.join(['Human: human', 'AI: ai', 'System: system',
'Function: function', 'Tool: tool', 'Chat: chat'])
self.assertEqual(get_buffer_string(msgs), expected_output)
| null |
_generate
|
params = self._format_params(messages=messages, stop=stop, **kwargs)
data = self._client.beta.messages.create(**params)
return ChatResult(generations=[ChatGeneration(message=AIMessage(content=
data.content[0].text))], llm_output=data)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
params = self._format_params(messages=messages, stop=stop, **kwargs)
data = self._client.beta.messages.create(**params)
return ChatResult(generations=[ChatGeneration(message=AIMessage(content
=data.content[0].text))], llm_output=data)
| null |
_get_columns
|
page_content_columns = (self.page_content_columns if self.
page_content_columns else [])
metadata_columns = self.metadata_columns if self.metadata_columns else []
if page_content_columns is None and query_result:
page_content_columns = list(query_result[0].keys())
if metadata_columns is None:
metadata_columns = []
return page_content_columns or [], metadata_columns
|
def _get_columns(self, query_result: List[Dict[str, Any]]) ->Tuple[List[str
], List[str]]:
page_content_columns = (self.page_content_columns if self.
page_content_columns else [])
metadata_columns = self.metadata_columns if self.metadata_columns else []
if page_content_columns is None and query_result:
page_content_columns = list(query_result[0].keys())
if metadata_columns is None:
metadata_columns = []
return page_content_columns or [], metadata_columns
| null |
_get_debug
|
from langchain_core.globals import get_debug
return get_debug()
|
def _get_debug() ->bool:
from langchain_core.globals import get_debug
return get_debug()
| null |
_run
|
try:
response = multion.new_session({'input': query, 'url': url})
return {'sessionId': response['session_id'], 'Response': response[
'message']}
except Exception as e:
raise Exception(f'An error occurred: {e}')
|
def _run(self, query: str, url: Optional[str]='https://www.google.com/',
run_manager: Optional[CallbackManagerForToolRun]=None) ->dict:
try:
response = multion.new_session({'input': query, 'url': url})
return {'sessionId': response['session_id'], 'Response': response[
'message']}
except Exception as e:
raise Exception(f'An error occurred: {e}')
| null |
lazy_parse
|
"""Extract the second character of a blob."""
yield Document(page_content=blob.as_string()[1])
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the second character of a blob."""
yield Document(page_content=blob.as_string()[1])
|
Extract the second character of a blob.
|
from_llm
|
"""Load QA Eval Chain from LLM."""
prompt = prompt or COT_PROMPT
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate]=
None, **kwargs: Any) ->CotQAEvalChain:
"""Load QA Eval Chain from LLM."""
prompt = prompt or COT_PROMPT
cls._validate_input_vars(prompt)
return cls(llm=llm, prompt=prompt, **kwargs)
|
Load QA Eval Chain from LLM.
|
patch
|
"""PATCH the URL and return the text."""
return self.requests.patch(url, data, **kwargs).text
|
def patch(self, url: str, data: Dict[str, Any], **kwargs: Any) ->str:
"""PATCH the URL and return the text."""
return self.requests.patch(url, data, **kwargs).text
|
PATCH the URL and return the text.
|
test_ruby_code_splitter
|
splitter = RecursiveCharacterTextSplitter.from_language(Language.RUBY,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = """
def hello_world
puts "Hello, World!"
end
hello_world
"""
chunks = splitter.split_text(code)
assert chunks == ['def hello_world', 'puts "Hello,', 'World!"', 'end',
'hello_world']
|
def test_ruby_code_splitter() ->None:
splitter = RecursiveCharacterTextSplitter.from_language(Language.RUBY,
chunk_size=CHUNK_SIZE, chunk_overlap=0)
code = (
'\ndef hello_world\n puts "Hello, World!"\nend\n\nhello_world\n ')
chunks = splitter.split_text(code)
assert chunks == ['def hello_world', 'puts "Hello,', 'World!"', 'end',
'hello_world']
| null |
on_chain_start
|
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({'action': 'on_chain_start'})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = ','.join([f'{k}={v}' for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp['inputs'] = chain_input
self.deck.append(self.markdown_renderer().to_html('### Chain Start'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([
input_resp])) + '\n')
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
**kwargs: Any) ->None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({'action': 'on_chain_start'})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = ','.join([f'{k}={v}' for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp['inputs'] = chain_input
self.deck.append(self.markdown_renderer().to_html('### Chain Start'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([
input_resp])) + '\n')
|
Run when chain starts running.
|
_get_prompt
|
"""Get prompt from inputs.
Args:
inputs: The input dictionary.
Returns:
A string prompt.
Raises:
InputFormatError: If the input format is invalid.
"""
if not inputs:
raise InputFormatError('Inputs should not be empty.')
prompts = []
if 'prompt' in inputs:
if not isinstance(inputs['prompt'], str):
raise InputFormatError(
f"Expected string for 'prompt', got {type(inputs['prompt']).__name__}"
)
prompts = [inputs['prompt']]
elif 'prompts' in inputs:
if not isinstance(inputs['prompts'], list) or not all(isinstance(i, str
) for i in inputs['prompts']):
raise InputFormatError(
f"Expected list of strings for 'prompts', got {type(inputs['prompts']).__name__}"
)
prompts = inputs['prompts']
elif len(inputs) == 1:
prompt_ = next(iter(inputs.values()))
if isinstance(prompt_, str):
prompts = [prompt_]
elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_
):
prompts = prompt_
else:
raise InputFormatError(
f'LLM Run expects string prompt input. Got {inputs}')
else:
raise InputFormatError(
f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}")
if len(prompts) == 1:
return prompts[0]
else:
raise InputFormatError(
f'LLM Run expects single prompt input. Got {len(prompts)} prompts.')
|
def _get_prompt(inputs: Dict[str, Any]) ->str:
"""Get prompt from inputs.
Args:
inputs: The input dictionary.
Returns:
A string prompt.
Raises:
InputFormatError: If the input format is invalid.
"""
if not inputs:
raise InputFormatError('Inputs should not be empty.')
prompts = []
if 'prompt' in inputs:
if not isinstance(inputs['prompt'], str):
raise InputFormatError(
f"Expected string for 'prompt', got {type(inputs['prompt']).__name__}"
)
prompts = [inputs['prompt']]
elif 'prompts' in inputs:
if not isinstance(inputs['prompts'], list) or not all(isinstance(i,
str) for i in inputs['prompts']):
raise InputFormatError(
f"Expected list of strings for 'prompts', got {type(inputs['prompts']).__name__}"
)
prompts = inputs['prompts']
elif len(inputs) == 1:
prompt_ = next(iter(inputs.values()))
if isinstance(prompt_, str):
prompts = [prompt_]
elif isinstance(prompt_, list) and all(isinstance(i, str) for i in
prompt_):
prompts = prompt_
else:
raise InputFormatError(
f'LLM Run expects string prompt input. Got {inputs}')
else:
raise InputFormatError(
f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}")
if len(prompts) == 1:
return prompts[0]
else:
raise InputFormatError(
f'LLM Run expects single prompt input. Got {len(prompts)} prompts.'
)
|
Get prompt from inputs.
Args:
inputs: The input dictionary.
Returns:
A string prompt.
Raises:
InputFormatError: If the input format is invalid.
|
delete
|
if ids is None:
return False
batch = [{'id': id} for id in ids]
result = self._vespa_app.delete_batch(batch)
return sum([(0 if r.status_code == 200 else 1) for r in result]) == 0
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool
]:
if ids is None:
return False
batch = [{'id': id} for id in ids]
result = self._vespa_app.delete_batch(batch)
return sum([(0 if r.status_code == 200 else 1) for r in result]) == 0
| null |
_prepare_output
|
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
|
def _prepare_output(self, result: dict) ->dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
|
Prepare the output.
|
__getitem__
|
...
|
@overload
def __getitem__(self, item: int) ->Iterator[T]:
...
| null |
__from
|
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
connection_string = cls.get_connection_string(kwargs)
store = cls(connection_string=connection_string, embedding_function=
embedding_function, ndims=ndims, table_name=table_name,
pre_delete_table=pre_delete_table)
store.add_embeddings(texts=texts, embeddings=embeddings, metadatas=
metadatas, ids=ids, **kwargs)
return store
|
@classmethod
def __from(cls, texts: List[str], embeddings: List[List[float]],
embedding_function: Embeddings, metadatas: Optional[List[dict]]=None,
ids: Optional[List[str]]=None, ndims: int=ADA_TOKEN_COUNT, table_name:
str=_LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool=False, **
kwargs: Any) ->Hologres:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
connection_string = cls.get_connection_string(kwargs)
store = cls(connection_string=connection_string, embedding_function=
embedding_function, ndims=ndims, table_name=table_name,
pre_delete_table=pre_delete_table)
store.add_embeddings(texts=texts, embeddings=embeddings, metadatas=
metadatas, ids=ids, **kwargs)
return store
| null |
test_run_llm_or_chain_with_input_mapper
|
example = Example(id=uuid.uuid4(), created_at=_CREATED_AT, inputs={
'the wrong input': '1', 'another key': '2'}, outputs={'output': '2'},
dataset_id=str(uuid.uuid4()))
def run_val(inputs: dict) ->dict:
assert 'the right input' in inputs
return {'output': '2'}
mock_chain = TransformChain(input_variables=['the right input'],
output_variables=['output'], transform=run_val)
def input_mapper(inputs: dict) ->dict:
assert 'the wrong input' in inputs
return {'the right input': inputs['the wrong input']}
result = _run_llm_or_chain(example, {'callbacks': [], 'tags': []},
llm_or_chain_factory=lambda : mock_chain, input_mapper=input_mapper)
assert result == {'output': '2', 'the right input': '1'}
bad_result = _run_llm_or_chain(example, {'callbacks': [], 'tags': []},
llm_or_chain_factory=lambda : mock_chain)
assert 'Error' in bad_result
def llm_input_mapper(inputs: dict) ->str:
assert 'the wrong input' in inputs
return 'the right input'
mock_llm = FakeLLM(queries={'the right input': 'somenumber'})
llm_result = _run_llm_or_chain(example, {'callbacks': [], 'tags': []},
llm_or_chain_factory=mock_llm, input_mapper=llm_input_mapper)
assert isinstance(llm_result, str)
assert llm_result == 'somenumber'
|
def test_run_llm_or_chain_with_input_mapper() ->None:
example = Example(id=uuid.uuid4(), created_at=_CREATED_AT, inputs={
'the wrong input': '1', 'another key': '2'}, outputs={'output': '2'
}, dataset_id=str(uuid.uuid4()))
def run_val(inputs: dict) ->dict:
assert 'the right input' in inputs
return {'output': '2'}
mock_chain = TransformChain(input_variables=['the right input'],
output_variables=['output'], transform=run_val)
def input_mapper(inputs: dict) ->dict:
assert 'the wrong input' in inputs
return {'the right input': inputs['the wrong input']}
result = _run_llm_or_chain(example, {'callbacks': [], 'tags': []},
llm_or_chain_factory=lambda : mock_chain, input_mapper=input_mapper)
assert result == {'output': '2', 'the right input': '1'}
bad_result = _run_llm_or_chain(example, {'callbacks': [], 'tags': []},
llm_or_chain_factory=lambda : mock_chain)
assert 'Error' in bad_result
def llm_input_mapper(inputs: dict) ->str:
assert 'the wrong input' in inputs
return 'the right input'
mock_llm = FakeLLM(queries={'the right input': 'somenumber'})
llm_result = _run_llm_or_chain(example, {'callbacks': [], 'tags': []},
llm_or_chain_factory=mock_llm, input_mapper=llm_input_mapper)
assert isinstance(llm_result, str)
assert llm_result == 'somenumber'
| null |
test_similarity_search_limit_distance
|
"""Test similarity search limit score."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
output = docsearch.similarity_search(texts[0], k=3, distance_threshold=0.1)
assert len(output) == 2
assert drop(docsearch.index_name)
|
def test_similarity_search_limit_distance(texts: List[str]) ->None:
"""Test similarity search limit score."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
TEST_REDIS_URL)
output = docsearch.similarity_search(texts[0], k=3, distance_threshold=0.1)
assert len(output) == 2
assert drop(docsearch.index_name)
|
Test similarity search limit score.
|
test_multi_variable_pipeline
|
prompt_a = PromptTemplate.from_template('{foo}')
prompt_b = PromptTemplate.from_template('okay {bar} {baz}')
pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_b,
pipeline_prompts=[('bar', prompt_a)])
output = pipeline_prompt.format(foo='jim', baz='deep')
assert output == 'okay jim deep'
|
def test_multi_variable_pipeline() ->None:
prompt_a = PromptTemplate.from_template('{foo}')
prompt_b = PromptTemplate.from_template('okay {bar} {baz}')
pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_b,
pipeline_prompts=[('bar', prompt_a)])
output = pipeline_prompt.format(foo='jim', baz='deep')
assert output == 'okay jim deep'
| null |
_import_tiledb
|
from langchain_community.vectorstores.tiledb import TileDB
return TileDB
|
def _import_tiledb() ->Any:
from langchain_community.vectorstores.tiledb import TileDB
return TileDB
| null |
visit_structured_query
|
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'predicates': structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'predicates': structured_query.filter.accept(self)}
return structured_query.query, kwargs
| null |
__init__
|
"""MyScale Wrapper to LangChain
embedding (Embeddings):
config (MyScaleSettings): Configuration to MyScale Client
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
"""
try:
from clickhouse_connect import get_client
except ImportError:
raise ImportError(
'Could not import clickhouse connect python package. Please install it with `pip install clickhouse-connect`.'
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
self.pgbar = lambda x: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = MyScaleSettings()
assert self.config
assert self.config.host and self.config.port
assert self.config.column_map and self.config.database and self.config.table and self.config.metric
for k in ['id', 'vector', 'text', 'metadata']:
assert k in self.config.column_map
assert self.config.metric.upper() in ['IP', 'COSINE', 'L2']
if self.config.metric in ['ip', 'cosine', 'l2']:
logger.warning(
"Lower case metric types will be deprecated the future. Please use one of ('IP', 'Cosine', 'L2')"
)
dim = len(embedding.embed_query('try this out'))
index_params = ', ' + ','.join([f"'{k}={v}'" for k, v in self.config.
index_param.items()]) if self.config.index_param else ''
schema_ = f"""
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} String,
{self.config.column_map['text']} String,
{self.config.column_map['vector']} Array(Float32),
{self.config.column_map['metadata']} JSON,
CONSTRAINT cons_vec_len CHECK length( {self.config.column_map['vector']}) = {dim},
VECTOR INDEX vidx {self.config.column_map['vector']} TYPE {self.config.index_type}( 'metric_type={self.config.metric}'{index_params})
) ENGINE = MergeTree ORDER BY {self.config.column_map['id']}
"""
self.dim = dim
self.BS = '\\'
self.must_escape = '\\', "'"
self._embeddings = embedding
self.dist_order = 'ASC' if self.config.metric.upper() in ['COSINE', 'L2'
] else 'DESC'
self.client = get_client(host=self.config.host, port=self.config.port,
username=self.config.username, password=self.config.password, **kwargs)
self.client.command('SET allow_experimental_object_type=1')
self.client.command(schema_)
|
def __init__(self, embedding: Embeddings, config: Optional[MyScaleSettings]
=None, **kwargs: Any) ->None:
"""MyScale Wrapper to LangChain
embedding (Embeddings):
config (MyScaleSettings): Configuration to MyScale Client
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
"""
try:
from clickhouse_connect import get_client
except ImportError:
raise ImportError(
'Could not import clickhouse connect python package. Please install it with `pip install clickhouse-connect`.'
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
self.pgbar = lambda x: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = MyScaleSettings()
assert self.config
assert self.config.host and self.config.port
assert self.config.column_map and self.config.database and self.config.table and self.config.metric
for k in ['id', 'vector', 'text', 'metadata']:
assert k in self.config.column_map
assert self.config.metric.upper() in ['IP', 'COSINE', 'L2']
if self.config.metric in ['ip', 'cosine', 'l2']:
logger.warning(
"Lower case metric types will be deprecated the future. Please use one of ('IP', 'Cosine', 'L2')"
)
dim = len(embedding.embed_query('try this out'))
index_params = ', ' + ','.join([f"'{k}={v}'" for k, v in self.config.
index_param.items()]) if self.config.index_param else ''
schema_ = f"""
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} String,
{self.config.column_map['text']} String,
{self.config.column_map['vector']} Array(Float32),
{self.config.column_map['metadata']} JSON,
CONSTRAINT cons_vec_len CHECK length( {self.config.column_map['vector']}) = {dim},
VECTOR INDEX vidx {self.config.column_map['vector']} TYPE {self.config.index_type}( 'metric_type={self.config.metric}'{index_params})
) ENGINE = MergeTree ORDER BY {self.config.column_map['id']}
"""
self.dim = dim
self.BS = '\\'
self.must_escape = '\\', "'"
self._embeddings = embedding
self.dist_order = 'ASC' if self.config.metric.upper() in ['COSINE', 'L2'
] else 'DESC'
self.client = get_client(host=self.config.host, port=self.config.port,
username=self.config.username, password=self.config.password, **kwargs)
self.client.command('SET allow_experimental_object_type=1')
self.client.command(schema_)
|
MyScale Wrapper to LangChain
embedding (Embeddings):
config (MyScaleSettings): Configuration to MyScale Client
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
|
test_tracer_chat_model_run
|
"""Test tracer on a Chat Model run."""
tracer = FakeTracer()
manager = CallbackManager(handlers=[tracer])
run_managers = manager.on_chat_model_start(serialized=SERIALIZED_CHAT,
messages=[[HumanMessage(content='')]])
compare_run = Run(id=str(run_managers[0].run_id), name='chat_model',
start_time=datetime.now(timezone.utc), end_time=datetime.now(timezone.
utc), events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {
'name': 'end', 'time': datetime.now(timezone.utc)}], extra={},
execution_order=1, child_execution_order=1, serialized=SERIALIZED_CHAT,
inputs=dict(prompts=['Human: ']), outputs=LLMResult(generations=[[]]),
error=None, run_type='llm', trace_id=run_managers[0].run_id,
dotted_order=f'20230101T000000000000Z{run_managers[0].run_id}')
for run_manager in run_managers:
run_manager.on_llm_end(response=LLMResult(generations=[[]]))
assert tracer.runs == [compare_run]
|
@freeze_time('2023-01-01')
def test_tracer_chat_model_run() ->None:
    """Test tracer on a Chat Model run."""
    tracer = FakeTracer()
    manager = CallbackManager(handlers=[tracer])
    # Start one chat-model run with a single empty human message.
    run_managers = manager.on_chat_model_start(serialized=SERIALIZED_CHAT,
        messages=[[HumanMessage(content='')]])
    # Expected Run snapshot: @freeze_time pins datetime.now(), so the
    # start/end timestamps and the dotted_order prefix are deterministic.
    compare_run = Run(id=str(run_managers[0].run_id), name='chat_model',
        start_time=datetime.now(timezone.utc), end_time=datetime.now(
        timezone.utc), events=[{'name': 'start', 'time': datetime.now(
        timezone.utc)}, {'name': 'end', 'time': datetime.now(timezone.utc)}
        ], extra={}, execution_order=1, child_execution_order=1, serialized
        =SERIALIZED_CHAT, inputs=dict(prompts=['Human: ']), outputs=
        LLMResult(generations=[[]]), error=None, run_type='llm', trace_id=
        run_managers[0].run_id, dotted_order=
        f'20230101T000000000000Z{run_managers[0].run_id}')
    # End every run, then check the tracer captured exactly the expected run.
    for run_manager in run_managers:
        run_manager.on_llm_end(response=LLMResult(generations=[[]]))
    assert tracer.runs == [compare_run]
|
Test tracer on a Chat Model run.
|
_llm_type
|
"""Return type of llm."""
return 'clarifai'
|
@property
def _llm_type(self) ->str:
    """Return type of llm. Identifies this model as 'clarifai' for serialization/logging."""
    return 'clarifai'
|
Return type of llm.
|
_stream
|
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs, 'stream': True}
for stream_resp in completion_with_retry(self, messages=messages,
run_manager=run_manager, **params):
token = stream_resp['choices'][0]['delta'].get('content', '')
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
|
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
    GenerationChunk]:
    """Stream completion chunks for a single prompt.

    Converts the prompt into chat messages, forwards ``stop`` and extra
    kwargs to the API with ``stream=True``, and yields one
    ``GenerationChunk`` per streamed delta.
    """
    messages, params = self._get_chat_messages([prompt], stop)
    # Force streaming; caller kwargs may override other request params.
    params = {**params, **kwargs, 'stream': True}
    for stream_resp in completion_with_retry(self, messages=messages,
        run_manager=run_manager, **params):
        # A delta may omit 'content' (e.g. a role-only first chunk) -> ''.
        token = stream_resp['choices'][0]['delta'].get('content', '')
        chunk = GenerationChunk(text=token)
        yield chunk
        # Fire the new-token callback for each chunk, after yielding it.
        if run_manager:
            run_manager.on_llm_new_token(token, chunk=chunk)
| null |
_batch
|
"""Utility batching function."""
it = iter(iterable)
while True:
chunk = list(islice(it, size))
if not chunk:
return
yield chunk
|
def _batch(size: int, iterable: Iterable[T]) ->Iterator[List[T]]:
    """Yield successive lists of at most ``size`` items from ``iterable``."""
    source = iter(iterable)
    # iter()'s two-arg form stops as soon as a slice comes back empty.
    return iter(lambda: list(islice(source, size)), [])
|
Utility batching function.
|
from_llm
|
execution_template = """The AI assistant has parsed the user input into several tasksand executed them. The results are as follows:
{task_execution}
Please summarize the results and generate a response."""
prompt = PromptTemplate(template=execution_template, input_variables=[
'task_execution'])
return cls(prompt=prompt, llm=llm, verbose=verbose)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool=True) ->LLMChain:
    """Construct the summarizing chain from an LLM.

    Args:
        llm: Language model used to summarize the executed task results.
        verbose: Whether the underlying chain logs its activity.

    Returns:
        An ``LLMChain`` that summarizes task-execution results.
    """
    # Fixed a typo in the prompt template ("tasksand" -> "tasks and").
    execution_template = """The AI assistant has parsed the user input into several tasks and executed them. The results are as follows:
{task_execution}
Please summarize the results and generate a response."""
    prompt = PromptTemplate(template=execution_template, input_variables=[
        'task_execution'])
    return cls(prompt=prompt, llm=llm, verbose=verbose)
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {'endpoint_url': self.endpoint_url}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters (the endpoint URL this LLM targets)."""
    return dict(endpoint_url=self.endpoint_url)
|
Get the identifying parameters.
|
_import_anonymizer_engine
|
try:
from presidio_anonymizer import AnonymizerEngine
except ImportError as e:
raise ImportError(
'Could not import presidio_anonymizer, please install with `pip install presidio-anonymizer`.'
) from e
return AnonymizerEngine
|
def _import_anonymizer_engine() ->'AnonymizerEngine':
    """Lazily import and return presidio's ``AnonymizerEngine`` class.

    Raises:
        ImportError: If the ``presidio_anonymizer`` package is not installed.
    """
    try:
        from presidio_anonymizer import AnonymizerEngine
    except ImportError as e:
        raise ImportError(
            'Could not import presidio_anonymizer, please install with `pip install presidio-anonymizer`.'
            ) from e
    return AnonymizerEngine
| null |
pytest_collection_modifyitems
|
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
required_pkgs_info: Dict[str, bool] = {}
only_extended = config.getoption('--only-extended') or False
only_core = config.getoption('--only-core') or False
if only_extended and only_core:
raise ValueError('Cannot specify both `--only-extended` and `--only-core`.'
)
for item in items:
requires_marker = item.get_closest_marker('requires')
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason=
'Skipping not a core test.'))
continue
required_pkgs = requires_marker.args
for pkg in required_pkgs:
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f'Package `{pkg}` is not installed but is required for extended tests. Please install the given package and try again.'
)
else:
item.add_marker(pytest.mark.skip(reason=
f'Requires pkg: `{pkg}`'))
break
elif only_extended:
item.add_marker(pytest.mark.skip(reason=
'Skipping not an extended test.'))
|
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]
    ) ->None:
    """Add implementations for handling custom markers.
    At the moment, this adds support for a custom `requires` marker.
    The `requires` marker is used to denote tests that require one or more packages
    to be installed to run. If the package is not installed, the test is skipped.
    The `requires` marker syntax is:
    .. code-block:: python
        @pytest.mark.requires("package1", "package2")
        def test_something():
            ...
    """
    # Cache of package-name -> installed? lookups, shared across all items.
    required_pkgs_info: Dict[str, bool] = {}
    only_extended = config.getoption('--only-extended') or False
    only_core = config.getoption('--only-core') or False
    if only_extended and only_core:
        raise ValueError(
            'Cannot specify both `--only-extended` and `--only-core`.')
    for item in items:
        requires_marker = item.get_closest_marker('requires')
        if requires_marker is not None:
            # Tests with a `requires` marker count as "extended" tests.
            if only_core:
                item.add_marker(pytest.mark.skip(reason=
                    'Skipping not a core test.'))
                continue
            required_pkgs = requires_marker.args
            for pkg in required_pkgs:
                # Probe each package only once; find_spec may raise for
                # malformed names, which is treated as "not installed".
                if pkg not in required_pkgs_info:
                    try:
                        installed = util.find_spec(pkg) is not None
                    except Exception:
                        installed = False
                    required_pkgs_info[pkg] = installed
                if not required_pkgs_info[pkg]:
                    if only_extended:
                        # Extended-only runs must have all extras installed.
                        pytest.fail(
                            f'Package `{pkg}` is not installed but is required for extended tests. Please install the given package and try again.'
                            )
                    else:
                        item.add_marker(pytest.mark.skip(reason=
                            f'Requires pkg: `{pkg}`'))
                    break
        elif only_extended:
            # Unmarked tests are "core"; skip them in extended-only runs.
            item.add_marker(pytest.mark.skip(reason=
                'Skipping not an extended test.'))
|
Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
|
fun
|
output = fake.invoke(input)
output += fake.invoke(input * 2)
output += fake.invoke(input * 3)
return output
|
@chain
def fun(input: str) ->int:
    """Invoke ``fake`` on the input at 1x, 2x and 3x length and sum the results."""
    total = fake.invoke(input)
    for multiplier in (2, 3):
        total += fake.invoke(input * multiplier)
    return total
| null |
_cast_schema_list_type
|
type_ = schema.type
if not isinstance(type_, list):
return type_
else:
return tuple(type_)
|
@staticmethod
def _cast_schema_list_type(schema: Schema) ->Optional[Union[str, Tuple[str,
...]]]:
type_ = schema.type
if not isinstance(type_, list):
return type_
else:
return tuple(type_)
| null |
__init__
|
"""Initialize with a file path.
Args:
file_path: The path to the Outlook Message file.
"""
self.file_path = file_path
if not os.path.isfile(self.file_path):
raise ValueError('File path %s is not a valid file' % self.file_path)
try:
import extract_msg
except ImportError:
raise ImportError(
'extract_msg is not installed. Please install it with `pip install extract_msg`'
)
|
def __init__(self, file_path: str):
    """Initialize with a file path.
    Args:
        file_path: The path to the Outlook Message file.
    Raises:
        ValueError: If ``file_path`` does not point to an existing file.
        ImportError: If the ``extract_msg`` package is not installed.
    """
    self.file_path = file_path
    if not os.path.isfile(self.file_path):
        raise ValueError('File path %s is not a valid file' % self.file_path)
    # Import check only: fail fast at construction if the optional
    # dependency is missing (presumably re-imported where it is used).
    try:
        import extract_msg
    except ImportError:
        raise ImportError(
            'extract_msg is not installed. Please install it with `pip install extract_msg`'
            )
|
Initialize with a file path.
Args:
file_path: The path to the Outlook Message file.
|
parse
|
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
"""
verdict = None
score = None
match_last = re.search('\\s*(Y|N)\\s*$', text, re.IGNORECASE)
match_first = re.search('^\\s*(Y|N)\\s*', text, re.IGNORECASE)
match_end = re.search('\\b(Y|N)\\b\\s*$', text, re.IGNORECASE)
if match_last:
verdict = match_last.group(1).strip()
text = text[:match_last.start()].strip()
elif match_first:
verdict = match_first.group(1).strip()
text = text[match_first.end():].strip()
elif match_end:
verdict = match_end.group(1).strip()
text = text[:match_end.start()].strip()
else:
splits = text.strip().rsplit('\n', maxsplit=1)
if len(splits) == 1:
reasoning = ''
verdict = splits[0]
else:
reasoning, verdict = splits
if verdict:
score = 1 if verdict.upper() == 'Y' else 0 if verdict.upper(
) == 'N' else None
return {'reasoning': text.strip(), 'value': verdict, 'score': score}
|
def parse(self, text: str) ->Dict[str, Any]:
    """Parse the output text.
    Args:
        text (str): The output text to parse.
    Returns:
        Dict: The parsed output with keys ``reasoning`` (the text minus the
        extracted verdict), ``value`` (the raw 'Y'/'N' verdict, if any) and
        ``score`` (1 for Y, 0 for N, otherwise None).
    """
    verdict = None
    score = None
    # A bare Y/N verdict may appear at the very end, the very start, or as
    # the last word; the three patterns are checked in that priority order.
    # NOTE(review): match_last has no word boundary, so text ending in a
    # word like 'OKAY' also matches its trailing 'Y' — confirm intended.
    match_last = re.search('\\s*(Y|N)\\s*$', text, re.IGNORECASE)
    match_first = re.search('^\\s*(Y|N)\\s*', text, re.IGNORECASE)
    match_end = re.search('\\b(Y|N)\\b\\s*$', text, re.IGNORECASE)
    if match_last:
        verdict = match_last.group(1).strip()
        text = text[:match_last.start()].strip()
    elif match_first:
        verdict = match_first.group(1).strip()
        text = text[match_first.end():].strip()
    elif match_end:
        verdict = match_end.group(1).strip()
        text = text[:match_end.start()].strip()
    else:
        # Fallback: treat the last line of the text as the verdict.
        splits = text.strip().rsplit('\n', maxsplit=1)
        if len(splits) == 1:
            reasoning = ''
            verdict = splits[0]
        else:
            # NOTE(review): `reasoning` is assigned but never used; the
            # returned 'reasoning' is always the unsplit `text` — confirm
            # whether the split half was meant to be returned instead.
            reasoning, verdict = splits
    if verdict:
        score = 1 if verdict.upper() == 'Y' else 0 if verdict.upper(
            ) == 'N' else None
    return {'reasoning': text.strip(), 'value': verdict, 'score': score}
|
Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
|
add_embeddings
|
"""Add the given texts and embeddings to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
"""
texts, embeddings = zip(*text_embeddings)
return self.__add(list(texts), list(embeddings), metadatas=metadatas, ids=
ids, bulk_size=bulk_size, **kwargs)
|
def add_embeddings(self, text_embeddings: Iterable[Tuple[str, List[float]]],
    metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None,
    bulk_size: int=500, **kwargs: Any) ->List[str]:
    """Add the given texts and embeddings to the vectorstore.
    Args:
        text_embeddings: Iterable pairs of string and embedding to
            add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of ids to associate with the texts.
        bulk_size: Bulk API request count; Default: 500
    Returns:
        List of ids from adding the texts into the vectorstore
        (empty when no pairs are given).
    Optional Args:
        vector_field: Document field embeddings are stored in. Defaults to
            "vector_field".
        text_field: Document field the text of the document is stored in.
            Defaults to "text".
    """
    pairs = list(text_embeddings)
    # Guard the empty case: `zip(*[])` would raise on the tuple unpacking.
    if not pairs:
        return []
    texts, embeddings = zip(*pairs)
    # Delegate to the private bulk-ingest helper.
    return self.__add(list(texts), list(embeddings), metadatas=metadatas,
        ids=ids, bulk_size=bulk_size, **kwargs)
|
Add the given texts and embeddings to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
|
_try_init_vertexai
|
allowed_params = ['project', 'location', 'credentials']
params = {k: v for k, v in values.items() if k in allowed_params}
init_vertexai(**params)
return None
|
@classmethod
def _try_init_vertexai(cls, values: Dict) ->None:
    """Initialize the Vertex AI SDK from the recognised subset of ``values``.

    Only ``project``, ``location`` and ``credentials`` are forwarded to
    ``init_vertexai``; every other entry in ``values`` is ignored.
    """
    init_kwargs = {key: values[key] for key in ('project', 'location',
        'credentials') if key in values}
    init_vertexai(**init_kwargs)
    return None
| null |
_load_folder_tree
|
cli, field_content = self._create_rspace_client()
if self.global_id:
docs_in_folder = cli.list_folder_tree(folder_id=self.global_id[2:],
typesToInclude=['document'])
doc_ids: List[int] = [d['id'] for d in docs_in_folder['records']]
for doc_id in doc_ids:
yield self._get_doc(cli, field_content, doc_id)
|
def _load_folder_tree(self) ->Iterator[Document]:
    """Lazily yield a Document for every document record in the folder tree.

    Yields nothing when ``self.global_id`` is unset. The first two
    characters of the global id are stripped before use (presumably a type
    prefix such as "FL" — TODO confirm against the RSpace API).
    """
    cli, field_content = self._create_rspace_client()
    if self.global_id:
        docs_in_folder = cli.list_folder_tree(folder_id=self.global_id[2:],
            typesToInclude=['document'])
        doc_ids: List[int] = [d['id'] for d in docs_in_folder['records']]
        for doc_id in doc_ids:
            yield self._get_doc(cli, field_content, doc_id)
| null |
clear
|
"""Clear the graph."""
self._graph.clear()
|
def clear(self) ->None:
    """Clear the graph by delegating to the underlying graph object's ``clear``."""
    self._graph.clear()
|
Clear the graph.
|
test_collect_runs
|
llm = FakeListLLM(responses=['hello'])
with collect_runs() as cb:
llm.predict('hi')
assert cb.traced_runs
assert len(cb.traced_runs) == 1
assert isinstance(cb.traced_runs[0].id, uuid.UUID)
assert cb.traced_runs[0].inputs == {'prompts': ['hi']}
|
def test_collect_runs() ->None:
    """Trace a single FakeListLLM call and check exactly one run is collected."""
    llm = FakeListLLM(responses=['hello'])
    with collect_runs() as cb:
        llm.predict('hi')
        # Assertions run inside the context so traced_runs is populated.
        assert cb.traced_runs
        assert len(cb.traced_runs) == 1
        assert isinstance(cb.traced_runs[0].id, uuid.UUID)
        assert cb.traced_runs[0].inputs == {'prompts': ['hi']}
| null |
change_directory
|
"""Change the working directory to the right folder."""
origin = Path().absolute()
try:
os.chdir(dir)
yield
finally:
os.chdir(origin)
|
@contextmanager
def change_directory(dir: Path) ->Iterator:
    """Change the working directory to the right folder.

    Temporarily switches the process working directory to ``dir`` and
    restores the previous one on exit, even if the body raises.
    """
    previous = Path().absolute()
    os.chdir(dir)
    try:
        yield
    finally:
        os.chdir(previous)
|
Change the working directory to the right folder.
|
import_tiktoken
|
"""Import tiktoken for counting tokens for OpenAI models."""
try:
import tiktoken
except ImportError:
raise ImportError(
'To use the ChatOpenAI model with Infino callback manager, you need to have the `tiktoken` python package installed.Please install it with `pip install tiktoken`'
)
return tiktoken
|
def import_tiktoken() ->Any:
    """Import tiktoken for counting tokens for OpenAI models.

    Raises:
        ImportError: If the ``tiktoken`` package is not installed.
    """
    try:
        import tiktoken
    except ImportError:
        raise ImportError(
            'To use the ChatOpenAI model with Infino callback manager, you need to have the `tiktoken` python package installed.Please install it with `pip install tiktoken`'
            )
    return tiktoken
|
Import tiktoken for counting tokens for OpenAI models.
|
test_unstructured_pdf_loader_paged_mode
|
"""Test unstructured loader with various modes."""
file_path = Path(__file__).parent.parent / 'examples/layout-parser-paper.pdf'
loader = UnstructuredPDFLoader(str(file_path), mode='paged')
docs = loader.load()
assert len(docs) == 16
|
def test_unstructured_pdf_loader_paged_mode() ->None:
    """Test unstructured loader with various modes."""
    file_path = Path(__file__
        ).parent.parent / 'examples/layout-parser-paper.pdf'
    loader = UnstructuredPDFLoader(str(file_path), mode='paged')
    docs = loader.load()
    # 16 documents expected — presumably one per page of the example PDF.
    assert len(docs) == 16
|
Test unstructured loader with various modes.
|
test_index_page
|
loader = ReadTheDocsLoader(PARENT_DIR / 'index_page', exclude_links_ratio=0.5)
documents = loader.load()
assert len(documents[0].page_content) == 0
|
@pytest.mark.requires('bs4')
def test_index_page() ->None:
    """An index-like page (mostly links) should load with empty page content."""
    loader = ReadTheDocsLoader(PARENT_DIR / 'index_page',
        exclude_links_ratio=0.5)
    documents = loader.load()
    assert len(documents[0].page_content) == 0
| null |
InputType
|
return List[self.bound.InputType]
|
@property
def InputType(self) ->Any:
    """The input type: a list of the bound runnable's input type."""
    return List[self.bound.InputType]
| null |
test_intermediate_output
|
joke = 'Why did the chicken cross the Mobius strip?'
llm = FakeListLLM(responses=[f'Response {i + 1}' for i in range(5)])
prompt = PromptTemplate(input_variables=['joke'], template=
'Explain this joke to me: {joke}?')
chain = SmartLLMChain(llm=llm, prompt=prompt, return_intermediate_steps=True)
result = chain(joke)
assert result['joke'] == joke
assert result['ideas'] == [f'Response {i + 1}' for i in range(3)]
assert result['critique'] == 'Response 4'
assert result['resolution'] == 'Response 5'
|
def test_intermediate_output() ->None:
    """SmartLLMChain with return_intermediate_steps exposes ideas/critique/resolution."""
    joke = 'Why did the chicken cross the Mobius strip?'
    # Five canned responses: three ideas, one critique, one resolution.
    llm = FakeListLLM(responses=[f'Response {i + 1}' for i in range(5)])
    prompt = PromptTemplate(input_variables=['joke'], template=
        'Explain this joke to me: {joke}?')
    chain = SmartLLMChain(llm=llm, prompt=prompt, return_intermediate_steps
        =True)
    result = chain(joke)
    assert result['joke'] == joke
    assert result['ideas'] == [f'Response {i + 1}' for i in range(3)]
    assert result['critique'] == 'Response 4'
    assert result['resolution'] == 'Response 5'
| null |
_call
|
"""Generate text from Arcee DALM.
Args:
prompt: Prompt to generate text from.
size: The max number of context results to retrieve.
Defaults to 3. (Can be less if filters are provided).
filters: Filters to apply to the context dataset.
"""
try:
if not self._client:
raise ValueError('Client is not initialized.')
return self._client.generate(prompt=prompt, **kwargs)
except Exception as e:
raise Exception(f'Failed to generate text: {e}') from e
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Generate text from Arcee DALM.
    Args:
        prompt: Prompt to generate text from.
        size: The max number of context results to retrieve.
        Defaults to 3. (Can be less if filters are provided).
        filters: Filters to apply to the context dataset.
    Raises:
        Exception: Wraps any underlying failure (including an
        uninitialized client) with a 'Failed to generate text' message.
    """
    # `stop` and `run_manager` are accepted for interface compatibility
    # but not forwarded to the client.
    try:
        if not self._client:
            raise ValueError('Client is not initialized.')
        return self._client.generate(prompt=prompt, **kwargs)
    except Exception as e:
        raise Exception(f'Failed to generate text: {e}') from e
|
Generate text from Arcee DALM.
Args:
prompt: Prompt to generate text from.
size: The max number of context results to retrieve.
Defaults to 3. (Can be less if filters are provided).
filters: Filters to apply to the context dataset.
|
calculate_hashes
|
"""Root validator to calculate content and metadata hash."""
content = values.get('page_content', '')
metadata = values.get('metadata', {})
forbidden_keys = 'hash_', 'content_hash', 'metadata_hash'
for key in forbidden_keys:
if key in metadata:
raise ValueError(
f'Metadata cannot contain key {key} as it is reserved for internal use.'
)
content_hash = str(_hash_string_to_uuid(content))
try:
metadata_hash = str(_hash_nested_dict_to_uuid(metadata))
except Exception as e:
raise ValueError(
f'Failed to hash metadata: {e}. Please use a dict that can be serialized using json.'
)
values['content_hash'] = content_hash
values['metadata_hash'] = metadata_hash
values['hash_'] = str(_hash_string_to_uuid(content_hash + metadata_hash))
_uid = values.get('uid', None)
if _uid is None:
values['uid'] = values['hash_']
return values
|
@root_validator(pre=True)
def calculate_hashes(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Root validator to calculate content and metadata hash.

    Computes ``content_hash`` from ``page_content``, ``metadata_hash`` from
    ``metadata``, and ``hash_`` from their concatenation; ``uid`` defaults
    to ``hash_`` when not supplied.

    Raises:
        ValueError: If metadata contains a reserved key or cannot be hashed.
    """
    content = values.get('page_content', '')
    metadata = values.get('metadata', {})
    # These keys are derived below; user-supplied values would be clobbered.
    forbidden_keys = 'hash_', 'content_hash', 'metadata_hash'
    for key in forbidden_keys:
        if key in metadata:
            raise ValueError(
                f'Metadata cannot contain key {key} as it is reserved for internal use.'
                )
    content_hash = str(_hash_string_to_uuid(content))
    try:
        metadata_hash = str(_hash_nested_dict_to_uuid(metadata))
    except Exception as e:
        # Chain the cause so the underlying hashing failure stays visible.
        raise ValueError(
            f'Failed to hash metadata: {e}. Please use a dict that can be serialized using json.'
            ) from e
    values['content_hash'] = content_hash
    values['metadata_hash'] = metadata_hash
    values['hash_'] = str(_hash_string_to_uuid(content_hash + metadata_hash))
    _uid = values.get('uid', None)
    if _uid is None:
        values['uid'] = values['hash_']
    return values
|
Root validator to calculate content and metadata hash.
|
test_boolean_output_parser_parse
|
parser = BooleanOutputParser()
result = parser.parse_folder('YES')
assert result is True
result = parser.parse_folder('NO')
assert result is False
result = parser.parse_folder('yes')
assert result is True
result = parser.parse_folder('no')
assert result is False
try:
parser.parse_folder('INVALID')
assert False, 'Should have raised ValueError'
except ValueError:
pass
|
def test_boolean_output_parser_parse() ->None:
    """BooleanOutputParser maps YES/NO (case-insensitive) to True/False."""
    parser = BooleanOutputParser()
    # NOTE(review): `parse_folder` is an unusual method name for an output
    # parser — confirm it is intended (vs. the conventional `parse`).
    result = parser.parse_folder('YES')
    assert result is True
    result = parser.parse_folder('NO')
    assert result is False
    result = parser.parse_folder('yes')
    assert result is True
    result = parser.parse_folder('no')
    assert result is False
    # Anything other than a yes/no token must raise.
    try:
        parser.parse_folder('INVALID')
        assert False, 'Should have raised ValueError'
    except ValueError:
        pass
| null |
create_extraction_chain_pydantic
|
"""Creates a chain that extracts information from a passage.
Args:
pydantic_schemas: The schema of the entities to extract.
llm: The language model to use.
system_message: The system message to use for extraction.
Returns:
A runnable that extracts information from a passage.
"""
if not isinstance(pydantic_schemas, list):
pydantic_schemas = [pydantic_schemas]
prompt = ChatPromptTemplate.from_messages([('system', system_message), (
'user', '{input}')])
functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
tools = [{'type': 'function', 'function': d} for d in functions]
model = llm.bind(tools=tools)
chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
return chain
|
def create_extraction_chain_pydantic(pydantic_schemas: Union[List[Type[
    BaseModel]], Type[BaseModel]], llm: BaseLanguageModel, system_message:
    str=_EXTRACTION_TEMPLATE) ->Runnable:
    """Creates a chain that extracts information from a passage.
    Args:
        pydantic_schemas: The schema of the entities to extract.
        llm: The language model to use.
        system_message: The system message to use for extraction.
    Returns:
        A runnable that extracts information from a passage.
    """
    # Accept a single schema as a convenience; normalize to a list.
    if not isinstance(pydantic_schemas, list):
        pydantic_schemas = [pydantic_schemas]
    prompt = ChatPromptTemplate.from_messages([('system', system_message),
        ('user', '{input}')])
    # Expose each schema as an OpenAI "function"-style tool definition.
    functions = [convert_pydantic_to_openai_function(p) for p in
        pydantic_schemas]
    tools = [{'type': 'function', 'function': d} for d in functions]
    model = llm.bind(tools=tools)
    # Parse tool calls straight back into the pydantic models.
    chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
    return chain
|
Creates a chain that extracts information from a passage.
Args:
pydantic_schemas: The schema of the entities to extract.
llm: The language model to use.
system_message: The system message to use for extraction.
Returns:
A runnable that extracts information from a passage.
|
test_huggingface_summarization
|
"""Test valid call to HuggingFace summarization model."""
llm = HuggingFaceHub(repo_id='facebook/bart-large-cnn')
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_huggingface_summarization() ->None:
    """Test valid call to HuggingFace summarization model."""
    # Network-dependent: requires HuggingFace Hub access and credentials.
    llm = HuggingFaceHub(repo_id='facebook/bart-large-cnn')
    output = llm('Say foo:')
    assert isinstance(output, str)
|
Test valid call to HuggingFace summarization model.
|
test_opensearch_serverless_with_scripting_search_indexing_throws_error
|
"""Test to validate indexing using Serverless without Approximate Search."""
import boto3
from opensearchpy import AWSV4SignerAuth
region = 'test-region'
service = 'aoss'
credentials = boto3.Session().get_credentials()
auth = AWSV4SignerAuth(credentials, region, service)
with pytest.raises(ValueError):
OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL, is_appx_search=False,
http_auth=auth)
|
def test_opensearch_serverless_with_scripting_search_indexing_throws_error(
    ) ->None:
    """Test to validate indexing using Serverless without Approximate Search."""
    import boto3
    from opensearchpy import AWSV4SignerAuth
    # 'aoss' (serverless) signing; requesting script scoring
    # (is_appx_search=False) against serverless must raise.
    region = 'test-region'
    service = 'aoss'
    credentials = boto3.Session().get_credentials()
    auth = AWSV4SignerAuth(credentials, region, service)
    with pytest.raises(ValueError):
        OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
            opensearch_url=DEFAULT_OPENSEARCH_URL, is_appx_search=False,
            http_auth=auth)
|
Test to validate indexing using Serverless without Approximate Search.
|
on_feedback
|
self.sum += value
self.queue.append(value)
self.i += 1
if len(self.queue) > self.window_size:
old_val = self.queue.popleft()
self.sum -= old_val
if self.step > 0 and self.i % self.step == 0:
self.history.append({'step': self.i, 'score': self.sum / len(self.queue)})
|
def on_feedback(self, value: float) ->None:
    """Record one feedback score in the sliding window.

    Maintains a running sum over at most ``window_size`` recent values and,
    every ``step`` feedbacks, appends the current window average to
    ``self.history``.
    """
    self.queue.append(value)
    self.sum += value
    self.i += 1
    # Evict the oldest value once the window is over capacity.
    if len(self.queue) > self.window_size:
        self.sum -= self.queue.popleft()
    # Snapshot the rolling average every `step` observations.
    if self.step > 0 and self.i % self.step == 0:
        self.history.append({'step': self.i, 'score': self.sum / len(
            self.queue)})
| null |
get_current_page
|
"""
Get the current page of the browser.
Args:
browser: The browser to get the current page from.
Returns:
SyncPage: The current page.
"""
if not browser.contexts:
context = browser.new_context()
return context.new_page()
context = browser.contexts[0]
if not context.pages:
return context.new_page()
return context.pages[-1]
|
def get_current_page(browser: SyncBrowser) ->SyncPage:
    """
    Get the current page of the browser.
    Args:
        browser: The browser to get the current page from.
    Returns:
        SyncPage: The current page.
    """
    contexts = browser.contexts
    if not contexts:
        # No context yet: create one together with a fresh page.
        return browser.new_context().new_page()
    active_context = contexts[0]
    pages = active_context.pages
    # Reuse the most recently opened page, or open one if none exist.
    return pages[-1] if pages else active_context.new_page()
|
Get the current page of the browser.
Args:
browser: The browser to get the current page from.
Returns:
SyncPage: The current page.
|
test_local_sitemap
|
"""Test sitemap loader."""
file_path = Path(__file__).parent.parent / 'examples/sitemap.xml'
loader = SitemapLoader(str(file_path), is_local=True)
documents = loader.load()
assert len(documents) > 1
assert '🦜️🔗' in documents[0].page_content
|
def test_local_sitemap() ->None:
    """Test sitemap loader."""
    file_path = Path(__file__).parent.parent / 'examples/sitemap.xml'
    # is_local=True reads the sitemap from disk instead of fetching via HTTP.
    loader = SitemapLoader(str(file_path), is_local=True)
    documents = loader.load()
    assert len(documents) > 1
    assert '🦜️🔗' in documents[0].page_content
|
Test sitemap loader.
|
from_client_params
|
"""Initialize Typesense directly from client parameters.
Example:
.. code-block:: python
from langchain_community.embedding.openai import OpenAIEmbeddings
from langchain_community.vectorstores import Typesense
# Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY".
vectorstore = Typesense(
OpenAIEmbeddings(),
host="localhost",
port="8108",
protocol="http",
typesense_collection_name="langchain-memory",
)
"""
try:
from typesense import Client
except ImportError:
raise ValueError(
'Could not import typesense python package. Please install it with `pip install typesense`.'
)
node = {'host': host, 'port': str(port), 'protocol': protocol}
typesense_api_key = typesense_api_key or get_from_env('typesense_api_key',
'TYPESENSE_API_KEY')
client_config = {'nodes': [node], 'api_key': typesense_api_key,
'connection_timeout_seconds': connection_timeout_seconds}
return cls(Client(client_config), embedding, **kwargs)
|
@classmethod
def from_client_params(cls, embedding: Embeddings, *, host: str='localhost',
    port: Union[str, int]='8108', protocol: str='http', typesense_api_key:
    Optional[str]=None, connection_timeout_seconds: int=2, **kwargs: Any
    ) ->Typesense:
    """Initialize Typesense directly from client parameters.

    Args:
        embedding: Embedding function for the vectorstore.
        host: Typesense server host. Defaults to "localhost".
        port: Typesense server port. Defaults to "8108".
        protocol: "http" or "https". Defaults to "http".
        typesense_api_key: API key; falls back to the TYPESENSE_API_KEY
            environment variable when not given.
        connection_timeout_seconds: Client connection timeout in seconds.

    Raises:
        ValueError: If the `typesense` package is not installed.

    Example:
        .. code-block:: python
            from langchain_community.embedding.openai import OpenAIEmbeddings
            from langchain_community.vectorstores import Typesense
            # Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY".
            vectorstore = Typesense(
                OpenAIEmbeddings(),
                host="localhost",
                port="8108",
                protocol="http",
                typesense_collection_name="langchain-memory",
            )
    """
    try:
        from typesense import Client
    except ImportError:
        raise ValueError(
            'Could not import typesense python package. Please install it with `pip install typesense`.'
            )
    node = {'host': host, 'port': str(port), 'protocol': protocol}
    typesense_api_key = typesense_api_key or get_from_env('typesense_api_key',
        'TYPESENSE_API_KEY')
    client_config = {'nodes': [node], 'api_key': typesense_api_key,
        'connection_timeout_seconds': connection_timeout_seconds}
    return cls(Client(client_config), embedding, **kwargs)
|
Initialize Typesense directly from client parameters.
Example:
.. code-block:: python
from langchain_community.embedding.openai import OpenAIEmbeddings
from langchain_community.vectorstores import Typesense
# Pass in typesense_api_key as kwarg or set env var "TYPESENSE_API_KEY".
vectorstore = Typesense(
OpenAIEmbeddings(),
host="localhost",
port="8108",
protocol="http",
typesense_collection_name="langchain-memory",
)
|
_import_huggingface_hub
|
from langchain_community.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
|
def _import_huggingface_hub() ->Any:
    """Lazily import and return the ``HuggingFaceHub`` LLM class."""
    from langchain_community.llms.huggingface_hub import HuggingFaceHub
    return HuggingFaceHub
| null |
xor_args
|
"""Validate specified keyword args are mutually exclusive."""
def decorator(func: Callable) ->Callable:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) ->Any:
"""Validate exactly one arg in each group is not None."""
counts = [sum(1 for arg in arg_group if kwargs.get(arg) is not None
) for arg_group in arg_groups]
invalid_groups = [i for i, count in enumerate(counts) if count != 1]
if invalid_groups:
invalid_group_names = [', '.join(arg_groups[i]) for i in
invalid_groups]
raise ValueError(
f"Exactly one argument in each of the following groups must be defined: {', '.join(invalid_group_names)}"
)
return func(*args, **kwargs)
return wrapper
return decorator
|
def xor_args(*arg_groups: Tuple[str, ...]) ->Callable:
    """Validate specified keyword args are mutually exclusive.

    Each group in ``arg_groups`` must have exactly one of its named keyword
    arguments passed with a non-None value; otherwise the wrapped call
    raises ``ValueError``. Positional arguments are not inspected.
    """

    def decorator(func: Callable) ->Callable:

        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) ->Any:
            """Validate exactly one arg in each group is not None."""
            bad_names = []
            for group in arg_groups:
                provided = [name for name in group if kwargs.get(name) is not
                    None]
                if len(provided) != 1:
                    bad_names.append(', '.join(group))
            if bad_names:
                raise ValueError(
                    f"Exactly one argument in each of the following groups must be defined: {', '.join(bad_names)}"
                    )
            return func(*args, **kwargs)
        return wrapper
    return decorator
|
Validate specified keyword args are mutually exclusive.
|
test_raw_features_underscored
|
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
model=MockEncoder())
str1 = 'this is a long string'
str1_underscored = str1.replace(' ', '_')
encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1))
ctx_str = 'this is a long context'
ctx_str_underscored = ctx_str.replace(' ', '_')
encoded_ctx_str = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str))
named_actions = {'action': [str1]}
context = {'context': ctx_str}
expected_no_embed = f"""shared |context {ctx_str_underscored}
|action {str1_underscored} """
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected_no_embed
named_actions = {'action': rl_chain.Embed([str1])}
context = {'context': rl_chain.Embed(ctx_str)}
expected_embed = f"""shared |context {encoded_ctx_str}
|action {encoded_str1} """
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected_embed
named_actions = {'action': rl_chain.EmbedAndKeep([str1])}
context = {'context': rl_chain.EmbedAndKeep(ctx_str)}
expected_embed_and_keep = f"""shared |context {ctx_str_underscored + ' ' + encoded_ctx_str}
|action {str1_underscored + ' ' + encoded_str1} """
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected_embed_and_keep
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_raw_features_underscored() ->None:
    """Check VW example formatting for raw, Embed, and EmbedAndKeep features.

    Spaces inside raw feature values must be replaced with underscores so
    each value remains a single VW token; embedded values are rendered as
    stringified embeddings instead.
    """
    feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
        False, model=MockEncoder())
    str1 = 'this is a long string'
    str1_underscored = str1.replace(' ', '_')
    encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1))
    ctx_str = 'this is a long context'
    ctx_str_underscored = ctx_str.replace(' ', '_')
    encoded_ctx_str = rl_chain.stringify_embedding(list(encoded_keyword +
        ctx_str))
    # Case 1: raw features -> underscored tokens only.
    named_actions = {'action': [str1]}
    context = {'context': ctx_str}
    expected_no_embed = (
        f'shared |context {ctx_str_underscored} \n|action {str1_underscored} ')
    event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
        named_actions, based_on=context)
    vw_ex_str = feature_embedder.format(event)
    assert vw_ex_str == expected_no_embed
    # Case 2: Embed() -> only the encoded representation appears.
    named_actions = {'action': rl_chain.Embed([str1])}
    context = {'context': rl_chain.Embed(ctx_str)}
    expected_embed = (
        f'shared |context {encoded_ctx_str} \n|action {encoded_str1} ')
    event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
        named_actions, based_on=context)
    vw_ex_str = feature_embedder.format(event)
    assert vw_ex_str == expected_embed
    # Case 3: EmbedAndKeep() -> underscored raw value followed by encoding.
    named_actions = {'action': rl_chain.EmbedAndKeep([str1])}
    context = {'context': rl_chain.EmbedAndKeep(ctx_str)}
    expected_embed_and_keep = f"""shared |context {ctx_str_underscored + ' ' + encoded_ctx_str}
|action {str1_underscored + ' ' + encoded_str1} """
    event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
        named_actions, based_on=context)
    vw_ex_str = feature_embedder.format(event)
    assert vw_ex_str == expected_embed_and_keep
| null |
_yield_paths
|
"""Yield paths that match the requested pattern."""
if self.path.is_file():
yield self.path
return
paths = self.path.glob(self.glob)
for path in paths:
if self.exclude:
if any(path.match(glob) for glob in self.exclude):
continue
if path.is_file():
if self.suffixes and path.suffix not in self.suffixes:
continue
yield path
|
def _yield_paths(self) ->Iterable[Path]:
"""Yield paths that match the requested pattern."""
if self.path.is_file():
yield self.path
return
paths = self.path.glob(self.glob)
for path in paths:
if self.exclude:
if any(path.match(glob) for glob in self.exclude):
continue
if path.is_file():
if self.suffixes and path.suffix not in self.suffixes:
continue
yield path
|
Yield paths that match the requested pattern.
|
__init__
|
"""Initialize with file path."""
if isinstance(file, collections.abc.Sequence):
validate_unstructured_version(min_unstructured_version='0.6.3')
if file:
validate_unstructured_version(min_unstructured_version='0.6.2')
self.url = url
self.api_key = api_key
super().__init__(file=file, mode=mode, **unstructured_kwargs)
|
def __init__(self, file: Union[IO, Sequence[IO]], mode: str='single', url:
    str='https://api.unstructured.io/general/v0/general', api_key: str='',
    **unstructured_kwargs: Any):
    """Initialize with file path.

    Args:
        file: A file-like object, or a sequence of them, to partition via
            the hosted Unstructured API.
        mode: Partitioning mode forwarded to the base loader.
        url: Endpoint of the Unstructured API.
        api_key: API key for the hosted service (empty string for none).
        **unstructured_kwargs: Extra options passed through to the API.
    """
    # Sequence input and single-file API input each require a minimum
    # version of the `unstructured` package.
    if isinstance(file, collections.abc.Sequence):
        validate_unstructured_version(min_unstructured_version='0.6.3')
    if file:
        validate_unstructured_version(min_unstructured_version='0.6.2')
    self.url = url
    self.api_key = api_key
    super().__init__(file=file, mode=mode, **unstructured_kwargs)
|
Initialize with file path.
|
test_multiple_input_keys
|
chain = load_qa_with_sources_chain(FakeLLM(), chain_type='stuff')
assert chain.input_keys == ['input_documents', 'question']
|
def test_multiple_input_keys() ->None:
    """A 'stuff' QA-with-sources chain exposes documents and question inputs."""
    chain = load_qa_with_sources_chain(FakeLLM(), chain_type='stuff')
    assert chain.input_keys == ['input_documents', 'question']
| null |
__init__
|
self._field = field
self._value: Any = None
self._operator: RedisFilterOperator = RedisFilterOperator.EQ
|
def __init__(self, field: str):
    """Store the field name and the default (empty) filter state.

    Args:
        field: Name of the Redis index field this filter targets.
    """
    self._field = field
    # No comparison value yet; operator defaults to equality until a
    # comparison method sets it.
    self._value: Any = None
    self._operator: RedisFilterOperator = RedisFilterOperator.EQ
| null |
setup_class
|
if not os.getenv('OPENAI_API_KEY'):
raise ValueError('OPENAI_API_KEY environment variable is not set')
|
@classmethod
def setup_class(cls) ->None:
    """Fail fast when the OPENAI_API_KEY environment variable is absent/empty."""
    api_key = os.getenv('OPENAI_API_KEY')
    if not api_key:
        raise ValueError('OPENAI_API_KEY environment variable is not set')
| null |
get_ernie_output_parser
|
"""Get the appropriate function output parser given the user functions.
Args:
functions: Sequence where element is a dictionary, a pydantic.BaseModel class,
or a Python function. If a dictionary is passed in, it is assumed to
already be a valid Ernie function.
Returns:
A PydanticOutputFunctionsParser if functions are Pydantic classes, otherwise
a JsonOutputFunctionsParser. If there's only one function and it is
not a Pydantic class, then the output parser will automatically extract
only the function arguments and not the function name.
"""
function_names = [convert_to_ernie_function(f)['name'] for f in functions]
if isinstance(functions[0], type) and issubclass(functions[0], BaseModel):
if len(functions) > 1:
pydantic_schema: Union[Dict, Type[BaseModel]] = {name: fn for name,
fn in zip(function_names, functions)}
else:
pydantic_schema = functions[0]
output_parser: Union[BaseOutputParser, BaseGenerationOutputParser
] = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
else:
output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1)
return output_parser
|
def get_ernie_output_parser(functions: Sequence[Union[Dict[str, Any], Type[
    BaseModel], Callable]]) ->Union[BaseOutputParser,
    BaseGenerationOutputParser]:
    """Get the appropriate function output parser given the user functions.
    Args:
        functions: Sequence where element is a dictionary, a pydantic.BaseModel class,
            or a Python function. If a dictionary is passed in, it is assumed to
            already be a valid Ernie function.
    Returns:
        A PydanticOutputFunctionsParser if functions are Pydantic classes, otherwise
        a JsonOutputFunctionsParser. If there's only one function and it is
        not a Pydantic class, then the output parser will automatically extract
        only the function arguments and not the function name.
    """
    function_names = [convert_to_ernie_function(f)['name'] for f in functions]
    # NOTE(review): only the first element's type selects the branch; a mixed
    # sequence (pydantic + callables) follows whatever the first element is.
    if isinstance(functions[0], type) and issubclass(functions[0], BaseModel):
        if len(functions) > 1:
            # Map each function name to its model so the parser can pick the
            # schema matching the function the LLM chose to call.
            pydantic_schema: Union[Dict, Type[BaseModel]] = {name: fn for
                name, fn in zip(function_names, functions)}
        else:
            pydantic_schema = functions[0]
        output_parser: Union[BaseOutputParser, BaseGenerationOutputParser
            ] = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
    else:
        # With a single function the name is unambiguous: return only args.
        output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1
            )
    return output_parser
|
Get the appropriate function output parser given the user functions.
Args:
functions: Sequence where element is a dictionary, a pydantic.BaseModel class,
or a Python function. If a dictionary is passed in, it is assumed to
already be a valid Ernie function.
Returns:
A PydanticOutputFunctionsParser if functions are Pydantic classes, otherwise
a JsonOutputFunctionsParser. If there's only one function and it is
not a Pydantic class, then the output parser will automatically extract
only the function arguments and not the function name.
|
__init__
|
self.message = message
self.score = score
super().__init__(self.message)
|
def __init__(self, message: str='Prompt injection attack detected', score:
    float=1.0):
    """Create the exception.

    Args:
        message: Human-readable description of the detection.
        score: Detector confidence associated with the detection.
    """
    self.message = message
    self.score = score
    super().__init__(self.message)
| null |
similarity_search
|
docs_and_scores = self.similarity_search_with_score(query, k, radius,
epsilon, timeout, grpc_metadata)
docs = []
for doc, _ in docs_and_scores:
docs.append(doc)
return docs
|
def similarity_search(self, query: str, k: int=4, radius: float=-1.0,
    epsilon: float=0.01, timeout: int=3000000000, grpc_metadata: Optional[
    Any]=None, **kwargs: Any) ->List[Document]:
    """Return the documents most similar to ``query``.

    Delegates to ``similarity_search_with_score`` with the same search
    parameters and discards the scores.
    """
    scored = self.similarity_search_with_score(query, k, radius, epsilon,
        timeout, grpc_metadata)
    return [document for document, _score in scored]
| null |
_parse_python_function_docstring
|
"""Parse the function and argument descriptions from the docstring of a function.
Assumes the function docstring follows Google Python style guide.
"""
docstring = inspect.getdoc(function)
if docstring:
docstring_blocks = docstring.split('\n\n')
descriptors = []
args_block = None
past_descriptors = False
for block in docstring_blocks:
if block.startswith('Args:'):
args_block = block
break
elif block.startswith('Returns:') or block.startswith('Example:'):
past_descriptors = True
elif not past_descriptors:
descriptors.append(block)
else:
continue
description = ' '.join(descriptors)
else:
description = ''
args_block = None
arg_descriptions = {}
if args_block:
arg = None
for line in args_block.split('\n')[1:]:
if ':' in line:
arg, desc = line.split(':')
arg_descriptions[arg.strip()] = desc.strip()
elif arg:
arg_descriptions[arg.strip()] += ' ' + line.strip()
return description, arg_descriptions
|
def _parse_python_function_docstring(function: Callable) ->Tuple[str, dict]:
"""Parse the function and argument descriptions from the docstring of a function.
Assumes the function docstring follows Google Python style guide.
"""
docstring = inspect.getdoc(function)
if docstring:
docstring_blocks = docstring.split('\n\n')
descriptors = []
args_block = None
past_descriptors = False
for block in docstring_blocks:
if block.startswith('Args:'):
args_block = block
break
elif block.startswith('Returns:') or block.startswith('Example:'):
past_descriptors = True
elif not past_descriptors:
descriptors.append(block)
else:
continue
description = ' '.join(descriptors)
else:
description = ''
args_block = None
arg_descriptions = {}
if args_block:
arg = None
for line in args_block.split('\n')[1:]:
if ':' in line:
arg, desc = line.split(':')
arg_descriptions[arg.strip()] = desc.strip()
elif arg:
arg_descriptions[arg.strip()] += ' ' + line.strip()
return description, arg_descriptions
|
Parse the function and argument descriptions from the docstring of a function.
Assumes the function docstring follows Google Python style guide.
|
__ne__
|
"""Create a RedisText inequality filter expression.
Args:
other (str): The text value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisText
>>> filter = RedisText("job") != "engineer"
"""
self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
|
@check_operator_misuse
def __ne__(self, other: str) ->'RedisFilterExpression':
    """Create a RedisText inequality filter expression.
    Args:
        other (str): The text value to filter on.
    Example:
        >>> from langchain_community.vectorstores.redis import RedisText
        >>> filter = RedisText("job") != "engineer"
    """
    # Record the NE operator and operand on this field, then render the
    # field into a standalone filter expression object.
    self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.NE)
    return RedisFilterExpression(str(self))
|
Create a RedisText inequality filter expression.
Args:
other (str): The text value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisText
>>> filter = RedisText("job") != "engineer"
|
__repr_args__
|
return [(k, v) for k, v in super().__repr_args__() if k not in self.
__fields__ or try_neq_default(v, k, self)]
|
def __repr_args__(self) ->Any:
    """Limit the repr to args that differ from their declared defaults.

    Non-field entries are always kept; declared fields are included only
    when ``try_neq_default`` reports a non-default value.
    """
    return [(k, v) for k, v in super().__repr_args__() if k not in self.
        __fields__ or try_neq_default(v, k, self)]
| null |
embeddings
|
return self._embedding
|
@property
def embeddings(self) ->Embeddings:
    """Embedding function backing this store."""
    return self._embedding
| null |
add_recognizer
|
"""Add a recognizer to the analyzer
Args:
recognizer: Recognizer to add to the analyzer.
"""
self._analyzer.registry.add_recognizer(recognizer)
self.analyzed_fields.extend(recognizer.supported_entities)
|
def add_recognizer(self, recognizer: EntityRecognizer) ->None:
    """Add a recognizer to the analyzer
    Args:
        recognizer: Recognizer to add to the analyzer.
    """
    self._analyzer.registry.add_recognizer(recognizer)
    # NOTE(review): entities are appended without de-duplication, so adding a
    # recognizer for an already-tracked entity lists it twice.
    self.analyzed_fields.extend(recognizer.supported_entities)
|
Add a recognizer to the analyzer
Args:
recognizer: Recognizer to add to the analyzer.
|
_llm_type
|
"""Return type of NVIDIA AI Foundation Model Interface."""
return 'chat-nvidia-ai-playground'
|
@property
def _llm_type(self) ->str:
    """Return type of NVIDIA AI Foundation Model Interface."""
    # Identifier LangChain uses to tag this model in callbacks/serialization.
    return 'chat-nvidia-ai-playground'
|
Return type of NVIDIA AI Foundation Model Interface.
|
on_chain_start
|
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({'action': 'on_chain_start'})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = inputs['input']
if isinstance(chain_input, str):
input_resp = deepcopy(resp)
input_resp['input'] = chain_input
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.run.log(input_resp)
elif isinstance(chain_input, list):
for inp in chain_input:
input_resp = deepcopy(resp)
input_resp.update(inp)
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.run.log(input_resp)
else:
raise ValueError('Unexpected data format provided!')
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
    **kwargs: Any) ->None:
    """Run when chain starts running.

    Increments the step/start counters, builds a flat record describing the
    event, and appends one record per input (streaming each record to the
    wandb run when ``stream_logs`` is enabled).

    Raises:
        ValueError: If ``inputs['input']`` is neither a string nor a list.
    """
    self.step += 1
    self.chain_starts += 1
    self.starts += 1
    resp = self._init_resp()
    resp.update({'action': 'on_chain_start'})
    resp.update(flatten_dict(serialized))
    resp.update(self.get_custom_callback_meta())
    chain_input = inputs['input']
    if isinstance(chain_input, str):
        # Single input: one record carrying the raw string.
        input_resp = deepcopy(resp)
        input_resp['input'] = chain_input
        self.on_chain_start_records.append(input_resp)
        self.action_records.append(input_resp)
        if self.stream_logs:
            self.run.log(input_resp)
    elif isinstance(chain_input, list):
        # Batched inputs: one record per element, merged into the base resp
        # (each element is presumably a dict — TODO confirm with callers).
        for inp in chain_input:
            input_resp = deepcopy(resp)
            input_resp.update(inp)
            self.on_chain_start_records.append(input_resp)
            self.action_records.append(input_resp)
            if self.stream_logs:
                self.run.log(input_resp)
    else:
        raise ValueError('Unexpected data format provided!')
|
Run when chain starts running.
|
lc_secrets
|
return {'another_secret': 'ANOTHER_SECRET'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map the secret constructor field to its environment variable name."""
    return {'another_secret': 'ANOTHER_SECRET'}
| null |
log_thought
|
if run_manager:
colors = {ThoughtValidity.VALID_FINAL: 'green', ThoughtValidity.
VALID_INTERMEDIATE: 'yellow', ThoughtValidity.INVALID: 'red'}
text = indent(f'Thought: {thought.text}\n', prefix=' ' * level)
run_manager.on_text(text=text, color=colors[thought.validity], verbose=
self.verbose)
|
def log_thought(self, thought: Thought, level: int, run_manager: Optional[
    CallbackManagerForChainRun]=None) ->None:
    """Emit a colorized, indented trace line for a thought.

    No-op when no run manager is supplied. Color encodes validity
    (green=final, yellow=intermediate, red=invalid); indentation encodes
    the thought's depth ``level``.
    """
    if run_manager:
        colors = {ThoughtValidity.VALID_FINAL: 'green', ThoughtValidity.
            VALID_INTERMEDIATE: 'yellow', ThoughtValidity.INVALID: 'red'}
        text = indent(f'Thought: {thought.text}\n', prefix=' ' * level)
        run_manager.on_text(text=text, color=colors[thought.validity],
            verbose=self.verbose)
| null |
_prepare_range_query
|
try:
from redis.commands.search.query import Query
except ImportError as e:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
) from e
return_fields = return_fields or []
vector_key = self._schema.content_vector_key
base_query = f'@{vector_key}:[VECTOR_RANGE $distance_threshold $vector]'
if filter:
base_query = '(' + base_query + ' ' + str(filter) + ')'
query_string = base_query + '=>{$yield_distance_as: distance}'
return Query(query_string).return_fields(*return_fields).sort_by('distance'
).paging(0, k).dialect(2)
|
def _prepare_range_query(self, k: int, filter: Optional[
    RedisFilterExpression]=None, return_fields: Optional[List[str]]=None
    ) ->'Query':
    """Build a Redis VECTOR_RANGE search query.

    Args:
        k: Maximum number of results to page in.
        filter: Optional filter expression combined (ANDed) with the range
            clause.
        return_fields: Fields to return for each hit; defaults to none.

    Returns:
        A redis-py ``Query`` sorted by the yielded ``distance`` field.

    Raises:
        ImportError: If the ``redis`` package is not installed.
    """
    try:
        from redis.commands.search.query import Query
    except ImportError as e:
        raise ImportError(
            'Could not import redis python package. Please install it with `pip install redis`.'
            ) from e
    return_fields = return_fields or []
    vector_key = self._schema.content_vector_key
    base_query = f'@{vector_key}:[VECTOR_RANGE $distance_threshold $vector]'
    if filter:
        # Parenthesized intersection: range clause AND the filter expression.
        base_query = '(' + base_query + ' ' + str(filter) + ')'
    query_string = base_query + '=>{$yield_distance_as: distance}'
    return Query(query_string).return_fields(*return_fields).sort_by('distance'
        ).paging(0, k).dialect(2)
| null |
__init__
|
"""Initialize an empty store."""
self.store: Dict[str, V] = {}
|
def __init__(self) ->None:
    """Initialize an empty store."""
    # Plain dict backing store: no eviction, no persistence.
    self.store: Dict[str, V] = {}
|
Initialize an empty store.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's ``__all__`` matches the expected public API exactly."""
    assert set(__all__) == set(EXPECTED_ALL)
| null |
lookup
|
"""Look up based on prompt and llm_string."""
doc_id = self._make_id(prompt, llm_string)
item = self.collection.find_one(filter={'_id': doc_id}, projection={
'body_blob': 1})['data']['document']
if item is not None:
generations = _loads_generations(item['body_blob'])
if generations is not None:
return generations
else:
return None
else:
return None
|
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
    """Look up based on prompt and llm_string.

    Returns the cached generations for this (prompt, llm_string) pair, or
    None on a cache miss or an unparsable stored blob.
    """
    doc_id = self._make_id(prompt, llm_string)
    document = self.collection.find_one(filter={'_id': doc_id}, projection
        ={'body_blob': 1})['data']['document']
    if document is None:
        return None
    # _loads_generations itself yields None when the blob cannot be parsed,
    # which matches the original nested if/else behavior.
    return _loads_generations(document['body_blob'])
|
Look up based on prompt and llm_string.
|
run
|
"""Run query through StackExchange API and parse results."""
query_key = 'q' if self.query_type == 'all' else self.query_type
output = self.client.fetch('search/excerpts', **{query_key: query}, **self.
fetch_params)
if len(output['items']) < 1:
return f"No relevant results found for '{query}' on Stack Overflow."
questions = [item for item in output['items'] if item['item_type'] ==
'question'][:self.max_results]
answers = [item for item in output['items'] if item['item_type'] == 'answer']
results = []
for question in questions:
res_text = f"Question: {question['title']}\n{question['excerpt']}"
relevant_answers = [answer for answer in answers if answer[
'question_id'] == question['question_id']]
accepted_answers = [answer for answer in relevant_answers if answer[
'is_accepted']]
if relevant_answers:
top_answer = accepted_answers[0
] if accepted_answers else relevant_answers[0]
excerpt = html.unescape(top_answer['excerpt'])
res_text += f'\nAnswer: {excerpt}'
results.append(res_text)
return self.result_separator.join(results)
|
def run(self, query: str) ->str:
    """Run query through StackExchange API and parse results.

    Builds one text snippet per question (capped at ``max_results``), each
    followed by its best answer — the first accepted one if any, otherwise
    the first relevant one — and joins them with ``result_separator``.
    """
    query_key = 'q' if self.query_type == 'all' else self.query_type
    output = self.client.fetch('search/excerpts', **{query_key: query},
        **self.fetch_params)
    items = output['items']
    if not items:
        return f"No relevant results found for '{query}' on Stack Overflow."
    questions = [it for it in items if it['item_type'] == 'question']
    questions = questions[:self.max_results]
    answers = [it for it in items if it['item_type'] == 'answer']
    snippets = []
    for question in questions:
        parts = [f"Question: {question['title']}\n{question['excerpt']}"]
        candidates = [ans for ans in answers if ans['question_id'] ==
            question['question_id']]
        if candidates:
            accepted = [ans for ans in candidates if ans['is_accepted']]
            best = accepted[0] if accepted else candidates[0]
            # Answer excerpts arrive HTML-escaped; decode entities.
            parts.append(f"Answer: {html.unescape(best['excerpt'])}")
        snippets.append('\n'.join(parts))
    return self.result_separator.join(snippets)
|
Run query through StackExchange API and parse results.
|
test_maximal_marginal_relevance_lambda_one
|
query_embedding = np.random.random(size=5)
embedding_list = [query_embedding, query_embedding, np.zeros(5)]
expected = [0, 1]
actual = maximal_marginal_relevance(query_embedding, embedding_list,
lambda_mult=1, k=2)
assert expected == actual
|
def test_maximal_marginal_relevance_lambda_one() ->None:
    """With lambda_mult=1 (pure relevance), MMR ranks by similarity alone.

    The two copies of the query embedding are maximally relevant, so both
    are selected before the orthogonal zero vector despite being redundant.
    """
    query_embedding = np.random.random(size=5)
    embedding_list = [query_embedding, query_embedding, np.zeros(5)]
    expected = [0, 1]
    actual = maximal_marginal_relevance(query_embedding, embedding_list,
        lambda_mult=1, k=2)
    assert expected == actual
| null |
is_presign_supported
|
config_endpoint = self.__endpoint + 'config'
response = requests.get(config_endpoint, auth=self.__auth)
response.raise_for_status()
config = response.json()
return config['storage_config']['pre_sign_support']
|
def is_presign_supported(self) ->bool:
    """Ask the server's config endpoint whether pre-signed URLs are supported.

    Returns:
        The ``storage_config.pre_sign_support`` flag from the JSON response.

    Raises:
        requests.HTTPError: If the config endpoint returns an error status.
    """
    # NOTE(review): assumes self.__endpoint ends with a trailing slash —
    # confirm against the constructor.
    config_endpoint = self.__endpoint + 'config'
    response = requests.get(config_endpoint, auth=self.__auth)
    response.raise_for_status()
    config = response.json()
    return config['storage_config']['pre_sign_support']
| null |
score_response
|
ranking = self.llm_chain.predict(llm_response=llm_response, **inputs)
ranking = ranking.strip()
try:
resp = float(ranking)
return resp
except Exception as e:
raise RuntimeError(
f'The auto selection scorer did not manage to score the response, there is always the option to try again or tweak the reward prompt. Error: {e}'
)
|
def score_response(self, inputs: Dict[str, Any], llm_response: str, event:
    Event) ->float:
    """Score an LLM response by asking the grading chain for a number.

    The grading chain's prediction is stripped and parsed as a float;
    anything unparsable is surfaced as a RuntimeError.
    """
    raw = self.llm_chain.predict(llm_response=llm_response, **inputs)
    cleaned = raw.strip()
    try:
        return float(cleaned)
    except Exception as e:
        raise RuntimeError(
            f'The auto selection scorer did not manage to score the response, there is always the option to try again or tweak the reward prompt. Error: {e}'
            )
| null |
test_pai_eas_call
|
chat = PaiEasChatEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'),
eas_service_token=os.getenv('EAS_SERVICE_TOKEN'))
response = chat(messages=[HumanMessage(content='Say foo:')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_pai_eas_call() ->None:
    """Smoke test: a PAI-EAS chat endpoint answers a single human message.

    Requires EAS_SERVICE_URL and EAS_SERVICE_TOKEN in the environment.
    """
    chat = PaiEasChatEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'),
        eas_service_token=os.getenv('EAS_SERVICE_TOKEN'))
    response = chat(messages=[HumanMessage(content='Say foo:')])
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str)
| null |
validate_environment
|
"""Validate that the access token and python package exists in environment."""
token = get_from_dict_or_env(values, 'token', 'PREDICTIONGUARD_TOKEN')
try:
import predictionguard as pg
values['client'] = pg.Client(token=token)
except ImportError:
raise ImportError(
'Could not import predictionguard python package. Please install it with `pip install predictionguard`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that the access token and python package exists in environment.

    Resolves the token from ``values`` or the PREDICTIONGUARD_TOKEN env var
    and stores an instantiated client in ``values['client']``.

    Raises:
        ImportError: If ``predictionguard`` is not installed.
    """
    token = get_from_dict_or_env(values, 'token', 'PREDICTIONGUARD_TOKEN')
    try:
        import predictionguard as pg
        values['client'] = pg.Client(token=token)
    except ImportError:
        raise ImportError(
            'Could not import predictionguard python package. Please install it with `pip install predictionguard`.'
            )
    return values
|
Validate that the access token and python package exists in environment.
|
initialize_llm_chain
|
if 'llm_chain' not in values:
from langchain.chains.llm import LLMChain
values['llm_chain'] = LLMChain(llm=values.get('llm'), prompt=
PromptTemplate(template=QUERY_CHECKER, input_variables=['dialect',
'query']))
if values['llm_chain'].prompt.input_variables != ['dialect', 'query']:
raise ValueError(
"LLM chain for QueryCheckerTool must have input variables ['query', 'dialect']"
)
return values
|
@root_validator(pre=True)
def initialize_llm_chain(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Build the default query-checker LLMChain and validate its prompt.

    If no ``llm_chain`` was supplied, one is created from ``values['llm']``
    with the standard QUERY_CHECKER prompt.

    Raises:
        ValueError: If the chain's prompt does not take exactly the
            ``query`` and ``dialect`` input variables.
    """
    if 'llm_chain' not in values:
        from langchain.chains.llm import LLMChain
        values['llm_chain'] = LLMChain(llm=values.get('llm'), prompt=
            PromptTemplate(template=QUERY_CHECKER, input_variables=[
            'dialect', 'query']))
    # Compare as sets: the order of the prompt's input variables is
    # irrelevant, and the original order-sensitive list comparison rejected
    # ['query', 'dialect'] while its own error message demanded exactly that.
    if set(values['llm_chain'].prompt.input_variables) != {'dialect', 'query'}:
        raise ValueError(
            "LLM chain for QueryCheckerTool must have input variables ['query', 'dialect']"
            )
    return values
| null |
from_existing_index
|
"""
Get instance of an existing TimescaleVector store.This method will
return the instance of the store without inserting any new
embeddings
"""
service_url = cls.get_service_url(kwargs)
store = cls(service_url=service_url, collection_name=collection_name,
embedding=embedding, distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection)
return store
|
@classmethod
def from_existing_index(cls: Type[TimescaleVector], embedding: Embeddings,
    collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME,
    distance_strategy: DistanceStrategy=DEFAULT_DISTANCE_STRATEGY,
    pre_delete_collection: bool=False, **kwargs: Any) ->TimescaleVector:
    """
    Get instance of an existing TimescaleVector store.This method will
    return the instance of the store without inserting any new
    embeddings

    Args:
        embedding: Embedding function used for queries.
        collection_name: Name of the existing collection.
        distance_strategy: Distance metric used for similarity search.
        pre_delete_collection: If True, delete the collection before use.
        **kwargs: Must let ``get_service_url`` resolve a service URL
            (e.g. a ``service_url`` entry or environment fallback).
    """
    service_url = cls.get_service_url(kwargs)
    store = cls(service_url=service_url, collection_name=collection_name,
        embedding=embedding, distance_strategy=distance_strategy,
        pre_delete_collection=pre_delete_collection)
    return store
|
Get instance of an existing TimescaleVector store.This method will
return the instance of the store without inserting any new
embeddings
|
test_multiple_sessions
|
sql_history, other_history = sql_histories
sql_history.add_user_message('Hello!')
sql_history.add_ai_message('Hi there!')
sql_history.add_user_message('Whats cracking?')
assert len(sql_history.messages) == 3, 'waat'
assert sql_history.messages[0].content == 'Hello!'
assert sql_history.messages[1].content == 'Hi there!'
assert sql_history.messages[2].content == 'Whats cracking?'
other_history.add_user_message('Hellox')
assert len(other_history.messages) == 1
assert len(sql_history.messages) == 3
assert other_history.messages[0].content == 'Hellox'
assert sql_history.messages[0].content == 'Hello!'
assert sql_history.messages[1].content == 'Hi there!'
assert sql_history.messages[2].content == 'Whats cracking?'
|
def test_multiple_sessions(sql_histories: Tuple[SQLChatMessageHistory,
    SQLChatMessageHistory]) ->None:
    """Histories bound to different session ids must not leak into each other."""
    sql_history, other_history = sql_histories
    sql_history.add_user_message('Hello!')
    sql_history.add_ai_message('Hi there!')
    sql_history.add_user_message('Whats cracking?')
    assert len(sql_history.messages) == 3, 'waat'
    assert sql_history.messages[0].content == 'Hello!'
    assert sql_history.messages[1].content == 'Hi there!'
    assert sql_history.messages[2].content == 'Whats cracking?'
    # Writing to the second session must leave the first session untouched.
    other_history.add_user_message('Hellox')
    assert len(other_history.messages) == 1
    assert len(sql_history.messages) == 3
    assert other_history.messages[0].content == 'Hellox'
    assert sql_history.messages[0].content == 'Hello!'
    assert sql_history.messages[1].content == 'Hi there!'
    assert sql_history.messages[2].content == 'Whats cracking?'
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.