method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
from_file
|
"""Create a JsonSpec from a file."""
if not path.exists():
raise FileNotFoundError(f'File not found: {path}')
dict_ = json.loads(path.read_text())
return cls(dict_=dict_)
|
@classmethod
def from_file(cls, path: Path) ->JsonSpec:
    """Create a JsonSpec from a file."""
    if not path.exists():
        raise FileNotFoundError(f'File not found: {path}')
    raw_text = path.read_text()
    return cls(dict_=json.loads(raw_text))
|
Create a JsonSpec from a file.
|
test_input_variables
|
assert CREATE_ASSERTIONS_PROMPT.input_variables == ['summary']
assert CHECK_ASSERTIONS_PROMPT.input_variables == ['assertions']
assert REVISED_SUMMARY_PROMPT.input_variables == ['checked_assertions',
'summary']
assert ARE_ALL_TRUE_PROMPT.input_variables == ['checked_assertions']
|
def test_input_variables() ->None:
    """Each checker prompt exposes exactly the expected input variables."""
    cases = [(CREATE_ASSERTIONS_PROMPT, ['summary']), (
        CHECK_ASSERTIONS_PROMPT, ['assertions']), (REVISED_SUMMARY_PROMPT,
        ['checked_assertions', 'summary']), (ARE_ALL_TRUE_PROMPT, [
        'checked_assertions'])]
    for prompt, expected_variables in cases:
        assert prompt.input_variables == expected_variables
| null |
as_retriever
|
return LLMRailsRetriever(vectorstore=self, **kwargs)
|
def as_retriever(self, **kwargs: Any) ->LLMRailsRetriever:
    """Build a LLMRailsRetriever backed by this vector store."""
    retriever_kwargs = dict(kwargs)
    return LLMRailsRetriever(vectorstore=self, **retriever_kwargs)
| null |
test_invoke
|
"""Test invoke tokens from TritonTensorRTLLM."""
llm = TritonTensorRTLLM(model_name=_MODEL_NAME)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result, str)
|
@pytest.mark.skip(reason='Need a working Triton server')
def test_invoke() ->None:
    """Test invoke tokens from TritonTensorRTLLM."""
    llm = TritonTensorRTLLM(model_name=_MODEL_NAME)
    output = llm.invoke("I'm Pickle Rick", config={'tags': ['foo']})
    assert isinstance(output, str)
|
Test invoke tokens from TritonTensorRTLLM.
|
__init__
|
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
|
def __init__(self, code: str):
    """Store the source code and cache a line-by-line split of it."""
    super().__init__(code)
    split_lines: List[str] = self.code.splitlines()
    self.source_lines = split_lines
| null |
test_default_call
|
"""Test default model call."""
chat = ChatTongyi()
response = chat(messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_default_call() ->None:
    """Test default model call."""
    chat = ChatTongyi()
    reply = chat(messages=[HumanMessage(content='Hello')])
    assert isinstance(reply, BaseMessage)
    assert isinstance(reply.content, str)
|
Test default model call.
|
evaluate
|
last_thought = thoughts[-1]
clean_solution = last_thought.replace(' ', '').replace('"', '')
regex_solution = clean_solution.replace('*', '.').replace('|', '\\|')
if sudoku_solution in clean_solution:
return ThoughtValidity.VALID_FINAL
elif re.search(regex_solution, sudoku_solution):
return ThoughtValidity.VALID_INTERMEDIATE
else:
return ThoughtValidity.INVALID
|
def evaluate(self, problem_description: str, thoughts: Tuple[str, ...]=()
    ) ->ThoughtValidity:
    """Classify the latest thought against the known sudoku solution."""
    # Normalize the candidate: strip spaces and quoting artifacts.
    candidate = thoughts[-1].replace(' ', '').replace('"', '')
    # '*' wildcards become regex dots; '|' separators are escaped.
    pattern = candidate.replace('*', '.').replace('|', '\\|')
    if sudoku_solution in candidate:
        return ThoughtValidity.VALID_FINAL
    if re.search(pattern, sudoku_solution):
        return ThoughtValidity.VALID_INTERMEDIATE
    return ThoughtValidity.INVALID
| null |
from_run_and_data_type
|
"""
Create a StringRunEvaluatorChain from an evaluator and the run and dataset types.
This method provides an easy way to instantiate a StringRunEvaluatorChain, by
taking an evaluator and information about the type of run and the data.
The method supports LLM and chain runs.
Args:
evaluator (StringEvaluator): The string evaluator to use.
run_type (str): The type of run being evaluated.
Supported types are LLM and Chain.
data_type (DataType): The type of dataset used in the run.
input_key (str, optional): The key used to map the input from the run.
prediction_key (str, optional): The key used to map the prediction from the run.
reference_key (str, optional): The key used to map the reference from the dataset.
tags (List[str], optional): List of tags to attach to the evaluation chain.
Returns:
StringRunEvaluatorChain: The instantiated evaluation chain.
Raises:
ValueError: If the run type is not supported, or if the evaluator requires a
reference from the dataset but the reference key is not provided.
"""
if run_type == 'llm':
run_mapper: StringRunMapper = LLMStringRunMapper()
elif run_type == 'chain':
run_mapper = ChainStringRunMapper(input_key=input_key, prediction_key=
prediction_key)
else:
raise ValueError(
f"Unsupported run type {run_type}. Expected one of 'llm' or 'chain'.")
if reference_key is not None or data_type in (DataType.llm, DataType.chat
) or evaluator.requires_reference:
example_mapper = StringExampleMapper(reference_key=reference_key)
elif evaluator.requires_reference:
raise ValueError(
f'Evaluator {evaluator.evaluation_name} requires a reference example from the dataset. Please specify the reference key from amongst the dataset outputs keys.'
)
else:
example_mapper = None
return cls(name=evaluator.evaluation_name, run_mapper=run_mapper,
example_mapper=example_mapper, string_evaluator=evaluator, tags=tags)
|
@classmethod
def from_run_and_data_type(cls, evaluator: StringEvaluator, run_type: str,
    data_type: DataType, input_key: Optional[str]=None, prediction_key:
    Optional[str]=None, reference_key: Optional[str]=None, tags: Optional[
    List[str]]=None) ->StringRunEvaluatorChain:
    """
    Create a StringRunEvaluatorChain from an evaluator and the run and dataset types.
    This method provides an easy way to instantiate a StringRunEvaluatorChain, by
    taking an evaluator and information about the type of run and the data.
    The method supports LLM and chain runs.
    Args:
        evaluator (StringEvaluator): The string evaluator to use.
        run_type (str): The type of run being evaluated.
            Supported types are LLM and Chain.
        data_type (DataType): The type of dataset used in the run.
        input_key (str, optional): The key used to map the input from the run.
        prediction_key (str, optional): The key used to map the prediction from the run.
        reference_key (str, optional): The key used to map the reference from the dataset.
        tags (List[str], optional): List of tags to attach to the evaluation chain.
    Returns:
        StringRunEvaluatorChain: The instantiated evaluation chain.
    Raises:
        ValueError: If the run type is not supported, or if the evaluator requires a
            reference from the dataset but the reference key is not provided.
    """
    if run_type == 'llm':
        run_mapper: StringRunMapper = LLMStringRunMapper()
    elif run_type == 'chain':
        run_mapper = ChainStringRunMapper(input_key=input_key,
            prediction_key=prediction_key)
    else:
        raise ValueError(
            f"Unsupported run type {run_type}. Expected one of 'llm' or 'chain'."
            )
    # LLM/chat datasets always carry an implicit reference output; otherwise an
    # explicit reference key is required.
    # BUG FIX: `evaluator.requires_reference` was previously also part of this
    # first condition, which made the documented ValueError branch below
    # unreachable dead code.
    if reference_key is not None or data_type in (DataType.llm, DataType.chat):
        example_mapper = StringExampleMapper(reference_key=reference_key)
    elif evaluator.requires_reference:
        raise ValueError(
            f'Evaluator {evaluator.evaluation_name} requires a reference example from the dataset. Please specify the reference key from amongst the dataset outputs keys.'
            )
    else:
        example_mapper = None
    return cls(name=evaluator.evaluation_name, run_mapper=run_mapper,
        example_mapper=example_mapper, string_evaluator=evaluator, tags=tags)
|
Create a StringRunEvaluatorChain from an evaluator and the run and dataset types.
This method provides an easy way to instantiate a StringRunEvaluatorChain, by
taking an evaluator and information about the type of run and the data.
The method supports LLM and chain runs.
Args:
evaluator (StringEvaluator): The string evaluator to use.
run_type (str): The type of run being evaluated.
Supported types are LLM and Chain.
data_type (DataType): The type of dataset used in the run.
input_key (str, optional): The key used to map the input from the run.
prediction_key (str, optional): The key used to map the prediction from the run.
reference_key (str, optional): The key used to map the reference from the dataset.
tags (List[str], optional): List of tags to attach to the evaluation chain.
Returns:
StringRunEvaluatorChain: The instantiated evaluation chain.
Raises:
ValueError: If the run type is not supported, or if the evaluator requires a
reference from the dataset but the reference key is not provided.
|
OutputType
|
"""Get the input type for this runnable."""
return str
|
@property
def OutputType(self) ->Type[str]:
    """Get the output type for this runnable."""
    return str
|
Get the input type for this runnable.
|
_get_relevant_documents
|
if self.search_type == 'similarity':
docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
elif self.search_type == 'similarity_distance_threshold':
if self.search_kwargs['distance_threshold'] is None:
raise ValueError('distance_threshold must be provided for ' +
'similarity_distance_threshold retriever')
docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
elif self.search_type == 'similarity_score_threshold':
docs_and_similarities = (self.vectorstore.
similarity_search_with_relevance_scores(query, **self.search_kwargs))
docs = [doc for doc, _ in docs_and_similarities]
elif self.search_type == 'mmr':
docs = self.vectorstore.max_marginal_relevance_search(query, **self.
search_kwargs)
else:
raise ValueError(f'search_type of {self.search_type} not allowed.')
return docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Dispatch *query* to the vector-store search selected by ``self.search_type``."""
    search_type = self.search_type
    if search_type == 'similarity':
        return self.vectorstore.similarity_search(query, **self.search_kwargs)
    if search_type == 'similarity_distance_threshold':
        if self.search_kwargs['distance_threshold'] is None:
            raise ValueError('distance_threshold must be provided for ' +
                'similarity_distance_threshold retriever')
        return self.vectorstore.similarity_search(query, **self.search_kwargs)
    if search_type == 'similarity_score_threshold':
        scored_docs = self.vectorstore.similarity_search_with_relevance_scores(
            query, **self.search_kwargs)
        return [doc for doc, _ in scored_docs]
    if search_type == 'mmr':
        return self.vectorstore.max_marginal_relevance_search(query, **
            self.search_kwargs)
    raise ValueError(f'search_type of {self.search_type} not allowed.')
| null |
validate_environment
|
values['llm'] = values.get('llm') or ChatAnthropic(**values)
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
    """Ensure an ``llm`` entry exists, defaulting to a ChatAnthropic built from *values*."""
    if not values.get('llm'):
        values['llm'] = ChatAnthropic(**values)
    return values
| null |
test_qdrant_similarity_search_with_relevance_score_with_threshold
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i, 'metadata': {'page': i + 1, 'pages': [i + 2, -1]}} for
i in range(len(texts))]
docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), metadatas=
metadatas, location=':memory:', vector_name=vector_name)
score_threshold = 0.98
kwargs = {'score_threshold': score_threshold}
output = docsearch.similarity_search_with_relevance_scores('foo', k=3, **kwargs
)
assert len(output) == 1
assert all([(score >= score_threshold) for _, score in output])
|
@pytest.mark.parametrize('vector_name', [None, 'my-vector'])
def test_qdrant_similarity_search_with_relevance_score_with_threshold(
    vector_name: Optional[str]) ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': idx, 'metadata': {'page': idx + 1, 'pages': [idx +
        2, -1]}} for idx in range(len(texts))]
    docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(),
        metadatas=metadatas, location=':memory:', vector_name=vector_name)
    score_threshold = 0.98
    output = docsearch.similarity_search_with_relevance_scores('foo', k=3,
        score_threshold=score_threshold)
    assert len(output) == 1
    assert all(score >= score_threshold for _, score in output)
|
Test end to end construction and search.
|
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.'
)
values['model_kwargs'] = extra
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Build extra kwargs from additional params that were passed in."""
    known_fields = get_pydantic_field_names(cls)
    extra = values.get('model_kwargs', {})
    # Move any non-declared field into model_kwargs, warning as we go.
    for name in list(values):
        if name in extra:
            raise ValueError(f'Found {name} supplied twice.')
        if name not in known_fields:
            logger.warning(
                f"""WARNING! {name} is not default parameter.
                {name} was transferred to model_kwargs.
                Please confirm that {name} is what you intended."""
                )
            extra[name] = values.pop(name)
    # Declared fields must never be smuggled in through model_kwargs.
    duplicated = known_fields.intersection(extra.keys())
    if duplicated:
        raise ValueError(
            f'Parameters {duplicated} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.'
            )
    values['model_kwargs'] = extra
    return values
|
Build extra kwargs from additional params that were passed in.
|
_default_params
|
"""Get the default parameters for calling NLPCloud API."""
return {'temperature': self.temperature, 'max_length': self.max_length,
'length_no_input': self.length_no_input, 'remove_input': self.
remove_input, 'remove_end_sequence': self.remove_end_sequence,
'bad_words': self.bad_words, 'top_p': self.top_p, 'top_k': self.top_k,
'repetition_penalty': self.repetition_penalty, 'num_beams': self.
num_beams, 'num_return_sequences': self.num_return_sequences}
|
@property
def _default_params(self) ->Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {'temperature': self.temperature, 'max_length': self.max_length,
'length_no_input': self.length_no_input, 'remove_input': self.
remove_input, 'remove_end_sequence': self.remove_end_sequence,
'bad_words': self.bad_words, 'top_p': self.top_p, 'top_k': self.
top_k, 'repetition_penalty': self.repetition_penalty, 'num_beams':
self.num_beams, 'num_return_sequences': self.num_return_sequences}
|
Get the default parameters for calling NLPCloud API.
|
check_redis_module_exist
|
"""Check if the correct Redis modules are installed."""
installed_modules = client.module_list()
installed_modules = {module[b'name'].decode('utf-8'): module for module in
installed_modules}
for module in required_modules:
if module['name'] in installed_modules and int(installed_modules[module
['name']][b'ver']) >= int(module['ver']):
return
error_message = (
'Redis cannot be used as a vector database without RediSearch >=2.4Please head to https://redis.io/docs/stack/search/quick_start/to know more about installing the RediSearch module within Redis Stack.'
)
logger.error(error_message)
raise ValueError(error_message)
|
def check_redis_module_exist(client: RedisType, required_modules: List[dict]
    ) ->None:
    """Check if the correct Redis modules are installed."""
    module_info = {entry[b'name'].decode('utf-8'): entry for entry in
        client.module_list()}
    # Any single required module at a sufficient version is enough.
    for required in required_modules:
        name = required['name']
        if name in module_info and int(module_info[name][b'ver']) >= int(
            required['ver']):
            return
    error_message = (
        'Redis cannot be used as a vector database without RediSearch >=2.4Please head to https://redis.io/docs/stack/search/quick_start/to know more about installing the RediSearch module within Redis Stack.'
        )
    logger.error(error_message)
    raise ValueError(error_message)
|
Check if the correct Redis modules are installed.
|
__eq__
|
if isinstance(other, RunnableLambda):
if hasattr(self, 'func') and hasattr(other, 'func'):
return self.func == other.func
elif hasattr(self, 'afunc') and hasattr(other, 'afunc'):
return self.afunc == other.afunc
else:
return False
else:
return False
|
def __eq__(self, other: Any) ->bool:
    """Equal when both wrap the same sync func, or both wrap the same async func."""
    if not isinstance(other, RunnableLambda):
        return False
    if hasattr(self, 'func') and hasattr(other, 'func'):
        return self.func == other.func
    if hasattr(self, 'afunc') and hasattr(other, 'afunc'):
        return self.afunc == other.afunc
    return False
| null |
_parse_chat_history
|
"""Parse a sequence of messages into history.
Returns:
A list of parsed messages.
"""
chat_history = []
for message in history:
content = cast(str, message.content)
if isinstance(message, HumanMessage):
chat_history.append(_parse_message('user', content))
if isinstance(message, AIMessage):
chat_history.append(_parse_message('assistant', content))
if isinstance(message, SystemMessage):
chat_history.append(_parse_message('system', content))
return chat_history
|
def _parse_chat_history(history: List[BaseMessage]) ->List[Dict[str, str]]:
    """Parse a sequence of messages into history.
    Returns:
        A list of parsed messages.
    """
    # Message classes checked in the same order as the original if-chain.
    role_for_type = ((HumanMessage, 'user'), (AIMessage, 'assistant'), (
        SystemMessage, 'system'))
    parsed: List[Dict[str, str]] = []
    for message in history:
        text = cast(str, message.content)
        for message_cls, role in role_for_type:
            if isinstance(message, message_cls):
                parsed.append(_parse_message(role, text))
    return parsed
|
Parse a sequence of messages into history.
Returns:
A list of parsed messages.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'prompt']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    namespace = ['langchain', 'schema', 'prompt']
    return namespace
|
Get the namespace of the langchain object.
|
validate_environment
|
"""Validate api key, python package exists, temperature, and top_p."""
mistralai_spec = importlib.util.find_spec('mistralai')
if mistralai_spec is None:
raise MistralException(
'Could not find mistralai python package. Please install it with `pip install mistralai`'
)
values['mistral_api_key'] = get_from_dict_or_env(values, 'mistral_api_key',
'MISTRAL_API_KEY', default='')
values['client'] = MistralClient(api_key=values['mistral_api_key'],
endpoint=values['endpoint'], max_retries=values['max_retries'], timeout
=values['timeout'])
if values['temperature'] is not None and not 0 <= values['temperature'] <= 1:
raise ValueError('temperature must be in the range [0.0, 1.0]')
if values['top_p'] is not None and not 0 <= values['top_p'] <= 1:
raise ValueError('top_p must be in the range [0.0, 1.0]')
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate api key, python package exists, temperature, and top_p."""
    if importlib.util.find_spec('mistralai') is None:
        raise MistralException(
            'Could not find mistralai python package. Please install it with `pip install mistralai`'
            )
    api_key = get_from_dict_or_env(values, 'mistral_api_key',
        'MISTRAL_API_KEY', default='')
    values['mistral_api_key'] = api_key
    values['client'] = MistralClient(api_key=api_key, endpoint=values[
        'endpoint'], max_retries=values['max_retries'], timeout=values[
        'timeout'])
    temperature = values['temperature']
    if temperature is not None and not 0 <= temperature <= 1:
        raise ValueError('temperature must be in the range [0.0, 1.0]')
    top_p = values['top_p']
    if top_p is not None and not 0 <= top_p <= 1:
        raise ValueError('top_p must be in the range [0.0, 1.0]')
    return values
|
Validate api key, python package exists, temperature, and top_p.
|
data
|
"""Return the deanonymizer mapping"""
return {k: dict(v) for k, v in self.mapping.items()}
|
@property
def data(self) ->MappingDataType:
    """Return the deanonymizer mapping as plain nested dicts."""
    snapshot = {}
    for entity_type, replacements in self.mapping.items():
        snapshot[entity_type] = dict(replacements)
    return snapshot
|
Return the deanonymizer mapping
|
test_boolean_metadata
|
"""Verify boolean metadata is loaded correctly"""
doc = next(doc for doc in docs if doc.metadata['source'] ==
'tags_and_frontmatter.md')
assert doc.metadata['aBool']
|
def test_boolean_metadata() ->None:
    """Verify boolean metadata is loaded correctly"""
    frontmatter_docs = (d for d in docs if d.metadata['source'] ==
        'tags_and_frontmatter.md')
    assert next(frontmatter_docs).metadata['aBool']
|
Verify boolean metadata is loaded correctly
|
test_skipping_redirects
|
loader = MWDumpLoader(file_path=(PARENT_DIR / 'mwtest_current_pages.xml').
absolute(), skip_redirects=True, stop_on_error=False)
documents = loader.load()
assert len(documents) == 2
|
@pytest.mark.requires('mwparserfromhell', 'mwxml')
def test_skipping_redirects() ->None:
    """Redirect pages are skipped, leaving only the real articles."""
    dump_path = (PARENT_DIR / 'mwtest_current_pages.xml').absolute()
    loader = MWDumpLoader(file_path=dump_path, skip_redirects=True,
        stop_on_error=False)
    assert len(loader.load()) == 2
| null |
parse
|
raise NotImplementedError()
|
def parse(self, text: str) ->Any:
    """Parse *text* into a structured value; concrete subclasses must override."""
    raise NotImplementedError()
| null |
test_timescalevector_with_filter_in_set
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=2, filter=[{'page':
'0'}, {'page': '2'}])
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
0.0), (Document(page_content='baz', metadata={'page': '2'}),
0.0013003906671379406)]
|
def test_timescalevector_with_filter_in_set() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    page_metadata = [{'page': str(i)} for i in range(len(texts))]
    store = TimescaleVector.from_texts(texts=texts, collection_name=
        'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(
        ), metadatas=page_metadata, service_url=SERVICE_URL,
        pre_delete_collection=True)
    results = store.similarity_search_with_score('foo', k=2, filter=[{
        'page': '0'}, {'page': '2'}])
    expected = [(Document(page_content='foo', metadata={'page': '0'}), 0.0),
        (Document(page_content='baz', metadata={'page': '2'}),
        0.0013003906671379406)]
    assert results == expected
|
Test end to end construction and search.
|
test_annoy_with_metadatas
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Annoy.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]:
Document(page_content='foo', metadata={'page': 0}), docsearch.
index_to_docstore_id[1]: Document(page_content='bar', metadata={'page':
1}), docsearch.index_to_docstore_id[2]: Document(page_content='baz',
metadata={'page': 2})})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
def test_annoy_with_metadatas() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = Annoy.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    expected_entries = {docsearch.index_to_docstore_id[i]: Document(
        page_content=text, metadata={'page': i}) for i, text in enumerate(
        texts)}
    expected_docstore = InMemoryDocstore(expected_entries)
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    assert docsearch.similarity_search('foo', k=1) == [Document(
        page_content='foo', metadata={'page': 0})]
|
Test end to end construction and search.
|
get_num_tokens
|
"""Calculate num tokens for OpenAI with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/main
/examples/How_to_count_tokens_with_tiktoken.ipynb
"""
tiktoken = import_tiktoken()
encoding = tiktoken.encoding_for_model(openai_model_name)
num_tokens = len(encoding.encode(string))
return num_tokens
|
def get_num_tokens(string: str, openai_model_name: str) ->int:
    """Calculate num tokens for OpenAI with tiktoken package.
    Official documentation: https://github.com/openai/openai-cookbook/blob/main
    /examples/How_to_count_tokens_with_tiktoken.ipynb
    """
    tiktoken = import_tiktoken()
    encoder = tiktoken.encoding_for_model(openai_model_name)
    return len(encoder.encode(string))
|
Calculate num tokens for OpenAI with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/main
/examples/How_to_count_tokens_with_tiktoken.ipynb
|
api_client
|
return GitHubAPIWrapper()
|
@pytest.fixture
def api_client() ->GitHubAPIWrapper:
    """Provide a fresh GitHubAPIWrapper instance for each test."""
    wrapper = GitHubAPIWrapper()
    return wrapper
| null |
finish
|
results = self._collect_test_results(batch_results)
if verbose:
try:
agg_feedback = results.get_aggregate_feedback()
_display_aggregate_results(agg_feedback)
except Exception as e:
logger.debug(f'Failed to print aggregate feedback: {repr(e)}')
try:
self.client.update_project(self.project.id, end_time=datetime.now(
timezone.utc))
except Exception as e:
logger.debug(f'Failed to close project: {repr(e)}')
return results
|
def finish(self, batch_results: list, verbose: bool=False) ->TestResult:
    """Aggregate batch results, optionally display feedback, and close the project.

    Both the display and the project-close steps are best-effort: failures are
    logged at debug level and never prevent the results from being returned.
    """
    results = self._collect_test_results(batch_results)
    if verbose:
        try:
            _display_aggregate_results(results.get_aggregate_feedback())
        except Exception as e:
            logger.debug(f'Failed to print aggregate feedback: {repr(e)}')
    try:
        end_time = datetime.now(timezone.utc)
        self.client.update_project(self.project.id, end_time=end_time)
    except Exception as e:
        logger.debug(f'Failed to close project: {repr(e)}')
    return results
| null |
_llm_type
|
"""Return type of llm."""
return 'cohere'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'cohere'
|
Return type of llm.
|
on_tool_end_common
|
self.tool_ends += 1
self.ends += 1
|
def on_tool_end_common(self) ->None:
self.tool_ends += 1
self.ends += 1
| null |
test_load_invalid_test_content
|
file_path = '/workspaces/langchain/test.json'
mocker.patch('builtins.open', mocker.mock_open())
mocker.patch('pathlib.Path.read_text', return_value=
"""
[{"text": "value1"}, {"text": "value2"}]
""")
loader = JSONLoader(file_path=file_path, jq_schema='.[]', text_content=True)
with raises(ValueError):
loader.load()
|
def test_load_invalid_test_content(mocker: MockerFixture) ->None:
    """Non-string jq results with text_content=True must raise ValueError."""
    file_path = '/workspaces/langchain/test.json'
    mocker.patch('builtins.open', mocker.mock_open())
    fake_payload = """
        [{"text": "value1"}, {"text": "value2"}]
        """
    mocker.patch('pathlib.Path.read_text', return_value=fake_payload)
    loader = JSONLoader(file_path=file_path, jq_schema='.[]', text_content=True
        )
    with raises(ValueError):
        loader.load()
| null |
test_visit_comparison
|
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1', '2'])
expected = "(metadata['foo'] < 1 or metadata['foo'] < 2)"
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison() ->None:
    """An LT comparison over a list value expands into an or-joined expression."""
    comparison = Comparison(comparator=Comparator.LT, attribute='foo',
        value=['1', '2'])
    rendered = DEFAULT_TRANSLATOR.visit_comparison(comparison)
    assert rendered == "(metadata['foo'] < 1 or metadata['foo'] < 2)"
| null |
test_json_equality_evaluator_evaluate_strings_not_equal
|
prediction = '{"a": 1}'
reference = '{"a": 2}'
result = json_equality_evaluator.evaluate_strings(prediction=prediction,
reference=reference)
assert result == {'score': False}
|
def test_json_equality_evaluator_evaluate_strings_not_equal(
    json_equality_evaluator: JsonEqualityEvaluator) ->None:
    """Differing JSON payloads score False."""
    result = json_equality_evaluator.evaluate_strings(prediction='{"a": 1}',
        reference='{"a": 2}')
    assert result == {'score': False}
| null |
add_message
|
"""Add a self-created message to the store"""
self.messages.append(message)
self.upsert_messages()
|
def add_message(self, message: BaseMessage) ->None:
    """Append *message* to the in-memory list, then persist it via upsert."""
    self.messages.append(message)
    self.upsert_messages()
|
Add a self-created message to the store
|
_format_chat_history
|
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
|
def _format_chat_history(chat_history: List[Tuple[str, str]]) ->List:
    """Convert (human, ai) turn pairs into an alternating message list."""
    messages: List = []
    for human_text, ai_text in chat_history:
        messages.extend([HumanMessage(content=human_text), AIMessage(
            content=ai_text)])
    return messages
| null |
from_llm
|
"""Creates a FlareChain from a language model.
Args:
llm: Language model to use.
max_generation_len: Maximum length of the generated response.
**kwargs: Additional arguments to pass to the constructor.
Returns:
FlareChain class with the given language model.
"""
question_gen_chain = QuestionGeneratorChain(llm=llm)
response_llm = OpenAI(max_tokens=max_generation_len, model_kwargs={
'logprobs': 1}, temperature=0)
response_chain = _OpenAIResponseChain(llm=response_llm)
return cls(question_generator_chain=question_gen_chain, response_chain=
response_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, max_generation_len: int=32, **
    kwargs: Any) ->FlareChain:
    """Creates a FlareChain from a language model.
    Args:
        llm: Language model to use.
        max_generation_len: Maximum length of the generated response.
        **kwargs: Additional arguments to pass to the constructor.
    Returns:
        FlareChain class with the given language model.
    """
    question_generator = QuestionGeneratorChain(llm=llm)
    # logprobs are required so the chain can measure token confidence.
    responder_llm = OpenAI(max_tokens=max_generation_len, model_kwargs={
        'logprobs': 1}, temperature=0)
    responder = _OpenAIResponseChain(llm=responder_llm)
    return cls(question_generator_chain=question_generator,
        response_chain=responder, **kwargs)
|
Creates a FlareChain from a language model.
Args:
llm: Language model to use.
max_generation_len: Maximum length of the generated response.
**kwargs: Additional arguments to pass to the constructor.
Returns:
FlareChain class with the given language model.
|
_retrieve_page_summaries
|
"""Get all the pages from a Notion database."""
pages: List[Dict[str, Any]] = []
while True:
data = self._request(DATABASE_URL.format(database_id=self.database_id),
method='POST', query_dict=query_dict)
pages.extend(data.get('results'))
if not data.get('has_more'):
break
query_dict['start_cursor'] = data.get('next_cursor')
return pages
|
def _retrieve_page_summaries(self, query_dict: Dict[str, Any]={'page_size':
    100}) ->List[Dict[str, Any]]:
    """Get all the pages from a Notion database.

    Args:
        query_dict: Query payload for the Notion database endpoint; defaults
            to requesting 100 results per page.

    Returns:
        All page summaries accumulated across the paginated responses.
    """
    # BUG FIX: copy before use -- the pagination cursor assignment below used
    # to be written into the shared mutable default (and into any
    # caller-supplied dict), leaking 'start_cursor' state between calls.
    query_dict = dict(query_dict)
    pages: List[Dict[str, Any]] = []
    while True:
        data = self._request(DATABASE_URL.format(database_id=self.
            database_id), method='POST', query_dict=query_dict)
        pages.extend(data.get('results'))
        if not data.get('has_more'):
            break
        query_dict['start_cursor'] = data.get('next_cursor')
    return pages
|
Get all the pages from a Notion database.
|
_setup_evaluation
|
"""Configure the evaluators to run on the results of the chain."""
if evaluation:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
run_inputs, run_outputs = None, None
run_type = 'llm'
else:
run_type = 'chain'
if data_type in (DataType.chat, DataType.llm):
val = data_type.value if isinstance(data_type, Enum) else data_type
raise ValueError(
f"Cannot evaluate a chain on dataset with data_type={val}. Please specify a dataset with the default 'kv' data type."
)
chain = llm_or_chain_factory()
run_inputs = chain.input_keys if isinstance(chain, Chain) else None
run_outputs = chain.output_keys if isinstance(chain, Chain) else None
run_evaluators = _load_run_evaluators(evaluation, run_type, data_type,
list(examples[0].outputs) if examples[0].outputs else None,
run_inputs, run_outputs)
else:
run_evaluators = None
return run_evaluators
|
def _setup_evaluation(llm_or_chain_factory: MCF, examples: List[Example],
    evaluation: Optional[smith_eval.RunEvalConfig], data_type: DataType
    ) ->Optional[List[RunEvaluator]]:
    """Configure the evaluators to run on the results of the chain.

    Returns None when no evaluation config is given. Raises ValueError when a
    chain is paired with a chat/llm dataset (only the default 'kv' type is
    evaluable for chains).
    """
    if evaluation:
        if isinstance(llm_or_chain_factory, BaseLanguageModel):
            # Bare language model: no declared input/output key schema.
            run_inputs, run_outputs = None, None
            run_type = 'llm'
        else:
            run_type = 'chain'
            if data_type in (DataType.chat, DataType.llm):
                val = data_type.value if isinstance(data_type, Enum
                    ) else data_type
                raise ValueError(
                    f"Cannot evaluate a chain on dataset with data_type={val}. Please specify a dataset with the default 'kv' data type."
                    )
            # Instantiate one chain only to inspect its declared I/O keys.
            chain = llm_or_chain_factory()
            run_inputs = chain.input_keys if isinstance(chain, Chain) else None
            run_outputs = chain.output_keys if isinstance(chain, Chain
                ) else None
        # The first example's output keys stand in for the dataset schema.
        run_evaluators = _load_run_evaluators(evaluation, run_type,
            data_type, list(examples[0].outputs) if examples[0].outputs else
            None, run_inputs, run_outputs)
    else:
        run_evaluators = None
    return run_evaluators
|
Configure the evaluators to run on the results of the chain.
|
test_update_with_delayed_score_force
|
llm, PROMPT = setup()
auto_val_llm = FakeListChatModel(responses=['3'])
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm),
feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=
False, model=MockEncoder()))
actions = ['0', '1', '2']
response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain.
ToSelectFrom(actions))
assert response['response'] == 'hey'
selection_metadata = response['selection_metadata']
assert selection_metadata.selected.score == 3.0
chain.update_with_delayed_score(chain_response=response, score=100,
force_score=True)
assert selection_metadata.selected.score == 100.0
|
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_update_with_delayed_score_force() ->None:
    """A delayed score applied with force_score=True must overwrite the auto score."""
    llm, PROMPT = setup()
    # The auto scorer always answers '3', so the initial score is 3.0.
    auto_val_llm = FakeListChatModel(responses=['3'])
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
        selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm),
        feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed
        =False, model=MockEncoder()))
    actions = ['0', '1', '2']
    response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain.
        ToSelectFrom(actions))
    assert response['response'] == 'hey'
    selection_metadata = response['selection_metadata']
    assert selection_metadata.selected.score == 3.0
    # force_score is needed because a score is already present.
    chain.update_with_delayed_score(chain_response=response, score=100,
        force_score=True)
    assert selection_metadata.selected.score == 100.0
| null |
parse
|
"""Parse the output of an LLM call."""
json_string = text.split('```json')[1].strip().strip('```').strip()
try:
return json.loads(json_string)
except json.JSONDecodeError:
return []
|
def parse(self, text: str) ->List[str]:
    """Parse the output of an LLM call.

    Extracts the first ```json fenced block from ``text`` and decodes it.
    Returns an empty list when the fence is missing or the payload is not
    valid JSON, instead of raising.
    """
    try:
        # Take everything after the opening fence, then peel off the
        # closing backticks and surrounding whitespace.
        json_string = text.split('```json')[1].strip().strip('```').strip()
        return json.loads(json_string)
    except (IndexError, json.JSONDecodeError):
        # No fenced block at all, or malformed JSON: degrade gracefully
        # (the original raised IndexError on a missing fence).
        return []
|
Parse the output of an LLM call.
|
_create_message_dicts
|
params = self._default_params
if stop is not None:
if 'stop' in params:
raise ValueError('`stop` found in both the input and default params.')
params['stop'] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
|
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional
    [List[str]]) ->Tuple[List[Dict[str, Any]], Dict[str, Any]]:
    """Serialize chat messages and assemble the call parameters."""
    params = self._default_params
    if stop is not None:
        # Refuse an ambiguous configuration: stop words from two sources.
        if 'stop' in params:
            raise ValueError(
                '`stop` found in both the input and default params.')
        params['stop'] = stop
    serialized = list(map(convert_message_to_dict, messages))
    return serialized, params
| null |
from_data
|
"""Initialize the blob from in-memory data.
Args:
data: the in-memory data associated with the blob
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
path: if provided, will be set as the source from which the data came
metadata: Metadata to associate with the blob
Returns:
Blob instance
"""
return cls(data=data, mimetype=mime_type, encoding=encoding, path=path,
metadata=metadata if metadata is not None else {})
|
@classmethod
def from_data(cls, data: Union[str, bytes], *, encoding: str='utf-8',
    mime_type: Optional[str]=None, path: Optional[str]=None, metadata:
    Optional[dict]=None) ->Blob:
    """Initialize the blob from in-memory data.

    Args:
        data: the in-memory data associated with the blob
        encoding: Encoding to use if decoding the bytes into a string
        mime_type: if provided, will be set as the mime-type of the data
        path: if provided, will be set as the source from which the data came
        metadata: Metadata to associate with the blob
    Returns:
        Blob instance
    """
    blob_metadata = {} if metadata is None else metadata
    return cls(data=data, mimetype=mime_type, encoding=encoding, path=path,
        metadata=blob_metadata)
|
Initialize the blob from in-memory data.
Args:
data: the in-memory data associated with the blob
encoding: Encoding to use if decoding the bytes into a string
mime_type: if provided, will be set as the mime-type of the data
path: if provided, will be set as the source from which the data came
metadata: Metadata to associate with the blob
Returns:
Blob instance
|
load
|
"""Load records."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Load records."""
    # Materialize the lazy iterator into a concrete list.
    return [*self.lazy_load()]
|
Load records.
|
predict_messages
|
"""Pass a message sequence to the model and return a message prediction.
Use this method when passing in chat messages. If you want to pass in raw text,
use predict.
Args:
messages: A sequence of chat messages corresponding to a single model input.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a message.
"""
|
@abstractmethod
def predict_messages(self, messages: List[BaseMessage], *, stop: Optional[
    Sequence[str]]=None, **kwargs: Any) ->BaseMessage:
    """Pass a message sequence to the model and return a message prediction.
    Use this method when passing in chat messages. If you want to pass in raw text,
    use predict.
    Args:
        messages: A sequence of chat messages corresponding to a single model input.
        stop: Stop words to use when generating. Model output is cut off at the
            first occurrence of any of these substrings.
        **kwargs: Arbitrary additional keyword arguments. These are usually passed
            to the model provider API call.
    Returns:
        Top model prediction as a message.
    Note:
        Abstract; concrete language model classes must provide the
        implementation.
    """
|
Pass a message sequence to the model and return a message prediction.
Use this method when passing in chat messages. If you want to pass in raw text,
use predict.
Args:
messages: A sequence of chat messages corresponding to a single model input.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a message.
|
on_tool_end_common
|
self.tool_ends += 1
self.ends += 1
|
def on_tool_end_common(self) ->None:
    """Record a completed tool call in the shared run counters."""
    self.tool_ends += 1
    self.ends += 1
| null |
test_visit_comparison_range_like
|
comp = Comparison(comparator=Comparator.LIKE, attribute='foo', value='bar')
expected = {'match': {'metadata.foo': {'query': 'bar', 'fuzziness': 'AUTO'}}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison_range_like() ->None:
    """LIKE comparisons should produce a `match` query with AUTO fuzziness."""
    comp = Comparison(comparator=Comparator.LIKE, attribute='foo', value='bar')
    expected = {'match': {'metadata.foo': {'query': 'bar', 'fuzziness':
        'AUTO'}}}
    actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
    assert expected == actual
| null |
test_openai_invoke
|
"""Test invoke tokens from ChatOpenAI."""
llm = ChatOpenAI(max_tokens=10)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str)
|
@pytest.mark.scheduled
def test_openai_invoke() ->None:
    """Test invoke tokens from ChatOpenAI."""
    llm = ChatOpenAI(max_tokens=10)  # keep the completion short and cheap
    result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
    assert isinstance(result.content, str)
|
Test invoke tokens from ChatOpenAI.
|
save_context
|
"""Save context from this conversation to buffer. Pruned."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str)
steps = format_to_openai_function_messages(outputs[self.intermediate_steps_key]
)
for msg in steps:
self.chat_memory.add_message(msg)
self.chat_memory.add_ai_message(output_str)
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
while curr_buffer_length > self.max_token_limit:
buffer.pop(0)
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
|
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) ->None:
    """Save context from this conversation to buffer. Pruned."""
    input_str, output_str = self._get_input_output(inputs, outputs)
    self.chat_memory.add_user_message(input_str)
    # Replay the agent's intermediate function-call steps into history so
    # the model can see its own tool usage on the next turn.
    steps = format_to_openai_function_messages(outputs[self.
        intermediate_steps_key])
    for msg in steps:
        self.chat_memory.add_message(msg)
    self.chat_memory.add_ai_message(output_str)
    buffer = self.chat_memory.messages
    curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
    # Prune oldest messages until the buffer fits the token budget.
    # NOTE(review): pop(0) mutates chat_memory.messages in place — assumes
    # the memory class exposes a mutable list; confirm.
    if curr_buffer_length > self.max_token_limit:
        while curr_buffer_length > self.max_token_limit:
            buffer.pop(0)
            curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
|
Save context from this conversation to buffer. Pruned.
|
split_text
|
"""Split incoming text and return chunks."""
separator = self._separator if self._is_separator_regex else re.escape(self
._separator)
splits = _split_text_with_regex(text, separator, self._keep_separator)
_separator = '' if self._keep_separator else self._separator
return self._merge_splits(splits, _separator)
|
def split_text(self, text: str) ->List[str]:
    """Split incoming text and return chunks."""
    if self._is_separator_regex:
        pattern = self._separator
    else:
        pattern = re.escape(self._separator)
    pieces = _split_text_with_regex(text, pattern, self._keep_separator)
    # If the separator was kept inside the splits, merge with nothing;
    # otherwise rejoin chunks with the separator itself.
    joiner = '' if self._keep_separator else self._separator
    return self._merge_splits(pieces, joiner)
|
Split incoming text and return chunks.
|
on_agent_finish
|
"""Run on agent end."""
|
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None:
    """Run on agent end. Default implementation is a no-op."""
Run on agent end.
|
test_system_invoke
|
"""Test invoke tokens with a system message"""
llm = ChatAnthropicMessages(model_name='claude-instant-1.2')
prompt = ChatPromptTemplate.from_messages([('system',
'You are an expert cartographer. If asked, you are a cartographer. STAY IN CHARACTER'
), ('human', 'Are you a mathematician?')])
chain = prompt | llm
result = chain.invoke({})
assert isinstance(result.content, str)
|
def test_system_invoke() ->None:
    """Test invoke tokens with a system message."""
    llm = ChatAnthropicMessages(model_name='claude-instant-1.2')
    prompt = ChatPromptTemplate.from_messages([('system',
        'You are an expert cartographer. If asked, you are a cartographer. STAY IN CHARACTER'
        ), ('human', 'Are you a mathematician?')])
    chain = prompt | llm
    # No template variables, so invoke with an empty mapping.
    result = chain.invoke({})
    assert isinstance(result.content, str)
|
Test invoke tokens with a system message
|
get_tools
|
"""Get the tools in the toolkit."""
tool_classes: List[Type[BaseBrowserTool]] = [ClickTool, NavigateTool,
NavigateBackTool, ExtractTextTool, ExtractHyperlinksTool,
GetElementsTool, CurrentWebPageTool]
tools = [tool_cls.from_browser(sync_browser=self.sync_browser,
async_browser=self.async_browser) for tool_cls in tool_classes]
return cast(List[BaseTool], tools)
|
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit."""
    browser_tools: List[Type[BaseBrowserTool]] = [ClickTool, NavigateTool,
        NavigateBackTool, ExtractTextTool, ExtractHyperlinksTool,
        GetElementsTool, CurrentWebPageTool]
    instantiated = []
    for tool_cls in browser_tools:
        # Every tool shares the toolkit's sync/async browser handles.
        instantiated.append(tool_cls.from_browser(sync_browser=self.
            sync_browser, async_browser=self.async_browser))
    return cast(List[BaseTool], instantiated)
|
Get the tools in the toolkit.
|
from_llm
|
"""Initialize from LLM."""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
_get_input = get_input if get_input is not None else default_get_input
llm_chain = LLMChain(llm=llm, prompt=_prompt, **llm_chain_kwargs or {})
return cls(llm_chain=llm_chain, get_input=_get_input)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate]=
    None, get_input: Optional[Callable[[str, Document], str]]=None,
    llm_chain_kwargs: Optional[dict]=None) ->LLMChainExtractor:
    """Initialize from LLM."""
    if prompt is None:
        prompt = _get_default_chain_prompt()
    if get_input is None:
        get_input = default_get_input
    extra_kwargs = llm_chain_kwargs or {}
    chain = LLMChain(llm=llm, prompt=prompt, **extra_kwargs)
    return cls(llm_chain=chain, get_input=get_input)
|
Initialize from LLM.
|
embed_query
|
"""Embed query text."""
return self.embed_documents([text])[0]
|
def embed_query(self, text: str) ->List[float]:
    """Embed query text."""
    # Reuse the batch document API with a single-element batch.
    vectors = self.embed_documents([text])
    return vectors[0]
|
Embed query text.
|
_get_create_table_stmt
|
statement = self._spark.sql(f'SHOW CREATE TABLE {table}').collect()[0
].createtab_stmt
using_clause_index = statement.find('USING')
return statement[:using_clause_index] + ';'
|
def _get_create_table_stmt(self, table: str) ->str:
    """Return the CREATE TABLE statement for ``table``, truncated at USING."""
    rows = self._spark.sql(f'SHOW CREATE TABLE {table}').collect()
    statement = rows[0].createtab_stmt
    # Keep everything before the storage/format clause, terminated with ';'.
    return statement[:statement.find('USING')] + ';'
| null |
test_remove_style
|
bs_transformer = BeautifulSoupTransformer()
with_style_html = (
'<html><style>my_funky_style</style><p>First paragraph.</p></html>')
documents = [Document(page_content=with_style_html)]
docs_transformed = bs_transformer.transform_documents(documents,
tags_to_extract=['html'])
assert docs_transformed[0].page_content == 'First paragraph.'
|
@pytest.mark.requires('bs4')
def test_remove_style() ->None:
    """Content of <style> tags should be stripped from the extracted text."""
    bs_transformer = BeautifulSoupTransformer()
    with_style_html = (
        '<html><style>my_funky_style</style><p>First paragraph.</p></html>')
    documents = [Document(page_content=with_style_html)]
    docs_transformed = bs_transformer.transform_documents(documents,
        tags_to_extract=['html'])
    assert docs_transformed[0].page_content == 'First paragraph.'
| null |
_call
|
"""Call out to EdenAI's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
json formatted str response.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
'stop sequences found in both the input and default params.')
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
url = f'{self.base_url}/{self.feature}/{self.subfeature}'
headers = {'Authorization': f'Bearer {self.edenai_api_key}', 'User-Agent':
self.get_user_agent()}
payload: Dict[str, Any] = {'providers': self.provider, 'text': prompt,
'max_tokens': self.max_tokens, 'temperature': self.temperature,
'resolution': self.resolution, **self.params, **kwargs, 'num_images': 1}
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload['settings'] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f'EdenAI Server: Error {response.status_code}')
elif response.status_code >= 400:
raise ValueError(f'EdenAI received an invalid payload: {response.text}')
elif response.status_code != 200:
raise Exception(
f'EdenAI returned an unexpected response with status {response.status_code}: {response.text}'
)
data = response.json()
provider_response = data[self.provider]
if provider_response.get('status') == 'fail':
err_msg = provider_response.get('error', {}).get('message')
raise Exception(err_msg)
output = self._format_output(data)
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call out to EdenAI's text generation endpoint.
    Args:
        prompt: The prompt to pass into the model.
        stop: Optional stop sequences; must not be combined with the
            instance-level ``stop_sequences``.
    Returns:
        json formatted str response.
    Raises:
        ValueError: If stop sequences are given both on the instance and
            per-call, or if the API rejects the payload (4xx).
        Exception: On server errors (5xx), unexpected statuses, or a
            provider-level 'fail' status in the response body.
    """
    # Stop sequences may come from the instance or the call — never both.
    stops = None
    if self.stop_sequences is not None and stop is not None:
        raise ValueError(
            'stop sequences found in both the input and default params.')
    elif self.stop_sequences is not None:
        stops = self.stop_sequences
    else:
        stops = stop
    url = f'{self.base_url}/{self.feature}/{self.subfeature}'
    headers = {'Authorization': f'Bearer {self.edenai_api_key}',
        'User-Agent': self.get_user_agent()}
    payload: Dict[str, Any] = {'providers': self.provider, 'text': prompt,
        'max_tokens': self.max_tokens, 'temperature': self.temperature,
        'resolution': self.resolution, **self.params, **kwargs, 'num_images': 1
        }
    # Drop unset options so the API applies its own defaults.
    payload = {k: v for k, v in payload.items() if v is not None}
    if self.model is not None:
        # Pin a specific model for the chosen provider.
        payload['settings'] = {self.provider: self.model}
    request = Requests(headers=headers)
    response = request.post(url=url, data=payload)
    if response.status_code >= 500:
        raise Exception(f'EdenAI Server: Error {response.status_code}')
    elif response.status_code >= 400:
        raise ValueError(f'EdenAI received an invalid payload: {response.text}'
            )
    elif response.status_code != 200:
        raise Exception(
            f'EdenAI returned an unexpected response with status {response.status_code}: {response.text}'
            )
    data = response.json()
    # Responses are keyed by provider name; a 200 can still carry a failure.
    provider_response = data[self.provider]
    if provider_response.get('status') == 'fail':
        err_msg = provider_response.get('error', {}).get('message')
        raise Exception(err_msg)
    output = self._format_output(data)
    # Stop-token enforcement is applied client-side after generation.
    if stops is not None:
        output = enforce_stop_tokens(output, stops)
    return output
|
Call out to EdenAI's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
json formatted str response.
|
input_keys
|
"""Return the input keys.
Returns:
List of input keys.
"""
return list(set(self.llm_chain.input_keys) - {'intermediate_steps'})
|
@property
def input_keys(self) ->List[str]:
    """Return the input keys.
    Returns:
        List of input keys.
    """
    keys = set(self.llm_chain.input_keys)
    # 'intermediate_steps' is produced internally, not supplied by callers.
    keys.discard('intermediate_steps')
    return list(keys)
|
Return the input keys.
Returns:
List of input keys.
|
query
|
"""
Query the graph.
"""
from rdflib.exceptions import ParserError
from rdflib.query import ResultRow
try:
res = self.graph.query(query)
except ParserError as e:
raise ValueError(f'Generated SPARQL statement is invalid\n{e}')
return [r for r in res if isinstance(r, ResultRow)]
|
def query(self, query: str) ->List[rdflib.query.ResultRow]:
    """
    Query the graph.
    """
    from rdflib.exceptions import ParserError
    from rdflib.query import ResultRow
    try:
        raw_results = self.graph.query(query)
    except ParserError as e:
        raise ValueError(f'Generated SPARQL statement is invalid\n{e}')
    # Keep only ResultRow instances.
    return [row for row in raw_results if isinstance(row, ResultRow)]
|
Query the graph.
|
index
|
"""Create the mapping for the Elasticsearch index."""
if similarity is DistanceStrategy.COSINE:
similarityAlgo = 'cosine'
elif similarity is DistanceStrategy.EUCLIDEAN_DISTANCE:
similarityAlgo = 'l2_norm'
elif similarity is DistanceStrategy.DOT_PRODUCT:
similarityAlgo = 'dot_product'
else:
raise ValueError(f'Similarity {similarity} not supported.')
return {'mappings': {'properties': {vector_query_field: {'type':
'dense_vector', 'dims': dims_length, 'index': True, 'similarity':
similarityAlgo}}}}
|
def index(self, dims_length: Union[int, None], vector_query_field: str,
    similarity: Union[DistanceStrategy, None]) ->Dict:
    """Create the mapping for the Elasticsearch index."""
    algo_by_strategy = {DistanceStrategy.COSINE: 'cosine',
        DistanceStrategy.EUCLIDEAN_DISTANCE: 'l2_norm',
        DistanceStrategy.DOT_PRODUCT: 'dot_product'}
    try:
        similarityAlgo = algo_by_strategy[similarity]
    except KeyError:
        raise ValueError(f'Similarity {similarity} not supported.') from None
    dense_vector = {'type': 'dense_vector', 'dims': dims_length, 'index':
        True, 'similarity': similarityAlgo}
    return {'mappings': {'properties': {vector_query_field: dense_vector}}}
|
Create the mapping for the Elasticsearch index.
|
similarity_search
|
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_by_vector(embedding_vector, k, filter=filter)
|
def similarity_search(self, query: str, k: int=4, filter: Optional[Dict[str,
    str]]=None, **kwargs: Any) ->List[Document]:
    """Embed the query text and delegate to the vector-based search."""
    query_vector = self.embedding.embed_query(query)
    return self.similarity_search_by_vector(query_vector, k, filter=filter)
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's `__all__` must match the expected public API exactly."""
    assert set(__all__) == set(EXPECTED_ALL)
| null |
test_is_arxiv_identifier
|
"""Test that is_arxiv_identifier returns True for valid arxiv identifiers"""
api_client = ArxivAPIWrapper()
assert api_client.is_arxiv_identifier('1605.08386v1')
assert api_client.is_arxiv_identifier('0705.0123')
assert api_client.is_arxiv_identifier('2308.07912')
assert api_client.is_arxiv_identifier('9603067 2308.07912 2308.07912')
assert not api_client.is_arxiv_identifier('12345')
assert not api_client.is_arxiv_identifier('0705.012')
assert not api_client.is_arxiv_identifier('0705.012300')
assert not api_client.is_arxiv_identifier('1605.08386w1')
|
@pytest.mark.requires('arxiv')
def test_is_arxiv_identifier() ->None:
    """Test that is_arxiv_identifier returns True for valid arxiv identifiers"""
    api_client = ArxivAPIWrapper()
    # Valid identifiers, with and without a version suffix.
    assert api_client.is_arxiv_identifier('1605.08386v1')
    assert api_client.is_arxiv_identifier('0705.0123')
    assert api_client.is_arxiv_identifier('2308.07912')
    # Multiple whitespace-separated identifiers are accepted too.
    assert api_client.is_arxiv_identifier('9603067 2308.07912 2308.07912')
    # Wrong lengths or malformed version suffixes must be rejected.
    assert not api_client.is_arxiv_identifier('12345')
    assert not api_client.is_arxiv_identifier('0705.012')
    assert not api_client.is_arxiv_identifier('0705.012300')
    assert not api_client.is_arxiv_identifier('1605.08386w1')
|
Test that is_arxiv_identifier returns True for valid arxiv identifiers
|
add_step
|
"""Add step and step response to the container."""
|
@abstractmethod
def add_step(self, step: Step, step_response: StepResponse) ->None:
    """Add step and step response to the container.

    Args:
        step: The step that was executed.
        step_response: The response generated for that step.
    """
|
Add step and step response to the container.
|
_identifying_params
|
"""Return the identifying parameters."""
return {'endpoint_url': self.endpoint_url, 'model_kwargs': self.
model_kwargs or {}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Return the identifying parameters."""
    model_kwargs = self.model_kwargs or {}
    return {'endpoint_url': self.endpoint_url, 'model_kwargs': model_kwargs}
|
Return the identifying parameters.
|
max_marginal_relevance_search_by_vector
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
raise NotImplementedError
|
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
    int=4, fetch_k: int=20, lambda_mult: float=0.5, **kwargs: Any) ->List[
    Document]:
    """Return docs selected using the maximal marginal relevance.
    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
                    of diversity among the results with 0 corresponding
                    to maximum diversity and 1 to minimum diversity.
                    Defaults to 0.5.
    Returns:
        List of Documents selected by maximal marginal relevance.
    Raises:
        NotImplementedError: This base implementation is a stub; vector
            stores that support MMR must override it.
    """
    raise NotImplementedError
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
|
_convert_prompt_msg_params
|
"""
Converts a list of messages into a dictionary containing the message content
and default parameters.
Args:
messages (List[BaseMessage]): The list of messages.
**kwargs (Any): Optional arguments to add additional parameters to the
resulting dictionary.
Returns:
Dict[str, Any]: A dictionary containing the message content and default
parameters.
"""
messages_dict: Dict[str, Any] = {'messages': [convert_message_to_dict(m) for
m in messages if not isinstance(m, SystemMessage)]}
for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]:
if 'system' not in messages_dict:
messages_dict['system'] = ''
messages_dict['system'] += cast(str, messages[i].content) + '\n'
return {**messages_dict, **self._default_params, **kwargs}
|
def _convert_prompt_msg_params(self, messages: List[BaseMessage], **kwargs: Any
    ) ->Dict[str, Any]:
    """
    Converts a list of messages into a dictionary containing the message content
    and default parameters.
    Args:
        messages (List[BaseMessage]): The list of messages.
        **kwargs (Any): Optional arguments to add additional parameters to the
            resulting dictionary.
    Returns:
        Dict[str, Any]: A dictionary containing the message content and default
            parameters.
    """
    # Non-system messages travel in 'messages'; system messages are pulled
    # out and concatenated (newline-terminated, in order) into 'system'.
    payload: Dict[str, Any] = {'messages': [convert_message_to_dict(m) for
        m in messages if not isinstance(m, SystemMessage)]}
    system_text = ''.join(cast(str, m.content) + '\n' for m in messages if
        isinstance(m, SystemMessage))
    if system_text:
        payload['system'] = system_text
    return {**payload, **self._default_params, **kwargs}
|
Converts a list of messages into a dictionary containing the message content
and default parameters.
Args:
messages (List[BaseMessage]): The list of messages.
**kwargs (Any): Optional arguments to add additional parameters to the
resulting dictionary.
Returns:
Dict[str, Any]: A dictionary containing the message content and default
parameters.
|
pytest_collection_modifyitems
|
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
required_pkgs_info: Dict[str, bool] = {}
only_extended = config.getoption('--only-extended') or False
only_core = config.getoption('--only-core') or False
if only_extended and only_core:
raise ValueError('Cannot specify both `--only-extended` and `--only-core`.'
)
for item in items:
requires_marker = item.get_closest_marker('requires')
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason=
'Skipping not a core test.'))
continue
required_pkgs = requires_marker.args
for pkg in required_pkgs:
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f'Package `{pkg}` is not installed but is required for extended tests. Please install the given package and try again.'
)
else:
item.add_marker(pytest.mark.skip(reason=
f'Requires pkg: `{pkg}`'))
break
elif only_extended:
item.add_marker(pytest.mark.skip(reason=
'Skipping not an extended test.'))
|
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]
    ) ->None:
    """Add implementations for handling custom markers.
    At the moment, this adds support for a custom `requires` marker.
    The `requires` marker is used to denote tests that require one or more packages
    to be installed to run. If the package is not installed, the test is skipped.
    The `requires` marker syntax is:
    .. code-block:: python
        @pytest.mark.requires("package1", "package2")
        def test_something():
            ...
    """
    # Cache of package name -> installed?, so each package is probed once.
    required_pkgs_info: Dict[str, bool] = {}
    only_extended = config.getoption('--only-extended') or False
    only_core = config.getoption('--only-core') or False
    if only_extended and only_core:
        raise ValueError(
            'Cannot specify both `--only-extended` and `--only-core`.')
    for item in items:
        requires_marker = item.get_closest_marker('requires')
        if requires_marker is not None:
            # A `requires` marker makes this an "extended" test.
            if only_core:
                item.add_marker(pytest.mark.skip(reason=
                    'Skipping not a core test.'))
                continue
            required_pkgs = requires_marker.args
            for pkg in required_pkgs:
                if pkg not in required_pkgs_info:
                    try:
                        # find_spec itself can raise for odd package names.
                        installed = util.find_spec(pkg) is not None
                    except Exception:
                        installed = False
                    required_pkgs_info[pkg] = installed
                if not required_pkgs_info[pkg]:
                    if only_extended:
                        # Extended runs require every dependency present.
                        pytest.fail(
                            f'Package `{pkg}` is not installed but is required for extended tests. Please install the given package and try again.'
                            )
                    else:
                        item.add_marker(pytest.mark.skip(reason=
                            f'Requires pkg: `{pkg}`'))
                    break
        elif only_extended:
            # Unmarked tests are core tests; skip in extended-only runs.
            item.add_marker(pytest.mark.skip(reason=
                'Skipping not an extended test.'))
|
Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
|
_import_matching_engine
|
from langchain_community.vectorstores.matching_engine import MatchingEngine
return MatchingEngine
|
def _import_matching_engine() ->Any:
    """Lazily import and return the MatchingEngine vector store class."""
    from langchain_community.vectorstores.matching_engine import MatchingEngine
    return MatchingEngine
| null |
load_memory_variables
|
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.get_relevant_documents(query)
result: Union[List[Document], str]
if not self.return_docs:
result = '\n'.join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
|
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Union[
    List[Document], str]]:
    """Return history buffer."""
    query = inputs[self._get_prompt_input_key(inputs)]
    docs = self.retriever.get_relevant_documents(query)
    result: Union[List[Document], str]
    if self.return_docs:
        result = docs
    else:
        # Flatten the retrieved documents into a single text blob.
        result = '\n'.join(doc.page_content for doc in docs)
    return {self.memory_key: result}
|
Return history buffer.
|
extract_cypher
|
"""
Extract Cypher code from a text.
Args:
text: Text to extract Cypher code from.
Returns:
Cypher code extracted from the text.
"""
pattern = '```(.*?)```'
matches = re.findall(pattern, text, re.DOTALL)
return matches[0] if matches else text
|
def extract_cypher(text: str) ->str:
    """
    Extract Cypher code from a text.
    Args:
        text: Text to extract Cypher code from.
    Returns:
        The contents of the first triple-backtick fence, or ``text`` itself
        when no fence is present.
    """
    fenced = re.search('```(.*?)```', text, re.DOTALL)
    return fenced.group(1) if fenced else text
|
Extract Cypher code from a text.
Args:
text: Text to extract Cypher code from.
Returns:
Cypher code extracted from the text.
|
_default_scripting_text_mapping
|
"""For Painless Scripting or Script Scoring,the default mapping to create index."""
return {'mappings': {'properties': {vector_field: {'type': 'knn_vector',
'dimension': dim}}}}
|
def _default_scripting_text_mapping(dim: int, vector_field: str='vector_field'
) ->Dict:
"""For Painless Scripting or Script Scoring,the default mapping to create index."""
return {'mappings': {'properties': {vector_field: {'type': 'knn_vector',
'dimension': dim}}}}
|
For Painless Scripting or Script Scoring,the default mapping to create index.
|
_parse_stream_helper
|
if line and line.startswith(b'data:'):
if line.startswith(b'data: '):
line = line[len(b'data: '):]
else:
line = line[len(b'data:'):]
if line.strip() == b'[DONE]':
return None
else:
return line.decode('utf-8')
return None
|
def _parse_stream_helper(line: bytes) ->Optional[str]:
if line and line.startswith(b'data:'):
if line.startswith(b'data: '):
line = line[len(b'data: '):]
else:
line = line[len(b'data:'):]
if line.strip() == b'[DONE]':
return None
else:
return line.decode('utf-8')
return None
| null |
__init__
|
self.openai_api_key = openai_api_key or get_from_env('openai_api_key',
'OPENAI_API_KEY')
self.openai_api_model = openai_api_model or get_from_env('openai_api_model',
'OPENAI_API_MODEL')
self.language = language
|
def __init__(self, openai_api_key: Optional[str]=None, language: str=
    'english', openai_api_model: Optional[str]=None) ->None:
    """Store API credentials and language, falling back to environment vars.

    Falsy (None or empty) key/model values trigger the environment lookup.
    """
    self.language = language
    self.openai_api_key = openai_api_key or get_from_env('openai_api_key',
        'OPENAI_API_KEY')
    self.openai_api_model = openai_api_model or get_from_env(
        'openai_api_model', 'OPENAI_API_MODEL')
| null |
base_url
|
"""Get the base url."""
return self.servers[0].url
|
@property
def base_url(self) ->str:
    """Get the base url (taken from the first configured server)."""
    first_server = self.servers[0]
    return first_server.url
|
Get the base url.
|
validate_environment
|
"""Validate that ``deepsparse`` package is installed."""
try:
from deepsparse import Pipeline
except ImportError:
raise ImportError(
'Could not import `deepsparse` package. Please install it with `pip install deepsparse[llm]`'
)
model_config = values['model_config'] or {}
values['pipeline'] = Pipeline.create(task='text_generation', model_path=
values['model'], **model_config)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that ``deepsparse`` package is installed."""
    try:
        from deepsparse import Pipeline
    except ImportError:
        raise ImportError(
            'Could not import `deepsparse` package. Please install it with `pip install deepsparse[llm]`'
            )
    # Build the pipeline eagerly so misconfiguration fails at init time.
    config = values['model_config'] or {}
    values['pipeline'] = Pipeline.create(task='text_generation',
        model_path=values['model'], **config)
    return values
|
Validate that ``deepsparse`` package is installed.
|
query_tasks
|
"""
Query tasks that match certain fields
"""
params, error = load_query(query, fault_tolerant=True)
if params is None:
return {'Error': error}
url = f"{DEFAULT_URL}/list/{params['list_id']}/task"
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {'response': response}
|
def query_tasks(self, query: str) ->Dict:
    """
    Query tasks that match certain fields
    """
    # Parse the caller's query only to extract the target list id; the
    # actual request uses this instance's default query parameters.
    parsed, error = load_query(query, fault_tolerant=True)
    if parsed is None:
        return {'Error': error}
    url = f"{DEFAULT_URL}/list/{parsed['list_id']}/task"
    response = requests.get(url, headers=self.get_headers(), params=self.
        get_default_params())
    return {'response': response}
|
Query tasks that match certain fields
|
_stream
|
params = {'model': self.model, 'prompt': prompt, 'stream': True, **self.
model_kwargs}
for stream_resp in completion_with_retry(self, self.use_retry, run_manager=
run_manager, stop=stop, **params):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
|
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
    GenerationChunk]:
    """Yield generation chunks for *prompt*, notifying the run manager per token."""
    # User-supplied model_kwargs take precedence over the fixed fields.
    request = {'model': self.model, 'prompt': prompt, 'stream': True}
    request.update(self.model_kwargs)
    for raw_chunk in completion_with_retry(self, self.use_retry,
        run_manager=run_manager, stop=stop, **request):
        chunk = _stream_response_to_generation_chunk(raw_chunk)
        yield chunk
        if run_manager:
            run_manager.on_llm_new_token(chunk.text, chunk=chunk)
| null |
on_chain_start
|
"""On chain start, do nothing."""
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
    **kwargs: Any) ->None:
    """On chain start, do nothing.

    Deliberate no-op: this handler ignores chain-start events.
    """
|
On chain start, do nothing.
|
call_actor_task
|
"""Run a saved Actor task on Apify and wait for results to be ready.
Args:
task_id (str): The ID or name of the task on the Apify platform.
task_input (Dict): The input object of the task that you're trying to run.
Overrides the task's saved input.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
task run's default dataset.
"""
from langchain_community.document_loaders import ApifyDatasetLoader
task_call = self.apify_client.task(task_id).call(task_input=task_input,
build=build, memory_mbytes=memory_mbytes, timeout_secs=timeout_secs)
return ApifyDatasetLoader(dataset_id=task_call['defaultDatasetId'],
dataset_mapping_function=dataset_mapping_function)
|
def call_actor_task(self, task_id: str, task_input: Dict,
    dataset_mapping_function: Callable[[Dict], Document], *, build:
    Optional[str]=None, memory_mbytes: Optional[int]=None, timeout_secs:
    Optional[int]=None) ->'ApifyDatasetLoader':
    """Run a saved Actor task on Apify and wait for results to be ready.

    Args:
        task_id (str): The ID or name of the task on the Apify platform.
        task_input (Dict): The input object of the task that you're trying to run.
            Overrides the task's saved input.
        dataset_mapping_function (Callable): A function that takes a single
            dictionary (an Apify dataset item) and converts it to an
            instance of the Document class.
        build (str, optional): Optionally specifies the actor build to run.
            It can be either a build tag or build number.
        memory_mbytes (int, optional): Optional memory limit for the run,
            in megabytes.
        timeout_secs (int, optional): Optional timeout for the run, in seconds.
    Returns:
        ApifyDatasetLoader: A loader that will fetch the records from the
            task run's default dataset.
    """
    from langchain_community.document_loaders import ApifyDatasetLoader

    # Blocks until the task run finishes, then wraps its default dataset.
    run_details = self.apify_client.task(task_id).call(task_input=
        task_input, build=build, memory_mbytes=memory_mbytes, timeout_secs
        =timeout_secs)
    return ApifyDatasetLoader(dataset_id=run_details['defaultDatasetId'],
        dataset_mapping_function=dataset_mapping_function)
|
Run a saved Actor task on Apify and wait for results to be ready.
Args:
task_id (str): The ID or name of the task on the Apify platform.
task_input (Dict): The input object of the task that you're trying to run.
Overrides the task's saved input.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
task run's default dataset.
|
_ensure_cache_exists
|
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(
f'Unexpected response cache creation: {create_cache_response}')
|
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str
    ) ->None:
    """Create cache if it doesn't exist.
    Raises:
        SdkException: Momento service or network error
        Exception: Unexpected response
    """
    from momento.responses import CreateCache

    response = cache_client.create_cache(cache_name)
    # Both "created" and "already exists" count as success here.
    if isinstance(response, (CreateCache.Success, CreateCache.
        CacheAlreadyExists)):
        return None
    if isinstance(response, CreateCache.Error):
        raise response.inner_exception
    raise Exception(f'Unexpected response cache creation: {response}')
|
Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
|
aconfig_with_context
|
"""Asynchronously patch a runnable config with context getters and setters.
Args:
config: The runnable config.
steps: The runnable steps.
Returns:
The patched runnable config.
"""
return _config_with_context(config, steps, _asetter, _agetter, asyncio.Event)
|
def aconfig_with_context(config: RunnableConfig, steps: List[Runnable]
    ) ->RunnableConfig:
    """Asynchronously patch a runnable config with context getters and setters.
    Args:
        config: The runnable config.
        steps: The runnable steps.
    Returns:
        The patched runnable config.
    """
    # Async variant: uses asyncio.Event plus the async setter/getter pair.
    patched = _config_with_context(config, steps, _asetter, _agetter,
        asyncio.Event)
    return patched
|
Asynchronously patch a runnable config with context getters and setters.
Args:
config: The runnable config.
steps: The runnable steps.
Returns:
The patched runnable config.
|
test_chat_google_genai_stream
|
"""Test streaming tokens from Gemini."""
llm = ChatGoogleGenerativeAI(model=_MODEL)
for token in llm.stream("This is a test. Say 'foo'"):
assert isinstance(token.content, str)
|
def test_chat_google_genai_stream() ->None:
    """Test streaming tokens from Gemini."""
    model = ChatGoogleGenerativeAI(model=_MODEL)
    for chunk in model.stream("This is a test. Say 'foo'"):
        assert isinstance(chunk.content, str)
|
Test streaming tokens from Gemini.
|
client_options
|
from google.api_core.client_options import ClientOptions
return ClientOptions(api_endpoint=
f'{self.location_id}-discoveryengine.googleapis.com' if self.
location_id != 'global' else None)
|
@property
def client_options(self) ->'ClientOptions':
    """Client options pointing at the region-specific Discovery Engine endpoint.

    The 'global' location uses the default endpoint (api_endpoint=None).
    """
    from google.api_core.client_options import ClientOptions

    if self.location_id == 'global':
        endpoint = None
    else:
        endpoint = f'{self.location_id}-discoveryengine.googleapis.com'
    return ClientOptions(api_endpoint=endpoint)
| null |
get_parser
|
return TextParser()
|
@staticmethod
def get_parser(**kwargs: Any) ->BaseBlobParser:
    """Return a fresh ``TextParser``; extra kwargs are accepted and ignored."""
    parser = TextParser()
    return parser
| null |
fake_self_query_retriever
|
return SelfQueryRetriever.from_llm(llm=fake_llm, vectorstore=
fake_vectorstore, document_contents='test', metadata_field_info=[
AttributeInfo(name='foo', type='string', description='test')],
structured_query_translator=FakeTranslator())
|
@pytest.fixture()
def fake_self_query_retriever(fake_llm: FakeLLM, fake_vectorstore:
    InMemoryVectorstoreWithSearch) ->SelfQueryRetriever:
    """Build a SelfQueryRetriever wired to the fake LLM/vectorstore fixtures."""
    field_info = [AttributeInfo(name='foo', type='string', description='test')]
    return SelfQueryRetriever.from_llm(llm=fake_llm, vectorstore=
        fake_vectorstore, document_contents='test', metadata_field_info=
        field_info, structured_query_translator=FakeTranslator())
| null |
test_output_message
|
runnable = RunnableLambda(lambda input: AIMessage(content='you said: ' +
'\n'.join([str(m.content) for m in input['history'] if isinstance(m,
HumanMessage)] + [input['input']])))
get_session_history = _get_get_session_history()
with_history = RunnableWithMessageHistory(runnable, get_session_history,
input_messages_key='input', history_messages_key='history')
config: RunnableConfig = {'configurable': {'session_id': '4'}}
output = with_history.invoke({'input': 'hello'}, config)
assert output == AIMessage(content='you said: hello')
output = with_history.invoke({'input': 'good bye'}, config)
assert output == AIMessage(content="""you said: hello
good bye""")
|
def test_output_message() ->None:
    """Round-trip two turns through RunnableWithMessageHistory and check output."""

    def _echo(payload: Dict) ->AIMessage:
        # Echo back every human turn seen so far plus the current input.
        heard = [str(m.content) for m in payload['history'] if isinstance(
            m, HumanMessage)]
        heard.append(payload['input'])
        return AIMessage(content='you said: ' + '\n'.join(heard))

    with_history = RunnableWithMessageHistory(RunnableLambda(_echo),
        _get_get_session_history(), input_messages_key='input',
        history_messages_key='history')
    config: RunnableConfig = {'configurable': {'session_id': '4'}}
    first = with_history.invoke({'input': 'hello'}, config)
    assert first == AIMessage(content='you said: hello')
    second = with_history.invoke({'input': 'good bye'}, config)
    assert second == AIMessage(content='you said: hello\ngood bye')
| null |
test_to_document
|
"""Test to_document method."""
hashed_document = _HashedDocument(page_content='Lorem ipsum dolor sit amet',
metadata={'key': 'value'})
doc = hashed_document.to_document()
assert isinstance(doc, Document)
assert doc.page_content == 'Lorem ipsum dolor sit amet'
assert doc.metadata == {'key': 'value'}
|
def test_to_document() ->None:
    """Test to_document method."""
    hashed = _HashedDocument(page_content='Lorem ipsum dolor sit amet',
        metadata={'key': 'value'})
    converted = hashed.to_document()
    assert isinstance(converted, Document)
    assert converted.page_content == 'Lorem ipsum dolor sit amet'
    assert converted.metadata == {'key': 'value'}
|
Test to_document method.
|
mock_lakefs_client_no_presign_local
|
with patch('langchain_community.document_loaders.lakefs.LakeFSClient'
) as mock_lakefs_client:
mock_lakefs_client.return_value.ls_objects.return_value = [(
'path_bla.txt', 'local:///physical_address_bla')]
mock_lakefs_client.return_value.is_presign_supported.return_value = False
yield mock_lakefs_client.return_value
|
@pytest.fixture
def mock_lakefs_client_no_presign_local() ->Any:
    """Yield a mocked LakeFSClient listing one local object, with presign off."""
    target = 'langchain_community.document_loaders.lakefs.LakeFSClient'
    with patch(target) as client_cls:
        mocked = client_cls.return_value
        mocked.ls_objects.return_value = [('path_bla.txt',
            'local:///physical_address_bla')]
        mocked.is_presign_supported.return_value = False
        yield mocked
| null |
_run
|
"""Use the tool."""
query_params = {'file_url': query, 'attributes_as_list': False}
return self._call_eden_ai(query_params)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the tool."""
    # The query is a file URL; EdenAI expects it under 'file_url'.
    payload = {'file_url': query, 'attributes_as_list': False}
    return self._call_eden_ai(payload)
|
Use the tool.
|
_get_embeddings
|
return AzureOpenAIEmbeddings(azure_deployment=DEPLOYMENT_NAME, api_version=
OPENAI_API_VERSION, openai_api_base=OPENAI_API_BASE, openai_api_key=
OPENAI_API_KEY, **kwargs)
|
def _get_embeddings(**kwargs: Any) ->AzureOpenAIEmbeddings:
    """Construct AzureOpenAIEmbeddings from the module-level test credentials.

    Extra keyword arguments are forwarded verbatim to the constructor.
    """
    return AzureOpenAIEmbeddings(azure_deployment=DEPLOYMENT_NAME,
        api_version=OPENAI_API_VERSION, openai_api_base=OPENAI_API_BASE,
        openai_api_key=OPENAI_API_KEY, **kwargs)
| null |
create
|
"""
Create a ElasticSearchBM25Retriever from a list of texts.
Args:
elasticsearch_url: URL of the Elasticsearch instance to connect to.
index_name: Name of the index to use in Elasticsearch.
k1: BM25 parameter k1.
b: BM25 parameter b.
Returns:
"""
from elasticsearch import Elasticsearch
es = Elasticsearch(elasticsearch_url)
settings = {'analysis': {'analyzer': {'default': {'type': 'standard'}}},
'similarity': {'custom_bm25': {'type': 'BM25', 'k1': k1, 'b': b}}}
mappings = {'properties': {'content': {'type': 'text', 'similarity':
'custom_bm25'}}}
es.indices.create(index=index_name, mappings=mappings, settings=settings)
return cls(client=es, index_name=index_name)
|
@classmethod
def create(cls, elasticsearch_url: str, index_name: str, k1: float=2.0, b:
    float=0.75) ->ElasticSearchBM25Retriever:
    """
    Create a ElasticSearchBM25Retriever from a list of texts.
    Args:
        elasticsearch_url: URL of the Elasticsearch instance to connect to.
        index_name: Name of the index to use in Elasticsearch.
        k1: BM25 parameter k1.
        b: BM25 parameter b.
    Returns:
    """
    from elasticsearch import Elasticsearch

    client = Elasticsearch(elasticsearch_url)
    # Custom BM25 similarity applied to the 'content' text field.
    index_settings = {'analysis': {'analyzer': {'default': {'type':
        'standard'}}}, 'similarity': {'custom_bm25': {'type': 'BM25',
        'k1': k1, 'b': b}}}
    index_mappings = {'properties': {'content': {'type': 'text',
        'similarity': 'custom_bm25'}}}
    client.indices.create(index=index_name, mappings=index_mappings,
        settings=index_settings)
    return cls(client=client, index_name=index_name)
|
Create a ElasticSearchBM25Retriever from a list of texts.
Args:
elasticsearch_url: URL of the Elasticsearch instance to connect to.
index_name: Name of the index to use in Elasticsearch.
k1: BM25 parameter k1.
b: BM25 parameter b.
Returns:
|
convert_message_to_dict
|
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
if 'function_call' in message.additional_kwargs:
message_dict['function_call'] = message.additional_kwargs[
'function_call']
if message_dict['content'] == '':
message_dict['content'] = None
if 'tool_calls' in message.additional_kwargs:
message_dict['tool_calls'] = message.additional_kwargs['tool_calls']
if message_dict['content'] == '':
message_dict['content'] = None
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, FunctionMessage):
message_dict = {'role': 'function', 'content': message.content, 'name':
message.name}
elif isinstance(message, ToolMessage):
message_dict = {'role': 'tool', 'content': message.content,
'tool_call_id': message.tool_call_id}
else:
raise TypeError(f'Got unknown type {message}')
if 'name' in message.additional_kwargs:
message_dict['name'] = message.additional_kwargs['name']
return message_dict
|
def convert_message_to_dict(message: BaseMessage) ->dict:
    """Convert a LangChain message to a dictionary.
    Args:
        message: The LangChain message.
    Returns:
        The dictionary.
    """
    result: Dict[str, Any]
    if isinstance(message, ChatMessage):
        result = {'role': message.role, 'content': message.content}
    elif isinstance(message, HumanMessage):
        result = {'role': 'user', 'content': message.content}
    elif isinstance(message, AIMessage):
        result = {'role': 'assistant', 'content': message.content}
        extras = message.additional_kwargs
        if 'function_call' in extras:
            result['function_call'] = extras['function_call']
            # OpenAI expects null content alongside a function call.
            if result['content'] == '':
                result['content'] = None
        if 'tool_calls' in extras:
            result['tool_calls'] = extras['tool_calls']
            if result['content'] == '':
                result['content'] = None
    elif isinstance(message, SystemMessage):
        result = {'role': 'system', 'content': message.content}
    elif isinstance(message, FunctionMessage):
        result = {'role': 'function', 'content': message.content, 'name':
            message.name}
    elif isinstance(message, ToolMessage):
        result = {'role': 'tool', 'content': message.content,
            'tool_call_id': message.tool_call_id}
    else:
        raise TypeError(f'Got unknown type {message}')
    if 'name' in message.additional_kwargs:
        result['name'] = message.additional_kwargs['name']
    return result
|
Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
|
__mod__
|
"""Create a RedisText "LIKE" filter expression.
Args:
other (str): The text value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisText
>>> filter = RedisText("job") % "engine*" # suffix wild card match
>>> filter = RedisText("job") % "%%engine%%" # fuzzy match w/ LD
>>> filter = RedisText("job") % "engineer|doctor" # contains either term
>>> filter = RedisText("job") % "engineer doctor" # contains both terms
"""
self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LIKE)
return RedisFilterExpression(str(self))
|
def __mod__(self, other: str) ->'RedisFilterExpression':
    """Create a RedisText "LIKE" filter expression.
    Args:
        other (str): The text value to filter on.
    Example:
        >>> from langchain_community.vectorstores.redis import RedisText
        >>> filter = RedisText("job") % "engine*"  # suffix wild card match
        >>> filter = RedisText("job") % "%%engine%%"  # fuzzy match w/ LD
        >>> filter = RedisText("job") % "engineer|doctor"  # contains either term
        >>> filter = RedisText("job") % "engineer doctor"  # contains both terms
    """
    self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LIKE)
    expression_str = str(self)
    return RedisFilterExpression(expression_str)
|
Create a RedisText "LIKE" filter expression.
Args:
other (str): The text value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisText
>>> filter = RedisText("job") % "engine*" # suffix wild card match
>>> filter = RedisText("job") % "%%engine%%" # fuzzy match w/ LD
>>> filter = RedisText("job") % "engineer|doctor" # contains either term
>>> filter = RedisText("job") % "engineer doctor" # contains both terms
|
get_or_create
|
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
"""
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
|
@classmethod
def get_or_create(cls, session: Session, name: str, cmetadata: Optional[
    dict]=None) ->Tuple['CollectionStore', bool]:
    """
    Get or create a collection.
    Returns [Collection, bool] where the bool is True if the collection was created.
    """
    existing = cls.get_by_name(session, name)
    if existing:
        return existing, False
    new_collection = cls(name=name, cmetadata=cmetadata)
    session.add(new_collection)
    session.commit()
    return new_collection, True
|
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
|
_import_shell_tool
|
from langchain_community.tools.shell.tool import ShellTool
return ShellTool
|
def _import_shell_tool() ->Any:
    """Lazily import and return the ``ShellTool`` class."""
    from langchain_community.tools.shell.tool import ShellTool

    return ShellTool
| null |
invoke
|
return self._call_with_config(self._invoke, input, config, **kwargs)
|
def invoke(self, input: Dict[str, Any], config: Optional[RunnableConfig]=
    None, **kwargs: Any) ->Dict[str, Any]:
    """Run ``_invoke`` through ``_call_with_config`` so callbacks apply."""
    handler = self._invoke
    return self._call_with_config(handler, input, config, **kwargs)
| null |
__init__
|
"""Initialize with API token, ids, and key.
Args:
api_token: Diffbot API token.
urls: List of URLs to load.
continue_on_failure: Whether to continue loading other URLs if one fails.
Defaults to True.
"""
self.api_token = api_token
self.urls = urls
self.continue_on_failure = continue_on_failure
|
def __init__(self, api_token: str, urls: List[str], continue_on_failure:
    bool=True):
    """Initialize with API token, ids, and key.
    Args:
        api_token: Diffbot API token.
        urls: List of URLs to load.
        continue_on_failure: Whether to continue loading other URLs if one fails.
            Defaults to True.
    """
    self.continue_on_failure = continue_on_failure
    self.urls = urls
    self.api_token = api_token
|
Initialize with API token, ids, and key.
Args:
api_token: Diffbot API token.
urls: List of URLs to load.
continue_on_failure: Whether to continue loading other URLs if one fails.
Defaults to True.
|
_import_analyticdb
|
from langchain_community.vectorstores.analyticdb import AnalyticDB
return AnalyticDB
|
def _import_analyticdb() ->Any:
    """Lazily import and return the ``AnalyticDB`` vector store class."""
    from langchain_community.vectorstores.analyticdb import AnalyticDB

    return AnalyticDB
| null |
get_video_captions_location
|
response = requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + '/captions/' + query)
return response.json()
|
def get_video_captions_location(self, query: str) ->str:
    """Fetch caption-location metadata for *query* from the NASA media library."""
    url = IMAGE_AND_VIDEO_LIBRARY_URL + '/captions/' + query
    response = requests.get(url)
    return response.json()
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.