method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
validate_client
|
"""Validate that api key and python package exist in environment."""
values['endpoint_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'endpoint_api_key', 'AZUREML_ENDPOINT_API_KEY'))
endpoint_url = get_from_dict_or_env(values, 'endpoint_url',
'AZUREML_ENDPOINT_URL')
http_client = AzureMLEndpointClient(endpoint_url, values['endpoint_api_key'
].get_secret_value())
return http_client
|
@validator('http_client', always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict
    ) ->AzureMLEndpointClient:
    """Validate that api key and python package exist in environment."""
    # Resolve the API key from the field values or the AZUREML_ENDPOINT_API_KEY
    # env var, wrapping it in a SecretStr so it is not accidentally logged.
    values['endpoint_api_key'] = convert_to_secret_str(get_from_dict_or_env
        (values, 'endpoint_api_key', 'AZUREML_ENDPOINT_API_KEY'))
    endpoint_url = get_from_dict_or_env(values, 'endpoint_url',
        'AZUREML_ENDPOINT_URL')
    # Build the client eagerly so a missing key/URL fails at validation time
    # rather than on first request.
    http_client = AzureMLEndpointClient(endpoint_url, values[
        'endpoint_api_key'].get_secret_value())
    return http_client
|
Validate that api key and python package exist in environment.
|
_client_params
|
"""Get the parameters used for the konko client."""
return {**self._default_params}
|
@property
def _client_params(self) ->Dict[str, Any]:
"""Get the parameters used for the konko client."""
return {**self._default_params}
|
Get the parameters used for the konko client.
|
check_intervention_is_valid
|
valid_names = [e.name for e in values['causal_operations'].entities]
for setting in values['intervention'].entity_settings:
if setting.name not in valid_names:
error_msg = f"""
Hypothetical question has an invalid entity name.
`{setting.name}` not in `{valid_names}`
"""
raise ValueError(error_msg)
return values
|
@root_validator
def check_intervention_is_valid(cls, values: dict) ->dict:
    """Ensure every intervention entity is declared by the causal operations."""
    # Entity names the causal model knows about; interventions may only
    # reference these.
    valid_names = [e.name for e in values['causal_operations'].entities]
    for setting in values['intervention'].entity_settings:
        if setting.name not in valid_names:
            error_msg = f"""
                Hypothetical question has an invalid entity name.
                `{setting.name}` not in `{valid_names}`
                """
            raise ValueError(error_msg)
    return values
| null |
save
|
pass
|
def save(self) ->None:
    """Persistence hook; intentionally a no-op for this implementation."""
    return None
| null |
_on_llm_new_token
|
"""Process new LLM token."""
index = self._key_map_by_run_id.get(run.id)
if index is None:
return
self.send_stream.send_nowait(RunLogPatch({'op': 'add', 'path':
f'/logs/{index}/streamed_output_str/-', 'value': token}, {'op': 'add',
'path': f'/logs/{index}/streamed_output/-', 'value': chunk.message if
isinstance(chunk, ChatGenerationChunk) else token}))
|
def _on_llm_new_token(self, run: Run, token: str, chunk: Optional[Union[
    GenerationChunk, ChatGenerationChunk]]) ->None:
    """Process new LLM token."""
    # Runs that were never registered in the key map are not part of the
    # streamed log, so their tokens are silently dropped.
    index = self._key_map_by_run_id.get(run.id)
    if index is None:
        return
    # Emit one JSON-Patch with two 'add' ops: the raw token string, and the
    # structured chunk (the message for chat chunks, else the token itself).
    self.send_stream.send_nowait(RunLogPatch({'op': 'add', 'path':
        f'/logs/{index}/streamed_output_str/-', 'value': token}, {'op':
        'add', 'path': f'/logs/{index}/streamed_output/-', 'value': chunk.
        message if isinstance(chunk, ChatGenerationChunk) else token}))
|
Process new LLM token.
|
parse_array
|
parsed_array: List[Union[int, str]] = []
if re.match('\\[\\d+(,\\s*\\d+)*\\]', array):
parsed_array = [int(i) for i in re.findall('\\d+', array)]
elif re.match('\\[(\\d+)\\.\\.(\\d+)\\]', array):
match = re.match('\\[(\\d+)\\.\\.(\\d+)\\]', array)
if match:
start, end = map(int, match.groups())
parsed_array = list(range(start, end + 1))
else:
raise OutputParserException(
f'Unable to parse the array provided in {array}. Please check the format instructions.'
)
elif re.match('\\[[a-zA-Z0-9_]+(?:,[a-zA-Z0-9_]+)*\\]', array):
match = re.match('\\[[a-zA-Z0-9_]+(?:,[a-zA-Z0-9_]+)*\\]', array)
if match:
parsed_array = list(map(str, match.group().strip('[]').split(',')))
else:
raise OutputParserException(
f'Unable to parse the array provided in {array}. Please check the format instructions.'
)
if not parsed_array:
raise OutputParserException(
f"Invalid array format in '{original_request_params}'. Please check the format instructions."
)
elif isinstance(parsed_array[0], int) and parsed_array[-1
] > self.dataframe.index.max():
raise OutputParserException(
f'The maximum index {parsed_array[-1]} exceeds the maximum index of the Pandas DataFrame {self.dataframe.index.max()}.'
)
return parsed_array, original_request_params.split('[')[0]
|
def parse_array(self, array: str, original_request_params: str) ->Tuple[
    List[Union[int, str]], str]:
    """Parse an array expression such as ``[1,2]``, ``[1..3]`` or ``[a,b]``.

    Args:
        array: The bracketed array portion of the request.
        original_request_params: Full request parameter string; its prefix
            before ``[`` is returned as the second tuple element.

    Returns:
        Tuple of (parsed indices or column names, request prefix).

    Raises:
        OutputParserException: If no pattern matches, or an integer index
            exceeds the DataFrame's maximum index.
    """
    parsed_array: List[Union[int, str]] = []
    if re.match('\\[\\d+(,\\s*\\d+)*\\]', array):
        # Comma-separated integers, e.g. [1, 2, 3].
        parsed_array = [int(i) for i in re.findall('\\d+', array)]
    elif (match := re.match('\\[(\\d+)\\.\\.(\\d+)\\]', array)) is not None:
        # Inclusive integer range, e.g. [1..3] -> [1, 2, 3].  Binding the
        # match once removes the duplicated re.match call and the dead
        # "Unable to parse" branch (the second identical match could never
        # fail after the first succeeded).
        start, end = map(int, match.groups())
        parsed_array = list(range(start, end + 1))
    elif (match := re.match('\\[[a-zA-Z0-9_]+(?:,[a-zA-Z0-9_]+)*\\]', array)
        ) is not None:
        # Comma-separated identifiers (column names), e.g. [name,age].
        parsed_array = list(map(str, match.group().strip('[]').split(',')))
    if not parsed_array:
        raise OutputParserException(
            f"Invalid array format in '{original_request_params}'. Please check the format instructions."
            )
    elif isinstance(parsed_array[0], int) and parsed_array[-1
        ] > self.dataframe.index.max():
        raise OutputParserException(
            f'The maximum index {parsed_array[-1]} exceeds the maximum index of the Pandas DataFrame {self.dataframe.index.max()}.'
            )
    return parsed_array, original_request_params.split('[')[0]
| null |
mock_quip
|
with patch('quip_api.quip.QuipClient') as mock_quip:
yield mock_quip
|
@pytest.fixture
def mock_quip():
    """Yield a mock replacing ``quip_api.quip.QuipClient``.

    The patch is active for the duration of the test so no real Quip API
    calls are made.
    """
    with patch('quip_api.quip.QuipClient') as mock_quip:
        yield mock_quip
| null |
_import_edenai_EdenAiSpeechToTextTool
|
from langchain_community.tools.edenai import EdenAiSpeechToTextTool
return EdenAiSpeechToTextTool
|
def _import_edenai_EdenAiSpeechToTextTool() ->Any:
    """Lazily import and return the ``EdenAiSpeechToTextTool`` class.

    Deferring the import keeps module load fast and avoids a hard
    dependency until the tool is actually requested.
    """
    from langchain_community.tools.edenai import EdenAiSpeechToTextTool
    return EdenAiSpeechToTextTool
| null |
_get_lambda_api
|
return Tool(name=kwargs['awslambda_tool_name'], description=kwargs[
'awslambda_tool_description'], func=LambdaWrapper(**kwargs).run)
|
def _get_lambda_api(**kwargs: Any) ->BaseTool:
    """Build a ``Tool`` that invokes an AWS Lambda via ``LambdaWrapper``.

    Requires 'awslambda_tool_name' and 'awslambda_tool_description' keys in
    kwargs; all kwargs are also forwarded to configure the LambdaWrapper.
    """
    return Tool(name=kwargs['awslambda_tool_name'], description=kwargs[
        'awslambda_tool_description'], func=LambdaWrapper(**kwargs).run)
| null |
_get_relevant_documents
|
messages: List[List[BaseMessage]] = [[HumanMessage(content=query)]]
res = self.llm.generate(messages, connectors=self.connectors, callbacks=
run_manager.get_child(), **kwargs).generations[0][0]
return _get_docs(res)
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]:
    """Retrieve documents by sending the query to the LLM with connectors."""
    # The query is wrapped as a single human message in a single batch;
    # only the first generation of the first prompt is used.
    messages: List[List[BaseMessage]] = [[HumanMessage(content=query)]]
    res = self.llm.generate(messages, connectors=self.connectors, callbacks
        =run_manager.get_child(), **kwargs).generations[0][0]
    return _get_docs(res)
| null |
_chat_with_retry
|
"""
Executes a chat generation method with retry logic using tenacity.
This function is a wrapper that applies a retry mechanism to a provided
chat generation function. It is useful for handling intermittent issues
like network errors or temporary service unavailability.
Args:
generation_method (Callable): The chat generation method to be executed.
**kwargs (Any): Additional keyword arguments to pass to the generation method.
Returns:
Any: The result from the chat generation method.
"""
retry_decorator = _create_retry_decorator()
from google.api_core.exceptions import InvalidArgument
@retry_decorator
def _chat_with_retry(**kwargs: Any) ->Any:
try:
return generation_method(**kwargs)
except InvalidArgument as e:
raise ChatGoogleGenerativeAIError(
f'Invalid argument provided to Gemini: {e}') from e
except Exception as e:
raise e
return _chat_with_retry(**kwargs)
|
def _chat_with_retry(generation_method: Callable, **kwargs: Any) ->Any:
    """
    Executes a chat generation method with retry logic using tenacity.
    This function is a wrapper that applies a retry mechanism to a provided
    chat generation function. It is useful for handling intermittent issues
    like network errors or temporary service unavailability.
    Args:
        generation_method (Callable): The chat generation method to be executed.
        **kwargs (Any): Additional keyword arguments to pass to the generation method.
    Returns:
        Any: The result from the chat generation method.
    Raises:
        ChatGoogleGenerativeAIError: If the service rejects the request as
            invalid (not retryable).
    """
    retry_decorator = _create_retry_decorator()
    # Imported lazily so the google dependency is only needed when used.
    from google.api_core.exceptions import InvalidArgument

    @retry_decorator
    def _chat_with_retry(**kwargs: Any) ->Any:
        try:
            return generation_method(**kwargs)
        except InvalidArgument as e:
            # Invalid requests are not transient; surface a clearer error
            # and chain the cause instead of retrying.
            raise ChatGoogleGenerativeAIError(
                f'Invalid argument provided to Gemini: {e}') from e
        # Other exceptions propagate unchanged so tenacity can decide
        # whether to retry them.  The previous ``except Exception as e:
        # raise e`` was a no-op that only truncated the traceback.
    return _chat_with_retry(**kwargs)
|
Executes a chat generation method with retry logic using tenacity.
This function is a wrapper that applies a retry mechanism to a provided
chat generation function. It is useful for handling intermittent issues
like network errors or temporary service unavailability.
Args:
generation_method (Callable): The chat generation method to be executed.
**kwargs (Any): Additional keyword arguments to pass to the generation method.
Returns:
Any: The result from the chat generation method.
|
test_extract_functions_classes
|
segmenter = PythonSegmenter(self.example_code)
extracted_code = segmenter.extract_functions_classes()
self.assertEqual(extracted_code, self.expected_extracted_code)
|
def test_extract_functions_classes(self) ->None:
    """Extracted functions/classes must match the expected fixture output."""
    segmenter = PythonSegmenter(self.example_code)
    extracted_code = segmenter.extract_functions_classes()
    self.assertEqual(extracted_code, self.expected_extracted_code)
| null |
__str__
|
return f'ContextGet({_print_keys(self.key)})'
|
def __str__(self) ->str:
    """Readable representation showing the key(s) this getter fetches."""
    rendered_keys = _print_keys(self.key)
    return 'ContextGet({})'.format(rendered_keys)
| null |
_run
|
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
start_datetime_query = dt.strptime(start_datetime, UTC_FORMAT)
end_datetime_query = dt.strptime(end_datetime, UTC_FORMAT)
q = calendar.new_query('start').greater_equal(start_datetime_query)
q.chain('and').on_attribute('end').less_equal(end_datetime_query)
events = calendar.get_events(query=q, include_recurring=True, limit=max_results
)
output_events = []
for event in events:
output_event = {}
output_event['organizer'] = event.organizer
output_event['subject'] = event.subject
if truncate:
output_event['body'] = clean_body(event.body)[:truncate_limit]
else:
output_event['body'] = clean_body(event.body)
time_zone = start_datetime_query.tzinfo
output_event['start_datetime'] = event.start.astimezone(time_zone
).strftime(UTC_FORMAT)
output_event['end_datetime'] = event.end.astimezone(time_zone).strftime(
UTC_FORMAT)
output_event['modified_date'] = event.modified.astimezone(time_zone
).strftime(UTC_FORMAT)
output_events.append(output_event)
return output_events
|
def _run(self, start_datetime: str, end_datetime: str, max_results: int=10,
    truncate: bool=True, run_manager: Optional[CallbackManagerForToolRun]=
    None, truncate_limit: int=150) ->List[Dict[str, Any]]:
    """Query the default O365 calendar for events within a datetime window.

    Args:
        start_datetime: Window start, formatted per UTC_FORMAT.
        end_datetime: Window end, formatted per UTC_FORMAT.
        max_results: Maximum number of events to fetch.
        truncate: Whether to cap each event body at ``truncate_limit`` chars.
        run_manager: Optional callback manager (unused in this method).
        truncate_limit: Character cap applied to bodies when truncating.

    Returns:
        A list of dicts with organizer, subject, body and formatted dates.
    """
    schedule = self.account.schedule()
    calendar = schedule.get_default_calendar()
    start_datetime_query = dt.strptime(start_datetime, UTC_FORMAT)
    end_datetime_query = dt.strptime(end_datetime, UTC_FORMAT)
    # Events that start on/after the window start AND end on/before its end.
    q = calendar.new_query('start').greater_equal(start_datetime_query)
    q.chain('and').on_attribute('end').less_equal(end_datetime_query)
    events = calendar.get_events(query=q, include_recurring=True, limit=
        max_results)
    output_events = []
    for event in events:
        output_event = {}
        output_event['organizer'] = event.organizer
        output_event['subject'] = event.subject
        if truncate:
            output_event['body'] = clean_body(event.body)[:truncate_limit]
        else:
            output_event['body'] = clean_body(event.body)
        # Render all timestamps in the timezone parsed from the start query.
        time_zone = start_datetime_query.tzinfo
        output_event['start_datetime'] = event.start.astimezone(time_zone
            ).strftime(UTC_FORMAT)
        output_event['end_datetime'] = event.end.astimezone(time_zone
            ).strftime(UTC_FORMAT)
        output_event['modified_date'] = event.modified.astimezone(time_zone
            ).strftime(UTC_FORMAT)
        output_events.append(output_event)
    return output_events
| null |
_format_headers
|
"""Format headers for requests."""
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
if self.zapier_nla_oauth_access_token:
headers.update({'Authorization':
f'Bearer {self.zapier_nla_oauth_access_token}'})
else:
headers.update({'X-API-Key': self.zapier_nla_api_key})
return headers
|
def _format_headers(self) ->Dict[str, str]:
"""Format headers for requests."""
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'
}
if self.zapier_nla_oauth_access_token:
headers.update({'Authorization':
f'Bearer {self.zapier_nla_oauth_access_token}'})
else:
headers.update({'X-API-Key': self.zapier_nla_api_key})
return headers
|
Format headers for requests.
|
_import_e2b_data_analysis
|
from langchain_community.tools.e2b_data_analysis.tool import E2BDataAnalysisTool
return E2BDataAnalysisTool
|
def _import_e2b_data_analysis() ->Any:
    """Lazily import and return the ``E2BDataAnalysisTool`` class.

    Deferring the import avoids a hard dependency on the e2b tool module
    until it is actually requested.
    """
    from langchain_community.tools.e2b_data_analysis.tool import E2BDataAnalysisTool
    return E2BDataAnalysisTool
| null |
validate_environment
|
"""Validate that the python package exists in environment."""
try:
from semanticscholar import SemanticScholar
sch = SemanticScholar()
values['semanticscholar_search'] = sch.search_paper
except ImportError:
raise ImportError(
'Could not import Semanticscholar python package. Please install it with `pip install semanticscholar`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that the python package exists in environment."""
    try:
        from semanticscholar import SemanticScholar
        # Store the bound search function so later calls don't re-import.
        sch = SemanticScholar()
        values['semanticscholar_search'] = sch.search_paper
    except ImportError:
        raise ImportError(
            'Could not import Semanticscholar python package. Please install it with `pip install semanticscholar`.'
        )
    return values
|
Validate that the python package exists in environment.
|
_to_chat_result
|
chat_generations = []
for g in llm_result.generations[0]:
chat_generation = ChatGeneration(message=AIMessage(content=g.text),
generation_info=g.generation_info)
chat_generations.append(chat_generation)
return ChatResult(generations=chat_generations, llm_output=llm_result.
llm_output)
|
@staticmethod
def _to_chat_result(llm_result: LLMResult) ->ChatResult:
    """Convert an LLMResult's first generation list into a ChatResult.

    Each generated text becomes an AIMessage, carrying over the original
    generation_info; the llm_output is passed through unchanged.
    """
    chat_generations = [ChatGeneration(message=AIMessage(content=g.text),
        generation_info=g.generation_info) for g in llm_result.generations[0]]
    return ChatResult(generations=chat_generations,
        llm_output=llm_result.llm_output)
| null |
similarity_search_with_score
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of documents most similar to the query with distance.
"""
query_embedding = self.embedding.embed_query(query)
matches = self.index.search(np.array(query_embedding), k)
docs_with_scores: List[Tuple[Document, float]] = []
for id, score in zip(matches.keys, matches.distances):
doc = self.docstore.search(str(id))
if not isinstance(doc, Document):
raise ValueError(f'Could not find document for id {id}, got {doc}')
docs_with_scores.append((doc, score))
return docs_with_scores
|
def similarity_search_with_score(self, query: str, k: int=4) ->List[Tuple[
    Document, float]]:
    """Return docs most similar to query.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
    Returns:
        List of documents most similar to the query with distance.
    Raises:
        ValueError: If a matched id has no Document in the docstore.
    """
    query_embedding = self.embedding.embed_query(query)
    matches = self.index.search(np.array(query_embedding), k)
    docs_with_scores: List[Tuple[Document, float]] = []
    # Renamed loop variable from `id` to `doc_id` to stop shadowing the
    # builtin `id`.  Docstore keys are strings, hence str(doc_id).
    for doc_id, score in zip(matches.keys, matches.distances):
        doc = self.docstore.search(str(doc_id))
        if not isinstance(doc, Document):
            raise ValueError(f'Could not find document for id {doc_id}, got {doc}')
        docs_with_scores.append((doc, score))
    return docs_with_scores
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of documents most similar to the query with distance.
|
_merge_module_members
|
"""Merge module members."""
classes_: List[ClassInfo] = []
functions: List[FunctionInfo] = []
for module in module_members:
classes_.extend(module['classes_'])
functions.extend(module['functions'])
return ModuleMembers(classes_=classes_, functions=functions)
|
def _merge_module_members(module_members: Sequence[ModuleMembers]
    ) ->ModuleMembers:
    """Merge the classes and functions of several modules into one record."""
    merged_classes: List[ClassInfo] = [cls_info for members in
        module_members for cls_info in members['classes_']]
    merged_functions: List[FunctionInfo] = [fn_info for members in
        module_members for fn_info in members['functions']]
    return ModuleMembers(classes_=merged_classes, functions=merged_functions)
|
Merge module members.
|
get_pipeline
|
"""Get pipeline for testing."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = 'facebook/bart-base'
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline('feature-extraction', model=model, tokenizer=tokenizer)
|
def get_pipeline() ->Any:
    """Get pipeline for testing."""
    # Imported lazily so transformers is only required when the test runs.
    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
    # A small model keeps the download and runtime manageable for tests.
    model_id = 'facebook/bart-base'
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return pipeline('feature-extraction', model=model, tokenizer=tokenizer)
|
Get pipeline for testing.
|
get_table_info
|
"""Get information about specified tables."""
tables_requested = self._get_tables_to_query(table_names)
if tables_requested is None:
return 'No (valid) tables requested.'
tables_todo = self._get_tables_todo(tables_requested)
for table in tables_todo:
self._get_schema(table)
return self._get_schema_for_tables(tables_requested)
|
def get_table_info(self, table_names: Optional[Union[List[str], str]]=None
    ) ->str:
    """Get information about specified tables.

    Args:
        table_names: A single table name, a list of names, or None for all.

    Returns:
        The rendered schema for the requested tables, or a notice string
        when no valid tables were requested.
    """
    tables_requested = self._get_tables_to_query(table_names)
    if tables_requested is None:
        return 'No (valid) tables requested.'
    # Fetch only schemas that have not been retrieved yet before rendering.
    tables_todo = self._get_tables_todo(tables_requested)
    for table in tables_todo:
        self._get_schema(table)
    return self._get_schema_for_tables(tables_requested)
|
Get information about specified tables.
|
_tracing_v2_is_enabled
|
return env_var_is_set('LANGCHAIN_TRACING_V2') or tracing_v2_callback_var.get(
) is not None or get_run_tree_context() is not None or env_var_is_set(
'LANGCHAIN_TRACING')
|
def _tracing_v2_is_enabled() ->bool:
    """Return True when tracing is enabled via either env var, a
    context-local tracing callback, or an ambient run-tree context."""
    return env_var_is_set('LANGCHAIN_TRACING_V2'
        ) or tracing_v2_callback_var.get() is not None or get_run_tree_context(
        ) is not None or env_var_is_set('LANGCHAIN_TRACING')
| null |
test_json_schema_evaluator_missing_property
|
prediction = '{"name": "John"}'
reference = {'type': 'object', 'properties': {'name': {'type': 'string'},
'age': {'type': 'integer'}}, 'required': ['name', 'age']}
result = json_schema_evaluator._evaluate_strings(prediction=prediction,
reference=reference)
assert result['score'] is False
assert 'reasoning' in result
|
@pytest.mark.requires('jsonschema')
def test_json_schema_evaluator_missing_property(json_schema_evaluator:
    JsonSchemaEvaluator) ->None:
    """A prediction missing a required property must fail schema validation."""
    prediction = '{"name": "John"}'
    # 'age' is required by the schema but absent from the prediction.
    reference = {'type': 'object', 'properties': {'name': {'type': 'string'
        }, 'age': {'type': 'integer'}}, 'required': ['name', 'age']}
    result = json_schema_evaluator._evaluate_strings(prediction=prediction,
        reference=reference)
    assert result['score'] is False
    assert 'reasoning' in result
| null |
run_as_str
|
"""Same as run, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
|
def run_as_str(self, *args, **kwargs) ->str:
    """Same as ``run``, but returns the result serialized as a JSON string
    for inserting back into an LLM prompt."""
    return json.dumps(self.run(*args, **kwargs))
|
Same as run, but returns a stringified version of the JSON for
inserting back into an LLM.
|
default_get_input
|
"""Return the compression chain input."""
return {'question': query, 'context': doc.page_content}
|
def default_get_input(query: str, doc: Document) ->Dict[str, Any]:
    """Map a query and a document to the compression chain's input dict."""
    chain_input: Dict[str, Any] = {'question': query,
        'context': doc.page_content}
    return chain_input
|
Return the compression chain input.
|
delete
|
"""Delete by vector ids.
Args:
ids (Optional[List[str]]): List of ids to delete.
concurrency (Optional[int]): max number of threads issuing
single-doc delete requests. Defaults to instance-level setting.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
if kwargs:
warnings.warn(
f"Method 'delete' of AstraDB vector store invoked with unsupported arguments ({', '.join(sorted(kwargs.keys()))}), which will be ignored."
)
if ids is None:
raise ValueError('No ids provided to delete.')
_max_workers = concurrency or self.bulk_delete_concurrency
with ThreadPoolExecutor(max_workers=_max_workers) as tpe:
_ = list(tpe.map(self.delete_by_document_id, ids))
return True
|
def delete(self, ids: Optional[List[str]]=None, concurrency: Optional[int]=
    None, **kwargs: Any) ->Optional[bool]:
    """Delete by vector ids.
    Args:
        ids (Optional[List[str]]): List of ids to delete.
        concurrency (Optional[int]): max number of threads issuing
            single-doc delete requests. Defaults to instance-level setting.
    Returns:
        Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.
    Raises:
        ValueError: If ``ids`` is None.
    """
    # Extra kwargs are tolerated with a warning (interface compatibility
    # with other vector stores) rather than raising.
    if kwargs:
        warnings.warn(
            f"Method 'delete' of AstraDB vector store invoked with unsupported arguments ({', '.join(sorted(kwargs.keys()))}), which will be ignored."
        )
    if ids is None:
        raise ValueError('No ids provided to delete.')
    # Fan out one delete request per document id across a thread pool.
    _max_workers = concurrency or self.bulk_delete_concurrency
    with ThreadPoolExecutor(max_workers=_max_workers) as tpe:
        _ = list(tpe.map(self.delete_by_document_id, ids))
    return True
|
Delete by vector ids.
Args:
ids (Optional[List[str]]): List of ids to delete.
concurrency (Optional[int]): max number of threads issuing
single-doc delete requests. Defaults to instance-level setting.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
|
_run
|
try:
logging.getLogger(__name__)
result = self.client.conversations_list()
channels = result['channels']
filtered_result = [{key: channel[key] for key in ('id', 'name',
'created', 'num_members')} for channel in channels if 'id' in
channel and 'name' in channel and 'created' in channel and
'num_members' in channel]
return json.dumps(filtered_result)
except Exception as e:
return 'Error creating conversation: {}'.format(e)
|
def _run(self, run_manager: Optional[CallbackManagerForToolRun]=None) ->str:
    """Return a JSON string describing the workspace's conversations.

    Each entry keeps only id, name, created and num_members; channels
    missing any of those keys are skipped entirely.  Errors are reported
    in the returned string rather than raised (best-effort tool behavior).
    """
    try:
        result = self.client.conversations_list()
        channels = result['channels']
        filtered_result = [{key: channel[key] for key in ('id', 'name',
            'created', 'num_members')} for channel in channels if 'id' in
            channel and 'name' in channel and 'created' in channel and
            'num_members' in channel]
        return json.dumps(filtered_result)
    except Exception as e:
        # Fixed message: this tool *lists* conversations (the old text said
        # "creating").  Also dropped the unused logging.getLogger(__name__)
        # expression whose result was discarded.
        return 'Error listing conversations: {}'.format(e)
| null |
__init__
|
"""Initialize with urls or OPML."""
if (urls is None) == (opml is None):
raise ValueError(
'Provide either the urls or the opml argument, but not both.')
self.urls = urls
self.opml = opml
self.continue_on_failure = continue_on_failure
self.show_progress_bar = show_progress_bar
self.newsloader_kwargs = newsloader_kwargs
|
def __init__(self, urls: Optional[Sequence[str]]=None, opml: Optional[str]=
None, continue_on_failure: bool=True, show_progress_bar: bool=False, **
newsloader_kwargs: Any) ->None:
"""Initialize with urls or OPML."""
if (urls is None) == (opml is None):
raise ValueError(
'Provide either the urls or the opml argument, but not both.')
self.urls = urls
self.opml = opml
self.continue_on_failure = continue_on_failure
self.show_progress_bar = show_progress_bar
self.newsloader_kwargs = newsloader_kwargs
|
Initialize with urls or OPML.
|
test_error
|
"""Test question that raises error."""
with pytest.raises(ValueError):
fake_llm_math_chain.run('foo')
|
@pytest.mark.requires('numexpr')
def test_error(fake_llm_math_chain: LLMMathChain) ->None:
    """Test question that raises error."""
    # 'foo' is not a parseable math expression, so the chain must raise.
    with pytest.raises(ValueError):
        fake_llm_math_chain.run('foo')
|
Test question that raises error.
|
convert_message_chunk_to_message
|
if isinstance(message_chunk, HumanMessageChunk):
return HumanMessage(content=message_chunk.content)
elif isinstance(message_chunk, AIMessageChunk):
return AIMessage(content=message_chunk.content)
elif isinstance(message_chunk, SystemMessageChunk):
return SystemMessage(content=message_chunk.content)
elif isinstance(message_chunk, ChatMessageChunk):
return ChatMessage(role=message_chunk.role, content=message_chunk.content)
else:
raise TypeError(f'Got unknown type {message_chunk}')
|
def convert_message_chunk_to_message(message_chunk: BaseMessageChunk
    ) ->BaseMessage:
    """Convert a streamed message chunk into the equivalent full message.

    Raises:
        TypeError: If the chunk type is not one of the known chunk classes.
    """
    # Ordered dispatch table for the content-only chunk types.
    converters = ((HumanMessageChunk, HumanMessage), (AIMessageChunk,
        AIMessage), (SystemMessageChunk, SystemMessage))
    for chunk_cls, message_cls in converters:
        if isinstance(message_chunk, chunk_cls):
            return message_cls(content=message_chunk.content)
    # ChatMessageChunk additionally carries a role.
    if isinstance(message_chunk, ChatMessageChunk):
        return ChatMessage(role=message_chunk.role, content=message_chunk.
            content)
    raise TypeError(f'Got unknown type {message_chunk}')
| null |
_Nonlocal
|
self.fill('nonlocal ')
interleave(lambda : self.write(', '), self.write, t.names)
|
def _Nonlocal(self, t):
    """Emit a ``nonlocal`` statement for the given AST node."""
    self.fill('nonlocal ')
    # Write the names comma-separated, e.g. ``nonlocal a, b``.
    interleave(lambda : self.write(', '), self.write, t.names)
| null |
test_jinachat_system_message
|
"""Test JinaChat wrapper with system message."""
chat = JinaChat(max_tokens=10)
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_jinachat_system_message() ->None:
    """Test JinaChat wrapper with system message."""
    # max_tokens kept small so the live API call stays cheap.
    chat = JinaChat(max_tokens=10)
    system_message = SystemMessage(content='You are to chat with the user.')
    human_message = HumanMessage(content='Hello')
    response = chat([system_message, human_message])
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str)
|
Test JinaChat wrapper with system message.
|
_get_relevant_documents
|
import requests
headers = {'X-API-Key': self.ydc_api_key}
if self.endpoint_type == 'web':
results = requests.get(f'https://api.ydc-index.io/search?query={query}',
headers=headers).json()
docs = []
n_hits = self.n_hits or len(results['hits'])
for hit in results['hits'][:n_hits]:
n_snippets_per_hit = self.n_snippets_per_hit or len(hit['snippets'])
for snippet in hit['snippets'][:n_snippets_per_hit]:
docs.append(Document(page_content=snippet))
if self.k is not None and len(docs) >= self.k:
return docs
return docs
elif self.endpoint_type == 'snippet':
results = requests.get(
f'https://api.ydc-index.io/snippet_search?query={query}', headers=
headers).json()
return [Document(page_content=snippet) for snippet in results]
else:
raise RuntimeError(f'Invalid endpoint type provided {self.endpoint_type}')
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Fetch snippet documents from the you.com index for ``query``.

    For endpoint_type 'web', walks hits and their snippets while honoring
    the n_hits, n_snippets_per_hit and k caps; for 'snippet', returns every
    snippet from the snippet_search endpoint.

    Raises:
        RuntimeError: For any other endpoint type.
    """
    import requests
    # NOTE(review): requests.get is called without a timeout — consider
    # adding one so a slow endpoint cannot hang the retriever.
    headers = {'X-API-Key': self.ydc_api_key}
    if self.endpoint_type == 'web':
        results = requests.get(f'https://api.ydc-index.io/search?query={query}'
            , headers=headers).json()
        docs = []
        # None caps mean "take everything available".
        n_hits = self.n_hits or len(results['hits'])
        for hit in results['hits'][:n_hits]:
            n_snippets_per_hit = self.n_snippets_per_hit or len(hit['snippets']
                )
            for snippet in hit['snippets'][:n_snippets_per_hit]:
                docs.append(Document(page_content=snippet))
                # Early exit once k documents have been collected.
                if self.k is not None and len(docs) >= self.k:
                    return docs
        return docs
    elif self.endpoint_type == 'snippet':
        results = requests.get(
            f'https://api.ydc-index.io/snippet_search?query={query}',
            headers=headers).json()
        return [Document(page_content=snippet) for snippet in results]
    else:
        raise RuntimeError(
            f'Invalid endpoint type provided {self.endpoint_type}')
| null |
delete
|
"""Delete by Zep vector UUIDs.
Parameters
----------
ids : Optional[List[str]]
The UUIDs of the vectors to delete.
Raises
------
ValueError
If no UUIDs are provided.
"""
if ids is None or len(ids) == 0:
raise ValueError('No uuids provided to delete.')
if self._collection is None:
raise ValueError('No collection name provided.')
for u in ids:
self._collection.delete_document(u)
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->None:
    """Delete by Zep vector UUIDs.
    Parameters
    ----------
    ids : Optional[List[str]]
        The UUIDs of the vectors to delete.
    Raises
    ------
    ValueError
        If no UUIDs are provided, or no collection is configured.
    """
    if not ids:
        raise ValueError('No uuids provided to delete.')
    if self._collection is None:
        raise ValueError('No collection name provided.')
    # Zep exposes no bulk delete, so remove documents one at a time.
    for uuid in ids:
        self._collection.delete_document(uuid)
|
Delete by Zep vector UUIDs.
Parameters
----------
ids : Optional[List[str]]
The UUIDs of the vectors to delete.
Raises
------
ValueError
If no UUIDs are provided.
|
_chain_type
|
return 'llm_checker_chain'
|
@property
def _chain_type(self) ->str:
return 'llm_checker_chain'
| null |
dict
|
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict['_type'] = self._llm_type
return starter_dict
|
def dict(self, **kwargs: Any) ->Dict:
    """Return a dictionary of the LLM."""
    # Copy the identifying params and tag the copy with the LLM's type.
    return {**self._identifying_params, '_type': self._llm_type}
|
Return a dictionary of the LLM.
|
__init__
|
"""Initialize with Xata client."""
try:
from xata.client import XataClient
except ImportError:
raise ImportError(
'Could not import xata python package. Please install it with `pip install xata`.'
)
self._client = XataClient(api_key=api_key, db_url=db_url)
self._embedding: Embeddings = embedding
self._table_name = table_name or 'vectors'
|
def __init__(self, api_key: str, db_url: str, embedding: Embeddings,
    table_name: str) ->None:
    """Initialize with Xata client.

    Args:
        api_key: Xata API key.
        db_url: URL of the Xata database.
        embedding: Embeddings implementation used for vectorization.
        table_name: Target table; falsy values fall back to 'vectors'.
    """
    try:
        from xata.client import XataClient
    except ImportError:
        raise ImportError(
            'Could not import xata python package. Please install it with `pip install xata`.'
        )
    self._client = XataClient(api_key=api_key, db_url=db_url)
    self._embedding: Embeddings = embedding
    # NOTE(review): the signature types table_name as a required str, yet the
    # fallback below implies '' / None are expected — confirm intended API.
    self._table_name = table_name or 'vectors'
|
Initialize with Xata client.
|
test_caching
|
"""Test caching behavior."""
set_llm_cache(InMemoryCache())
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
output = llm.generate(['foo', 'bar', 'foo'])
expected_cache_output = [Generation(text='foo')]
cache_output = get_llm_cache().lookup('bar', llm_string)
assert cache_output == expected_cache_output
set_llm_cache(None)
expected_generations = [[Generation(text='fizz')], [Generation(text='foo')],
[Generation(text='fizz')]]
expected_output = LLMResult(generations=expected_generations, llm_output=None)
assert output == expected_output
|
def test_caching() ->None:
    """Test caching behavior."""
    # Install an in-memory LLM cache and pre-seed it for prompt 'foo'.
    set_llm_cache(InMemoryCache())
    llm = FakeLLM()
    params = llm.dict()
    params['stop'] = None
    # Cache entries are keyed by (prompt, llm_string); llm_string is derived
    # from the sorted parameter items so equal configs share cache entries.
    llm_string = str(sorted([(k, v) for k, v in params.items()]))
    get_llm_cache().update('foo', llm_string, [Generation(text='fizz')])
    output = llm.generate(['foo', 'bar', 'foo'])
    # 'bar' was a cache miss, so the generated result (presumably FakeLLM's
    # default completion 'foo' — see expected_generations below) was written
    # back to the cache by generate().
    expected_cache_output = [Generation(text='foo')]
    cache_output = get_llm_cache().lookup('bar', llm_string)
    assert cache_output == expected_cache_output
    # Disable caching before asserting on the aggregate result.
    set_llm_cache(None)
    expected_generations = [[Generation(text='fizz')], [Generation(text=
        'foo')], [Generation(text='fizz')]]
    expected_output = LLMResult(generations=expected_generations,
        llm_output=None)
    assert output == expected_output
|
Test caching behavior.
|
_evaluate_strings
|
"""Evaluate the embedding distance between a prediction and
reference.
Args:
prediction (str): The output string from the first model.
reference (str): The reference string (required)
callbacks (Callbacks, optional): The callbacks to use.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- score: The embedding distance between the two
predictions.
"""
result = self(inputs={'prediction': prediction, 'reference': reference},
callbacks=callbacks, tags=tags, metadata=metadata, include_run_info=
include_run_info)
return self._prepare_output(result)
|
def _evaluate_strings(self, *, prediction: str, reference: Optional[str]=
    None, callbacks: Callbacks=None, tags: Optional[List[str]]=None,
    metadata: Optional[Dict[str, Any]]=None, include_run_info: bool=False,
    **kwargs: Any) ->dict:
    """Evaluate the embedding distance between a prediction and a reference.

    Args:
        prediction (str): The output string from the first model.
        reference (str): The reference string (required).
        callbacks (Callbacks, optional): The callbacks to use.
        **kwargs (Any): Additional keyword arguments.

    Returns:
        dict: A dictionary containing:
            - score: The embedding distance between the two predictions.
    """
    chain_inputs = {'prediction': prediction, 'reference': reference}
    raw_result = self(inputs=chain_inputs, callbacks=callbacks, tags=tags,
        metadata=metadata, include_run_info=include_run_info)
    return self._prepare_output(raw_result)
|
Evaluate the embedding distance between a prediction and
reference.
Args:
prediction (str): The output string from the first model.
reference (str): The reference string (required)
callbacks (Callbacks, optional): The callbacks to use.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- score: The embedding distance between the two
predictions.
|
create_schema_from_function
|
"""Create a pydantic schema from a function's signature.
Args:
    model_name: Name to assign to the generated pydantic schema
func: Function to generate the schema from
Returns:
A pydantic model with the same arguments as the function
"""
validated = validate_arguments(func, config=_SchemaConfig)
inferred_model = validated.model
if 'run_manager' in inferred_model.__fields__:
del inferred_model.__fields__['run_manager']
if 'callbacks' in inferred_model.__fields__:
del inferred_model.__fields__['callbacks']
valid_properties = _get_filtered_args(inferred_model, func)
return _create_subset_model(f'{model_name}Schema', inferred_model, list(
valid_properties))
|
def create_schema_from_function(model_name: str, func: Callable) ->Type[
    BaseModel]:
    """Create a pydantic schema from a function's signature.

    Args:
        model_name: Name to assign to the generated pydantic schema
        func: Function to generate the schema from

    Returns:
        A pydantic model with the same arguments as the function
    """
    inferred_model = validate_arguments(func, config=_SchemaConfig).model
    # Drop framework-injected parameters that are not part of the tool's
    # user-facing argument schema.
    for injected_param in ('run_manager', 'callbacks'):
        if injected_param in inferred_model.__fields__:
            del inferred_model.__fields__[injected_param]
    valid_properties = _get_filtered_args(inferred_model, func)
    return _create_subset_model(f'{model_name}Schema', inferred_model,
        list(valid_properties))
|
Create a pydantic schema from a function's signature.
Args:
    model_name: Name to assign to the generated pydantic schema
func: Function to generate the schema from
Returns:
A pydantic model with the same arguments as the function
|
_llm_type
|
"""Return type of llm."""
|
@property
@abstractmethod
def _llm_type(self) ->str:
    """Return a short string identifying this LLM implementation.

    Subclasses must override this; the value is recorded under the
    ``_type`` key when the LLM is serialized via ``dict()``.
    """
|
Return type of llm.
|
similarity_search_with_score
|
emb = self._embedding.embed_query(query)
docs_and_scores = self.similarity_search_with_score_by_vector(emb, k,
radius, epsilon, timeout, grpc_metadata)
return docs_and_scores
|
def similarity_search_with_score(self, query: str, k: int=4, radius: float=
    -1.0, epsilon: float=0.01, timeout: int=3000000000, grpc_metadata:
    Optional[Any]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Return documents most similar to ``query`` together with scores.

    The query text is embedded with the store's embedding function and the
    search is delegated to ``similarity_search_with_score_by_vector``.
    """
    query_vector = self._embedding.embed_query(query)
    return self.similarity_search_with_score_by_vector(query_vector, k,
        radius, epsilon, timeout, grpc_metadata)
| null |
_generate_numbered_list
|
"""
Generate a numbered list from given items based on the item_type.
Args:
items (list): A list of items to be numbered.
item_type (str, optional): The type of items in the list.
Defaults to 'list'.
Returns:
str: The formatted numbered list.
"""
if item_type == 'command':
command_strings = [f'{i + 1}. {self._generate_command_string(item)}' for
i, item in enumerate(items)]
finish_description = (
'use this to signal that you have finished all your objectives')
finish_args = (
'"response": "final response to let people know you have finished your objectives"'
)
finish_string = (
f'{len(items) + 1}. {FINISH_NAME}: {finish_description}, args: {finish_args}'
)
return '\n'.join(command_strings + [finish_string])
else:
return '\n'.join(f'{i + 1}. {item}' for i, item in enumerate(items))
|
def _generate_numbered_list(self, items: list, item_type: str='list') ->str:
    """
    Generate a numbered list from given items based on the item_type.

    Args:
        items (list): A list of items to be numbered.
        item_type (str, optional): The type of items in the list.
            Defaults to 'list'.

    Returns:
        str: The formatted numbered list.
    """
    if item_type != 'command':
        # Plain items are rendered verbatim with a 1-based index.
        return '\n'.join(f'{i + 1}. {item}' for i, item in enumerate(items))
    numbered_lines = [f'{i + 1}. {self._generate_command_string(item)}' for
        i, item in enumerate(items)]
    finish_description = (
        'use this to signal that you have finished all your objectives')
    finish_args = (
        '"response": "final response to let people know you have finished your objectives"'
        )
    # The synthetic "finish" command is always appended as the last entry.
    numbered_lines.append(
        f'{len(items) + 1}. {FINISH_NAME}: {finish_description}, args: {finish_args}'
        )
    return '\n'.join(numbered_lines)
|
Generate a numbered list from given items based on the item_type.
Args:
items (list): A list of items to be numbered.
item_type (str, optional): The type of items in the list.
Defaults to 'list'.
Returns:
str: The formatted numbered list.
|
get_format_instructions
|
return FORMAT_INSTRUCTIONS
|
def get_format_instructions(self) ->str:
    """Return the module-level format instructions for this parser."""
    return FORMAT_INSTRUCTIONS
| null |
visit_operation
|
args = [arg.accept(self) for arg in operation.arguments]
return {'operator': self._format_func(operation.operator), 'operands': args}
|
def visit_operation(self, operation: Operation) ->Dict:
    """Serialize an ``Operation`` node as an operator/operands mapping."""
    serialized_args = [child.accept(self) for child in operation.arguments]
    return {'operator': self._format_func(operation.operator), 'operands':
        serialized_args}
| null |
_parse_search_response
|
"""Parse the search response into a list of Documents with score."""
columns = [col['name'] for col in search_resp.get('manifest', dict()).get(
'columns', [])]
docs_with_score = []
for result in search_resp.get('result', dict()).get('data_array', []):
doc_id = result[columns.index(self.primary_key)]
text_content = result[columns.index(self.text_column)]
metadata = {col: value for col, value in zip(columns[:-1], result[:-1]) if
col not in [self.primary_key, self.text_column]}
metadata[self.primary_key] = doc_id
score = result[-1]
doc = Document(page_content=text_content, metadata=metadata)
docs_with_score.append((doc, score))
return docs_with_score
|
def _parse_search_response(self, search_resp: dict) ->List[Tuple[Document,
    float]]:
    """Parse the search response into a list of Documents with score."""
    manifest = search_resp.get('manifest', dict())
    columns = [col['name'] for col in manifest.get('columns', [])]
    docs_with_score = []
    for row in search_resp.get('result', dict()).get('data_array', []):
        doc_id = row[columns.index(self.primary_key)]
        text_content = row[columns.index(self.text_column)]
        # The final column carries the score, so both the column list and
        # the row are truncated by one when collecting metadata.
        metadata = {name: value for name, value in zip(columns[:-1], row[:
            -1]) if name not in [self.primary_key, self.text_column]}
        metadata[self.primary_key] = doc_id
        docs_with_score.append((Document(page_content=text_content,
            metadata=metadata), row[-1]))
    return docs_with_score
|
Parse the search response into a list of Documents with score.
|
test_octoai_endpoint_call_error
|
"""Test valid call to OctoAI that errors."""
llm = OctoAIEndpoint(endpoint_url=
'https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate', model_kwargs={
'max_new_tokens': -1})
with pytest.raises(ValueError):
llm('Which state is Los Angeles in?')
|
def test_octoai_endpoint_call_error() ->None:
    """Test valid call to OctoAI that errors.

    ``max_new_tokens=-1`` is an invalid value, so the call is expected to
    raise a ``ValueError``.
    """
    llm = OctoAIEndpoint(endpoint_url=
        'https://mpt-7b-demo-f1kzsig6xes9.octoai.run/generate',
        model_kwargs={'max_new_tokens': -1})
    with pytest.raises(ValueError):
        llm('Which state is Los Angeles in?')
|
Test valid call to OctoAI that errors.
|
embed_documents
|
"""Embed a list of document texts.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self.embed(texts, input_type='search_document')
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed a list of document texts.

    Args:
        texts: The list of texts to embed.

    Returns:
        List of embeddings, one for each text.
    """
    # Delegate to the shared embed helper, tagging the inputs as documents.
    document_input_type = 'search_document'
    return self.embed(texts, input_type=document_input_type)
|
Embed a list of document texts.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
test_ainetwork_toolkit
|
def get(path: str, type: str='value', default: Any=None) ->Any:
ref = ain.db.ref(path)
value = asyncio.run({'value': ref.getValue, 'rule': ref.getRule,
'owner': ref.getOwner}[type]())
return default if value is None else value
def validate(path: str, template: Any, type: str='value') ->bool:
value = get(path, type)
return Match.match(value, template)
if not os.environ.get('AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY', None):
from ain.account import Account
account = Account.create()
os.environ['AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY'] = account.private_key
interface = authenticate(network='testnet')
toolkit = AINetworkToolkit(network='testnet', interface=interface)
llm = ChatOpenAI(model='gpt-4', temperature=0)
agent = initialize_agent(tools=toolkit.get_tools(), llm=llm, verbose=True,
agent=AgentType.OPENAI_FUNCTIONS)
ain = interface
self_address = ain.wallet.defaultAccount.address
co_address = '0x6813Eb9362372EEF6200f3b1dbC3f819671cBA69'
UUID = uuid.UUID(int=int(time.time() * 1000) << 64 | uuid.uuid4().int & (1 <<
64) - 1)
app_name = f"_langchain_test__{str(UUID).replace('-', '_')}"
agent.run(f'Create app {app_name}')
validate(f'/manage_app/{app_name}/config', {'admin': {self_address: True}})
validate(f'/apps/{app_name}/DB', None, 'owner')
agent.run(f'Read owner config of /apps/{app_name}/DB .')
assert ...
agent.run(
f'Grant owner authority to {co_address} for edit write rule permission of /apps/{app_name}/DB_co .'
)
validate(f'/apps/{app_name}/DB_co', {'.owner': {'owners': {co_address: {
'branch_owner': False, 'write_function': False, 'write_owner': False,
'write_rule': True}}}}, 'owner')
agent.run(f'Read owner config of /apps/{app_name}/DB_co .')
assert ...
agent.run(f'Read owner config of /apps/{app_name}/DB .')
assert ...
agent.run(f'Read value in /apps/{app_name}/DB')
assert ...
agent.run(f'Write value {{1: 1904, 2: 43}} in /apps/{app_name}/DB')
validate(f'/apps/{app_name}/DB', {(1): 1904, (2): 43})
agent.run(f'Read value in /apps/{app_name}/DB')
assert ...
agent.run(f'Read write rule of app {app_name} .')
assert ...
self_balance = get(f'/accounts/{self_address}/balance', default=0)
transaction_history = get(f'/transfer/{self_address}/{co_address}', default={})
if self_balance < 1:
try:
with urllib.request.urlopen(
f'http://faucet.ainetwork.ai/api/test/{self_address}/'
) as response:
try_test = response.getcode()
except HTTPError as e:
try_test = e.getcode()
else:
try_test = 200
if try_test == 200:
agent.run(f'Send 1 AIN to {co_address}')
transaction_update = get(f'/transfer/{self_address}/{co_address}',
default={})
assert any(transaction_update[key]['value'] == 1 for key in
transaction_update.keys() - transaction_history.keys())
|
@pytest.mark.requires('ain')
def test_ainetwork_toolkit() ->None:
    """End-to-end exercise of the AINetwork toolkit on testnet via an agent."""
    def get(path: str, type: str='value', default: Any=None) ->Any:
        """Read the value/rule/owner node at ``path``, or ``default`` if unset."""
        ref = ain.db.ref(path)
        value = asyncio.run({'value': ref.getValue, 'rule': ref.getRule,
            'owner': ref.getOwner}[type]())
        return default if value is None else value
    def validate(path: str, template: Any, type: str='value') ->bool:
        """Check whether the node at ``path`` matches ``template``."""
        value = get(path, type)
        return Match.match(value, template)
    # Generate a throwaway account when no private key is configured.
    if not os.environ.get('AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY', None):
        from ain.account import Account
        account = Account.create()
        os.environ['AIN_BLOCKCHAIN_ACCOUNT_PRIVATE_KEY'] = account.private_key
    interface = authenticate(network='testnet')
    toolkit = AINetworkToolkit(network='testnet', interface=interface)
    llm = ChatOpenAI(model='gpt-4', temperature=0)
    agent = initialize_agent(tools=toolkit.get_tools(), llm=llm, verbose=
        True, agent=AgentType.OPENAI_FUNCTIONS)
    ain = interface
    self_address = ain.wallet.defaultAccount.address
    co_address = '0x6813Eb9362372EEF6200f3b1dbC3f819671cBA69'
    # Millisecond-timestamp high bits + random low bits keep app names unique
    # across concurrent test runs.
    UUID = uuid.UUID(int=int(time.time() * 1000) << 64 | uuid.uuid4().int &
        (1 << 64) - 1)
    app_name = f"_langchain_test__{str(UUID).replace('-', '_')}"
    agent.run(f'Create app {app_name}')
    validate(f'/manage_app/{app_name}/config', {'admin': {self_address: True}})
    validate(f'/apps/{app_name}/DB', None, 'owner')
    agent.run(f'Read owner config of /apps/{app_name}/DB .')
    assert ...
    agent.run(
        f'Grant owner authority to {co_address} for edit write rule permission of /apps/{app_name}/DB_co .'
        )
    validate(f'/apps/{app_name}/DB_co', {'.owner': {'owners': {co_address:
        {'branch_owner': False, 'write_function': False, 'write_owner':
        False, 'write_rule': True}}}}, 'owner')
    agent.run(f'Read owner config of /apps/{app_name}/DB_co .')
    assert ...
    agent.run(f'Read owner config of /apps/{app_name}/DB .')
    assert ...
    agent.run(f'Read value in /apps/{app_name}/DB')
    assert ...
    agent.run(f'Write value {{1: 1904, 2: 43}} in /apps/{app_name}/DB')
    validate(f'/apps/{app_name}/DB', {(1): 1904, (2): 43})
    agent.run(f'Read value in /apps/{app_name}/DB')
    assert ...
    agent.run(f'Read write rule of app {app_name} .')
    assert ...
    self_balance = get(f'/accounts/{self_address}/balance', default=0)
    transaction_history = get(f'/transfer/{self_address}/{co_address}',
        default={})
    # Top up from the faucet when the balance cannot cover the 1 AIN transfer.
    if self_balance < 1:
        try:
            with urllib.request.urlopen(
                f'http://faucet.ainetwork.ai/api/test/{self_address}/'
                ) as response:
                try_test = response.getcode()
        except HTTPError as e:
            try_test = e.getcode()
    else:
        try_test = 200
    if try_test == 200:
        agent.run(f'Send 1 AIN to {co_address}')
        transaction_update = get(f'/transfer/{self_address}/{co_address}',
            default={})
        # A new transfer entry with value 1 must have appeared.
        assert any(transaction_update[key]['value'] == 1 for key in
            transaction_update.keys() - transaction_history.keys())
| null |
get_llm_table
|
llm_feat_table = {}
for cm in llms.__all__:
llm_feat_table[cm] = {}
cls = getattr(llms, cm)
if issubclass(cls, LLM):
for feat in ('_stream', '_astream', ('_acall', '_agenerate')):
if isinstance(feat, tuple):
feat, name = feat
else:
feat, name = feat, feat
llm_feat_table[cm][name] = getattr(cls, feat) != getattr(LLM, feat)
else:
for feat in ['_stream', '_astream', ('_generate', 'batch_generate'),
'_agenerate', ('_agenerate', 'batch_agenerate')]:
if isinstance(feat, tuple):
feat, name = feat
else:
feat, name = feat, feat
llm_feat_table[cm][name] = getattr(cls, feat) != getattr(BaseLLM,
feat)
final_feats = {k: v for k, v in {**llm_feat_table, **
LLM_FEAT_TABLE_CORRECTION}.items() if k not in LLM_IGNORE}
header = ['model', '_agenerate', '_stream', '_astream', 'batch_generate',
'batch_agenerate']
title = ['Model', 'Invoke', 'Async invoke', 'Stream', 'Async stream',
'Batch', 'Async batch']
rows = [title, [':-'] + [':-:'] * (len(title) - 1)]
for llm, feats in sorted(final_feats.items()):
rows += [[llm, '✅'] + [('✅' if feats.get(h) else '❌') for h in header[1:]]]
return '\n'.join(['|'.join(row) for row in rows])
|
def get_llm_table():
    """Build a markdown feature table for all registered LLM classes.

    For each class exported by ``llms`` we detect which streaming/async/batch
    entry points are overridden relative to its base class, apply the manual
    corrections in ``LLM_FEAT_TABLE_CORRECTION``, drop ignored models, and
    render the result as a markdown table.
    """
    def _override_flags(cls, base, feats):
        # Map display name -> whether `cls` overrides the attribute on `base`.
        # Each feat is either an attribute name or (attribute, display name).
        flags = {}
        for feat in feats:
            attr, name = feat if isinstance(feat, tuple) else (feat, feat)
            flags[name] = getattr(cls, attr) != getattr(base, attr)
        return flags
    llm_feat_table = {}
    for cm in llms.__all__:
        cls = getattr(llms, cm)
        if issubclass(cls, LLM):
            llm_feat_table[cm] = _override_flags(cls, LLM, ('_stream',
                '_astream', ('_acall', '_agenerate')))
        else:
            llm_feat_table[cm] = _override_flags(cls, BaseLLM, ['_stream',
                '_astream', ('_generate', 'batch_generate'), '_agenerate',
                ('_agenerate', 'batch_agenerate')])
    final_feats = {k: v for k, v in {**llm_feat_table, **
        LLM_FEAT_TABLE_CORRECTION}.items() if k not in LLM_IGNORE}
    header = ['model', '_agenerate', '_stream', '_astream',
        'batch_generate', 'batch_agenerate']
    title = ['Model', 'Invoke', 'Async invoke', 'Stream', 'Async stream',
        'Batch', 'Async batch']
    rows = [title, [':-'] + [':-:'] * (len(title) - 1)]
    for llm, feats in sorted(final_feats.items()):
        rows.append([llm, '✅'] + [('✅' if feats.get(h) else '❌') for h in
            header[1:]])
    return '\n'.join('|'.join(row) for row in rows)
| null |
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Upsert optimization is done by chunking the embeddings and upserting them.
This is done to avoid memory issues and optimize using HTTP based embeddings.
For OpenAI embeddings, use pool_threads>4 when constructing the pinecone.Index,
embedding_chunk_size>1000 and batch_size~64 for best performance.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
namespace: Optional pinecone namespace to add the texts to.
batch_size: Batch size to use when adding the texts to the vectorstore.
embedding_chunk_size: Chunk size to use when embedding the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if namespace is None:
namespace = self._namespace
texts = list(texts)
ids = ids or [str(uuid.uuid4()) for _ in texts]
metadatas = metadatas or [{} for _ in texts]
for metadata, text in zip(metadatas, texts):
metadata[self._text_key] = text
for i in range(0, len(texts), embedding_chunk_size):
chunk_texts = texts[i:i + embedding_chunk_size]
chunk_ids = ids[i:i + embedding_chunk_size]
chunk_metadatas = metadatas[i:i + embedding_chunk_size]
embeddings = self._embed_documents(chunk_texts)
async_res = [self._index.upsert(vectors=batch, namespace=namespace,
async_req=True, **kwargs) for batch in batch_iterate(batch_size,
zip(chunk_ids, embeddings, chunk_metadatas))]
[res.get() for res in async_res]
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, namespace: Optional[str]=None,
    batch_size: int=32, embedding_chunk_size: int=1000, **kwargs: Any) ->List[
    str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Upsert optimization is done by chunking the embeddings and upserting them.
    This is done to avoid memory issues and optimize using HTTP based embeddings.
    For OpenAI embeddings, use pool_threads>4 when constructing the pinecone.Index,
    embedding_chunk_size>1000 and batch_size~64 for best performance.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of ids to associate with the texts.
        namespace: Optional pinecone namespace to add the texts to.
        batch_size: Batch size to use when adding the texts to the vectorstore.
        embedding_chunk_size: Chunk size to use when embedding the texts.

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    if namespace is None:
        namespace = self._namespace
    texts = list(texts)
    # Generate random ids / empty metadata when none were supplied.
    ids = ids or [str(uuid.uuid4()) for _ in texts]
    metadatas = metadatas or [{} for _ in texts]
    # The raw text is stored alongside user metadata so search results can
    # reconstruct Documents; note this mutates the caller's metadata dicts.
    for metadata, text in zip(metadatas, texts):
        metadata[self._text_key] = text
    # Embed in large chunks, then upsert each chunk in smaller batches
    # concurrently (async_req=True) and wait for every request to finish
    # before starting the next chunk.
    for i in range(0, len(texts), embedding_chunk_size):
        chunk_texts = texts[i:i + embedding_chunk_size]
        chunk_ids = ids[i:i + embedding_chunk_size]
        chunk_metadatas = metadatas[i:i + embedding_chunk_size]
        embeddings = self._embed_documents(chunk_texts)
        async_res = [self._index.upsert(vectors=batch, namespace=namespace,
            async_req=True, **kwargs) for batch in batch_iterate(batch_size,
            zip(chunk_ids, embeddings, chunk_metadatas))]
        [res.get() for res in async_res]
    return ids
|
Run more texts through the embeddings and add to the vectorstore.
Upsert optimization is done by chunking the embeddings and upserting them.
This is done to avoid memory issues and optimize using HTTP based embeddings.
For OpenAI embeddings, use pool_threads>4 when constructing the pinecone.Index,
embedding_chunk_size>1000 and batch_size~64 for best performance.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
namespace: Optional pinecone namespace to add the texts to.
batch_size: Batch size to use when adding the texts to the vectorstore.
embedding_chunk_size: Chunk size to use when embedding the texts.
Returns:
List of ids from adding the texts into the vectorstore.
|
lookup
|
"""Look up based on prompt and llm_string."""
generations = []
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
generations.append(Generation(text=text))
return generations if generations else None
|
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
    """Look up based on prompt and llm_string."""
    cached = self.redis.hgetall(self._key(prompt, llm_string))
    if not cached:
        # Cache miss: no hash stored for this (prompt, llm_string) key.
        return None
    return [Generation(text=text) for text in cached.values()]
|
Look up based on prompt and llm_string.
|
_chunk_to_generation
|
return ChatGeneration(message=convert_message_chunk_to_message(chunk.
message), generation_info=chunk.generation_info)
|
@staticmethod
def _chunk_to_generation(chunk: ChatGenerationChunk) ->ChatGeneration:
    """Convert a streaming chunk into a complete ``ChatGeneration``."""
    return ChatGeneration(message=convert_message_chunk_to_message(chunk.
        message), generation_info=chunk.generation_info)
| null |
__init__
|
"""Initialize the TensorflowDatasetLoader.
Args:
dataset_name: the name of the dataset to load
split_name: the name of the split to load.
load_max_docs: a limit to the number of loaded documents. Defaults to 100.
sample_to_document_function: a function that converts a dataset sample
into a Document.
"""
self.dataset_name: str = dataset_name
self.split_name: str = split_name
self.load_max_docs = load_max_docs
"""The maximum number of documents to load."""
self.sample_to_document_function: Optional[Callable[[Dict], Document]
] = sample_to_document_function
"""Custom function that transform a dataset sample into a Document."""
self._tfds_client = TensorflowDatasets(dataset_name=self.dataset_name,
split_name=self.split_name, load_max_docs=self.load_max_docs,
sample_to_document_function=self.sample_to_document_function)
|
def __init__(self, dataset_name: str, split_name: str, load_max_docs:
    Optional[int]=100, sample_to_document_function: Optional[Callable[[Dict
    ], Document]]=None):
    """Initialize the TensorflowDatasetLoader.

    Args:
        dataset_name: the name of the dataset to load
        split_name: the name of the split to load.
        load_max_docs: a limit to the number of loaded documents. Defaults
            to 100.
        sample_to_document_function: a function that converts a dataset
            sample into a Document.
    """
    self.dataset_name: str = dataset_name
    self.split_name: str = split_name
    # Maximum number of documents to load.
    self.load_max_docs = load_max_docs
    # Custom function that transforms a dataset sample into a Document.
    self.sample_to_document_function: Optional[Callable[[Dict], Document]
        ] = sample_to_document_function
    self._tfds_client = TensorflowDatasets(dataset_name=self.dataset_name,
        split_name=self.split_name, load_max_docs=self.load_max_docs,
        sample_to_document_function=self.sample_to_document_function)
|
Initialize the TensorflowDatasetLoader.
Args:
dataset_name: the name of the dataset to load
split_name: the name of the split to load.
load_max_docs: a limit to the number of loaded documents. Defaults to 100.
sample_to_document_function: a function that converts a dataset sample
into a Document.
|
_With
|
self.fill('with ')
interleave(lambda : self.write(', '), self.dispatch, t.items)
self.enter()
self.dispatch(t.body)
self.leave()
|
def _With(self, t):
    """Unparse a ``with`` statement: comma-separated items, then the body."""
    self.fill('with ')
    interleave(lambda : self.write(', '), self.dispatch, t.items)
    self.enter()
    self.dispatch(t.body)
    self.leave()
| null |
_import_merriam_webster
|
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
return MerriamWebsterAPIWrapper
|
def _import_merriam_webster() ->Any:
    """Lazily import and return the ``MerriamWebsterAPIWrapper`` class."""
    from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
    return MerriamWebsterAPIWrapper
| null |
_get_docs
|
"""Get docs."""
if self.search_type == 'similarity':
docs = self.vectorstore.similarity_search(question, k=self.k, **self.
search_kwargs)
elif self.search_type == 'mmr':
docs = self.vectorstore.max_marginal_relevance_search(question, k=self.
k, **self.search_kwargs)
else:
raise ValueError(f'search_type of {self.search_type} not allowed.')
return docs
|
def _get_docs(self, question: str, *, run_manager: CallbackManagerForChainRun
    ) ->List[Document]:
    """Retrieve documents for ``question`` using the configured search type.

    Raises:
        ValueError: if ``self.search_type`` is neither 'similarity' nor 'mmr'.
    """
    if self.search_type == 'similarity':
        return self.vectorstore.similarity_search(question, k=self.k, **
            self.search_kwargs)
    if self.search_type == 'mmr':
        return self.vectorstore.max_marginal_relevance_search(question, k=
            self.k, **self.search_kwargs)
    raise ValueError(f'search_type of {self.search_type} not allowed.')
|
Get docs.
|
create_new_index
|
"""
This method constructs a Cypher query and executes it
to create a new vector index in Neo4j.
"""
index_query = (
'CALL db.index.vector.createNodeIndex($index_name,$node_label,$embedding_node_property,toInteger($embedding_dimension),$similarity_metric )'
)
parameters = {'index_name': self.index_name, 'node_label': self.node_label,
'embedding_node_property': self.embedding_node_property,
'embedding_dimension': self.embedding_dimension, 'similarity_metric':
DISTANCE_MAPPING[self._distance_strategy]}
self.query(index_query, params=parameters)
|
def create_new_index(self) ->None:
    """Create a new vector index in Neo4j for this store.

    Builds a parameterized Cypher ``CALL`` from the store's configuration
    and executes it via ``self.query``.
    """
    index_query = (
        'CALL db.index.vector.createNodeIndex($index_name,$node_label,$embedding_node_property,toInteger($embedding_dimension),$similarity_metric )'
        )
    query_params = {'index_name': self.index_name, 'node_label':
        self.node_label, 'embedding_node_property':
        self.embedding_node_property, 'embedding_dimension':
        self.embedding_dimension, 'similarity_metric':
        DISTANCE_MAPPING[self._distance_strategy]}
    self.query(index_query, params=query_params)
|
This method constructs a Cypher query and executes it
to create a new vector index in Neo4j.
|
_agent_type
|
"""Return Identifier of agent type."""
return AgentType.CONVERSATIONAL_REACT_DESCRIPTION
|
@property
def _agent_type(self) ->str:
    """Return Identifier of agent type.

    Always ``AgentType.CONVERSATIONAL_REACT_DESCRIPTION`` for this agent.
    """
    return AgentType.CONVERSATIONAL_REACT_DESCRIPTION
|
Return Identifier of agent type.
|
test_nltk_text_splitter
|
"""Test splitting by sentence using NLTK."""
text = 'This is sentence one. And this is sentence two.'
separator = '|||'
splitter = NLTKTextSplitter(separator=separator)
output = splitter.split_text(text)
expected_output = [f'This is sentence one.{separator}And this is sentence two.'
]
assert output == expected_output
|
def test_nltk_text_splitter() ->None:
    """Test splitting by sentence using NLTK."""
    sample_text = 'This is sentence one. And this is sentence two.'
    separator = '|||'
    splitter = NLTKTextSplitter(separator=separator)
    chunks = splitter.split_text(sample_text)
    # Both sentences fit in one chunk, joined by the custom separator.
    assert chunks == [
        f'This is sentence one.{separator}And this is sentence two.']
|
Test splitting by sentence using NLTK.
|
test_load_llmchain
|
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello')
prompt = PromptTemplate.from_template('hello {name}!')
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
chain2 = load(chain_obj, secrets_map={'OPENAI_API_KEY': 'hello'})
assert chain2 == chain
assert dumpd(chain2) == chain_obj
assert isinstance(chain2, LLMChain)
assert isinstance(chain2.llm, OpenAI)
assert isinstance(chain2.prompt, PromptTemplate)
|
@pytest.mark.requires('openai')
def test_load_llmchain() ->None:
    """Round-trip an LLMChain through dumpd/load and check equality."""
    llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello')
    prompt = PromptTemplate.from_template('hello {name}!')
    chain = LLMChain(llm=llm, prompt=prompt)
    chain_obj = dumpd(chain)
    # The API key is a secret that is not serialized, so it must be
    # re-supplied through secrets_map on load.
    chain2 = load(chain_obj, secrets_map={'OPENAI_API_KEY': 'hello'})
    assert chain2 == chain
    # Serialization must be stable: dumping the loaded chain reproduces it.
    assert dumpd(chain2) == chain_obj
    assert isinstance(chain2, LLMChain)
    assert isinstance(chain2.llm, OpenAI)
    assert isinstance(chain2.prompt, PromptTemplate)
| null |
get_prompts
|
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
llm_cache = get_llm_cache()
for i, prompt in enumerate(prompts):
if llm_cache is not None:
cache_val = llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
|
def get_prompts(params: Dict[str, Any], prompts: List[str]) ->Tuple[Dict[
    int, List], str, List[int], List[str]]:
    """Get prompts that are already cached.

    Returns a tuple of (cached results keyed by prompt index, the cache key
    string derived from ``params``, indexes of uncached prompts, and the
    uncached prompt strings themselves).
    """
    llm_string = str(sorted([(k, v) for k, v in params.items()]))
    missing_prompts: List[str] = []
    missing_prompt_idxs: List[int] = []
    existing_prompts: Dict[int, List] = {}
    llm_cache = get_llm_cache()
    for idx, prompt in enumerate(prompts):
        if llm_cache is None:
            # No cache configured: prompts are classified neither as cached
            # nor missing (matches historical behavior).
            continue
        cache_val = llm_cache.lookup(prompt, llm_string)
        if isinstance(cache_val, list):
            existing_prompts[idx] = cache_val
        else:
            missing_prompts.append(prompt)
            missing_prompt_idxs.append(idx)
    return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
|
Get prompts that are already cached.
|
_identifying_params
|
"""Get the identifying parameters."""
return {**self._default_params}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Get the identifying parameters.

    Returns a shallow copy of the default parameters.
    """
    return {**self._default_params}
|
Get the identifying parameters.
|
test_api_key_is_secret_string
|
llm = Minimax(minimax_api_key='secret-api-key', minimax_group_id='group_id')
assert isinstance(llm.minimax_api_key, SecretStr)
|
def test_api_key_is_secret_string() ->None:
    """The Minimax API key must be wrapped in a ``SecretStr``."""
    model = Minimax(minimax_api_key='secret-api-key', minimax_group_id=
        'group_id')
    assert isinstance(model.minimax_api_key, SecretStr)
| null |
_dependable_praw_import
|
try:
import praw
except ImportError:
raise ImportError(
'praw package not found, please install it with `pip install praw`')
return praw
|
def _dependable_praw_import() ->'praw':
    """Import and return the ``praw`` module.

    The return annotation is quoted because ``praw`` is only imported inside
    the function body: an unquoted annotation is evaluated when the function
    is defined and would raise ``NameError`` whenever ``praw`` is not
    installed (unless postponed annotation evaluation is enabled file-wide).

    Returns:
        The imported ``praw`` module.

    Raises:
        ImportError: if the ``praw`` package is not installed.
    """
    try:
        import praw
    except ImportError as e:
        # Chain the original error so the real import failure stays visible.
        raise ImportError(
            'praw package not found, please install it with `pip install praw`'
            ) from e
    return praw
| null |
test_react
|
"""Test functionality on a prompt."""
llm = OpenAI(temperature=0, model_name='gpt-3.5-turbo-instruct')
react = ReActChain(llm=llm, docstore=Wikipedia())
question = (
'Author David Chanoff has collaborated with a U.S. Navy admiral who served as the ambassador to the United Kingdom under which President?'
)
output = react.run(question)
assert output == 'Bill Clinton'
|
def test_react() ->None:
"""Test functionality on a prompt."""
llm = OpenAI(temperature=0, model_name='gpt-3.5-turbo-instruct')
react = ReActChain(llm=llm, docstore=Wikipedia())
question = (
'Author David Chanoff has collaborated with a U.S. Navy admiral who served as the ambassador to the United Kingdom under which President?'
)
output = react.run(question)
assert output == 'Bill Clinton'
|
Test functionality on a prompt.
|
query
|
"""Query the graph."""
return []
|
def query(self, query: str, params: dict={}) ->List[Dict[str, Any]]:
"""Query the graph."""
return []
|
Query the graph.
|
test_from_texts_with_metadatas
|
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = MongoDBAtlasVectorSearch.from_texts(texts, embedding_openai,
metadatas=metadatas, collection=collection, index_name=INDEX_NAME)
sleep(1)
output = vectorstore.similarity_search('Sandwich', k=1)
assert output[0].page_content == 'What is a sandwich?'
assert output[0].metadata['c'] == 1
|
def test_from_texts_with_metadatas(self, embedding_openai: Embeddings,
collection: Any) ->None:
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = MongoDBAtlasVectorSearch.from_texts(texts,
embedding_openai, metadatas=metadatas, collection=collection,
index_name=INDEX_NAME)
sleep(1)
output = vectorstore.similarity_search('Sandwich', k=1)
assert output[0].page_content == 'What is a sandwich?'
assert output[0].metadata['c'] == 1
| null |
_type
|
return 'structured_chat'
|
@property
def _type(self) ->str:
return 'structured_chat'
| null |
process_model
|
"""Utility to process a run for wandb model_dict serialization.
:param run: The run to process.
:return: The convert model_dict to pass to WBTraceTree.
"""
try:
data = json.loads(run.json())
processed = self.flatten_run(data)
keep_keys = ('id', 'name', 'serialized', 'inputs', 'outputs',
'parent_run_id', 'execution_order')
processed = self.truncate_run_iterative(processed, keep_keys=keep_keys)
exact_keys, partial_keys = ('lc', 'type'), ('api_key',)
processed = self.modify_serialized_iterative(processed, exact_keys=
exact_keys, partial_keys=partial_keys)
output = self.build_tree(processed)
return output
except Exception as e:
if PRINT_WARNINGS:
self.wandb.termwarn(f'WARNING: Failed to serialize model: {e}')
return None
|
def process_model(self, run: Run) ->Optional[Dict[str, Any]]:
"""Utility to process a run for wandb model_dict serialization.
:param run: The run to process.
:return: The convert model_dict to pass to WBTraceTree.
"""
try:
data = json.loads(run.json())
processed = self.flatten_run(data)
keep_keys = ('id', 'name', 'serialized', 'inputs', 'outputs',
'parent_run_id', 'execution_order')
processed = self.truncate_run_iterative(processed, keep_keys=keep_keys)
exact_keys, partial_keys = ('lc', 'type'), ('api_key',)
processed = self.modify_serialized_iterative(processed, exact_keys=
exact_keys, partial_keys=partial_keys)
output = self.build_tree(processed)
return output
except Exception as e:
if PRINT_WARNINGS:
self.wandb.termwarn(f'WARNING: Failed to serialize model: {e}')
return None
|
Utility to process a run for wandb model_dict serialization.
:param run: The run to process.
:return: The convert model_dict to pass to WBTraceTree.
|
validate_environment
|
"""Validate that llama-cpp-python library is installed."""
model_path = values['model_path']
model_param_names = ['n_ctx', 'n_parts', 'seed', 'f16_kv', 'logits_all',
'vocab_only', 'use_mlock', 'n_threads', 'n_batch', 'verbose']
model_params = {k: values[k] for k in model_param_names}
if values['n_gpu_layers'] is not None:
model_params['n_gpu_layers'] = values['n_gpu_layers']
try:
from llama_cpp import Llama
values['client'] = Llama(model_path, embedding=True, **model_params)
except ImportError:
raise ModuleNotFoundError(
'Could not import llama-cpp-python library. Please install the llama-cpp-python library to use this embedding model: pip install llama-cpp-python'
)
except Exception as e:
raise ValueError(
f'Could not load Llama model from path: {model_path}. Received error {e}'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values['model_path']
model_param_names = ['n_ctx', 'n_parts', 'seed', 'f16_kv', 'logits_all',
'vocab_only', 'use_mlock', 'n_threads', 'n_batch', 'verbose']
model_params = {k: values[k] for k in model_param_names}
if values['n_gpu_layers'] is not None:
model_params['n_gpu_layers'] = values['n_gpu_layers']
try:
from llama_cpp import Llama
values['client'] = Llama(model_path, embedding=True, **model_params)
except ImportError:
raise ModuleNotFoundError(
'Could not import llama-cpp-python library. Please install the llama-cpp-python library to use this embedding model: pip install llama-cpp-python'
)
except Exception as e:
raise ValueError(
f'Could not load Llama model from path: {model_path}. Received error {e}'
)
return values
|
Validate that llama-cpp-python library is installed.
|
__call__
|
"""Callable to load the combine documents chain."""
|
def __call__(self, llm: BaseLanguageModel, **kwargs: Any
) ->BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
|
Callable to load the combine documents chain.
|
_reduce_tokens_below_limit
|
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(self.
combine_documents_chain, StuffDocumentsChain):
tokens = [self.combine_documents_chain.llm_chain._get_num_tokens(doc.
page_content) for doc in docs]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
|
def _reduce_tokens_below_limit(self, docs: List[Document]) ->List[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(self.
combine_documents_chain, StuffDocumentsChain):
tokens = [self.combine_documents_chain.llm_chain._get_num_tokens(
doc.page_content) for doc in docs]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
| null |
vector_search_with_score
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
from azure.search.documents.models import Vector
results = self.client.search(search_text='', vectors=[Vector(value=np.array
(self.embedding_function(query), dtype=np.float32).tolist(), k=k,
fields=FIELDS_CONTENT_VECTOR)], filter=filters)
docs = [(Document(page_content=result.pop(FIELDS_CONTENT), metadata={**{
FIELDS_ID: result.pop(FIELDS_ID)} if FIELDS_ID in result else {}, **
json.loads(result[FIELDS_METADATA]) if FIELDS_METADATA in result else {
k: v for k, v in result.items() if k != FIELDS_CONTENT_VECTOR}}), float
(result['@search.score'])) for result in results]
return docs
|
def vector_search_with_score(self, query: str, k: int=4, filters: Optional[
str]=None) ->List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
from azure.search.documents.models import Vector
results = self.client.search(search_text='', vectors=[Vector(value=np.
array(self.embedding_function(query), dtype=np.float32).tolist(), k
=k, fields=FIELDS_CONTENT_VECTOR)], filter=filters)
docs = [(Document(page_content=result.pop(FIELDS_CONTENT), metadata={**
{FIELDS_ID: result.pop(FIELDS_ID)} if FIELDS_ID in result else {},
**json.loads(result[FIELDS_METADATA]) if FIELDS_METADATA in result else
{k: v for k, v in result.items() if k != FIELDS_CONTENT_VECTOR}}),
float(result['@search.score'])) for result in results]
return docs
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
|
test_connect_neo4j
|
"""Test that Neo4j database is correctly instantiated and connected."""
url = os.environ.get('NEO4J_URI')
username = os.environ.get('NEO4J_USERNAME')
password = os.environ.get('NEO4J_PASSWORD')
assert url is not None
assert username is not None
assert password is not None
graph = Neo4jGraph(url=url, username=username, password=password)
output = graph.query("""
RETURN "test" AS output
""")
expected_output = [{'output': 'test'}]
assert output == expected_output
|
def test_connect_neo4j() ->None:
"""Test that Neo4j database is correctly instantiated and connected."""
url = os.environ.get('NEO4J_URI')
username = os.environ.get('NEO4J_USERNAME')
password = os.environ.get('NEO4J_PASSWORD')
assert url is not None
assert username is not None
assert password is not None
graph = Neo4jGraph(url=url, username=username, password=password)
output = graph.query("""
RETURN "test" AS output
""")
expected_output = [{'output': 'test'}]
assert output == expected_output
|
Test that Neo4j database is correctly instantiated and connected.
|
_import_predibase
|
from langchain_community.llms.predibase import Predibase
return Predibase
|
def _import_predibase() ->Any:
from langchain_community.llms.predibase import Predibase
return Predibase
| null |
test_chat_from_role_strings
|
"""Test instantiation of chat template from role strings."""
template = ChatPromptTemplate.from_role_strings([('system',
'You are a bot.'), ('assistant', 'hello!'), ('human', '{question}'), (
'other', '{quack}')])
messages = template.format_messages(question='How are you?', quack='duck')
assert messages == [ChatMessage(content='You are a bot.', role='system'),
ChatMessage(content='hello!', role='assistant'), ChatMessage(content=
'How are you?', role='human'), ChatMessage(content='duck', role='other')]
|
def test_chat_from_role_strings() ->None:
"""Test instantiation of chat template from role strings."""
template = ChatPromptTemplate.from_role_strings([('system',
'You are a bot.'), ('assistant', 'hello!'), ('human', '{question}'),
('other', '{quack}')])
messages = template.format_messages(question='How are you?', quack='duck')
assert messages == [ChatMessage(content='You are a bot.', role='system'
), ChatMessage(content='hello!', role='assistant'), ChatMessage(
content='How are you?', role='human'), ChatMessage(content='duck',
role='other')]
|
Test instantiation of chat template from role strings.
|
__init__
|
"""Initialize with bucket and key name.
Args:
project_name: The name of the project to load
bucket: The name of the GCS bucket.
blob: The name of the GCS blob to load.
loader_func: A loader function that instantiates a loader based on a
file_path argument. If nothing is provided, the
UnstructuredFileLoader is used.
Examples:
To use an alternative PDF loader:
>> from from langchain_community.document_loaders import PyPDFLoader
>> loader = GCSFileLoader(..., loader_func=PyPDFLoader)
To use UnstructuredFileLoader with additional arguments:
>> loader = GCSFileLoader(...,
>> loader_func=lambda x: UnstructuredFileLoader(x, mode="elements"))
"""
self.bucket = bucket
self.blob = blob
self.project_name = project_name
def default_loader_func(file_path: str) ->BaseLoader:
return UnstructuredFileLoader(file_path)
self._loader_func = loader_func if loader_func else default_loader_func
|
def __init__(self, project_name: str, bucket: str, blob: str, loader_func:
Optional[Callable[[str], BaseLoader]]=None):
"""Initialize with bucket and key name.
Args:
project_name: The name of the project to load
bucket: The name of the GCS bucket.
blob: The name of the GCS blob to load.
loader_func: A loader function that instantiates a loader based on a
file_path argument. If nothing is provided, the
UnstructuredFileLoader is used.
Examples:
To use an alternative PDF loader:
>> from from langchain_community.document_loaders import PyPDFLoader
>> loader = GCSFileLoader(..., loader_func=PyPDFLoader)
To use UnstructuredFileLoader with additional arguments:
>> loader = GCSFileLoader(...,
>> loader_func=lambda x: UnstructuredFileLoader(x, mode="elements"))
"""
self.bucket = bucket
self.blob = blob
self.project_name = project_name
def default_loader_func(file_path: str) ->BaseLoader:
return UnstructuredFileLoader(file_path)
self._loader_func = loader_func if loader_func else default_loader_func
|
Initialize with bucket and key name.
Args:
project_name: The name of the project to load
bucket: The name of the GCS bucket.
blob: The name of the GCS blob to load.
loader_func: A loader function that instantiates a loader based on a
file_path argument. If nothing is provided, the
UnstructuredFileLoader is used.
Examples:
To use an alternative PDF loader:
>> from from langchain_community.document_loaders import PyPDFLoader
>> loader = GCSFileLoader(..., loader_func=PyPDFLoader)
To use UnstructuredFileLoader with additional arguments:
>> loader = GCSFileLoader(...,
>> loader_func=lambda x: UnstructuredFileLoader(x, mode="elements"))
|
_call_after_scoring_before_learning
|
...
|
@abstractmethod
def _call_after_scoring_before_learning(self, event: TEvent, score:
Optional[float]) ->TEvent:
...
| null |
reset_pinecone
|
assert os.environ.get('PINECONE_API_KEY') is not None
assert os.environ.get('PINECONE_ENVIRONMENT') is not None
import pinecone
importlib.reload(pinecone)
pinecone.init(api_key=os.environ.get('PINECONE_API_KEY'), environment=os.
environ.get('PINECONE_ENVIRONMENT'))
|
def reset_pinecone() ->None:
assert os.environ.get('PINECONE_API_KEY') is not None
assert os.environ.get('PINECONE_ENVIRONMENT') is not None
import pinecone
importlib.reload(pinecone)
pinecone.init(api_key=os.environ.get('PINECONE_API_KEY'), environment=
os.environ.get('PINECONE_ENVIRONMENT'))
| null |
_write_constant
|
if isinstance(value, (float, complex)):
self.write(repr(value).replace('inf', INFSTR))
else:
self.write(repr(value))
|
def _write_constant(self, value):
if isinstance(value, (float, complex)):
self.write(repr(value).replace('inf', INFSTR))
else:
self.write(repr(value))
| null |
_get_embedding_dimension
|
return len(self._get_embedding(text='This is a sample sentence.'))
|
def _get_embedding_dimension(self) ->int:
return len(self._get_embedding(text='This is a sample sentence.'))
| null |
getEthBalance
|
url = (
f'https://api.etherscan.io/api?module=account&action=balance&address={self.account_address}&tag=latest&apikey={self.api_key}'
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print('Error occurred while making the request:', e)
return [Document(page_content=response.json()['result'])]
|
def getEthBalance(self) ->List[Document]:
url = (
f'https://api.etherscan.io/api?module=account&action=balance&address={self.account_address}&tag=latest&apikey={self.api_key}'
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print('Error occurred while making the request:', e)
return [Document(page_content=response.json()['result'])]
| null |
test_critique_parsing
|
"""Test parsing of critique text."""
for text in [TEXT_ONE, TEXT_TWO, TEXT_THREE]:
critique = ConstitutionalChain._parse_critique(text)
assert critique.strip(
) == 'This text is bad.', f'Failed on {text} with {critique}'
|
def test_critique_parsing() ->None:
"""Test parsing of critique text."""
for text in [TEXT_ONE, TEXT_TWO, TEXT_THREE]:
critique = ConstitutionalChain._parse_critique(text)
assert critique.strip(
) == 'This text is bad.', f'Failed on {text} with {critique}'
|
Test parsing of critique text.
|
on_retriever_end
|
"""Run when Retriever ends running."""
retrieval_run = self._get_run(run_id, run_type='retriever')
retrieval_run.outputs = {'documents': documents}
retrieval_run.end_time = datetime.now(timezone.utc)
retrieval_run.events.append({'name': 'end', 'time': retrieval_run.end_time})
self._end_trace(retrieval_run)
self._on_retriever_end(retrieval_run)
return retrieval_run
|
def on_retriever_end(self, documents: Sequence[Document], *, run_id: UUID,
**kwargs: Any) ->Run:
"""Run when Retriever ends running."""
retrieval_run = self._get_run(run_id, run_type='retriever')
retrieval_run.outputs = {'documents': documents}
retrieval_run.end_time = datetime.now(timezone.utc)
retrieval_run.events.append({'name': 'end', 'time': retrieval_run.end_time}
)
self._end_trace(retrieval_run)
self._on_retriever_end(retrieval_run)
return retrieval_run
|
Run when Retriever ends running.
|
add_to_hash_tree
|
parent_id_str = str(parent_id)
if not parent_id_str in hash_tree:
parent_name = strings[node_names[parent_id]].lower()
grand_parent_id = parent[parent_id]
add_to_hash_tree(hash_tree, tag, parent_id, parent_name, grand_parent_id)
is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]
if node_name == tag:
value: Tuple[bool, Optional[int]] = (True, node_id)
elif is_parent_desc_anchor:
value = True, anchor_id
else:
value = False, None
hash_tree[str(node_id)] = value
return value
|
def add_to_hash_tree(hash_tree: Dict[str, Tuple[bool, Optional[int]]], tag:
str, node_id: int, node_name: Optional[str], parent_id: int) ->Tuple[
bool, Optional[int]]:
parent_id_str = str(parent_id)
if not parent_id_str in hash_tree:
parent_name = strings[node_names[parent_id]].lower()
grand_parent_id = parent[parent_id]
add_to_hash_tree(hash_tree, tag, parent_id, parent_name,
grand_parent_id)
is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]
if node_name == tag:
value: Tuple[bool, Optional[int]] = (True, node_id)
elif is_parent_desc_anchor:
value = True, anchor_id
else:
value = False, None
hash_tree[str(node_id)] = value
return value
| null |
format_response_payload
|
response_json = json.loads(output)
return response_json[0]['0']
|
def format_response_payload(self, output: bytes) ->str:
response_json = json.loads(output)
return response_json[0]['0']
| null |
_llm_type
|
"""Return type of model."""
return 'edenai'
|
@property
def _llm_type(self) ->str:
"""Return type of model."""
return 'edenai'
|
Return type of model.
|
OutputType
|
"""Get the output type for this runnable."""
return AnyMessage
|
@property
def OutputType(self) ->Any:
"""Get the output type for this runnable."""
return AnyMessage
|
Get the output type for this runnable.
|
add_triple
|
"""Add a triple to the graph."""
if not self._graph.has_node(knowledge_triple.subject):
self._graph.add_node(knowledge_triple.subject)
if not self._graph.has_node(knowledge_triple.object_):
self._graph.add_node(knowledge_triple.object_)
self._graph.add_edge(knowledge_triple.subject, knowledge_triple.object_,
relation=knowledge_triple.predicate)
|
def add_triple(self, knowledge_triple: KnowledgeTriple) ->None:
"""Add a triple to the graph."""
if not self._graph.has_node(knowledge_triple.subject):
self._graph.add_node(knowledge_triple.subject)
if not self._graph.has_node(knowledge_triple.object_):
self._graph.add_node(knowledge_triple.object_)
self._graph.add_edge(knowledge_triple.subject, knowledge_triple.object_,
relation=knowledge_triple.predicate)
|
Add a triple to the graph.
|
add_documents
|
print(documents)
return super().add_documents(documents, ids=[f'{i}' for i in range(len(
documents))])
|
def add_documents(self, documents: Sequence[Document], **kwargs: Any) ->List[
str]:
print(documents)
return super().add_documents(documents, ids=[f'{i}' for i in range(len(
documents))])
| null |
dependable_scann_import
|
"""
Import `scann` if available, otherwise raise error.
"""
try:
import scann
except ImportError:
raise ImportError(
'Could not import scann python package. Please install it with `pip install scann` '
)
return scann
|
def dependable_scann_import() ->Any:
"""
Import `scann` if available, otherwise raise error.
"""
try:
import scann
except ImportError:
raise ImportError(
'Could not import scann python package. Please install it with `pip install scann` '
)
return scann
|
Import `scann` if available, otherwise raise error.
|
_import_openapi_utils_api_models
|
from langchain_community.tools.openapi.utils.api_models import APIOperation
return APIOperation
|
def _import_openapi_utils_api_models() ->Any:
from langchain_community.tools.openapi.utils.api_models import APIOperation
return APIOperation
| null |
_generate
|
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(messages, stop, run_manager, stream
=stream, **kwargs)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts([generation.
message], stop)
params = {**params, **kwargs}
pl_request_id = promptlayer_api_request('langchain.PromptLayerChatOpenAI',
'langchain', message_dicts, params, self.pl_tags, response_dict,
request_start_time, request_end_time, get_api_key(), return_pl_id=
self.return_pl_id)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(generation.
generation_info, dict):
generation.generation_info = {}
generation.generation_info['pl_request_id'] = pl_request_id
return generated_responses
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
Optional[bool]=None, **kwargs: Any) ->ChatResult:
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(messages, stop, run_manager,
stream=stream, **kwargs)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts([generation.
message], stop)
params = {**params, **kwargs}
pl_request_id = promptlayer_api_request(
'langchain.PromptLayerChatOpenAI', 'langchain', message_dicts,
params, self.pl_tags, response_dict, request_start_time,
request_end_time, get_api_key(), return_pl_id=self.return_pl_id)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(generation
.generation_info, dict):
generation.generation_info = {}
generation.generation_info['pl_request_id'] = pl_request_id
return generated_responses
|
Call ChatOpenAI generate and then call PromptLayer API to log the request.
|
test_llm_cache_clear
|
prompt = 'How are you?'
response = 'Test response'
cached_response = 'Cached test response'
llm = FakeListLLM(responses=[response])
if get_llm_cache():
get_llm_cache().update(prompt=prompt, llm_string=create_llm_string(llm),
return_val=[Generation(text=cached_response)])
get_llm_cache().clear()
assert llm(prompt) == response
else:
raise ValueError(
'The cache not set. This should never happen, as the pytest fixture `set_cache_and_teardown` always sets the cache.'
)
|
def test_llm_cache_clear() ->None:
prompt = 'How are you?'
response = 'Test response'
cached_response = 'Cached test response'
llm = FakeListLLM(responses=[response])
if get_llm_cache():
get_llm_cache().update(prompt=prompt, llm_string=create_llm_string(
llm), return_val=[Generation(text=cached_response)])
get_llm_cache().clear()
assert llm(prompt) == response
else:
raise ValueError(
'The cache not set. This should never happen, as the pytest fixture `set_cache_and_teardown` always sets the cache.'
)
| null |
run
|
"""
Invokes the lambda function and returns the
result.
Args:
query: an input to passed to the lambda
function as the ``body`` of a JSON
object.
"""
res = self.lambda_client.invoke(FunctionName=self.function_name,
InvocationType='RequestResponse', Payload=json.dumps({'body': query}))
try:
payload_stream = res['Payload']
payload_string = payload_stream.read().decode('utf-8')
answer = json.loads(payload_string)['body']
except StopIteration:
return 'Failed to parse response from Lambda'
if answer is None or answer == '':
return 'Request failed.'
else:
return f'Result: {answer}'
|
def run(self, query: str) ->str:
"""
Invokes the lambda function and returns the
result.
Args:
query: an input to passed to the lambda
function as the ``body`` of a JSON
object.
"""
res = self.lambda_client.invoke(FunctionName=self.function_name,
InvocationType='RequestResponse', Payload=json.dumps({'body': query}))
try:
payload_stream = res['Payload']
payload_string = payload_stream.read().decode('utf-8')
answer = json.loads(payload_string)['body']
except StopIteration:
return 'Failed to parse response from Lambda'
if answer is None or answer == '':
return 'Request failed.'
else:
return f'Result: {answer}'
|
Invokes the lambda function and returns the
result.
Args:
query: an input to passed to the lambda
function as the ``body`` of a JSON
object.
|
json_schema_evaluator
|
return JsonSchemaEvaluator()
|
@pytest.fixture
def json_schema_evaluator() ->JsonSchemaEvaluator:
return JsonSchemaEvaluator()
| null |
__get__
|
if instance is not None or owner is not None:
emit_warning()
return super().__get__(instance, owner)
|
def __get__(self, instance, owner=None):
if instance is not None or owner is not None:
emit_warning()
return super().__get__(instance, owner)
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'llms', 'google_palm']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'llms', 'google_palm']
|
Get the namespace of the langchain object.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.