| method_name | method_body | full_code | docstring |
|---|---|---|---|
test_scann_local_save_load
|
"""Test end to end serialization."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
temp_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
with tempfile.TemporaryDirectory(suffix='_' + temp_timestamp + '/') as temp_folder:
docsearch.save_local(temp_folder)
new_docsearch = ScaNN.load_local(temp_folder, FakeEmbeddings())
assert new_docsearch.index is not None
|
def test_scann_local_save_load() ->None:
"""Test end to end serialization."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
temp_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
with tempfile.TemporaryDirectory(suffix='_' + temp_timestamp + '/') as temp_folder:
docsearch.save_local(temp_folder)
new_docsearch = ScaNN.load_local(temp_folder, FakeEmbeddings())
assert new_docsearch.index is not None
|
Test end-to-end serialization.
|
requires_reference
|
return True
|
@property
def requires_reference(self) ->bool:
return True
| null |
test_split_text_on_tokens
|
"""Test splitting by tokens per chunk."""
text = 'foo bar baz 123'
tokenizer = Tokenizer(chunk_overlap=3, tokens_per_chunk=7,
    decode=lambda it: ''.join(chr(i) for i in it),
    encode=lambda it: [ord(c) for c in it])
output = split_text_on_tokens(text=text, tokenizer=tokenizer)
expected_output = ['foo bar', 'bar baz', 'baz 123']
assert output == expected_output
|
def test_split_text_on_tokens() ->None:
"""Test splitting by tokens per chunk."""
text = 'foo bar baz 123'
tokenizer = Tokenizer(chunk_overlap=3, tokens_per_chunk=7,
    decode=lambda it: ''.join(chr(i) for i in it),
    encode=lambda it: [ord(c) for c in it])
output = split_text_on_tokens(text=text, tokenizer=tokenizer)
expected_output = ['foo bar', 'bar baz', 'baz 123']
assert output == expected_output
|
Test splitting by tokens per chunk.
|
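A quick sanity check on the expected output above: the encode lambda makes one token per character, and the window advances by tokens_per_chunk - chunk_overlap = 4 tokens per step. A minimal sketch of that window arithmetic (assumed logic for illustration, not the library implementation):

def window_chunks(tokens, tokens_per_chunk, chunk_overlap):
    chunks, start = [], 0
    while start < len(tokens):
        chunks.append(tokens[start:start + tokens_per_chunk])
        if start + tokens_per_chunk >= len(tokens):
            break
        start += tokens_per_chunk - chunk_overlap  # stride = 7 - 3 = 4
    return chunks

tokens = [ord(c) for c in 'foo bar baz 123']  # encode: one token per character
chunks = window_chunks(tokens, tokens_per_chunk=7, chunk_overlap=3)
print([''.join(chr(t) for t in c) for c in chunks])
# ['foo bar', 'bar baz', 'baz 123']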
test_get_eth_balance
|
account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
loader = EtherscanLoader(account_address, filter='eth_balance')
result = loader.load()
assert len(result) > 0, 'No results returned'
|
@pytest.mark.skipif(not etherscan_key_set, reason=
'Etherscan API key not provided.')
def test_get_eth_balance() ->None:
account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
loader = EtherscanLoader(account_address, filter='eth_balance')
result = loader.load()
    assert len(result) > 0, 'No results returned'
| null |
_get_page_content
|
"""Get page content from OneNote API"""
request_url = self.onenote_api_base_url + f'/pages/{page_id}/content'
response = requests.get(request_url, headers=self._headers, timeout=10)
response.raise_for_status()
return response.text
|
def _get_page_content(self, page_id: str) ->str:
"""Get page content from OneNote API"""
request_url = self.onenote_api_base_url + f'/pages/{page_id}/content'
response = requests.get(request_url, headers=self._headers, timeout=10)
response.raise_for_status()
return response.text
|
Get page content from OneNote API
|
from_clickup_api_wrapper
|
operations: List[Dict] = [
    {'mode': 'get_task', 'name': 'Get task', 'description': CLICKUP_GET_TASK_PROMPT},
    {'mode': 'get_task_attribute', 'name': 'Get task attribute', 'description': CLICKUP_GET_TASK_ATTRIBUTE_PROMPT},
    {'mode': 'get_teams', 'name': 'Get Teams', 'description': CLICKUP_GET_ALL_TEAMS_PROMPT},
    {'mode': 'create_task', 'name': 'Create Task', 'description': CLICKUP_TASK_CREATE_PROMPT},
    {'mode': 'create_list', 'name': 'Create List', 'description': CLICKUP_LIST_CREATE_PROMPT},
    {'mode': 'create_folder', 'name': 'Create Folder', 'description': CLICKUP_FOLDER_CREATE_PROMPT},
    {'mode': 'get_list', 'name': 'Get all lists in the space', 'description': CLICKUP_GET_LIST_PROMPT},
    {'mode': 'get_folders', 'name': 'Get all folders in the workspace', 'description': CLICKUP_GET_FOLDERS_PROMPT},
    {'mode': 'get_spaces', 'name': 'Get all spaces in the workspace', 'description': CLICKUP_GET_SPACES_PROMPT},
    {'mode': 'update_task', 'name': 'Update task', 'description': CLICKUP_UPDATE_TASK_PROMPT},
    {'mode': 'update_task_assignees', 'name': 'Update task assignees', 'description': CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT},
]
tools = [ClickupAction(name=action['name'], description=action['description'],
    mode=action['mode'], api_wrapper=clickup_api_wrapper) for action in operations]
return cls(tools=tools)
|
@classmethod
def from_clickup_api_wrapper(cls, clickup_api_wrapper: ClickupAPIWrapper) ->'ClickupToolkit':
    operations: List[Dict] = [
        {'mode': 'get_task', 'name': 'Get task', 'description': CLICKUP_GET_TASK_PROMPT},
        {'mode': 'get_task_attribute', 'name': 'Get task attribute', 'description': CLICKUP_GET_TASK_ATTRIBUTE_PROMPT},
        {'mode': 'get_teams', 'name': 'Get Teams', 'description': CLICKUP_GET_ALL_TEAMS_PROMPT},
        {'mode': 'create_task', 'name': 'Create Task', 'description': CLICKUP_TASK_CREATE_PROMPT},
        {'mode': 'create_list', 'name': 'Create List', 'description': CLICKUP_LIST_CREATE_PROMPT},
        {'mode': 'create_folder', 'name': 'Create Folder', 'description': CLICKUP_FOLDER_CREATE_PROMPT},
        {'mode': 'get_list', 'name': 'Get all lists in the space', 'description': CLICKUP_GET_LIST_PROMPT},
        {'mode': 'get_folders', 'name': 'Get all folders in the workspace', 'description': CLICKUP_GET_FOLDERS_PROMPT},
        {'mode': 'get_spaces', 'name': 'Get all spaces in the workspace', 'description': CLICKUP_GET_SPACES_PROMPT},
        {'mode': 'update_task', 'name': 'Update task', 'description': CLICKUP_UPDATE_TASK_PROMPT},
        {'mode': 'update_task_assignees', 'name': 'Update task assignees', 'description': CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT},
    ]
    tools = [ClickupAction(name=action['name'], description=action['description'],
        mode=action['mode'], api_wrapper=clickup_api_wrapper) for action in operations]
    return cls(tools=tools)
| null |
is_lc_serializable
|
"""RunnableBranch is serializable if all its branches are serializable."""
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
"""RunnableBranch is serializable if all its branches are serializable."""
return True
|
RunnableBranch is serializable if all its branches are serializable.
|
setUp
|
self.host = 'localhost'
self.graph = 'test_falkordb'
self.port = 6379
|
def setUp(self) ->None:
self.host = 'localhost'
self.graph = 'test_falkordb'
self.port = 6379
| null |
test_pwd_command
|
"""Test correct functionality."""
session = BashProcess()
commands = ['pwd']
output = session.run(commands)
assert output == subprocess.check_output('pwd', shell=True).decode()
|
@pytest.mark.skipif(sys.platform.startswith('win'), reason=
'Test not supported on Windows')
def test_pwd_command() ->None:
"""Test correct functionality."""
session = BashProcess()
commands = ['pwd']
output = session.run(commands)
assert output == subprocess.check_output('pwd', shell=True).decode()
|
Test correct functionality.
|
clear
|
"""Delete session from Xata table."""
while True:
r = self._client.data().query(self._table_name, payload={
    'columns': ['id'], 'filter': {'sessionId': self._session_id}})
if r.status_code != 200:
raise Exception(f'Error running query: {r.status_code} {r}')
ids = [rec['id'] for rec in r['records']]
if len(ids) == 0:
break
operations = [{'delete': {'table': self._table_name, 'id': id}} for id in ids]
self._client.records().transaction(payload={'operations': operations})
|
def clear(self) ->None:
"""Delete session from Xata table."""
while True:
r = self._client.data().query(self._table_name, payload={
    'columns': ['id'], 'filter': {'sessionId': self._session_id}})
if r.status_code != 200:
raise Exception(f'Error running query: {r.status_code} {r}')
ids = [rec['id'] for rec in r['records']]
if len(ids) == 0:
break
operations = [{'delete': {'table': self._table_name, 'id': id}} for id in ids]
self._client.records().transaction(payload={'operations': operations})
|
Delete session from Xata table.
|
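clear() above is an instance of the generic paginate-then-delete pattern: query one page of matching ids, delete that page in a transaction, and repeat until the query comes back empty. A minimal standalone sketch with hypothetical fetch_page/delete_batch callables (not the Xata client API):

def delete_all(fetch_page, delete_batch):
    # Loop until a query for matching ids returns nothing.
    while True:
        ids = fetch_page()
        if not ids:
            break
        delete_batch(ids)

# Toy usage: drain a list three ids at a time.
records = list(range(7))

def fetch_page():
    return records[:3]

def delete_batch(ids):
    for i in ids:
        records.remove(i)

delete_all(fetch_page, delete_batch)
assert records == []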
_call_eden_ai
|
"""
Make an API call to the EdenAI service with the specified query parameters.
Args:
query_params (dict): The parameters to include in the API call.
Returns:
str: The parsed response from the EdenAI API call.
"""
headers = {'Authorization': f'Bearer {self.edenai_api_key}', 'User-Agent':
self.get_user_agent()}
url = f'https://api.edenai.run/v2/{self.feature}/{self.subfeature}'
payload = {'providers': str(self.providers), 'response_as_dict': False,
'attributes_as_list': True, 'show_original_response': False}
payload.update(query_params)
response = requests.post(url, json=payload, headers=headers)
self._raise_on_error(response)
try:
return self._parse_response(response.json())
except Exception as e:
raise RuntimeError(f'An error occurred while running tool: {e}')
|
def _call_eden_ai(self, query_params: Dict[str, Any]) ->str:
"""
Make an API call to the EdenAI service with the specified query parameters.
Args:
query_params (dict): The parameters to include in the API call.
Returns:
str: The parsed response from the EdenAI API call.
"""
headers = {'Authorization': f'Bearer {self.edenai_api_key}',
'User-Agent': self.get_user_agent()}
url = f'https://api.edenai.run/v2/{self.feature}/{self.subfeature}'
payload = {'providers': str(self.providers), 'response_as_dict': False,
'attributes_as_list': True, 'show_original_response': False}
payload.update(query_params)
response = requests.post(url, json=payload, headers=headers)
self._raise_on_error(response)
try:
return self._parse_response(response.json())
except Exception as e:
raise RuntimeError(f'An error occurred while running tool: {e}')
|
Make an API call to the EdenAI service with the specified query parameters.
Args:
query_params (dict): The parameters to include in the API call.
Returns:
str: The parsed response from the EdenAI API call.
|
from_texts
|
"""Use from components instead."""
raise NotImplementedError(
'This method is not implemented. Instead, you should initialize the class with `MatchingEngine.from_components(...)` and then call `add_texts`'
)
|
@classmethod
def from_texts(cls: Type['MatchingEngine'], texts: List[str],
    embedding: Embeddings, metadatas: Optional[List[dict]]=None,
    **kwargs: Any) ->'MatchingEngine':
"""Use from components instead."""
raise NotImplementedError(
'This method is not implemented. Instead, you should initialize the class with `MatchingEngine.from_components(...)` and then call `add_texts`'
)
|
Use from components instead.
|
test_last_message_not_human_message
|
messages = [HumanMessage(content='usr-msg-1'), AIMessage(content='ai-msg-1')]
with pytest.raises(ValueError) as info:
model.predict_messages(messages)
assert info.value.args[0] == 'last message must be a HumanMessage'
|
def test_last_message_not_human_message(model: Llama2Chat) ->None:
messages = [HumanMessage(content='usr-msg-1'), AIMessage(content=
'ai-msg-1')]
with pytest.raises(ValueError) as info:
model.predict_messages(messages)
assert info.value.args[0] == 'last message must be a HumanMessage'
| null |
__init__
|
try:
import arcgis
except ImportError as e:
raise ImportError(
'arcgis is required to use the ArcGIS Loader. Install it with pip or conda.'
) from e
try:
from bs4 import BeautifulSoup
self.BEAUTIFULSOUP = BeautifulSoup
except ImportError:
warnings.warn('BeautifulSoup not found. HTML will not be parsed.')
self.BEAUTIFULSOUP = None
self.gis = gis or arcgis.gis.GIS()
if isinstance(layer, str):
self.url = layer
self.layer = arcgis.features.FeatureLayer(layer, gis=gis)
else:
self.url = layer.url
self.layer = layer
self.layer_properties = self._get_layer_properties(lyr_desc)
self.where = where
if isinstance(out_fields, str):
self.out_fields = out_fields
elif out_fields is None:
self.out_fields = '*'
else:
self.out_fields = ','.join(out_fields)
self.return_geometry = return_geometry
self.result_record_count = result_record_count
self.return_all_records = not isinstance(result_record_count, int)
query_params = dict(where=self.where, out_fields=self.out_fields,
    return_geometry=self.return_geometry,
    return_all_records=self.return_all_records,
    result_record_count=self.result_record_count)
query_params.update(kwargs)
self.query_params = query_params
|
def __init__(self, layer: Union[str, arcgis.features.FeatureLayer],
    gis: Optional[arcgis.gis.GIS]=None, where: str='1=1',
    out_fields: Optional[Union[List[str], str]]=None,
    return_geometry: bool=False, result_record_count: Optional[int]=None,
    lyr_desc: Optional[str]=None, **kwargs: Any):
try:
import arcgis
except ImportError as e:
raise ImportError(
'arcgis is required to use the ArcGIS Loader. Install it with pip or conda.'
) from e
try:
from bs4 import BeautifulSoup
self.BEAUTIFULSOUP = BeautifulSoup
except ImportError:
warnings.warn('BeautifulSoup not found. HTML will not be parsed.')
self.BEAUTIFULSOUP = None
self.gis = gis or arcgis.gis.GIS()
if isinstance(layer, str):
self.url = layer
self.layer = arcgis.features.FeatureLayer(layer, gis=gis)
else:
self.url = layer.url
self.layer = layer
self.layer_properties = self._get_layer_properties(lyr_desc)
self.where = where
if isinstance(out_fields, str):
self.out_fields = out_fields
elif out_fields is None:
self.out_fields = '*'
else:
self.out_fields = ','.join(out_fields)
self.return_geometry = return_geometry
self.result_record_count = result_record_count
self.return_all_records = not isinstance(result_record_count, int)
query_params = dict(where=self.where, out_fields=self.out_fields,
    return_geometry=self.return_geometry,
    return_all_records=self.return_all_records,
    result_record_count=self.result_record_count)
query_params.update(kwargs)
self.query_params = query_params
| null |
test_call
|
"""Test that call gives the correct answer."""
search = WolframAlphaAPIWrapper()
output = search.run('what is 2x+18=x+5?')
assert 'x = -13' in output
|
def test_call() ->None:
"""Test that call gives the correct answer."""
search = WolframAlphaAPIWrapper()
output = search.run('what is 2x+18=x+5?')
assert 'x = -13' in output
|
Test that call gives the correct answer.
|
_import_symblai_nebula
|
from langchain_community.llms.symblai_nebula import Nebula
return Nebula
|
def _import_symblai_nebula() ->Any:
from langchain_community.llms.symblai_nebula import Nebula
return Nebula
| null |
__eq__
|
if isinstance(other, RunnableGenerator):
if hasattr(self, '_transform') and hasattr(other, '_transform'):
return self._transform == other._transform
elif hasattr(self, '_atransform') and hasattr(other, '_atransform'):
return self._atransform == other._atransform
else:
return False
else:
return False
|
def __eq__(self, other: Any) ->bool:
if isinstance(other, RunnableGenerator):
if hasattr(self, '_transform') and hasattr(other, '_transform'):
return self._transform == other._transform
elif hasattr(self, '_atransform') and hasattr(other, '_atransform'):
return self._atransform == other._atransform
else:
return False
else:
return False
| null |
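One consequence of the comparison above: plain Python functions compare by identity, so two RunnableGenerators built from separately defined (even textually identical) functions will not be equal. A quick illustration of the underlying behavior:

def f(x):
    return x

def g(x):
    return x

assert f == f  # same function object
assert f != g  # identical source, different objects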
from_llm
|
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
sparql_generation_select_chain = LLMChain(llm=llm, prompt=sparql_select_prompt)
sparql_generation_update_chain = LLMChain(llm=llm, prompt=sparql_update_prompt)
sparql_intent_chain = LLMChain(llm=llm, prompt=sparql_intent_prompt)
return cls(qa_chain=qa_chain,
    sparql_generation_select_chain=sparql_generation_select_chain,
    sparql_generation_update_chain=sparql_generation_update_chain,
    sparql_intent_chain=sparql_intent_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, *,
    qa_prompt: BasePromptTemplate=SPARQL_QA_PROMPT,
    sparql_select_prompt: BasePromptTemplate=SPARQL_GENERATION_SELECT_PROMPT,
    sparql_update_prompt: BasePromptTemplate=SPARQL_GENERATION_UPDATE_PROMPT,
    sparql_intent_prompt: BasePromptTemplate=SPARQL_INTENT_PROMPT,
    **kwargs: Any) ->GraphSparqlQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
sparql_generation_select_chain = LLMChain(llm=llm, prompt=sparql_select_prompt)
sparql_generation_update_chain = LLMChain(llm=llm, prompt=sparql_update_prompt)
sparql_intent_chain = LLMChain(llm=llm, prompt=sparql_intent_prompt)
return cls(qa_chain=qa_chain,
    sparql_generation_select_chain=sparql_generation_select_chain,
    sparql_generation_update_chain=sparql_generation_update_chain,
    sparql_intent_chain=sparql_intent_chain, **kwargs)
|
Initialize from LLM.
|
_type
|
return 'react-json-single-input'
|
@property
def _type(self) ->str:
return 'react-json-single-input'
| null |
test_saving_loading_llm
|
"""Test saving/loading an AzureML Foundation Model LLM."""
save_llm = AzureMLOnlineEndpoint(deployment_name='databricks-dolly-v2-12b-4',
    model_kwargs={'temperature': 0.03, 'top_p': 0.4, 'max_tokens': 200})
save_llm.save(file_path=tmp_path / 'azureml.yaml')
loaded_llm = load_llm(tmp_path / 'azureml.yaml')
assert loaded_llm == save_llm
|
def test_saving_loading_llm(tmp_path: Path) ->None:
"""Test saving/loading an AzureML Foundation Model LLM."""
save_llm = AzureMLOnlineEndpoint(deployment_name='databricks-dolly-v2-12b-4',
    model_kwargs={'temperature': 0.03, 'top_p': 0.4, 'max_tokens': 200})
save_llm.save(file_path=tmp_path / 'azureml.yaml')
loaded_llm = load_llm(tmp_path / 'azureml.yaml')
assert loaded_llm == save_llm
|
Test saving/loading an AzureML Foundation Model LLM.
|
output_keys
|
"""The keys to extract from the run."""
return ['prediction', 'input']
|
@property
def output_keys(self) ->List[str]:
"""The keys to extract from the run."""
return ['prediction', 'input']
|
The keys to extract from the run.
|
test_symblai_nebula_call
|
"""Test valid call to Nebula."""
conversation = """Sam: Good morning, team! Let's keep this standup concise.
We'll go in the usual order: what you did yesterday,
what you plan to do today, and any blockers. Alex, kick us off.
Alex: Morning! Yesterday, I wrapped up the UI for the user dashboard.
The new charts and widgets are now responsive.
I also had a sync with the design team to ensure the final touchups are in
line with the brand guidelines. Today, I'll start integrating the frontend with
the new API endpoints Rhea was working on.
The only blocker is waiting for some final API documentation,
but I guess Rhea can update on that.
Rhea: Hey, all! Yep, about the API documentation - I completed the majority of
the backend work for user data retrieval yesterday.
The endpoints are mostly set up, but I need to do a bit more testing today.
I'll finalize the API documentation by noon, so that should unblock Alex.
After that, I’ll be working on optimizing the database queries
for faster data fetching. No other blockers on my end.
Sam: Great, thanks Rhea. Do reach out if you need any testing assistance
or if there are any hitches with the database.
Now, my update: Yesterday, I coordinated with the client to get clarity
on some feature requirements. Today, I'll be updating our project roadmap
and timelines based on their feedback. Additionally, I'll be sitting with
the QA team in the afternoon for preliminary testing.
Blocker: I might need both of you to be available for a quick call
in case the client wants to discuss the changes live.
Alex: Sounds good, Sam. Just let us know a little in advance for the call.
Rhea: Agreed. We can make time for that.
Sam: Perfect! Let's keep the momentum going. Reach out if there are any
sudden issues or support needed. Have a productive day!
Alex: You too.
Rhea: Thanks, bye!"""
llm = Nebula(nebula_api_key='<your_api_key>')
instruction = """Identify the main objectives mentioned in this
conversation."""
output = llm.invoke(f"""{instruction}
{conversation}""")
assert isinstance(output, str)
|
def test_symblai_nebula_call() ->None:
"""Test valid call to Nebula."""
conversation = """Sam: Good morning, team! Let's keep this standup concise.
We'll go in the usual order: what you did yesterday,
what you plan to do today, and any blockers. Alex, kick us off.
Alex: Morning! Yesterday, I wrapped up the UI for the user dashboard.
The new charts and widgets are now responsive.
I also had a sync with the design team to ensure the final touchups are in
line with the brand guidelines. Today, I'll start integrating the frontend with
the new API endpoints Rhea was working on.
The only blocker is waiting for some final API documentation,
but I guess Rhea can update on that.
Rhea: Hey, all! Yep, about the API documentation - I completed the majority of
the backend work for user data retrieval yesterday.
The endpoints are mostly set up, but I need to do a bit more testing today.
I'll finalize the API documentation by noon, so that should unblock Alex.
After that, I’ll be working on optimizing the database queries
for faster data fetching. No other blockers on my end.
Sam: Great, thanks Rhea. Do reach out if you need any testing assistance
or if there are any hitches with the database.
Now, my update: Yesterday, I coordinated with the client to get clarity
on some feature requirements. Today, I'll be updating our project roadmap
and timelines based on their feedback. Additionally, I'll be sitting with
the QA team in the afternoon for preliminary testing.
Blocker: I might need both of you to be available for a quick call
in case the client wants to discuss the changes live.
Alex: Sounds good, Sam. Just let us know a little in advance for the call.
Rhea: Agreed. We can make time for that.
Sam: Perfect! Let's keep the momentum going. Reach out if there are any
sudden issues or support needed. Have a productive day!
Alex: You too.
Rhea: Thanks, bye!"""
llm = Nebula(nebula_api_key='<your_api_key>')
instruction = (
'Identify the main objectives mentioned in this \nconversation.')
output = llm.invoke(f'{instruction}\n{conversation}')
assert isinstance(output, str)
|
Test valid call to Nebula.
|
_default_meta_function
|
return {'source': meta['loc'], **meta}
|
def _default_meta_function(meta: dict, _content: Any) ->dict:
return {'source': meta['loc'], **meta}
| null |
visit_structured_query
|
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
| null |
create_retriever
|
"""Create the Milvus store and retriever."""
values['store'] = Milvus(values['embedding_function'],
    values['collection_name'], values['connection_args'],
    values['consistency_level'])
values['retriever'] = values['store'].as_retriever(
    search_kwargs={'param': values['search_params']})
return values
|
@root_validator(pre=True)
def create_retriever(cls, values: Dict) ->Dict:
"""Create the Milvus store and retriever."""
values['store'] = Milvus(values['embedding_function'],
    values['collection_name'], values['connection_args'],
    values['consistency_level'])
values['retriever'] = values['store'].as_retriever(
    search_kwargs={'param': values['search_params']})
return values
|
Create the Milvus store and retriever.
|
_import_zep
|
from langchain_community.vectorstores.zep import ZepVectorStore
return ZepVectorStore
|
def _import_zep() ->Any:
from langchain_community.vectorstores.zep import ZepVectorStore
return ZepVectorStore
| null |
_embedding_vector_column
|
"""Return the embedding vector column configs as a dictionary.
Empty if the index is not a self-managed embedding index.
"""
index_spec = (self._delta_sync_index_spec if self._is_delta_sync_index()
    else self._direct_access_index_spec)
return next(iter(index_spec.get('embedding_vector_columns') or list()), dict())
|
def _embedding_vector_column(self) ->dict:
"""Return the embedding vector column configs as a dictionary.
Empty if the index is not a self-managed embedding index.
"""
index_spec = (self._delta_sync_index_spec if self._is_delta_sync_index()
    else self._direct_access_index_spec)
return next(iter(index_spec.get('embedding_vector_columns') or list()), dict())
|
Return the embedding vector column configs as a dictionary.
Empty if the index is not a self-managed embedding index.
|
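The return line above uses a compact first-or-default idiom: next(iter(xs or []), default) yields the first element of xs, or the default when xs is missing or empty. A small standalone illustration:

spec = {}  # e.g. an index spec without 'embedding_vector_columns'
first = next(iter(spec.get('embedding_vector_columns') or []), {})
print(first)  # {} -- falls back to the default dict

spec = {'embedding_vector_columns': [{'name': 'emb', 'dimension': 768}]}
first = next(iter(spec.get('embedding_vector_columns') or []), {})
print(first)  # {'name': 'emb', 'dimension': 768}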
get_input_schema
|
if self.custom_input_type is not None:
return super().get_input_schema(config)
return self.bound.get_input_schema(merge_configs(self.config, config))
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[BaseModel]:
if self.custom_input_type is not None:
return super().get_input_schema(config)
return self.bound.get_input_schema(merge_configs(self.config, config))
| null |
_import_gmail_GmailCreateDraft
|
from langchain_community.tools.gmail import GmailCreateDraft
return GmailCreateDraft
|
def _import_gmail_GmailCreateDraft() ->Any:
from langchain_community.tools.gmail import GmailCreateDraft
return GmailCreateDraft
| null |
_on_run_create
|
"""Start a run."""
if self.root_id is None:
self.root_id = run.id
self.send_stream.send_nowait(RunLogPatch({'op': 'replace', 'path': '',
    'value': RunState(id=str(run.id), streamed_output=[],
    final_output=None, logs={})}))
if not self.include_run(run):
    return
with self.lock:
    self._counter_map_by_name[run.name] += 1
    count = self._counter_map_by_name[run.name]
    self._key_map_by_run_id[run.id] = (run.name if count == 1
        else f'{run.name}:{count}')
self.send_stream.send_nowait(RunLogPatch({'op': 'add',
    'path': f'/logs/{self._key_map_by_run_id[run.id]}',
    'value': LogEntry(id=str(run.id), name=run.name, type=run.run_type,
    tags=run.tags or [], metadata=(run.extra or {}).get('metadata', {}),
    start_time=run.start_time.isoformat(timespec='milliseconds'),
    streamed_output=[], streamed_output_str=[], final_output=None,
    end_time=None)}))
|
def _on_run_create(self, run: Run) ->None:
"""Start a run."""
if self.root_id is None:
self.root_id = run.id
self.send_stream.send_nowait(RunLogPatch({'op': 'replace', 'path': '',
    'value': RunState(id=str(run.id), streamed_output=[],
    final_output=None, logs={})}))
if not self.include_run(run):
    return
with self.lock:
    self._counter_map_by_name[run.name] += 1
    count = self._counter_map_by_name[run.name]
    self._key_map_by_run_id[run.id] = (run.name if count == 1
        else f'{run.name}:{count}')
self.send_stream.send_nowait(RunLogPatch({'op': 'add',
    'path': f'/logs/{self._key_map_by_run_id[run.id]}',
    'value': LogEntry(id=str(run.id), name=run.name, type=run.run_type,
    tags=run.tags or [], metadata=(run.extra or {}).get('metadata', {}),
    start_time=run.start_time.isoformat(timespec='milliseconds'),
    streamed_output=[], streamed_output_str=[], final_output=None,
    end_time=None)}))
|
Start a run.
|
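The key naming in the lock-guarded section deduplicates repeated run names: the first occurrence keeps the bare name, later ones get a :count suffix. A standalone sketch of that scheme:

from collections import defaultdict

counter = defaultdict(int)
keys = []
for name in ['llm', 'llm', 'retriever', 'llm']:
    counter[name] += 1
    count = counter[name]
    keys.append(name if count == 1 else f'{name}:{count}')
print(keys)  # ['llm', 'llm:2', 'retriever', 'llm:3']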
validate_environment
|
"""Dont do anything if client provided externally"""
if values.get('client') is not None:
return values
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values['credentials_profile_name'] is not None:
    session = boto3.Session(profile_name=values['credentials_profile_name'])
else:
    session = boto3.Session()
values['client'] = session.client('sagemaker-runtime',
    region_name=values['region_name'])
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e
except ImportError:
raise ImportError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Dont do anything if client provided externally"""
if values.get('client') is not None:
return values
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values['credentials_profile_name'] is not None:
    session = boto3.Session(profile_name=values['credentials_profile_name'])
else:
    session = boto3.Session()
values['client'] = session.client('sagemaker-runtime',
    region_name=values['region_name'])
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e
except ImportError:
raise ImportError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
return values
|
Don't do anything if the client is provided externally
|
__repr__
|
if hasattr(self, '_transform'):
return f'RunnableGenerator({self._transform.__name__})'
elif hasattr(self, '_atransform'):
return f'RunnableGenerator({self._atransform.__name__})'
else:
return 'RunnableGenerator(...)'
|
def __repr__(self) ->str:
if hasattr(self, '_transform'):
return f'RunnableGenerator({self._transform.__name__})'
elif hasattr(self, '_atransform'):
return f'RunnableGenerator({self._atransform.__name__})'
else:
return 'RunnableGenerator(...)'
| null |
update_task
|
"""
Update an attribute of a specified task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {'Error': error}
url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
params = {'custom_task_ids': 'true', 'team_id': self.team_id,
'include_subtasks': 'true'}
headers = self.get_headers()
payload = {query_dict['attribute_name']: query_dict['value']}
response = requests.put(url, headers=headers, params=params, json=payload)
return {'response': response}
|
def update_task(self, query: str) ->Dict:
"""
Update an attribute of a specified task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {'Error': error}
url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
params = {'custom_task_ids': 'true', 'team_id': self.team_id,
'include_subtasks': 'true'}
headers = self.get_headers()
payload = {query_dict['attribute_name']: query_dict['value']}
response = requests.put(url, headers=headers, params=params, json=payload)
return {'response': response}
|
Update an attribute of a specified task.
|
on_tool_error
|
"""Run when tool errors."""
self.metrics['step'] += 1
self.metrics['errors'] += 1
|
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None:
"""Run when tool errors."""
self.metrics['step'] += 1
self.metrics['errors'] += 1
|
Run when tool errors.
|
__post_init__
|
"""
Initialize the store.
"""
_engine_args = engine_args or {}
if 'pool_recycle' not in _engine_args:
_engine_args['pool_recycle'] = 3600
self.engine = create_engine(self.connection_string, **_engine_args)
self.create_collection()
|
def __post_init__(self, engine_args: Optional[dict]=None) ->None:
"""
Initialize the store.
"""
_engine_args = engine_args or {}
if 'pool_recycle' not in _engine_args:
_engine_args['pool_recycle'] = 3600
self.engine = create_engine(self.connection_string, **_engine_args)
self.create_collection()
|
Initialize the store.
|
__lt__
|
"""Create a Numeric less than filter expression.
Args:
other (Union[int, float]): The value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") < 18
"""
self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LT)
return RedisFilterExpression(str(self))
|
def __lt__(self, other: Union[int, float]) ->'RedisFilterExpression':
"""Create a Numeric less than filter expression.
Args:
other (Union[int, float]): The value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") < 18
"""
self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.LT)
return RedisFilterExpression(str(self))
|
Create a Numeric less than filter expression.
Args:
other (Union[int, float]): The value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") < 18
|
test_mdelete
|
store = InMemoryStore()
store.mset([('key1', 'value1'), ('key2', 'value2')])
store.mdelete(['key1'])
values = store.mget(['key1', 'key2'])
assert values == [None, 'value2']
store.mdelete(['key3'])
|
def test_mdelete() ->None:
store = InMemoryStore()
store.mset([('key1', 'value1'), ('key2', 'value2')])
store.mdelete(['key1'])
values = store.mget(['key1', 'key2'])
assert values == [None, 'value2']
store.mdelete(['key3'])
| null |
_create_retry_decorator
|
"""Define retry mechanism."""
import fireworks.client
errors = [fireworks.client.error.RateLimitError,
    fireworks.client.error.InternalServerError,
    fireworks.client.error.BadGatewayError,
    fireworks.client.error.ServiceUnavailableError]
return create_base_retry_decorator(error_types=errors,
    max_retries=llm.max_retries, run_manager=run_manager)
|
def _create_retry_decorator(llm: Fireworks, *,
    run_manager: Optional[Union[AsyncCallbackManagerForLLMRun,
        CallbackManagerForLLMRun]]=None) ->Callable[[Any], Any]:
"""Define retry mechanism."""
import fireworks.client
errors = [fireworks.client.error.RateLimitError,
    fireworks.client.error.InternalServerError,
    fireworks.client.error.BadGatewayError,
    fireworks.client.error.ServiceUnavailableError]
return create_base_retry_decorator(error_types=errors,
    max_retries=llm.max_retries, run_manager=run_manager)
|
Define retry mechanism.
|
_identifying_params
|
return {'key': 'fake'}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
return {'key': 'fake'}
| null |
test_llamacpp_invalid_model_kwargs
|
with pytest.raises(ValueError):
LlamaCpp(model_path=get_model(), model_kwargs={'n_ctx': 1024})
|
def test_llamacpp_invalid_model_kwargs() ->None:
with pytest.raises(ValueError):
LlamaCpp(model_path=get_model(), model_kwargs={'n_ctx': 1024})
| null |
__init__
|
"""
Initialize the controller.
Args:
c: The number of children to explore at each node.
"""
self.c = c
|
def __init__(self, c: int=3):
"""
Initialize the controller.
Args:
c: The number of children to explore at each node.
"""
self.c = c
|
Initialize the controller.
Args:
c: The number of children to explore at each node.
|
test_load_success
|
"""Test that returns one document"""
loader = ArxivLoader(query='1605.08386', load_max_docs=2)
docs = loader.load()
assert len(docs) == 1
print(docs[0].metadata)
print(docs[0].page_content)
assert_docs(docs)
|
def test_load_success() ->None:
"""Test that returns one document"""
loader = ArxivLoader(query='1605.08386', load_max_docs=2)
docs = loader.load()
assert len(docs) == 1
print(docs[0].metadata)
print(docs[0].page_content)
assert_docs(docs)
|
Test that the query returns one document.
|
_validate_spark_df
|
try:
from pyspark.sql import DataFrame as SparkLocalDataFrame
return isinstance(df, SparkLocalDataFrame)
except ImportError:
return False
|
def _validate_spark_df(df: Any) ->bool:
try:
from pyspark.sql import DataFrame as SparkLocalDataFrame
return isinstance(df, SparkLocalDataFrame)
except ImportError:
return False
| null |
test__unique_documents
|
assert _unique_documents(documents) == expected
|
@pytest.mark.parametrize('documents,expected', [
    ([], []),
    ([Document(page_content='foo')], [Document(page_content='foo')]),
    ([Document(page_content='foo')] * 2, [Document(page_content='foo')]),
    ([Document(page_content='foo', metadata={'bar': 'baz'})] * 2,
        [Document(page_content='foo', metadata={'bar': 'baz'})]),
    ([Document(page_content='foo', metadata={'bar': [1, 2]})] * 2,
        [Document(page_content='foo', metadata={'bar': [1, 2]})]),
    ([Document(page_content='foo', metadata={'bar': {1, 2}})] * 2,
        [Document(page_content='foo', metadata={'bar': {1, 2}})]),
    ([Document(page_content='foo', metadata={'bar': [1, 2]}),
        Document(page_content='foo', metadata={'bar': [2, 1]})],
        [Document(page_content='foo', metadata={'bar': [1, 2]}),
        Document(page_content='foo', metadata={'bar': [2, 1]})]),
])
def test__unique_documents(documents: List[Document], expected: List[Document]) ->None:
assert _unique_documents(documents) == expected
| null |
test_imessage_chat_loader_upgrade_osx11
|
chat_path = (pathlib.Path(__file__).parent / 'data' /
    'imessage_chat_upgrade_osx_11.db')
loader = imessage.IMessageChatLoader(str(chat_path))
chat_sessions = list(utils.map_ai_messages(loader.lazy_load(),
    sender='testemail@gmail.com'))
assert chat_sessions, 'Chat sessions should not be empty'
assert chat_sessions[0]['messages'], 'Chat messages should not be empty'
first_message = chat_sessions[0]['messages'][0]
assert 'Yeh' in first_message.content, 'Chat content mismatch'
expected_message_time = 720845450393148160
assert first_message.additional_kwargs['message_time'] == expected_message_time, 'unexpected time'
expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148)
assert first_message.additional_kwargs['message_time_as_datetime'] == expected_parsed_time, 'date failed to parse'
assert first_message.additional_kwargs['is_from_me'] is False, 'is_from_me failed to parse'
|
def test_imessage_chat_loader_upgrade_osx11() ->None:
    chat_path = (pathlib.Path(__file__).parent / 'data' /
        'imessage_chat_upgrade_osx_11.db')
    loader = imessage.IMessageChatLoader(str(chat_path))
    chat_sessions = list(utils.map_ai_messages(loader.lazy_load(),
        sender='testemail@gmail.com'))
    assert chat_sessions, 'Chat sessions should not be empty'
    assert chat_sessions[0]['messages'], 'Chat messages should not be empty'
    first_message = chat_sessions[0]['messages'][0]
    assert 'Yeh' in first_message.content, 'Chat content mismatch'
    expected_message_time = 720845450393148160
    assert first_message.additional_kwargs['message_time'] == expected_message_time, 'unexpected time'
    expected_parsed_time = datetime.datetime(2023, 11, 5, 2, 50, 50, 393148)
    assert first_message.additional_kwargs['message_time_as_datetime'] == expected_parsed_time, 'date failed to parse'
    assert first_message.additional_kwargs['is_from_me'] is False, 'is_from_me failed to parse'
| null |
close
|
"""Close the cloud sandbox."""
self._uploaded_files = []
self.session.close()
|
def close(self) ->None:
"""Close the cloud sandbox."""
self._uploaded_files = []
self.session.close()
|
Close the cloud sandbox.
|
_Assert
|
self.fill('assert ')
self.dispatch(t.test)
if t.msg:
self.write(', ')
self.dispatch(t.msg)
|
def _Assert(self, t):
self.fill('assert ')
self.dispatch(t.test)
if t.msg:
self.write(', ')
self.dispatch(t.msg)
| null |
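For context, _Assert above handles an ast.Assert node, whose test and msg fields map to the two dispatch calls. The stdlib shows the same shape (assuming Python 3.9+ for ast.unparse):

import ast

node = ast.parse("assert x > 0, 'must be positive'").body[0]
print(type(node).__name__)     # Assert
print(ast.unparse(node.test))  # x > 0
print(ast.unparse(node.msg))   # 'must be positive'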
embed_query
|
"""Generate query embeddings using FastEmbed.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
query_embeddings: np.ndarray = next(self._model.query_embed(text))
return query_embeddings.tolist()
|
def embed_query(self, text: str) ->List[float]:
"""Generate query embeddings using FastEmbed.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
query_embeddings: np.ndarray = next(self._model.query_embed(text))
return query_embeddings.tolist()
|
Generate query embeddings using FastEmbed.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
requires_reference
|
"""
This evaluator requires a reference.
"""
return True
|
@property
def requires_reference(self) ->bool:
"""
    This evaluator requires a reference.
"""
return True
|
This evaluator requires a reference.
|
stream
|
"""Enables streaming over steps taken to reach final output."""
config = ensure_config(config)
iterator = AgentExecutorIterator(self, input, config.get('callbacks'),
    tags=config.get('tags'), metadata=config.get('metadata'),
    run_name=config.get('run_name'), yield_actions=True, **kwargs)
for step in iterator:
yield step
|
def stream(self, input: Union[Dict[str, Any], Any],
    config: Optional[RunnableConfig]=None, **kwargs: Any) ->Iterator[AddableDict]:
"""Enables streaming over steps taken to reach final output."""
config = ensure_config(config)
iterator = AgentExecutorIterator(self, input, config.get('callbacks'),
    tags=config.get('tags'), metadata=config.get('metadata'),
    run_name=config.get('run_name'), yield_actions=True, **kwargs)
for step in iterator:
yield step
|
Enables streaming over steps taken to reach final output.
|
_get_documents
|
"""Fetch content from page and return Documents.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of documents.
"""
attachments = self._get_attachments(soup)
self._download_attachments(attachments)
documents = self._load_documents()
return documents
|
def _get_documents(self, soup: Any) ->List[Document]:
"""Fetch content from page and return Documents.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of documents.
"""
attachments = self._get_attachments(soup)
self._download_attachments(attachments)
documents = self._load_documents()
return documents
|
Fetch content from page and return Documents.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of documents.
|
_import_ollama
|
from langchain_community.llms.ollama import Ollama
return Ollama
|
def _import_ollama() ->Any:
from langchain_community.llms.ollama import Ollama
return Ollama
| null |
api_passed_via_constructor_fixture
|
"""Fixture to create an AzureMLChatOnlineEndpoint instance
with the API key passed via the constructor."""
azure_chat = AzureMLChatOnlineEndpoint(endpoint_url=
'https://<your-endpoint>.<your_region>.inference.ml.azure.com/score',
endpoint_api_key='my-api-key')
return azure_chat
|
@pytest.fixture(scope='class')
def api_passed_via_constructor_fixture() ->AzureMLChatOnlineEndpoint:
"""Fixture to create an AzureMLChatOnlineEndpoint instance
    with the API key passed via the constructor."""
azure_chat = AzureMLChatOnlineEndpoint(endpoint_url=
'https://<your-endpoint>.<your_region>.inference.ml.azure.com/score',
endpoint_api_key='my-api-key')
return azure_chat
|
Fixture to create an AzureMLChatOnlineEndpoint instance
with the API key passed via the constructor.
|
get_collection
|
from pymongo import MongoClient
test_client: MongoClient = MongoClient(CONNECTION_STRING)
return test_client[DB_NAME][COLLECTION_NAME]
|
def get_collection() ->Any:
from pymongo import MongoClient
test_client: MongoClient = MongoClient(CONNECTION_STRING)
return test_client[DB_NAME][COLLECTION_NAME]
| null |
_create_space
|
"""
Create VectorStore space
Args:
dim: dimension of the vector
Returns:
int: 0 on failure, 1 on success
"""
space_config = {'name': self.using_table_name, 'partition_num': 1,
    'replica_num': 1,
    'engine': {'name': 'gamma', 'index_size': 1, 'retrieval_type': 'FLAT',
        'retrieval_param': {'metric_type': 'L2'}},
    'properties': {'text': {'type': 'string'},
        'metadata': {'type': 'string'},
        'text_embedding': {'type': 'vector', 'index': True,
            'dimension': dim, 'store_type': 'MemoryOnly'}}}
response_code = self.vearch.create_space(self.using_db_name, space_config)
return response_code
|
def _create_space(self, dim: int=1024) ->int:
"""
Create VectorStore space
    Args:
    dim: dimension of the vector
    Returns:
    int: 0 on failure, 1 on success
"""
space_config = {'name': self.using_table_name, 'partition_num': 1,
    'replica_num': 1,
    'engine': {'name': 'gamma', 'index_size': 1, 'retrieval_type': 'FLAT',
        'retrieval_param': {'metric_type': 'L2'}},
    'properties': {'text': {'type': 'string'},
        'metadata': {'type': 'string'},
        'text_embedding': {'type': 'vector', 'index': True,
            'dimension': dim, 'store_type': 'MemoryOnly'}}}
response_code = self.vearch.create_space(self.using_db_name, space_config)
return response_code
|
Create VectorStore space
Args:
dim: dimension of the vector
Returns:
int: 0 on failure, 1 on success
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
test_fireworks_batch
|
"""Test batch tokens from ChatFireworks."""
result = chat.batch(['What is the weather in Redwood City, CA today?',
    'What is the weather in Redwood City, CA today?',
    'What is the weather in Redwood City, CA today?'],
    config={'max_concurrency': 2}, stop=[','])
for token in result:
assert isinstance(token.content, str)
assert token.content[-1] == ',', token.content
|
@pytest.mark.scheduled
def test_fireworks_batch(chat: ChatFireworks) ->None:
"""Test batch tokens from ChatFireworks."""
result = chat.batch(['What is the weather in Redwood City, CA today?',
    'What is the weather in Redwood City, CA today?',
    'What is the weather in Redwood City, CA today?'],
    config={'max_concurrency': 2}, stop=[','])
for token in result:
assert isinstance(token.content, str)
assert token.content[-1] == ',', token.content
|
Test batch tokens from ChatFireworks.
|
count
|
"""
Count the records of a store in JaguarDB.
Args: no args
Returns: (int) number of records in the pod store
"""
podstore = self._pod + '.' + self._store
q = 'select count() from ' + podstore
js = self.run(q)
if isinstance(js, list) and len(js) == 0:
return 0
jd = json.loads(js[0])
return int(jd['data'])
|
def count(self) ->int:
"""
    Count the records of a store in JaguarDB.
    Args: no args
    Returns: (int) number of records in the pod store
"""
podstore = self._pod + '.' + self._store
q = 'select count() from ' + podstore
js = self.run(q)
if isinstance(js, list) and len(js) == 0:
return 0
jd = json.loads(js[0])
return int(jd['data'])
|
Count the records of a store in JaguarDB.
Args: no args
Returns: (int) number of records in the pod store
|
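A note on the parsing step: count() assumes run() returns a list whose first element is a JSON string carrying the count under a 'data' key. A tiny sketch of that assumed response shape:

import json

js = ['{"data": 42}']  # assumed shape of the run() response
print(int(json.loads(js[0])['data']))  # 42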
evaluation_name
|
return f'pairwise_embedding_{self.distance_metric.value}_distance'
|
@property
def evaluation_name(self) ->str:
return f'pairwise_embedding_{self.distance_metric.value}_distance'
| null |
validate_environment
|
"""Validate environment variables."""
together_api_key = convert_to_secret_str(values.get('together_api_key') or
os.getenv('TOGETHER_API_KEY') or '')
values['together_api_key'] = together_api_key
together.api_key = together_api_key.get_secret_value()
values['_client'] = together.Together()
return values
|
@root_validator()
def validate_environment(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""Validate environment variables."""
together_api_key = convert_to_secret_str(values.get('together_api_key') or
os.getenv('TOGETHER_API_KEY') or '')
values['together_api_key'] = together_api_key
together.api_key = together_api_key.get_secret_value()
values['_client'] = together.Together()
return values
|
Validate environment variables.
|
__init__
|
"""Initialize with file path."""
self.file_path = file_path
super().__init__(mode=mode, **unstructured_kwargs)
|
def __init__(self, file_path: Union[str, List[str]], mode: str='single',
    **unstructured_kwargs: Any):
"""Initialize with file path."""
self.file_path = file_path
super().__init__(mode=mode, **unstructured_kwargs)
|
Initialize with file path.
|
get_request_body_for_operation
|
"""Get the request body for a given operation."""
from openapi_pydantic import Reference
request_body = operation.requestBody
if isinstance(request_body, Reference):
request_body = self._get_root_referenced_request_body(request_body)
return request_body
|
def get_request_body_for_operation(self, operation: Operation) ->Optional[RequestBody]:
"""Get the request body for a given operation."""
from openapi_pydantic import Reference
request_body = operation.requestBody
if isinstance(request_body, Reference):
request_body = self._get_root_referenced_request_body(request_body)
return request_body
|
Get the request body for a given operation.
|
add
|
"""Add more documents."""
|
@abstractmethod
def add(self, texts: Dict[str, Document]) ->None:
"""Add more documents."""
|
Add more documents.
|
drop_index
|
"""
Drop an existing index.
Args:
index_name (str): Name of the index to drop.
Returns:
bool: True if the index is dropped successfully.
"""
try:
from tair import Tair as TairClient
except ImportError:
raise ValueError(
'Could not import tair python package. Please install it with `pip install tair`.'
)
url = get_from_dict_or_env(kwargs, 'tair_url', 'TAIR_URL')
try:
if 'tair_url' in kwargs:
kwargs.pop('tair_url')
client = TairClient.from_url(url=url, **kwargs)
except ValueError as e:
raise ValueError(f'Tair connection error: {e}')
ret = client.tvs_del_index(index_name)
if ret == 0:
logger.info('Index does not exist')
return False
return True
|
@staticmethod
def drop_index(index_name: str='langchain', **kwargs: Any) ->bool:
"""
Drop an existing index.
Args:
index_name (str): Name of the index to drop.
Returns:
bool: True if the index is dropped successfully.
"""
try:
from tair import Tair as TairClient
except ImportError:
raise ValueError(
'Could not import tair python package. Please install it with `pip install tair`.'
)
url = get_from_dict_or_env(kwargs, 'tair_url', 'TAIR_URL')
try:
if 'tair_url' in kwargs:
kwargs.pop('tair_url')
client = TairClient.from_url(url=url, **kwargs)
except ValueError as e:
raise ValueError(f'Tair connection error: {e}')
ret = client.tvs_del_index(index_name)
if ret == 0:
logger.info('Index does not exist')
return False
return True
|
Drop an existing index.
Args:
index_name (str): Name of the index to drop.
Returns:
bool: True if the index is dropped successfully.
|
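A hedged usage sketch of drop_index (the URL is a placeholder; this assumes the tair package is installed, a Tair instance is reachable, and the Tair vectorstore export from langchain_community):

from langchain_community.vectorstores import Tair

# tair_url may instead come from the TAIR_URL environment variable
ok = Tair.drop_index(index_name='langchain', tair_url='redis://localhost:6379')
print('dropped' if ok else 'index did not exist')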
_generate
|
"""Call out to Fireworks endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
"""
params = {'model': self.model, **self.model_kwargs}
sub_prompts = self.get_batch_prompts(prompts)
choices = []
for _prompts in sub_prompts:
response = completion_with_retry_batching(self, self.use_retry,
    prompt=_prompts, run_manager=run_manager, stop=stop, **params)
choices.extend(response)
return self.create_llm_result(choices, prompts)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
    run_manager: Optional[CallbackManagerForLLMRun]=None,
    **kwargs: Any) ->LLMResult:
"""Call out to Fireworks endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
"""
params = {'model': self.model, **self.model_kwargs}
sub_prompts = self.get_batch_prompts(prompts)
choices = []
for _prompts in sub_prompts:
response = completion_with_retry_batching(self, self.use_retry,
    prompt=_prompts, run_manager=run_manager, stop=stop, **params)
choices.extend(response)
return self.create_llm_result(choices, prompts)
|
Call out to Fireworks endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
|
create_json_agent
|
"""Construct a json agent from an LLM and tools."""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = toolkit.get_tools()
prompt_params = ({'format_instructions': format_instructions}
    if format_instructions is not None else {})
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix=suffix,
    input_variables=input_variables, **prompt_params)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
    callback_manager=callback_manager, verbose=verbose,
    **agent_executor_kwargs or {})
|
def create_json_agent(llm: BaseLanguageModel, toolkit: JsonToolkit,
    callback_manager: Optional[BaseCallbackManager]=None,
    prefix: str=JSON_PREFIX, suffix: str=JSON_SUFFIX,
    format_instructions: Optional[str]=None,
    input_variables: Optional[List[str]]=None, verbose: bool=False,
    agent_executor_kwargs: Optional[Dict[str, Any]]=None,
    **kwargs: Any) ->AgentExecutor:
"""Construct a json agent from an LLM and tools."""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = toolkit.get_tools()
prompt_params = ({'format_instructions': format_instructions}
    if format_instructions is not None else {})
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix=suffix,
    input_variables=input_variables, **prompt_params)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
    callback_manager=callback_manager, verbose=verbose,
    **agent_executor_kwargs or {})
|
Construct a json agent from an LLM and tools.
|
messages
|
"""Retrieve the messages from Neo4j"""
query = (
f'MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT*0..{self._window * 2}]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node RETURN {{data:{{content: node.content}}, type:node.type}} AS result'
)
records, _, _ = self._driver.execute_query(query,
    {'session_id': self._session_id})
messages = messages_from_dict([el['result'] for el in records])
return messages
|
@property
def messages(self) ->List[BaseMessage]:
"""Retrieve the messages from Neo4j"""
query = (
f'MATCH (s:`{self._node_label}`)-[:LAST_MESSAGE]->(last_message) WHERE s.id = $session_id MATCH p=(last_message)<-[:NEXT*0..{self._window * 2}]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node RETURN {{data:{{content: node.content}}, type:node.type}} AS result'
)
records, _, _ = self._driver.execute_query(query,
    {'session_id': self._session_id})
messages = messages_from_dict([el['result'] for el in records])
return messages
|
Retrieve the messages from Neo4j
|
get_non_abstract_subclasses
|
to_skip = {AmadeusBaseTool, BaseBrowserTool, GmailBaseTool, O365BaseTool,
    SlackBaseTool}
subclasses = []
for subclass in cls.__subclasses__():
    if (not getattr(subclass, '__abstract__', None)
            and not subclass.__name__.startswith('_')
            and subclass not in to_skip):
        subclasses.append(subclass)
        sc = get_non_abstract_subclasses(subclass)
        subclasses.extend(sc)
return subclasses
|
def get_non_abstract_subclasses(cls: Type[BaseTool]) ->List[Type[BaseTool]]:
    to_skip = {AmadeusBaseTool, BaseBrowserTool, GmailBaseTool,
        O365BaseTool, SlackBaseTool}
    subclasses = []
    for subclass in cls.__subclasses__():
        if (not getattr(subclass, '__abstract__', None)
                and not subclass.__name__.startswith('_')
                and subclass not in to_skip):
            subclasses.append(subclass)
            sc = get_non_abstract_subclasses(subclass)
            subclasses.extend(sc)
    return subclasses
| null |
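The recursion above is a depth-first walk over cls.__subclasses__(). A minimal standalone version of the same traversal:

class Base: ...
class A(Base): ...
class B(A): ...

def walk(cls):
    found = []
    for sub in cls.__subclasses__():
        found.append(sub)
        found.extend(walk(sub))  # recurse into grandchildren
    return found

print([c.__name__ for c in walk(Base)])  # ['A', 'B']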
create_vectorstore_router_agent
|
"""Construct a VectorStore router agent from an LLM and tools.
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores
callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ]
prefix (str, optional): The prefix prompt for the router agent. If not provided uses default ROUTER_PREFIX.
verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ]
agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ]
**kwargs: Additional named parameters to pass to the ZeroShotAgent.
Returns:
AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response.
"""
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
    callback_manager=callback_manager, verbose=verbose,
    **agent_executor_kwargs or {})
|
def create_vectorstore_router_agent(llm: BaseLanguageModel,
    toolkit: VectorStoreRouterToolkit,
    callback_manager: Optional[BaseCallbackManager]=None,
    prefix: str=ROUTER_PREFIX, verbose: bool=False,
    agent_executor_kwargs: Optional[Dict[str, Any]]=None,
    **kwargs: Any) ->AgentExecutor:
"""Construct a VectorStore router agent from an LLM and tools.
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores
callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ]
prefix (str, optional): The prefix prompt for the router agent. If not provided uses default ROUTER_PREFIX.
verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ]
agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ]
**kwargs: Additional named parameters to pass to the ZeroShotAgent.
Returns:
AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response.
"""
tools = toolkit.get_tools()
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
    callback_manager=callback_manager, verbose=verbose,
    **agent_executor_kwargs or {})
|
Construct a VectorStore router agent from an LLM and tools.
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores
callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ]
prefix (str, optional): The prefix prompt for the router agent. If not provided uses default ROUTER_PREFIX.
verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ]
agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ]
**kwargs: Additional named parameters to pass to the ZeroShotAgent.
Returns:
AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response.
|
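A minimal usage sketch for the function above; the two vector stores (sotu_store, ruff_store), their names, descriptions, and the query are hypothetical placeholders you would replace with real, pre-built stores.
from langchain.agents.agent_toolkits import VectorStoreInfo, VectorStoreRouterToolkit
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
# sotu_store and ruff_store are assumed to be previously constructed vector stores.
toolkit = VectorStoreRouterToolkit(vectorstores=[VectorStoreInfo(name=
    'state_of_union', description='the most recent State of the Union address',
    vectorstore=sotu_store), VectorStoreInfo(name='ruff_docs', description=
    'documentation for the Ruff linter', vectorstore=ruff_store)], llm=llm)
agent_executor = create_vectorstore_router_agent(llm=llm, toolkit=toolkit)
agent_executor.run('What does Ruff say about unused imports?')
|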
get_tools
|
"""Get the tools in the toolkit."""
json_agent_tool = Tool(name='json_explorer', func=self.json_agent.run,
description=DESCRIPTION)
request_toolkit = RequestsToolkit(requests_wrapper=self.requests_wrapper)
return [*request_toolkit.get_tools(), json_agent_tool]
|
def get_tools(self) ->List[BaseTool]:
"""Get the tools in the toolkit."""
json_agent_tool = Tool(name='json_explorer', func=self.json_agent.run,
description=DESCRIPTION)
request_toolkit = RequestsToolkit(requests_wrapper=self.requests_wrapper)
return [*request_toolkit.get_tools(), json_agent_tool]
|
Get the tools in the toolkit.
|
_create_key_encoder
|
"""Create an encoder for a key."""
return partial(_key_encoder, namespace=namespace)
|
def _create_key_encoder(namespace: str) ->Callable[[str], str]:
"""Create an encoder for a key."""
return partial(_key_encoder, namespace=namespace)
|
Create an encoder for a key.
|
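A self-contained sketch of how the returned closure is used; the _key_encoder body here (simple namespace prefixing) is an assumption for illustration, and the real helper may hash the key instead.
from functools import partial
from typing import Callable

def _key_encoder(key: str, namespace: str) ->str:
    """Hypothetical helper: prefix the key with its namespace."""
    return namespace + '/' + key

def _create_key_encoder(namespace: str) ->Callable[[str], str]:
    """Create an encoder for a key."""
    return partial(_key_encoder, namespace=namespace)

encode = _create_key_encoder('embeddings')
assert encode('document-1') == 'embeddings/document-1'
|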
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
_get_wolfram_alpha
|
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
|
def _get_wolfram_alpha(**kwargs: Any) ->BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
| null |
format_tool_to_openai_function
|
"""Format tool into the OpenAI function API."""
if tool.args_schema:
return convert_pydantic_to_openai_function(tool.args_schema, name=tool.
name, description=tool.description)
else:
return {'name': tool.name, 'description': tool.description,
'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type':
'string'}}, 'required': ['__arg1'], 'type': 'object'}}
|
def format_tool_to_openai_function(tool: BaseTool) ->FunctionDescription:
"""Format tool into the OpenAI function API."""
if tool.args_schema:
return convert_pydantic_to_openai_function(tool.args_schema, name=
tool.name, description=tool.description)
else:
return {'name': tool.name, 'description': tool.description,
'parameters': {'properties': {'__arg1': {'title': '__arg1',
'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}
|
Format tool into the OpenAI function API.
|
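A short sketch of the fallback branch above: a Tool built without an args_schema yields the single-argument '__arg1' schema. The word_count tool is a made-up example.
from langchain.tools import Tool

def word_count(text: str) ->str:
    return str(len(text.split()))

tool = Tool(name='word_count', func=word_count, description=
    'Count the words in a string.')
function = format_tool_to_openai_function(tool)
# No args_schema was set, so the generic one-string-argument schema applies.
assert function['parameters']['required'] == ['__arg1']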
deanonymizer_mapping
|
"""Return the deanonymizer mapping"""
return self._deanonymizer_mapping.data
|
@property
def deanonymizer_mapping(self) ->MappingDataType:
"""Return the deanonymizer mapping"""
return self._deanonymizer_mapping.data
|
Return the deanonymizer mapping
|
test_initialization
|
"""Test chat model initialization."""
Chat__ModuleName__()
|
def test_initialization() ->None:
"""Test chat model initialization."""
Chat__ModuleName__()
|
Test chat model initialization.
|
test__convert_dict_to_message_ai
|
message_dict = {'role': 'assistant', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = AIMessage(content='foo')
assert result == expected_output
|
def test__convert_dict_to_message_ai() ->None:
message_dict = {'role': 'assistant', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = AIMessage(content='foo')
assert result == expected_output
| null |
mock_index
|
from databricks.vector_search.client import VectorSearchIndex
index = MagicMock(spec=VectorSearchIndex)
index.describe.return_value = index_details
return index
|
def mock_index(index_details: dict) ->MagicMock:
from databricks.vector_search.client import VectorSearchIndex
index = MagicMock(spec=VectorSearchIndex)
index.describe.return_value = index_details
return index
| null |
test_stream
|
"""Test streaming tokens from Anthropic."""
llm = ChatAnthropicMessages(model_name='claude-instant-1.2')
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
|
def test_stream() ->None:
"""Test streaming tokens from Anthropic."""
llm = ChatAnthropicMessages(model_name='claude-instant-1.2')
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
|
Test streaming tokens from Anthropic.
|
test_simple_question
|
"""Test simple question that should not need python."""
question = 'Which mammal lays the biggest eggs?'
output = fake_llm_checker_chain.run(question)
assert output == "I still don't know."
|
def test_simple_question(fake_llm_checker_chain: LLMCheckerChain) ->None:
"""Test simple question that should not need python."""
question = 'Which mammal lays the biggest eggs?'
output = fake_llm_checker_chain.run(question)
assert output == "I still don't know."
|
Test simple question that should not need python.
|
serialize_outputs
|
if not outputs.get('generations'):
raise ValueError('Cannot evaluate LLM Run without generations.')
generations: List[Dict] = outputs['generations']
if not generations:
raise ValueError('Cannot evaluate LLM run with empty generations.')
first_generation: Dict = generations[0]
if isinstance(first_generation, list):
first_generation = first_generation[0]
if 'message' in first_generation:
output_ = self.serialize_chat_messages([first_generation['message']])
else:
output_ = first_generation['text']
return output_
|
def serialize_outputs(self, outputs: Dict) ->str:
if not outputs.get('generations'):
raise ValueError('Cannot evaluate LLM Run without generations.')
generations: List[Dict] = outputs['generations']
if not generations:
raise ValueError('Cannot evaluate LLM run with empty generations.')
first_generation: Dict = generations[0]
if isinstance(first_generation, list):
first_generation = first_generation[0]
if 'message' in first_generation:
output_ = self.serialize_chat_messages([first_generation['message']])
else:
output_ = first_generation['text']
return output_
| null |
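A sketch of the outputs shape this method handles, with hypothetical values: it unwraps the (possibly nested) generations list and prefers a chat 'message' entry over plain 'text'.
# Hypothetical LLM run outputs in the plain-text shape:
outputs = {'generations': [[{'text': 'Paris is the capital of France.'}]]}
# serialize_outputs(outputs) would unwrap the nested list and return
# 'Paris is the capital of France.'; if the first generation carried a
# 'message' key instead, it would go through serialize_chat_messages.
|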
transform
|
if hasattr(self, 'func'):
for output in self._transform_stream_with_config(input, self._transform,
self._config(config, self.func), **kwargs):
yield output
else:
raise TypeError(
    'Cannot stream a coroutine function synchronously. Use `astream` instead.'
)
|
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig
]=None, **kwargs: Optional[Any]) ->Iterator[Output]:
if hasattr(self, 'func'):
for output in self._transform_stream_with_config(input, self.
_transform, self._config(config, self.func), **kwargs):
yield output
else:
raise TypeError(
            'Cannot stream a coroutine function synchronously. Use `astream` instead.'
)
| null |
_import_ainetwork_rule
|
from langchain_community.tools.ainetwork.rule import AINRuleOps
return AINRuleOps
|
def _import_ainetwork_rule() ->Any:
from langchain_community.tools.ainetwork.rule import AINRuleOps
return AINRuleOps
| null |
buffer_as_messages
|
"""Exposes the buffer as a list of messages in case return_messages is True."""
return self.chat_memory.messages
|
@property
def buffer_as_messages(self) ->List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is True."""
return self.chat_memory.messages
|
Exposes the buffer as a list of messages in case return_messages is True.
|
_collapse_chain
|
if self.collapse_documents_chain is not None:
return self.collapse_documents_chain
else:
return self.combine_documents_chain
|
@property
def _collapse_chain(self) ->BaseCombineDocumentsChain:
if self.collapse_documents_chain is not None:
return self.collapse_documents_chain
else:
return self.combine_documents_chain
| null |
test_chat_openai_streaming_llm_output_contains_model_name
|
"""Test llm_output contains model_name."""
chat = ChatOpenAI(max_tokens=10, streaming=True)
message = HumanMessage(content='Hello')
llm_result = chat.generate([[message]])
assert llm_result.llm_output is not None
assert llm_result.llm_output['model_name'] == chat.model_name
|
def test_chat_openai_streaming_llm_output_contains_model_name() ->None:
"""Test llm_output contains model_name."""
chat = ChatOpenAI(max_tokens=10, streaming=True)
message = HumanMessage(content='Hello')
llm_result = chat.generate([[message]])
assert llm_result.llm_output is not None
assert llm_result.llm_output['model_name'] == chat.model_name
|
Test llm_output contains model_name.
|
test_pydantic_output_parser_fail
|
"""Test PydanticOutputParser where completion result fails schema validation."""
pydantic_parser: PydanticOutputParser[TestModel] = PydanticOutputParser(
pydantic_object=TestModel)
try:
    pydantic_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print('parse_result:', e)
assert 'Failed to parse TestModel from completion' in str(e)
else:
assert False, 'Expected OutputParserException'
|
def test_pydantic_output_parser_fail() ->None:
"""Test PydanticOutputParser where completion result fails schema validation."""
pydantic_parser: PydanticOutputParser[TestModel] = PydanticOutputParser(
pydantic_object=TestModel)
try:
        pydantic_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print('parse_result:', e)
assert 'Failed to parse TestModel from completion' in str(e)
else:
assert False, 'Expected OutputParserException'
|
Test PydanticOutputParser where completion result fails schema validation.
|
test_redis_cache_chat
|
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN),
ttl=1)
llm = FakeChatModel()
params = llm.dict()
params['stop'] = None
with pytest.warns():
llm.predict('foo')
langchain.llm_cache.redis.flushall()
|
@pytest.mark.requires('upstash_redis')
def test_redis_cache_chat() ->None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=
TOKEN), ttl=1)
llm = FakeChatModel()
params = llm.dict()
params['stop'] = None
with pytest.warns():
llm.predict('foo')
langchain.llm_cache.redis.flushall()
| null |
visit_Attribute
|
if isinstance(node.ctx, ast.Load):
parent = node.value
attr_expr = node.attr
while isinstance(parent, ast.Attribute):
attr_expr = parent.attr + '.' + attr_expr
parent = parent.value
if isinstance(parent, ast.Name):
self.loads.add(parent.id + '.' + attr_expr)
self.loads.discard(parent.id)
|
def visit_Attribute(self, node: ast.Attribute) ->Any:
if isinstance(node.ctx, ast.Load):
parent = node.value
attr_expr = node.attr
while isinstance(parent, ast.Attribute):
attr_expr = parent.attr + '.' + attr_expr
parent = parent.value
if isinstance(parent, ast.Name):
self.loads.add(parent.id + '.' + attr_expr)
self.loads.discard(parent.id)
| null |
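A self-contained sketch of the visitor above in action; the LoadCollector wrapper and its visit_Name companion are assumptions added so the snippet runs on its own.
import ast

class LoadCollector(ast.NodeVisitor):

    def __init__(self) ->None:
        self.loads: set = set()

    def visit_Name(self, node: ast.Name) ->None:
        # Hypothetical companion: record bare name loads, so the discard
        # in visit_Attribute has something to prune.
        if isinstance(node.ctx, ast.Load):
            self.loads.add(node.id)

    def visit_Attribute(self, node: ast.Attribute) ->None:
        if isinstance(node.ctx, ast.Load):
            parent = node.value
            attr_expr = node.attr
            while isinstance(parent, ast.Attribute):
                attr_expr = parent.attr + '.' + attr_expr
                parent = parent.value
            if isinstance(parent, ast.Name):
                self.loads.add(parent.id + '.' + attr_expr)
                self.loads.discard(parent.id)

collector = LoadCollector()
collector.visit(ast.parse('value = config.db.host'))
# The full dotted chain is recorded in place of the bare base name.
assert collector.loads == {'config.db.host'}
|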
from_llm
|
logger.warning(
'Using a deprecated class. Please use `from langchain.chains import HypotheticalDocumentEmbedder` instead'
)
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
return H.from_llm(*args, **kwargs)
|
@classmethod
def from_llm(cls, *args: Any, **kwargs: Any) ->Any:
logger.warning(
'Using a deprecated class. Please use `from langchain.chains import HypotheticalDocumentEmbedder` instead'
)
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
return H.from_llm(*args, **kwargs)
| null |
_extract_code
|
return '\n'.join(self.source_lines[start_idx:end_idx]).rstrip('\n')
|
def _extract_code(self, start_idx: int, end_idx: int) ->str:
return '\n'.join(self.source_lines[start_idx:end_idx]).rstrip('\n')
| null |
is_chat_model
|
"""Check if the language model is a chat model.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseChatModel model, False otherwise.
"""
return isinstance(llm, BaseChatModel)
|
def is_chat_model(llm: BaseLanguageModel) ->bool:
"""Check if the language model is a chat model.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseChatModel model, False otherwise.
"""
return isinstance(llm, BaseChatModel)
|
Check if the language model is a chat model.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseChatModel model, False otherwise.
|
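A quick sketch using LangChain's fake in-memory models to illustrate the check; the import paths assume the classic langchain package layout.
from langchain.chat_models.fake import FakeListChatModel
from langchain.llms.fake import FakeListLLM

assert is_chat_model(FakeListChatModel(responses=['hi']))
assert not is_chat_model(FakeListLLM(responses=['hi']))
|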
load
|
"""Load all records from FeatureLayer."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""Load all records from FeatureLayer."""
return list(self.lazy_load())
|
Load all records from FeatureLayer.
|
save
|
"""Save prompt to file.
Args:
file_path: path to file.
"""
raise NotImplementedError()
|
def save(self, file_path: Union[Path, str]) ->None:
"""Save prompt to file.
Args:
file_path: path to file.
"""
raise NotImplementedError()
|
Save prompt to file.
Args:
file_path: path to file.
|
test_conversation_chain_errors_bad_prompt
|
"""Test that conversation chain raise error with bad prompt."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=[], template='nothing here')
with pytest.raises(ValueError):
ConversationChain(llm=llm, prompt=prompt)
|
def test_conversation_chain_errors_bad_prompt() ->None:
"""Test that conversation chain raise error with bad prompt."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=[], template='nothing here')
with pytest.raises(ValueError):
ConversationChain(llm=llm, prompt=prompt)
|
Test that conversation chain raises an error with a bad prompt.
|
_generate
|
"""Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
"""
final_chunk = self._chat_stream_with_aggregation(messages, stop=stop,
run_manager=run_manager, verbose=self.verbose, **kwargs)
chat_generation = ChatGeneration(message=AIMessage(content=final_chunk.text
), generation_info=final_chunk.generation_info)
return ChatResult(generations=[chat_generation])
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
"""Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
"""
final_chunk = self._chat_stream_with_aggregation(messages, stop=stop,
run_manager=run_manager, verbose=self.verbose, **kwargs)
chat_generation = ChatGeneration(message=AIMessage(content=final_chunk.
text), generation_info=final_chunk.generation_info)
return ChatResult(generations=[chat_generation])
|
Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
|
_import_javelin_ai_gateway
|
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
|
def _import_javelin_ai_gateway() ->Any:
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
| null |
test_confluence_loader_when_content_format_and_keep_markdown_format_enabled
|
mock_confluence.get_all_pages_from_space.return_value = [self.
_get_mock_page('123', ContentFormat.VIEW), self._get_mock_page('456',
ContentFormat.VIEW)]
mock_confluence.get_all_restrictions_for_content.side_effect = [self.
_get_mock_page_restrictions('123'), self._get_mock_page_restrictions('456')
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW, keep_markdown_format=True, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == 'Content 123\n\n'
assert documents[1].page_content == 'Content 456\n\n'
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
|
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock) ->None:
mock_confluence.get_all_pages_from_space.return_value = [self.
_get_mock_page('123', ContentFormat.VIEW), self._get_mock_page(
'456', ContentFormat.VIEW)]
mock_confluence.get_all_restrictions_for_content.side_effect = [self.
_get_mock_page_restrictions('123'), self.
_get_mock_page_restrictions('456')]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW, keep_markdown_format=True,
max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == 'Content 123\n\n'
assert documents[1].page_content == 'Content 456\n\n'
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
| null |
test_agent_iterator_properties_and_setters
|
"""Test properties and setters of AgentExecutorIterator."""
agent = _get_agent()
agent.tags = None
agent_iter = agent.iter(inputs='when was langchain made')
assert isinstance(agent_iter, AgentExecutorIterator)
assert isinstance(agent_iter.inputs, dict)
assert isinstance(agent_iter.callbacks, type(None))
assert isinstance(agent_iter.tags, type(None))
assert isinstance(agent_iter.agent_executor, AgentExecutor)
agent_iter.inputs = 'New input'
assert isinstance(agent_iter.inputs, dict)
agent_iter.callbacks = [FakeCallbackHandler()]
assert isinstance(agent_iter.callbacks, list)
agent_iter.tags = ['test']
assert isinstance(agent_iter.tags, list)
new_agent = _get_agent()
agent_iter.agent_executor = new_agent
assert isinstance(agent_iter.agent_executor, AgentExecutor)
|
def test_agent_iterator_properties_and_setters() ->None:
"""Test properties and setters of AgentExecutorIterator."""
agent = _get_agent()
agent.tags = None
agent_iter = agent.iter(inputs='when was langchain made')
assert isinstance(agent_iter, AgentExecutorIterator)
assert isinstance(agent_iter.inputs, dict)
assert isinstance(agent_iter.callbacks, type(None))
assert isinstance(agent_iter.tags, type(None))
assert isinstance(agent_iter.agent_executor, AgentExecutor)
agent_iter.inputs = 'New input'
assert isinstance(agent_iter.inputs, dict)
agent_iter.callbacks = [FakeCallbackHandler()]
assert isinstance(agent_iter.callbacks, list)
agent_iter.tags = ['test']
assert isinstance(agent_iter.tags, list)
new_agent = _get_agent()
agent_iter.agent_executor = new_agent
assert isinstance(agent_iter.agent_executor, AgentExecutor)
|
Test properties and setters of AgentExecutorIterator.
|
_llm_type
|
return 'fake-messages-list-chat-model'
|
@property
def _llm_type(self) ->str:
return 'fake-messages-list-chat-model'
| null |