method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
ignore_chain
|
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
@property
def ignore_chain(self) ->bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
Whether to ignore chain callbacks.
|
_identifying_params
|
"""Get all the identifying parameters."""
return {'server_url': self.server_url, 'model_name': self.model_name, **
self._model_default_parameters}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get all the identifying parameters."""
return {'server_url': self.server_url, 'model_name': self.model_name,
**self._model_default_parameters}
|
Get all the identifying parameters.
|
test_nullable_tags
|
tag = Tag('tag_field')
assert str(tag == value) == expected
|
@pytest.mark.parametrize('value, expected', [(None, '*'), ([], '*'), ('',
'*'), ([None], '*'), ([None, 'tag'], '@tag_field:{tag}')], ids=['none',
'empty_list', 'empty_string', 'list_with_none', 'list_with_none_and_tag'])
def test_nullable_tags(value: Any, expected: str) ->None:
tag = Tag('tag_field')
assert str(tag == value) == expected
| null |
extract_sub_links
|
"""Extract all links from a raw html string and convert into absolute paths.
Args:
raw_html: original html.
url: the url of the html.
base_url: the base url to check for outside links against.
pattern: Regex to use for extracting links from raw html.
prevent_outside: If True, ignore external links which are not children
of the base url.
exclude_prefixes: Exclude any URLs that start with one of these prefixes.
Returns:
List[str]: sub links
"""
base_url = base_url if base_url is not None else url
all_links = find_all_links(raw_html, pattern=pattern)
absolute_paths = set()
for link in all_links:
if link.startswith('http'):
absolute_paths.add(link)
elif link.startswith('//'):
absolute_paths.add(f'{urlparse(url).scheme}:{link}')
else:
absolute_paths.add(urljoin(url, link))
res = []
for path in absolute_paths:
if any(path.startswith(exclude) for exclude in exclude_prefixes):
continue
if prevent_outside and not path.startswith(base_url):
continue
res.append(path)
return res
|
def extract_sub_links(raw_html: str, url: str, *, base_url: Optional[str]=
None, pattern: Union[str, re.Pattern, None]=None, prevent_outside: bool
=True, exclude_prefixes: Sequence[str]=()) ->List[str]:
"""Extract all links from a raw html string and convert into absolute paths.
Args:
raw_html: original html.
url: the url of the html.
base_url: the base url to check for outside links against.
pattern: Regex to use for extracting links from raw html.
prevent_outside: If True, ignore external links which are not children
of the base url.
exclude_prefixes: Exclude any URLs that start with one of these prefixes.
Returns:
List[str]: sub links
"""
base_url = base_url if base_url is not None else url
all_links = find_all_links(raw_html, pattern=pattern)
absolute_paths = set()
for link in all_links:
if link.startswith('http'):
absolute_paths.add(link)
elif link.startswith('//'):
absolute_paths.add(f'{urlparse(url).scheme}:{link}')
else:
absolute_paths.add(urljoin(url, link))
res = []
for path in absolute_paths:
if any(path.startswith(exclude) for exclude in exclude_prefixes):
continue
if prevent_outside and not path.startswith(base_url):
continue
res.append(path)
return res
|
Extract all links from a raw html string and convert into absolute paths.
Args:
raw_html: original html.
url: the url of the html.
base_url: the base url to check for outside links against.
pattern: Regex to use for extracting links from raw html.
prevent_outside: If True, ignore external links which are not children
of the base url.
exclude_prefixes: Exclude any URLs that start with one of these prefixes.
Returns:
List[str]: sub links
|
embed_documents
|
"""Call out to DashScope's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(self, input=texts, text_type='document',
model=self.model)
embedding_list = [item['embedding'] for item in embeddings]
return embedding_list
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Call out to DashScope's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(self, input=texts, text_type='document',
model=self.model)
embedding_list = [item['embedding'] for item in embeddings]
return embedding_list
|
Call out to DashScope's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
|
setup_class
|
from rockset import DevRegions, Regions, RocksetClient
assert os.environ.get('ROCKSET_API_KEY') is not None
assert os.environ.get('ROCKSET_REGION') is not None
api_key = os.environ.get('ROCKSET_API_KEY')
region = os.environ.get('ROCKSET_REGION')
if region == 'use1a1':
host = Regions.use1a1
elif region == 'usw2a1' or not region:
host = Regions.usw2a1
elif region == 'euc1a1':
host = Regions.euc1a1
elif region == 'dev':
host = DevRegions.usw2a1
else:
host = region
client = RocksetClient(host, api_key)
cls.memory = RocksetChatMessageHistory(session_id, client, collection_name,
sync=True)
|
@classmethod
def setup_class(cls) ->None:
from rockset import DevRegions, Regions, RocksetClient
assert os.environ.get('ROCKSET_API_KEY') is not None
assert os.environ.get('ROCKSET_REGION') is not None
api_key = os.environ.get('ROCKSET_API_KEY')
region = os.environ.get('ROCKSET_REGION')
if region == 'use1a1':
host = Regions.use1a1
elif region == 'usw2a1' or not region:
host = Regions.usw2a1
elif region == 'euc1a1':
host = Regions.euc1a1
elif region == 'dev':
host = DevRegions.usw2a1
else:
host = region
client = RocksetClient(host, api_key)
cls.memory = RocksetChatMessageHistory(session_id, client,
collection_name, sync=True)
| null |
delete_keys
|
"""Delete records from the SQLite database."""
with self._make_session() as session:
session.query(UpsertionRecord).filter(and_(UpsertionRecord.key.in_(keys
), UpsertionRecord.namespace == self.namespace)).delete()
session.commit()
|
def delete_keys(self, keys: Sequence[str]) ->None:
"""Delete records from the SQLite database."""
with self._make_session() as session:
session.query(UpsertionRecord).filter(and_(UpsertionRecord.key.in_(
keys), UpsertionRecord.namespace == self.namespace)).delete()
session.commit()
|
Delete records from the SQLite database.
|
_prepare
|
config = ensure_config(config)
specs_by_id = {spec.id: (key, spec) for key, spec in self.fields.items()}
configurable_fields = {specs_by_id[k][0]: v for k, v in config.get(
'configurable', {}).items() if k in specs_by_id and isinstance(
specs_by_id[k][1], ConfigurableField)}
configurable_single_options = {k: v.options[config.get('configurable', {}).
get(v.id) or v.default] for k, v in self.fields.items() if isinstance(v,
ConfigurableFieldSingleOption)}
configurable_multi_options = {k: [v.options[o] for o in config.get(
'configurable', {}).get(v.id, v.default)] for k, v in self.fields.items
() if isinstance(v, ConfigurableFieldMultiOption)}
configurable = {**configurable_fields, **configurable_single_options, **
configurable_multi_options}
if configurable:
return self.default.__class__(**{**self.default.__dict__, **configurable}
), config
else:
return self.default, config
|
def _prepare(self, config: Optional[RunnableConfig]=None) ->Tuple[Runnable[
Input, Output], RunnableConfig]:
config = ensure_config(config)
specs_by_id = {spec.id: (key, spec) for key, spec in self.fields.items()}
configurable_fields = {specs_by_id[k][0]: v for k, v in config.get(
'configurable', {}).items() if k in specs_by_id and isinstance(
specs_by_id[k][1], ConfigurableField)}
configurable_single_options = {k: v.options[config.get('configurable',
{}).get(v.id) or v.default] for k, v in self.fields.items() if
isinstance(v, ConfigurableFieldSingleOption)}
configurable_multi_options = {k: [v.options[o] for o in config.get(
'configurable', {}).get(v.id, v.default)] for k, v in self.fields.
items() if isinstance(v, ConfigurableFieldMultiOption)}
configurable = {**configurable_fields, **configurable_single_options,
**configurable_multi_options}
if configurable:
return self.default.__class__(**{**self.default.__dict__, **
configurable}), config
else:
return self.default, config
| null |
wait_for_processing
|
"""Wait for processing to complete.
Args:
pdf_id: a PDF id.
Returns: None
"""
url = self.url + '/' + pdf_id
for _ in range(0, self.max_wait_time_seconds, 5):
response = requests.get(url, headers=self._mathpix_headers)
response_data = response.json()
error = response_data.get('error', None)
if error is not None:
raise ValueError(f'Unable to retrieve PDF from Mathpix: {error}')
status = response_data.get('status', None)
if status == 'completed':
return
elif status == 'error':
raise ValueError('Unable to retrieve PDF from Mathpix')
else:
print(f'Status: {status}, waiting for processing to complete')
time.sleep(5)
raise TimeoutError
|
def wait_for_processing(self, pdf_id: str) ->None:
"""Wait for processing to complete.
Args:
pdf_id: a PDF id.
Returns: None
"""
url = self.url + '/' + pdf_id
for _ in range(0, self.max_wait_time_seconds, 5):
response = requests.get(url, headers=self._mathpix_headers)
response_data = response.json()
error = response_data.get('error', None)
if error is not None:
raise ValueError(f'Unable to retrieve PDF from Mathpix: {error}')
status = response_data.get('status', None)
if status == 'completed':
return
elif status == 'error':
raise ValueError('Unable to retrieve PDF from Mathpix')
else:
print(f'Status: {status}, waiting for processing to complete')
time.sleep(5)
raise TimeoutError
|
Wait for processing to complete.
Args:
pdf_id: a PDF id.
Returns: None
|
_run
|
"""Use the tool."""
return self.api_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
Use the tool.
|
test_hippo_with_score
|
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _hippo_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [Document(page_content='foo', metadata={'page': '0'}),
Document(page_content='bar', metadata={'page': '1'}), Document(
page_content='baz', metadata={'page': '2'})]
assert scores[0] < scores[1] < scores[2]
|
def test_hippo_with_score() ->None:
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _hippo_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [Document(page_content='foo', metadata={'page': '0'}),
Document(page_content='bar', metadata={'page': '1'}), Document(
page_content='baz', metadata={'page': '2'})]
assert scores[0] < scores[1] < scores[2]
|
Test end to end construction and search with scores and IDs.
|
create_self_ask_with_search_agent
|
"""Create an agent that uses self-ask with search prompting.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import (
AgentExecutor, create_self_ask_with_search_agent
)
prompt = hub.pull("hwchase17/self-ask-with-search")
model = ChatAnthropic()
tools = [...] # Should just be one tool with name `Intermediate Answer`
agent = create_self_ask_with_search_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
Args:
llm: LLM to use as the agent.
tools: List of tools. Should just be of length 1, with that tool having
name `Intermediate Answer`
prompt: The prompt to use, must have input keys of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {'agent_scratchpad'}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f'Prompt missing required variables: {missing_vars}')
if len(tools) != 1:
raise ValueError('This agent expects exactly one tool')
tool = list(tools)[0]
if tool.name != 'Intermediate Answer':
raise ValueError(
'This agent expects the tool to be named `Intermediate Answer`')
llm_with_stop = llm.bind(stop=["""
Intermediate answer:"""])
agent = RunnablePassthrough.assign(agent_scratchpad=lambda x:
format_log_to_str(x['intermediate_steps'], observation_prefix=
"""
Intermediate answer: """, llm_prefix=''), chat_history=lambda x: x.
get('chat_history', '')) | prompt | llm_with_stop | SelfAskOutputParser()
return agent
|
def create_self_ask_with_search_agent(llm: BaseLanguageModel, tools:
Sequence[BaseTool], prompt: BasePromptTemplate) ->Runnable:
"""Create an agent that uses self-ask with search prompting.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import (
AgentExecutor, create_self_ask_with_search_agent
)
prompt = hub.pull("hwchase17/self-ask-with-search")
model = ChatAnthropic()
tools = [...] # Should just be one tool with name `Intermediate Answer`
agent = create_self_ask_with_search_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
Args:
llm: LLM to use as the agent.
tools: List of tools. Should just be of length 1, with that tool having
name `Intermediate Answer`
prompt: The prompt to use, must have input keys of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {'agent_scratchpad'}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f'Prompt missing required variables: {missing_vars}')
if len(tools) != 1:
raise ValueError('This agent expects exactly one tool')
tool = list(tools)[0]
if tool.name != 'Intermediate Answer':
raise ValueError(
'This agent expects the tool to be named `Intermediate Answer`')
llm_with_stop = llm.bind(stop=['\nIntermediate answer:'])
agent = RunnablePassthrough.assign(agent_scratchpad=lambda x:
format_log_to_str(x['intermediate_steps'], observation_prefix=
"""
Intermediate answer: """, llm_prefix=''), chat_history=lambda x:
x.get('chat_history', '')
) | prompt | llm_with_stop | SelfAskOutputParser()
return agent
|
Create an agent that uses self-ask with search prompting.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import (
AgentExecutor, create_self_ask_with_search_agent
)
prompt = hub.pull("hwchase17/self-ask-with-search")
model = ChatAnthropic()
tools = [...] # Should just be one tool with name `Intermediate Answer`
agent = create_self_ask_with_search_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
Args:
llm: LLM to use as the agent.
tools: List of tools. Should just be of length 1, with that tool having
name `Intermediate Answer`
prompt: The prompt to use, must have input keys of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
|
on_tool_end
|
self.on_tool_end_common()
|
def on_tool_end(self, *args: Any, **kwargs: Any) ->Any:
self.on_tool_end_common()
| null |
test_api_key_masked_when_passed_from_env
|
monkeypatch.setenv('CEREBRIUMAI_API_KEY', 'secret-api-key')
llm = CerebriumAI()
print(llm.cerebriumai_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
assert repr(llm.cerebriumai_api_key) == "SecretStr('**********')"
|
def test_api_key_masked_when_passed_from_env(monkeypatch: MonkeyPatch,
capsys: CaptureFixture) ->None:
monkeypatch.setenv('CEREBRIUMAI_API_KEY', 'secret-api-key')
llm = CerebriumAI()
print(llm.cerebriumai_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
assert repr(llm.cerebriumai_api_key) == "SecretStr('**********')"
| null |
test_bigquery_loader_no_options
|
loader = BigQueryLoader('SELECT 1 AS a, 2 AS b')
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == 'a: 1\nb: 2'
assert docs[0].metadata == {}
|
@pytest.mark.skipif(not bigquery_installed, reason='bigquery not installed')
def test_bigquery_loader_no_options() ->None:
loader = BigQueryLoader('SELECT 1 AS a, 2 AS b')
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == 'a: 1\nb: 2'
assert docs[0].metadata == {}
| null |
_load_documents
|
"""Load all documents in the folder.
Returns:
List of documents.
"""
loader = DirectoryLoader(path=self.folder_path, glob='*.pdf', loader_cls=
PyPDFLoader)
documents = loader.load()
return documents
|
def _load_documents(self) ->List[Document]:
"""Load all documents in the folder.
Returns:
List of documents.
"""
loader = DirectoryLoader(path=self.folder_path, glob='*.pdf',
loader_cls=PyPDFLoader)
documents = loader.load()
return documents
|
Load all documents in the folder.
Returns:
List of documents.
|
test_hashing_with_missing_content
|
"""Check that ValueError is raised if page_content is missing."""
with pytest.raises(ValueError):
_HashedDocument(metadata={'key': 'value'})
|
def test_hashing_with_missing_content() ->None:
"""Check that ValueError is raised if page_content is missing."""
with pytest.raises(ValueError):
_HashedDocument(metadata={'key': 'value'})
|
Check that ValueError is raised if page_content is missing.
|
test_save_local_load_local
|
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
tfidf_retriever = TFIDFRetriever.from_texts(texts=input_texts)
file_name = 'tfidf_vectorizer'
temp_timestamp = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
with TemporaryDirectory(suffix='_' + temp_timestamp + '/') as temp_folder:
tfidf_retriever.save_local(folder_path=temp_folder, file_name=file_name)
assert os.path.exists(os.path.join(temp_folder, f'{file_name}.joblib'))
assert os.path.exists(os.path.join(temp_folder, f'{file_name}.pkl'))
loaded_tfidf_retriever = TFIDFRetriever.load_local(folder_path=
temp_folder, file_name=file_name)
assert len(loaded_tfidf_retriever.docs) == 3
assert loaded_tfidf_retriever.tfidf_array.toarray().shape == (3, 5)
|
@pytest.mark.requires('sklearn')
def test_save_local_load_local() ->None:
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
tfidf_retriever = TFIDFRetriever.from_texts(texts=input_texts)
file_name = 'tfidf_vectorizer'
temp_timestamp = datetime.utcnow().strftime('%Y%m%d-%H%M%S')
with TemporaryDirectory(suffix='_' + temp_timestamp + '/') as temp_folder:
tfidf_retriever.save_local(folder_path=temp_folder, file_name=file_name
)
assert os.path.exists(os.path.join(temp_folder, f'{file_name}.joblib'))
assert os.path.exists(os.path.join(temp_folder, f'{file_name}.pkl'))
loaded_tfidf_retriever = TFIDFRetriever.load_local(folder_path=
temp_folder, file_name=file_name)
assert len(loaded_tfidf_retriever.docs) == 3
assert loaded_tfidf_retriever.tfidf_array.toarray().shape == (3, 5)
| null |
_parse_ai_message
|
"""Parse an AI message."""
if not isinstance(message, AIMessage):
raise TypeError(f'Expected an AI message got {type(message)}')
function_call = message.additional_kwargs.get('function_call', {})
if function_call:
function_name = function_call['name']
try:
if len(function_call['arguments'].strip()) == 0:
_tool_input = {}
else:
_tool_input = json.loads(function_call['arguments'], strict=False)
except JSONDecodeError:
raise OutputParserException(
f'Could not parse tool input: {function_call} because the `arguments` is not valid JSON.'
)
if '__arg1' in _tool_input:
tool_input = _tool_input['__arg1']
else:
tool_input = _tool_input
content_msg = (f'responded: {message.content}\n' if message.content else
'\n')
log = f'\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n'
return AgentActionMessageLog(tool=function_name, tool_input=tool_input,
log=log, message_log=[message])
return AgentFinish(return_values={'output': message.content}, log=str(
message.content))
|
@staticmethod
def _parse_ai_message(message: BaseMessage) ->Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
raise TypeError(f'Expected an AI message got {type(message)}')
function_call = message.additional_kwargs.get('function_call', {})
if function_call:
function_name = function_call['name']
try:
if len(function_call['arguments'].strip()) == 0:
_tool_input = {}
else:
_tool_input = json.loads(function_call['arguments'], strict
=False)
except JSONDecodeError:
raise OutputParserException(
f'Could not parse tool input: {function_call} because the `arguments` is not valid JSON.'
)
if '__arg1' in _tool_input:
tool_input = _tool_input['__arg1']
else:
tool_input = _tool_input
content_msg = (f'responded: {message.content}\n' if message.content
else '\n')
log = (
f'\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n'
)
return AgentActionMessageLog(tool=function_name, tool_input=
tool_input, log=log, message_log=[message])
return AgentFinish(return_values={'output': message.content}, log=str(
message.content))
|
Parse an AI message.
|
_extract_images_from_page
|
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ''
import pypdfium2.raw as pdfium_c
images = list(page.get_objects(filter=(pdfium_c.FPDF_PAGEOBJ_IMAGE,)))
images = list(map(lambda x: x.get_bitmap().to_numpy(), images))
return extract_from_images_with_rapidocr(images)
|
def _extract_images_from_page(self, page: pypdfium2._helpers.page.PdfPage
) ->str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ''
import pypdfium2.raw as pdfium_c
images = list(page.get_objects(filter=(pdfium_c.FPDF_PAGEOBJ_IMAGE,)))
images = list(map(lambda x: x.get_bitmap().to_numpy(), images))
return extract_from_images_with_rapidocr(images)
|
Extract images from page and get the text with RapidOCR.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
if values['n'] < 1:
raise ValueError('n must be at least 1.')
if values['n'] > 1 and values['streaming']:
raise ValueError('n must be 1 when streaming.')
values['openai_api_key'] = values['openai_api_key'] or os.getenv(
'AZURE_OPENAI_API_KEY') or os.getenv('OPENAI_API_KEY')
values['openai_api_base'] = values['openai_api_base'] or os.getenv(
'OPENAI_API_BASE')
values['openai_api_version'] = values['openai_api_version'] or os.getenv(
'OPENAI_API_VERSION')
values['openai_organization'] = values['openai_organization'] or os.getenv(
'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION')
values['azure_endpoint'] = values['azure_endpoint'] or os.getenv(
'AZURE_OPENAI_ENDPOINT')
values['azure_ad_token'] = values['azure_ad_token'] or os.getenv(
'AZURE_OPENAI_AD_TOKEN')
values['openai_api_type'] = get_from_dict_or_env(values, 'openai_api_type',
'OPENAI_API_TYPE', default='azure')
values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
'OPENAI_PROXY', default='')
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
if is_openai_v1():
openai_api_base = values['openai_api_base']
if openai_api_base and values['validate_base_url']:
if '/openai' not in openai_api_base:
values['openai_api_base'] = values['openai_api_base'].rstrip('/'
) + '/openai'
warnings.warn(
f"As of openai>=1.0.0, Azure endpoints should be specified via the `azure_endpoint` param not `openai_api_base` (or alias `base_url`). Updating `openai_api_base` from {openai_api_base} to {values['openai_api_base']}."
)
if values['deployment_name']:
warnings.warn(
'As of openai>=1.0.0, if `deployment_name` (or alias `azure_deployment`) is specified then `openai_api_base` (or alias `base_url`) should not be. Instead use `deployment_name` (or alias `azure_deployment`) and `azure_endpoint`.'
)
if values['deployment_name'] not in values['openai_api_base']:
warnings.warn(
f"As of openai>=1.0.0, if `openai_api_base` (or alias `base_url`) is specified it is expected to be of the form https://example-resource.azure.openai.com/openai/deployments/example-deployment. Updating {openai_api_base} to {values['openai_api_base']}."
)
values['openai_api_base'] += '/deployments/' + values[
'deployment_name']
values['deployment_name'] = None
client_params = {'api_version': values['openai_api_version'],
'azure_endpoint': values['azure_endpoint'], 'azure_deployment':
values['deployment_name'], 'api_key': values['openai_api_key'],
'azure_ad_token': values['azure_ad_token'],
'azure_ad_token_provider': values['azure_ad_token_provider'],
'organization': values['openai_organization'], 'base_url': values[
'openai_api_base'], 'timeout': values['request_timeout'],
'max_retries': values['max_retries'], 'default_headers': values[
'default_headers'], 'default_query': values['default_query'],
'http_client': values['http_client']}
values['client'] = openai.AzureOpenAI(**client_params).chat.completions
values['async_client'] = openai.AsyncAzureOpenAI(**client_params
).chat.completions
else:
values['client'] = openai.ChatCompletion
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
if values['n'] < 1:
raise ValueError('n must be at least 1.')
if values['n'] > 1 and values['streaming']:
raise ValueError('n must be 1 when streaming.')
values['openai_api_key'] = values['openai_api_key'] or os.getenv(
'AZURE_OPENAI_API_KEY') or os.getenv('OPENAI_API_KEY')
values['openai_api_base'] = values['openai_api_base'] or os.getenv(
'OPENAI_API_BASE')
values['openai_api_version'] = values['openai_api_version'] or os.getenv(
'OPENAI_API_VERSION')
values['openai_organization'] = values['openai_organization'] or os.getenv(
'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION')
values['azure_endpoint'] = values['azure_endpoint'] or os.getenv(
'AZURE_OPENAI_ENDPOINT')
values['azure_ad_token'] = values['azure_ad_token'] or os.getenv(
'AZURE_OPENAI_AD_TOKEN')
values['openai_api_type'] = get_from_dict_or_env(values,
'openai_api_type', 'OPENAI_API_TYPE', default='azure')
values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
'OPENAI_PROXY', default='')
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
if is_openai_v1():
openai_api_base = values['openai_api_base']
if openai_api_base and values['validate_base_url']:
if '/openai' not in openai_api_base:
values['openai_api_base'] = values['openai_api_base'].rstrip(
'/') + '/openai'
warnings.warn(
f"As of openai>=1.0.0, Azure endpoints should be specified via the `azure_endpoint` param not `openai_api_base` (or alias `base_url`). Updating `openai_api_base` from {openai_api_base} to {values['openai_api_base']}."
)
if values['deployment_name']:
warnings.warn(
'As of openai>=1.0.0, if `deployment_name` (or alias `azure_deployment`) is specified then `openai_api_base` (or alias `base_url`) should not be. Instead use `deployment_name` (or alias `azure_deployment`) and `azure_endpoint`.'
)
if values['deployment_name'] not in values['openai_api_base']:
warnings.warn(
f"As of openai>=1.0.0, if `openai_api_base` (or alias `base_url`) is specified it is expected to be of the form https://example-resource.azure.openai.com/openai/deployments/example-deployment. Updating {openai_api_base} to {values['openai_api_base']}."
)
values['openai_api_base'] += '/deployments/' + values[
'deployment_name']
values['deployment_name'] = None
client_params = {'api_version': values['openai_api_version'],
'azure_endpoint': values['azure_endpoint'], 'azure_deployment':
values['deployment_name'], 'api_key': values['openai_api_key'],
'azure_ad_token': values['azure_ad_token'],
'azure_ad_token_provider': values['azure_ad_token_provider'],
'organization': values['openai_organization'], 'base_url':
values['openai_api_base'], 'timeout': values['request_timeout'],
'max_retries': values['max_retries'], 'default_headers': values
['default_headers'], 'default_query': values['default_query'],
'http_client': values['http_client']}
values['client'] = openai.AzureOpenAI(**client_params).chat.completions
values['async_client'] = openai.AsyncAzureOpenAI(**client_params
).chat.completions
else:
values['client'] = openai.ChatCompletion
return values
|
Validate that api key and python package exists in environment.
|
_import_twilio
|
from langchain_community.utilities.twilio import TwilioAPIWrapper
return TwilioAPIWrapper
|
def _import_twilio() ->Any:
from langchain_community.utilities.twilio import TwilioAPIWrapper
return TwilioAPIWrapper
| null |
validate_input_variables
|
"""Validate input variables.
If input_variables is not set, it will be set to the union of
all input variables in the messages.
Args:
values: values to validate.
Returns:
Validated values.
"""
messages = values['messages']
input_vars = set()
input_types: Dict[str, Any] = values.get('input_types', {})
for message in messages:
if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)
):
input_vars.update(message.input_variables)
if isinstance(message, MessagesPlaceholder):
if message.variable_name not in input_types:
input_types[message.variable_name] = List[AnyMessage]
if 'partial_variables' in values:
input_vars = input_vars - set(values['partial_variables'])
if 'input_variables' in values and values.get('validate_template'):
if input_vars != set(values['input_variables']):
raise ValueError(
f"Got mismatched input_variables. Expected: {input_vars}. Got: {values['input_variables']}"
)
else:
values['input_variables'] = sorted(input_vars)
values['input_types'] = input_types
return values
|
@root_validator(pre=True)
def validate_input_variables(cls, values: dict) ->dict:
"""Validate input variables.
If input_variables is not set, it will be set to the union of
all input variables in the messages.
Args:
values: values to validate.
Returns:
Validated values.
"""
messages = values['messages']
input_vars = set()
input_types: Dict[str, Any] = values.get('input_types', {})
for message in messages:
if isinstance(message, (BaseMessagePromptTemplate,
BaseChatPromptTemplate)):
input_vars.update(message.input_variables)
if isinstance(message, MessagesPlaceholder):
if message.variable_name not in input_types:
input_types[message.variable_name] = List[AnyMessage]
if 'partial_variables' in values:
input_vars = input_vars - set(values['partial_variables'])
if 'input_variables' in values and values.get('validate_template'):
if input_vars != set(values['input_variables']):
raise ValueError(
f"Got mismatched input_variables. Expected: {input_vars}. Got: {values['input_variables']}"
)
else:
values['input_variables'] = sorted(input_vars)
values['input_types'] = input_types
return values
|
Validate input variables.
If input_variables is not set, it will be set to the union of
all input variables in the messages.
Args:
values: values to validate.
Returns:
Validated values.
|
img_prompt_func
|
"""
Gemini prompt for image analysis.
:param data_dict: A dict with images and a user-provided question.
:param num_images: Number of images to include in the prompt.
:return: A list containing message objects for each image and the text prompt.
"""
messages = []
if data_dict['context']['images']:
for image in data_dict['context']['images'][:num_images]:
image_message = {'type': 'image_url', 'image_url': {'url':
f'data:image/jpeg;base64,{image}'}}
messages.append(image_message)
text_message = {'type': 'text', 'text':
f"""You are an analyst tasked with answering questions about visual content.
You will be give a set of image(s) from a slide deck / presentation.
Use this information to answer the user question.
User-provided question: {data_dict['question']}
"""
}
messages.append(text_message)
return [HumanMessage(content=messages)]
|
def img_prompt_func(data_dict, num_images=2):
"""
Gemini prompt for image analysis.
:param data_dict: A dict with images and a user-provided question.
:param num_images: Number of images to include in the prompt.
:return: A list containing message objects for each image and the text prompt.
"""
messages = []
if data_dict['context']['images']:
for image in data_dict['context']['images'][:num_images]:
image_message = {'type': 'image_url', 'image_url': {'url':
f'data:image/jpeg;base64,{image}'}}
messages.append(image_message)
text_message = {'type': 'text', 'text':
f"""You are an analyst tasked with answering questions about visual content.
You will be give a set of image(s) from a slide deck / presentation.
Use this information to answer the user question.
User-provided question: {data_dict['question']}
"""
}
messages.append(text_message)
return [HumanMessage(content=messages)]
|
Gemini prompt for image analysis.
:param data_dict: A dict with images and a user-provided question.
:param num_images: Number of images to include in the prompt.
:return: A list containing message objects for each image and the text prompt.
|
test_embedding_query
|
query = 'foo'
embedding = ErnieEmbeddings()
output = embedding.embed_query(query)
assert len(output) == 384
|
def test_embedding_query() ->None:
query = 'foo'
embedding = ErnieEmbeddings()
output = embedding.embed_query(query)
assert len(output) == 384
| null |
update_iterations
|
"""
Increment the number of iterations and update the time elapsed.
"""
self.iterations += 1
self.time_elapsed = time.time() - self.start_time
logger.debug(
f'Agent Iterations: {self.iterations} ({self.time_elapsed:.2f}s elapsed)')
|
def update_iterations(self) ->None:
"""
Increment the number of iterations and update the time elapsed.
"""
self.iterations += 1
self.time_elapsed = time.time() - self.start_time
logger.debug(
f'Agent Iterations: {self.iterations} ({self.time_elapsed:.2f}s elapsed)'
)
|
Increment the number of iterations and update the time elapsed.
|
_load_map_reduce_chain
|
map_chain = LLMChain(llm=llm, prompt=map_prompt, verbose=verbose, callbacks
=callbacks)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=
verbose, callbacks=callbacks)
combine_documents_chain = StuffDocumentsChain(llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name, verbose=verbose,
callbacks=callbacks)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
raise ValueError(
'collapse_llm provided, but collapse_prompt was not: please provide one or stop providing collapse_llm.'
)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(llm_chain=LLMChain(llm=
_collapse_llm, prompt=collapse_prompt, verbose=verbose, callbacks=
callbacks), document_variable_name=combine_document_variable_name)
reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain=
combine_documents_chain, collapse_documents_chain=collapse_chain,
token_max=token_max, verbose=verbose, callbacks=callbacks)
return MapReduceDocumentsChain(llm_chain=map_chain, reduce_documents_chain=
reduce_documents_chain, document_variable_name=
map_reduce_document_variable_name, verbose=verbose, callbacks=callbacks,
**kwargs)
|
def _load_map_reduce_chain(llm: BaseLanguageModel, map_prompt:
BasePromptTemplate=map_reduce_prompt.PROMPT, combine_prompt:
BasePromptTemplate=map_reduce_prompt.PROMPT,
combine_document_variable_name: str='text',
map_reduce_document_variable_name: str='text', collapse_prompt:
Optional[BasePromptTemplate]=None, reduce_llm: Optional[
BaseLanguageModel]=None, collapse_llm: Optional[BaseLanguageModel]=None,
verbose: Optional[bool]=None, token_max: int=3000, callbacks: Callbacks
=None, **kwargs: Any) ->MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=map_prompt, verbose=verbose,
callbacks=callbacks)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose
=verbose, callbacks=callbacks)
combine_documents_chain = StuffDocumentsChain(llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name, verbose=
verbose, callbacks=callbacks)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
raise ValueError(
'collapse_llm provided, but collapse_prompt was not: please provide one or stop providing collapse_llm.'
)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(llm_chain=LLMChain(llm=
_collapse_llm, prompt=collapse_prompt, verbose=verbose,
callbacks=callbacks), document_variable_name=
combine_document_variable_name)
reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain=
combine_documents_chain, collapse_documents_chain=collapse_chain,
token_max=token_max, verbose=verbose, callbacks=callbacks)
return MapReduceDocumentsChain(llm_chain=map_chain,
reduce_documents_chain=reduce_documents_chain,
document_variable_name=map_reduce_document_variable_name, verbose=
verbose, callbacks=callbacks, **kwargs)
| null |
test_qdrant_similarity_search_with_score_by_vector
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), location=
':memory:', content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key, batch_size=batch_size,
vector_name=vector_name)
embeddings = ConsistentFakeEmbeddings().embed_query('foo')
output = docsearch.similarity_search_with_score_by_vector(embeddings, k=1)
assert len(output) == 1
document, score = output[0]
assert document == Document(page_content='foo')
assert score >= 0
|
@pytest.mark.parametrize('batch_size', [1, 64])
@pytest.mark.parametrize('content_payload_key', [Qdrant.CONTENT_KEY, 'foo'])
@pytest.mark.parametrize('metadata_payload_key', [Qdrant.METADATA_KEY, 'bar'])
@pytest.mark.parametrize('vector_name', [None, 'my-vector'])
def test_qdrant_similarity_search_with_score_by_vector(batch_size: int,
content_payload_key: str, metadata_payload_key: str, vector_name:
Optional[str]) ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(),
location=':memory:', content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key, batch_size=batch_size,
vector_name=vector_name)
embeddings = ConsistentFakeEmbeddings().embed_query('foo')
output = docsearch.similarity_search_with_score_by_vector(embeddings, k=1)
assert len(output) == 1
document, score = output[0]
assert document == Document(page_content='foo')
assert score >= 0
|
Test end to end construction and search.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['voyage_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'voyage_api_key', 'VOYAGE_API_KEY'))
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
values['voyage_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'voyage_api_key', 'VOYAGE_API_KEY'))
return values
|
Validate that api key and python package exists in environment.
|
fake_llm_checker
|
"""This is a fake LLM that responds with a thought validity."""
responses = ['VALID', 'valid', 'INVALID', 'invalid', 'INTERMEDIATE',
'intermediate', 'SOMETHING ELSE']
queries = dict(enumerate(responses))
return FakeLLM(queries=queries, sequential_responses=True)
|
@pytest.fixture
def fake_llm_checker() ->FakeLLM:
"""This is a fake LLM that responds with a thought validity."""
responses = ['VALID', 'valid', 'INVALID', 'invalid', 'INTERMEDIATE',
'intermediate', 'SOMETHING ELSE']
queries = dict(enumerate(responses))
return FakeLLM(queries=queries, sequential_responses=True)
|
This is a fake LLM that responds with a thought validity.
|
_search_request
|
headers = {'X-Subscription-Token': self.api_key, 'Accept': 'application/json'}
req = requests.PreparedRequest()
params = {**self.search_kwargs, **{'q': query}}
req.prepare_url(self.base_url, params)
if req.url is None:
raise ValueError('prepared url is None, this should not happen')
response = requests.get(req.url, headers=headers)
if not response.ok:
raise Exception(f'HTTP error {response.status_code}')
return response.json().get('web', {}).get('results', [])
|
def _search_request(self, query: str) ->List[dict]:
headers = {'X-Subscription-Token': self.api_key, 'Accept':
'application/json'}
req = requests.PreparedRequest()
params = {**self.search_kwargs, **{'q': query}}
req.prepare_url(self.base_url, params)
if req.url is None:
raise ValueError('prepared url is None, this should not happen')
response = requests.get(req.url, headers=headers)
if not response.ok:
raise Exception(f'HTTP error {response.status_code}')
return response.json().get('web', {}).get('results', [])
| null |
create_prompt
|
tool_strings = '\n'.join([f'> {tool.name}: {tool.description}' for tool in
tools])
tool_names = ', '.join([tool.name for tool in tools])
_output_parser = output_parser or cls._get_default_output_parser()
format_instructions = human_message.format(format_instructions=
_output_parser.get_format_instructions())
final_prompt = format_instructions.format(tool_names=tool_names, tools=
tool_strings)
if input_variables is None:
input_variables = ['input', 'chat_history', 'agent_scratchpad']
messages = [SystemMessagePromptTemplate.from_template(system_message),
MessagesPlaceholder(variable_name='chat_history'),
HumanMessagePromptTemplate.from_template(final_prompt),
MessagesPlaceholder(variable_name='agent_scratchpad')]
return ChatPromptTemplate(input_variables=input_variables, messages=messages)
|
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool], system_message: str=
PREFIX, human_message: str=SUFFIX, input_variables: Optional[List[str]]
=None, output_parser: Optional[BaseOutputParser]=None
) ->BasePromptTemplate:
tool_strings = '\n'.join([f'> {tool.name}: {tool.description}' for tool in
tools])
tool_names = ', '.join([tool.name for tool in tools])
_output_parser = output_parser or cls._get_default_output_parser()
format_instructions = human_message.format(format_instructions=
_output_parser.get_format_instructions())
final_prompt = format_instructions.format(tool_names=tool_names, tools=
tool_strings)
if input_variables is None:
input_variables = ['input', 'chat_history', 'agent_scratchpad']
messages = [SystemMessagePromptTemplate.from_template(system_message),
MessagesPlaceholder(variable_name='chat_history'),
HumanMessagePromptTemplate.from_template(final_prompt),
MessagesPlaceholder(variable_name='agent_scratchpad')]
return ChatPromptTemplate(input_variables=input_variables, messages=
messages)
| null |
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
return False
| null |
get_format_instructions
|
"""Get format instructions for the output parser.
example:
```python
from langchain.output_parsers.structured import (
StructuredOutputParser, ResponseSchema
)
response_schemas = [
ResponseSchema(
name="foo",
description="a list of strings",
type="List[string]"
),
ResponseSchema(
name="bar",
description="a string",
type="string"
),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
print(parser.get_format_instructions())
output:
# The output should be a Markdown code snippet formatted in the following
# schema, including the leading and trailing "```json" and "```":
#
# ```json
# {
# "foo": List[string] // a list of strings
# "bar": string // a string
# }
# ```
Args:
only_json (bool): If True, only the json in the Markdown code snippet
will be returned, without the introducing text. Defaults to False.
"""
schema_str = '\n'.join([_get_sub_string(schema) for schema in self.
response_schemas])
if only_json:
return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str)
else:
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
|
def get_format_instructions(self, only_json: bool=False) ->str:
"""Get format instructions for the output parser.
example:
```python
from langchain.output_parsers.structured import (
StructuredOutputParser, ResponseSchema
)
response_schemas = [
ResponseSchema(
name="foo",
description="a list of strings",
type="List[string]"
),
ResponseSchema(
name="bar",
description="a string",
type="string"
),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
print(parser.get_format_instructions())
output:
# The output should be a Markdown code snippet formatted in the following
# schema, including the leading and trailing "```json" and "```":
#
# ```json
# {
# "foo": List[string] // a list of strings
# "bar": string // a string
# }
# ```
Args:
only_json (bool): If True, only the json in the Markdown code snippet
will be returned, without the introducing text. Defaults to False.
"""
schema_str = '\n'.join([_get_sub_string(schema) for schema in self.
response_schemas])
if only_json:
return STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS.format(format=schema_str)
else:
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
|
Get format instructions for the output parser.
example:
```python
from langchain.output_parsers.structured import (
StructuredOutputParser, ResponseSchema
)
response_schemas = [
ResponseSchema(
name="foo",
description="a list of strings",
type="List[string]"
),
ResponseSchema(
name="bar",
description="a string",
type="string"
),
]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
print(parser.get_format_instructions())
output:
# The output should be a Markdown code snippet formatted in the following
# schema, including the leading and trailing "```json" and "```":
#
# ```json
# {
# "foo": List[string] // a list of strings
# "bar": string // a string
# }
# ```
Args:
only_json (bool): If True, only the json in the Markdown code snippet
will be returned, without the introducing text. Defaults to False.
|
__init__
|
self.api_key = api_key
|
def __init__(self, api_key: Optional[str]=None):
self.api_key = api_key
| null |
_generate
|
"""Call OpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {'text': generation.text, 'llm_output': generated_responses.
llm_output}
params = {**self._identifying_params, **kwargs}
pl_request_id = promptlayer_api_request('langchain.PromptLayerOpenAIChat',
'langchain', [prompt], params, self.pl_tags, resp,
request_start_time, request_end_time, get_api_key(), return_pl_id=
self.return_pl_id)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(generation.
generation_info, dict):
generation.generation_info = {}
generation.generation_info['pl_request_id'] = pl_request_id
return generated_responses
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->LLMResult:
"""Call OpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(prompts, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
for i in range(len(prompts)):
prompt = prompts[i]
generation = generated_responses.generations[i][0]
resp = {'text': generation.text, 'llm_output': generated_responses.
llm_output}
params = {**self._identifying_params, **kwargs}
pl_request_id = promptlayer_api_request(
'langchain.PromptLayerOpenAIChat', 'langchain', [prompt],
params, self.pl_tags, resp, request_start_time,
request_end_time, get_api_key(), return_pl_id=self.return_pl_id)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(generation
.generation_info, dict):
generation.generation_info = {}
generation.generation_info['pl_request_id'] = pl_request_id
return generated_responses
|
Call OpenAI generate and then call PromptLayer API to log the request.
|
_package_namespace
|
return package_name if package_name == 'langchain' else f"langchain_{package_name.replace('-', '_')}"
|
def _package_namespace(package_name: str) ->str:
return (package_name if package_name == 'langchain' else
f"langchain_{package_name.replace('-', '_')}")
| null |
default_doc_builder
|
return Document(page_content=hit['_source'].get(self.query_field, ''),
metadata=hit['_source']['metadata'])
|
def default_doc_builder(hit: Dict) ->Document:
return Document(page_content=hit['_source'].get(self.query_field, ''),
metadata=hit['_source']['metadata'])
| null |
test_modelscope_embedding_documents
|
"""Test modelscope embeddings for documents."""
documents = ['foo bar']
embedding = ModelScopeEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 512
|
def test_modelscope_embedding_documents() ->None:
"""Test modelscope embeddings for documents."""
documents = ['foo bar']
embedding = ModelScopeEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 512
|
Test modelscope embeddings for documents.
|
_diff
|
return jsonpatch.make_patch(prev, next).patch
|
def _diff(self, prev: Optional[Any], next: Any) ->Any:
return jsonpatch.make_patch(prev, next).patch
| null |
test_johnsnowlabs_embed_document
|
"""Test johnsnowlabs embeddings."""
documents = ['foo bar', 'bar foo']
embedding = JohnSnowLabsEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 128
|
def test_johnsnowlabs_embed_document() ->None:
"""Test johnsnowlabs embeddings."""
documents = ['foo bar', 'bar foo']
embedding = JohnSnowLabsEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 128
|
Test johnsnowlabs embeddings.
|
_get_sample_spark_rows
|
query = f'SELECT * FROM {table} LIMIT {self._sample_rows_in_table_info}'
df = self._spark.sql(query)
columns_str = '\t'.join(list(map(lambda f: f.name, df.schema.fields)))
try:
sample_rows = self._get_dataframe_results(df)
sample_rows_str = '\n'.join(['\t'.join(row) for row in sample_rows])
except Exception:
sample_rows_str = ''
return f"""{self._sample_rows_in_table_info} rows from {table} table:
{columns_str}
{sample_rows_str}"""
|
def _get_sample_spark_rows(self, table: str) ->str:
query = f'SELECT * FROM {table} LIMIT {self._sample_rows_in_table_info}'
df = self._spark.sql(query)
columns_str = '\t'.join(list(map(lambda f: f.name, df.schema.fields)))
try:
sample_rows = self._get_dataframe_results(df)
sample_rows_str = '\n'.join(['\t'.join(row) for row in sample_rows])
except Exception:
sample_rows_str = ''
return f"""{self._sample_rows_in_table_info} rows from {table} table:
{columns_str}
{sample_rows_str}"""
| null |
parse_result
|
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
'This output parser can only be used with a chat generation.')
message = generation.message
try:
func_call = copy.deepcopy(message.additional_kwargs['function_call'])
except KeyError as exc:
raise OutputParserException(f'Could not parse function call: {exc}')
if self.args_only:
return func_call['arguments']
return func_call
|
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
'This output parser can only be used with a chat generation.')
message = generation.message
try:
func_call = copy.deepcopy(message.additional_kwargs['function_call'])
except KeyError as exc:
raise OutputParserException(f'Could not parse function call: {exc}')
if self.args_only:
return func_call['arguments']
return func_call
| null |
__init__
|
self.stack: List[Thought] = stack or []
|
def __init__(self, stack: Optional[List[Thought]]=None):
self.stack: List[Thought] = stack or []
| null |
connect
|
engine = sqlalchemy.create_engine(self.connection_string)
conn = engine.connect()
return conn
|
def connect(self) ->sqlalchemy.engine.Connection:
engine = sqlalchemy.create_engine(self.connection_string)
conn = engine.connect()
return conn
| null |
_determine_reference_key
|
if config.reference_key:
reference_key = config.reference_key
if example_outputs and reference_key not in example_outputs:
raise ValueError(
f'Reference key {reference_key} not in Dataset example outputs: {example_outputs}'
)
elif example_outputs and len(example_outputs) == 1:
reference_key = list(example_outputs)[0]
else:
reference_key = None
return reference_key
|
def _determine_reference_key(config: smith_eval.RunEvalConfig,
example_outputs: Optional[List[str]]) ->Optional[str]:
if config.reference_key:
reference_key = config.reference_key
if example_outputs and reference_key not in example_outputs:
raise ValueError(
f'Reference key {reference_key} not in Dataset example outputs: {example_outputs}'
)
elif example_outputs and len(example_outputs) == 1:
reference_key = list(example_outputs)[0]
else:
reference_key = None
return reference_key
| null |
_on_tool_start
|
"""Process the Tool Run upon start."""
|
def _on_tool_start(self, run: Run) ->None:
"""Process the Tool Run upon start."""
|
Process the Tool Run upon start.
|
raise_value_error
|
"""Raise a value error."""
raise ValueError('x is too large')
|
def raise_value_error(x: int) ->int:
"""Raise a value error."""
raise ValueError('x is too large')
|
Raise a value error.
|
index
|
"""Index data from the loader into the vector store.
Indexing functionality uses a manager to keep track of which documents
are in the vector store.
This allows us to keep track of which documents were updated, and which
documents were deleted, which documents should be skipped.
For the time being, documents are indexed using their hashes, and users
are not able to specify the uid of the document.
IMPORTANT:
if auto_cleanup is set to True, the loader should be returning
the entire dataset, and not just a subset of the dataset.
Otherwise, the auto_cleanup will remove documents that it is not
supposed to.
Args:
docs_source: Data loader or iterable of documents to index.
record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: Vector store to index the documents into.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.
- Incremental: Cleans up all documents that haven't been updated AND
that are associated with source ids that were seen
during indexing.
Clean up is done continuously during indexing helping
to minimize the probability of users seeing duplicated
content.
- Full: Delete all documents that haven to been returned by the loader.
Clean up runs after all documents have been indexed.
This means that users may see duplicated content during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Returns:
Indexing result which contains information about how many documents
were added, updated, deleted, or skipped.
"""
if cleanup not in {'incremental', 'full', None}:
raise ValueError(
f"cleanup should be one of 'incremental', 'full' or None. Got {cleanup}."
)
if cleanup == 'incremental' and source_id_key is None:
raise ValueError(
'Source id key is required when cleanup mode is incremental.')
methods = ['delete', 'add_documents']
for method in methods:
if not hasattr(vector_store, method):
raise ValueError(
f'Vectorstore {vector_store} does not have required method {method}'
)
if type(vector_store).delete == VectorStore.delete:
raise ValueError('Vectorstore has not implemented the delete method')
if isinstance(docs_source, BaseLoader):
try:
doc_iterator = docs_source.lazy_load()
except NotImplementedError:
doc_iterator = iter(docs_source.load())
else:
doc_iterator = iter(docs_source)
source_id_assigner = _get_source_id_assigner(source_id_key)
index_start_dt = record_manager.get_time()
num_added = 0
num_skipped = 0
num_updated = 0
num_deleted = 0
for doc_batch in _batch(batch_size, doc_iterator):
hashed_docs = list(_deduplicate_in_order([_HashedDocument.from_document
(doc) for doc in doc_batch]))
source_ids: Sequence[Optional[str]] = [source_id_assigner(doc) for doc in
hashed_docs]
if cleanup == 'incremental':
for source_id, hashed_doc in zip(source_ids, hashed_docs):
if source_id is None:
raise ValueError(
f'Source ids are required when cleanup mode is incremental. Document that starts with content: {hashed_doc.page_content[:100]} was not assigned as source id.'
)
source_ids = cast(Sequence[str], source_ids)
exists_batch = record_manager.exists([doc.uid for doc in hashed_docs])
uids = []
docs_to_index = []
uids_to_refresh = []
seen_docs: Set[str] = set()
for hashed_doc, doc_exists in zip(hashed_docs, exists_batch):
if doc_exists:
if force_update:
seen_docs.add(hashed_doc.uid)
else:
uids_to_refresh.append(hashed_doc.uid)
continue
uids.append(hashed_doc.uid)
docs_to_index.append(hashed_doc.to_document())
if uids_to_refresh:
record_manager.update(uids_to_refresh, time_at_least=index_start_dt)
num_skipped += len(uids_to_refresh)
if docs_to_index:
vector_store.add_documents(docs_to_index, ids=uids)
num_added += len(docs_to_index) - len(seen_docs)
num_updated += len(seen_docs)
record_manager.update([doc.uid for doc in hashed_docs], group_ids=
source_ids, time_at_least=index_start_dt)
if cleanup == 'incremental':
for source_id in source_ids:
if source_id is None:
raise AssertionError('Source ids cannot be None here.')
_source_ids = cast(Sequence[str], source_ids)
uids_to_delete = record_manager.list_keys(group_ids=_source_ids,
before=index_start_dt)
if uids_to_delete:
vector_store.delete(uids_to_delete)
record_manager.delete_keys(uids_to_delete)
num_deleted += len(uids_to_delete)
if cleanup == 'full':
while (uids_to_delete := record_manager.list_keys(before=index_start_dt,
limit=cleanup_batch_size)):
vector_store.delete(uids_to_delete)
record_manager.delete_keys(uids_to_delete)
num_deleted += len(uids_to_delete)
return {'num_added': num_added, 'num_updated': num_updated, 'num_skipped':
num_skipped, 'num_deleted': num_deleted}
|
def index(docs_source: Union[BaseLoader, Iterable[Document]],
record_manager: RecordManager, vector_store: VectorStore, *, batch_size:
int=100, cleanup: Literal['incremental', 'full', None]=None,
source_id_key: Union[str, Callable[[Document], str], None]=None,
cleanup_batch_size: int=1000, force_update: bool=False) ->IndexingResult:
"""Index data from the loader into the vector store.
Indexing functionality uses a manager to keep track of which documents
are in the vector store.
This allows us to keep track of which documents were updated, and which
documents were deleted, which documents should be skipped.
For the time being, documents are indexed using their hashes, and users
are not able to specify the uid of the document.
IMPORTANT:
if auto_cleanup is set to True, the loader should be returning
the entire dataset, and not just a subset of the dataset.
Otherwise, the auto_cleanup will remove documents that it is not
supposed to.
Args:
docs_source: Data loader or iterable of documents to index.
record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: Vector store to index the documents into.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.
- Incremental: Cleans up all documents that haven't been updated AND
that are associated with source ids that were seen
during indexing.
Clean up is done continuously during indexing helping
to minimize the probability of users seeing duplicated
content.
- Full: Delete all documents that haven to been returned by the loader.
Clean up runs after all documents have been indexed.
This means that users may see duplicated content during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Returns:
Indexing result which contains information about how many documents
were added, updated, deleted, or skipped.
"""
if cleanup not in {'incremental', 'full', None}:
raise ValueError(
f"cleanup should be one of 'incremental', 'full' or None. Got {cleanup}."
)
if cleanup == 'incremental' and source_id_key is None:
raise ValueError(
'Source id key is required when cleanup mode is incremental.')
methods = ['delete', 'add_documents']
for method in methods:
if not hasattr(vector_store, method):
raise ValueError(
f'Vectorstore {vector_store} does not have required method {method}'
)
if type(vector_store).delete == VectorStore.delete:
raise ValueError('Vectorstore has not implemented the delete method')
if isinstance(docs_source, BaseLoader):
try:
doc_iterator = docs_source.lazy_load()
except NotImplementedError:
doc_iterator = iter(docs_source.load())
else:
doc_iterator = iter(docs_source)
source_id_assigner = _get_source_id_assigner(source_id_key)
index_start_dt = record_manager.get_time()
num_added = 0
num_skipped = 0
num_updated = 0
num_deleted = 0
for doc_batch in _batch(batch_size, doc_iterator):
hashed_docs = list(_deduplicate_in_order([_HashedDocument.
from_document(doc) for doc in doc_batch]))
source_ids: Sequence[Optional[str]] = [source_id_assigner(doc) for
doc in hashed_docs]
if cleanup == 'incremental':
for source_id, hashed_doc in zip(source_ids, hashed_docs):
if source_id is None:
raise ValueError(
f'Source ids are required when cleanup mode is incremental. Document that starts with content: {hashed_doc.page_content[:100]} was not assigned as source id.'
)
source_ids = cast(Sequence[str], source_ids)
exists_batch = record_manager.exists([doc.uid for doc in hashed_docs])
uids = []
docs_to_index = []
uids_to_refresh = []
seen_docs: Set[str] = set()
for hashed_doc, doc_exists in zip(hashed_docs, exists_batch):
if doc_exists:
if force_update:
seen_docs.add(hashed_doc.uid)
else:
uids_to_refresh.append(hashed_doc.uid)
continue
uids.append(hashed_doc.uid)
docs_to_index.append(hashed_doc.to_document())
if uids_to_refresh:
record_manager.update(uids_to_refresh, time_at_least=index_start_dt
)
num_skipped += len(uids_to_refresh)
if docs_to_index:
vector_store.add_documents(docs_to_index, ids=uids)
num_added += len(docs_to_index) - len(seen_docs)
num_updated += len(seen_docs)
record_manager.update([doc.uid for doc in hashed_docs], group_ids=
source_ids, time_at_least=index_start_dt)
if cleanup == 'incremental':
for source_id in source_ids:
if source_id is None:
raise AssertionError('Source ids cannot be None here.')
_source_ids = cast(Sequence[str], source_ids)
uids_to_delete = record_manager.list_keys(group_ids=_source_ids,
before=index_start_dt)
if uids_to_delete:
vector_store.delete(uids_to_delete)
record_manager.delete_keys(uids_to_delete)
num_deleted += len(uids_to_delete)
if cleanup == 'full':
while (uids_to_delete := record_manager.list_keys(before=
index_start_dt, limit=cleanup_batch_size)):
vector_store.delete(uids_to_delete)
record_manager.delete_keys(uids_to_delete)
num_deleted += len(uids_to_delete)
return {'num_added': num_added, 'num_updated': num_updated,
'num_skipped': num_skipped, 'num_deleted': num_deleted}
|
Index data from the loader into the vector store.
Indexing functionality uses a manager to keep track of which documents
are in the vector store.
This allows us to keep track of which documents were updated, and which
documents were deleted, which documents should be skipped.
For the time being, documents are indexed using their hashes, and users
are not able to specify the uid of the document.
IMPORTANT:
if auto_cleanup is set to True, the loader should be returning
the entire dataset, and not just a subset of the dataset.
Otherwise, the auto_cleanup will remove documents that it is not
supposed to.
Args:
docs_source: Data loader or iterable of documents to index.
record_manager: Timestamped set to keep track of which documents were
updated.
vector_store: Vector store to index the documents into.
batch_size: Batch size to use when indexing.
cleanup: How to handle clean up of documents.
- Incremental: Cleans up all documents that haven't been updated AND
that are associated with source ids that were seen
during indexing.
Clean up is done continuously during indexing helping
to minimize the probability of users seeing duplicated
content.
- Full: Delete all documents that haven to been returned by the loader.
Clean up runs after all documents have been indexed.
This means that users may see duplicated content during indexing.
- None: Do not delete any documents.
source_id_key: Optional key that helps identify the original source
of the document.
cleanup_batch_size: Batch size to use when cleaning up documents.
force_update: Force update documents even if they are present in the
record manager. Useful if you are re-indexing with updated embeddings.
Returns:
Indexing result which contains information about how many documents
were added, updated, deleted, or skipped.
|
_get_encoding_model
|
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
try:
encoding = tiktoken_.encoding_for_model('gpt-3.5-turbo-0301')
except KeyError:
logger.warning('Warning: model not found. Using cl100k_base encoding.')
model = 'cl100k_base'
encoding = tiktoken_.get_encoding(model)
return model, encoding
|
def _get_encoding_model(self) ->tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
try:
encoding = tiktoken_.encoding_for_model('gpt-3.5-turbo-0301')
except KeyError:
logger.warning('Warning: model not found. Using cl100k_base encoding.')
model = 'cl100k_base'
encoding = tiktoken_.get_encoding(model)
return model, encoding
| null |
get_user_agent
|
from langchain_community import __version__
return f'langchain-py-ms/{__version__}'
|
@staticmethod
def get_user_agent() ->str:
from langchain_community import __version__
return f'langchain-py-ms/{__version__}'
| null |
plan
|
"""Given input, decided what to do."""
inputs['tools'] = [f'{tool.name}: {tool.description}' for tool in inputs[
'hf_tools']]
llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks
)
return self.output_parser.parse(llm_response, inputs['hf_tools'])
|
def plan(self, inputs: dict, callbacks: Callbacks=None, **kwargs: Any) ->Plan:
"""Given input, decided what to do."""
inputs['tools'] = [f'{tool.name}: {tool.description}' for tool in
inputs['hf_tools']]
llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=
callbacks)
return self.output_parser.parse(llm_response, inputs['hf_tools'])
|
Given input, decided what to do.
|
_process_object_schema
|
from openapi_pydantic import Reference
properties = []
required_props = schema.required or []
if schema.properties is None:
raise ValueError(
f'No properties found when processing object schema: {schema}')
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
ref_name = prop_schema.ref.split('/')[-1]
if ref_name not in references_used:
references_used.append(ref_name)
prop_schema = spec.get_referenced_schema(prop_schema)
else:
continue
properties.append(cls.from_schema(schema=prop_schema, name=prop_name,
required=prop_name in required_props, spec=spec, references_used=
references_used))
return schema.type, properties
|
@classmethod
def _process_object_schema(cls, schema: Schema, spec: OpenAPISpec,
references_used: List[str]) ->Tuple[Union[str, List[str], None], List[
'APIRequestBodyProperty']]:
from openapi_pydantic import Reference
properties = []
required_props = schema.required or []
if schema.properties is None:
raise ValueError(
f'No properties found when processing object schema: {schema}')
for prop_name, prop_schema in schema.properties.items():
if isinstance(prop_schema, Reference):
ref_name = prop_schema.ref.split('/')[-1]
if ref_name not in references_used:
references_used.append(ref_name)
prop_schema = spec.get_referenced_schema(prop_schema)
else:
continue
properties.append(cls.from_schema(schema=prop_schema, name=
prop_name, required=prop_name in required_props, spec=spec,
references_used=references_used))
return schema.type, properties
| null |
__call__
|
"""Callable to load the combine documents chain."""
|
def __call__(self, llm: BaseLanguageModel, **kwargs: Any
) ->BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
|
Callable to load the combine documents chain.
|
_type
|
return 'api_requester'
|
@property
def _type(self) ->str:
return 'api_requester'
| null |
__init__
|
"""Initialize vearch vector store
flag 1 for cluster,0 for standalone
"""
try:
if flag:
import vearch_cluster
else:
import vearch
except ImportError:
raise ValueError(
'Could not import suitable python package. Please install it with `pip install vearch or vearch_cluster`.'
)
if flag:
if path_or_url is None:
raise ValueError('Please input url of cluster')
if not db_name:
db_name = self._DEFAULT_CLUSTER_DB_NAME
db_name += '_'
db_name += str(uuid.uuid4()).split('-')[-1]
self.using_db_name = db_name
self.url = path_or_url
self.vearch = vearch_cluster.VearchCluster(path_or_url)
else:
if path_or_url is None:
metadata_path = os.getcwd().replace('\\', '/')
else:
metadata_path = path_or_url
if not os.path.isdir(metadata_path):
os.makedirs(metadata_path)
log_path = os.path.join(metadata_path, 'log')
if not os.path.isdir(log_path):
os.makedirs(log_path)
self.vearch = vearch.Engine(metadata_path, log_path)
self.using_metapath = metadata_path
if not table_name:
table_name = self._DEFAULT_TABLE_NAME
table_name += '_'
table_name += str(uuid.uuid4()).split('-')[-1]
self.using_table_name = table_name
self.embedding_func = embedding_function
self.flag = flag
|
def __init__(self, embedding_function: Embeddings, path_or_url: Optional[
str]=None, table_name: str=_DEFAULT_TABLE_NAME, db_name: str=
_DEFAULT_CLUSTER_DB_NAME, flag: int=_DEFAULT_VERSION, **kwargs: Any
) ->None:
"""Initialize vearch vector store
flag 1 for cluster,0 for standalone
"""
try:
if flag:
import vearch_cluster
else:
import vearch
except ImportError:
raise ValueError(
'Could not import suitable python package. Please install it with `pip install vearch or vearch_cluster`.'
)
if flag:
if path_or_url is None:
raise ValueError('Please input url of cluster')
if not db_name:
db_name = self._DEFAULT_CLUSTER_DB_NAME
db_name += '_'
db_name += str(uuid.uuid4()).split('-')[-1]
self.using_db_name = db_name
self.url = path_or_url
self.vearch = vearch_cluster.VearchCluster(path_or_url)
else:
if path_or_url is None:
metadata_path = os.getcwd().replace('\\', '/')
else:
metadata_path = path_or_url
if not os.path.isdir(metadata_path):
os.makedirs(metadata_path)
log_path = os.path.join(metadata_path, 'log')
if not os.path.isdir(log_path):
os.makedirs(log_path)
self.vearch = vearch.Engine(metadata_path, log_path)
self.using_metapath = metadata_path
if not table_name:
table_name = self._DEFAULT_TABLE_NAME
table_name += '_'
table_name += str(uuid.uuid4()).split('-')[-1]
self.using_table_name = table_name
self.embedding_func = embedding_function
self.flag = flag
|
Initialize vearch vector store
flag 1 for cluster,0 for standalone
|
wait_for_indexing
|
"""Wait for the search index to contain a certain number of
documents. Useful in tests.
"""
start = time.time()
while True:
r = self._client.data().search_table(self._table_name, payload={'query':
'', 'page': {'size': 0}})
if r.status_code != 200:
raise Exception(f'Error running search: {r.status_code} {r}')
if r['totalCount'] == ndocs:
break
if time.time() - start > timeout:
raise Exception('Timed out waiting for indexing to complete.')
time.sleep(0.5)
|
def wait_for_indexing(self, timeout: float=5, ndocs: int=1) ->None:
"""Wait for the search index to contain a certain number of
documents. Useful in tests.
"""
start = time.time()
while True:
r = self._client.data().search_table(self._table_name, payload={
'query': '', 'page': {'size': 0}})
if r.status_code != 200:
raise Exception(f'Error running search: {r.status_code} {r}')
if r['totalCount'] == ndocs:
break
if time.time() - start > timeout:
raise Exception('Timed out waiting for indexing to complete.')
time.sleep(0.5)
|
Wait for the search index to contain a certain number of
documents. Useful in tests.
|
_type
|
return 'simple_json_output_parser'
|
@property
def _type(self) ->str:
return 'simple_json_output_parser'
| null |
prepare
|
project_name = project_name or name_generation.random_name()
wrapped_model, project, dataset, examples = _prepare_eval_run(client,
dataset_name, llm_or_chain_factory, project_name, project_metadata=
project_metadata, tags=tags)
tags = tags or []
for k, v in (project.metadata.get('git') or {}).items():
tags.append(f'git:{k}={v}')
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory)
run_evaluators = _setup_evaluation(wrapped_model, examples, evaluation,
dataset.data_type or DataType.kv)
_validate_example_inputs(examples[0], wrapped_model, input_mapper)
progress_bar = progress.ProgressBarCallback(len(examples))
configs = [RunnableConfig(callbacks=[LangChainTracer(project_name=project.
name, client=client, use_threading=False, example_id=example.id),
EvaluatorCallbackHandler(evaluators=run_evaluators or [], client=client,
example_id=example.id, max_concurrency=0), progress_bar], tags=tags,
max_concurrency=concurrency_level) for example in examples]
return cls(client=client, project=project, wrapped_model=wrapped_model,
examples=examples, configs=configs)
|
@classmethod
def prepare(cls, client: Client, dataset_name: str, llm_or_chain_factory:
MODEL_OR_CHAIN_FACTORY, project_name: Optional[str], evaluation:
Optional[smith_eval.RunEvalConfig]=None, tags: Optional[List[str]]=None,
input_mapper: Optional[Callable[[Dict], Any]]=None, concurrency_level:
int=5, project_metadata: Optional[Dict[str, Any]]=None
) ->_DatasetRunContainer:
project_name = project_name or name_generation.random_name()
wrapped_model, project, dataset, examples = _prepare_eval_run(client,
dataset_name, llm_or_chain_factory, project_name, project_metadata=
project_metadata, tags=tags)
tags = tags or []
for k, v in (project.metadata.get('git') or {}).items():
tags.append(f'git:{k}={v}')
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory)
run_evaluators = _setup_evaluation(wrapped_model, examples, evaluation,
dataset.data_type or DataType.kv)
_validate_example_inputs(examples[0], wrapped_model, input_mapper)
progress_bar = progress.ProgressBarCallback(len(examples))
configs = [RunnableConfig(callbacks=[LangChainTracer(project_name=
project.name, client=client, use_threading=False, example_id=
example.id), EvaluatorCallbackHandler(evaluators=run_evaluators or
[], client=client, example_id=example.id, max_concurrency=0),
progress_bar], tags=tags, max_concurrency=concurrency_level) for
example in examples]
return cls(client=client, project=project, wrapped_model=wrapped_model,
examples=examples, configs=configs)
| null |
_get_docs
|
docs = [Document(page_content=doc['snippet'], metadata=doc) for doc in
response.generation_info['documents']]
docs.append(Document(page_content=response.message.content, metadata={
'type': 'model_response', 'citations': response.generation_info[
'citations'], 'search_results': response.generation_info[
'search_results'], 'search_queries': response.generation_info[
'search_queries'], 'token_count': response.generation_info['token_count']})
)
return docs
|
def _get_docs(response: Any) ->List[Document]:
docs = [Document(page_content=doc['snippet'], metadata=doc) for doc in
response.generation_info['documents']]
docs.append(Document(page_content=response.message.content, metadata={
'type': 'model_response', 'citations': response.generation_info[
'citations'], 'search_results': response.generation_info[
'search_results'], 'search_queries': response.generation_info[
'search_queries'], 'token_count': response.generation_info[
'token_count']}))
return docs
| null |
embeddings
|
return self._embedding
|
@property
def embeddings(self) ->Embeddings:
return self._embedding
| null |
_chain_type
|
raise NotImplementedError
|
@property
def _chain_type(self) ->str:
raise NotImplementedError
| null |
test_clarifai_call
|
"""Test valid call to clarifai."""
llm = Clarifai(user_id='google-research', app_id='summarization', model_id=
'text-summarization-english-pegasus')
output = llm(
'A chain is a serial assembly of connected pieces, called links, typically made of metal, with an overall character similar to that of a rope in that it is flexible and curved in compression but linear, rigid, and load-bearing in tension. A chain may consist of two or more links.'
)
assert isinstance(output, str)
assert llm._llm_type == 'clarifai'
assert llm.model_id == 'text-summarization-english-pegasus'
|
def test_clarifai_call() ->None:
"""Test valid call to clarifai."""
llm = Clarifai(user_id='google-research', app_id='summarization',
model_id='text-summarization-english-pegasus')
output = llm(
'A chain is a serial assembly of connected pieces, called links, typically made of metal, with an overall character similar to that of a rope in that it is flexible and curved in compression but linear, rigid, and load-bearing in tension. A chain may consist of two or more links.'
)
assert isinstance(output, str)
assert llm._llm_type == 'clarifai'
assert llm.model_id == 'text-summarization-english-pegasus'
|
Test valid call to clarifai.
|
test_runnable_lambda_stream_with_callbacks
|
"""Test that stream works for RunnableLambda when using callbacks."""
tracer = FakeTracer()
llm_res = "i'm a textbot"
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
config: RunnableConfig = {'callbacks': [tracer]}
assert list(RunnableLambda(lambda x: llm).stream('', config=config)) == list(
llm_res)
assert len(tracer.runs) == 1
assert tracer.runs[0].error is None
assert tracer.runs[0].outputs == {'output': llm_res}
def raise_value_error(x: int) ->int:
"""Raise a value error."""
raise ValueError('x is too large')
with pytest.raises(ValueError):
for _ in RunnableLambda(raise_value_error).stream(1000, config=config):
pass
assert len(tracer.runs) == 2
assert "ValueError('x is too large')" in str(tracer.runs[1].error)
assert tracer.runs[1].outputs is None
|
def test_runnable_lambda_stream_with_callbacks() ->None:
"""Test that stream works for RunnableLambda when using callbacks."""
tracer = FakeTracer()
llm_res = "i'm a textbot"
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
config: RunnableConfig = {'callbacks': [tracer]}
assert list(RunnableLambda(lambda x: llm).stream('', config=config)
) == list(llm_res)
assert len(tracer.runs) == 1
assert tracer.runs[0].error is None
assert tracer.runs[0].outputs == {'output': llm_res}
def raise_value_error(x: int) ->int:
"""Raise a value error."""
raise ValueError('x is too large')
with pytest.raises(ValueError):
for _ in RunnableLambda(raise_value_error).stream(1000, config=config):
pass
assert len(tracer.runs) == 2
assert "ValueError('x is too large')" in str(tracer.runs[1].error)
assert tracer.runs[1].outputs is None
|
Test that stream works for RunnableLambda when using callbacks.
|
_import_alibaba_cloud_open_search_settings
|
from langchain_community.vectorstores.alibabacloud_opensearch import AlibabaCloudOpenSearchSettings
return AlibabaCloudOpenSearchSettings
|
def _import_alibaba_cloud_open_search_settings() ->Any:
from langchain_community.vectorstores.alibabacloud_opensearch import AlibabaCloudOpenSearchSettings
return AlibabaCloudOpenSearchSettings
| null |
__init__
|
return super().__init__(variable_name=variable_name, optional=optional, **
kwargs)
|
def __init__(self, variable_name: str, *, optional: bool=False, **kwargs: Any):
return super().__init__(variable_name=variable_name, optional=optional,
**kwargs)
| null |
test_simple_context_str_no_emb
|
expected = [{'a_namespace': 'test'}]
assert base.embed('test', MockEncoder(), 'a_namespace') == expected
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_simple_context_str_no_emb() ->None:
expected = [{'a_namespace': 'test'}]
assert base.embed('test', MockEncoder(), 'a_namespace') == expected
| null |
_agent_type
|
raise NotImplementedError
|
@property
def _agent_type(self) ->str:
raise NotImplementedError
| null |
test_cosine_similarity_top_k
|
expected_idxs = [(0, 0), (2, 2), (1, 2), (0, 2), (2, 0)]
expected_scores = [1.0, 0.93419873, 0.87038828, 0.83743579, 0.5976143]
actual_idxs, actual_scores = cosine_similarity_top_k(X, Y)
assert actual_idxs == expected_idxs
assert np.allclose(expected_scores, actual_scores)
|
def test_cosine_similarity_top_k(X: List[List[float]], Y: List[List[float]]
) ->None:
expected_idxs = [(0, 0), (2, 2), (1, 2), (0, 2), (2, 0)]
expected_scores = [1.0, 0.93419873, 0.87038828, 0.83743579, 0.5976143]
actual_idxs, actual_scores = cosine_similarity_top_k(X, Y)
assert actual_idxs == expected_idxs
assert np.allclose(expected_scores, actual_scores)
| null |
_format_output
|
if self.feature == 'text':
return output[self.provider]['generated_text']
else:
return output[self.provider]['items'][0]['image']
|
def _format_output(self, output: dict) ->str:
if self.feature == 'text':
return output[self.provider]['generated_text']
else:
return output[self.provider]['items'][0]['image']
| null |
test_chroma_legacy_batching
|
import chromadb
client = chromadb.HttpClient()
embedding_function = Fak(size=255)
col = client.get_or_create_collection('my_collection', embedding_function=
embedding_function.embed_documents)
docs = ['This is a test document'] * 100
Chroma.from_texts(client=client, collection_name=col.name, texts=docs,
embedding=embedding_function, ids=[str(uuid.uuid4()) for _ in range(len
(docs))])
|
@pytest.mark.requires('chromadb')
@pytest.mark.skipif(not is_api_accessible(
'http://localhost:8000/api/v1/heartbeat'), reason='API not accessible')
@pytest.mark.skipif(batch_support_chroma_version(), reason=
'ChromaDB version does not support batching')
def test_chroma_legacy_batching() ->None:
import chromadb
client = chromadb.HttpClient()
embedding_function = Fak(size=255)
col = client.get_or_create_collection('my_collection',
embedding_function=embedding_function.embed_documents)
docs = ['This is a test document'] * 100
Chroma.from_texts(client=client, collection_name=col.name, texts=docs,
embedding=embedding_function, ids=[str(uuid.uuid4()) for _ in range
(len(docs))])
| null |
_run
|
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.delete(data['url'])
response = response[:self.response_length]
return self.llm_chain.predict(response=response, instructions=data[
'output_instructions']).strip()
|
def _run(self, text: str) ->str:
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
response = self.requests_wrapper.delete(data['url'])
response = response[:self.response_length]
return self.llm_chain.predict(response=response, instructions=data[
'output_instructions']).strip()
| null |
check_code
|
v = strip_python_markdown_tags(v).strip()
try:
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(v)
temp_file_path = temp_file.name
try:
format_black(temp_file_path)
format_ruff(temp_file_path)
except subprocess.CalledProcessError:
pass
with open(temp_file_path, 'r') as temp_file:
v = temp_file.read()
complaints = dict(ruff=None, mypy=None)
try:
check_ruff(temp_file_path)
except subprocess.CalledProcessError as e:
complaints['ruff'] = e.output
try:
check_mypy(temp_file_path)
except subprocess.CalledProcessError as e:
complaints['mypy'] = e.output
if any(complaints.values()):
code_str = f'```{temp_file_path}\n{v}```'
error_messages = [f'```{key}\n{value}```' for key, value in
complaints.items() if value]
raise ValueError('\n\n'.join([code_str] + error_messages))
finally:
os.remove(temp_file_path)
return v
|
@validator('code')
@classmethod
def check_code(cls, v: str) ->str:
v = strip_python_markdown_tags(v).strip()
try:
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
temp_file.write(v)
temp_file_path = temp_file.name
try:
format_black(temp_file_path)
format_ruff(temp_file_path)
except subprocess.CalledProcessError:
pass
with open(temp_file_path, 'r') as temp_file:
v = temp_file.read()
complaints = dict(ruff=None, mypy=None)
try:
check_ruff(temp_file_path)
except subprocess.CalledProcessError as e:
complaints['ruff'] = e.output
try:
check_mypy(temp_file_path)
except subprocess.CalledProcessError as e:
complaints['mypy'] = e.output
if any(complaints.values()):
code_str = f'```{temp_file_path}\n{v}```'
error_messages = [f'```{key}\n{value}```' for key, value in
complaints.items() if value]
raise ValueError('\n\n'.join([code_str] + error_messages))
finally:
os.remove(temp_file_path)
return v
| null |
similarity_search
|
"""
Return the documents that are semantically most relevant to the query.
Args:
query (str): String to query the vectorstore with.
k (Optional[int]): Number of documents to return. Defaults to 4.
collection_name (Optional[str]): Collection to use.
Defaults to "langchain_store" or the one provided before.
Returns:
List of documents that are semantically most relevant to the query
"""
if not collection_name:
collection_name = self._collection_name
query_vector = self._embeddings.embed_query(query)
status_code, response = self._client.query(table_name=collection_name,
query_field='embeddings', query_vector=query_vector, limit=k)
if status_code != 200:
logger.error(f"Search failed: {response['message']}.")
raise Exception('Error: {}.'.format(response['message']))
exclude_keys = ['id', 'text', 'embeddings']
return list(map(lambda item: Document(page_content=item['text'], metadata={
key: item[key] for key in item if key not in exclude_keys}), response[
'result']))
|
def similarity_search(self, query: str, k: int=4, collection_name: str='',
**kwargs: Any) ->List[Document]:
"""
Return the documents that are semantically most relevant to the query.
Args:
query (str): String to query the vectorstore with.
k (Optional[int]): Number of documents to return. Defaults to 4.
collection_name (Optional[str]): Collection to use.
Defaults to "langchain_store" or the one provided before.
Returns:
List of documents that are semantically most relevant to the query
"""
if not collection_name:
collection_name = self._collection_name
query_vector = self._embeddings.embed_query(query)
status_code, response = self._client.query(table_name=collection_name,
query_field='embeddings', query_vector=query_vector, limit=k)
if status_code != 200:
logger.error(f"Search failed: {response['message']}.")
raise Exception('Error: {}.'.format(response['message']))
exclude_keys = ['id', 'text', 'embeddings']
return list(map(lambda item: Document(page_content=item['text'],
metadata={key: item[key] for key in item if key not in exclude_keys
}), response['result']))
|
Return the documents that are semantically most relevant to the query.
Args:
query (str): String to query the vectorstore with.
k (Optional[int]): Number of documents to return. Defaults to 4.
collection_name (Optional[str]): Collection to use.
Defaults to "langchain_store" or the one provided before.
Returns:
List of documents that are semantically most relevant to the query
|
test_quip_loader_load_data_all_folder
|
mock_quip.get_authenticated_user.side_effect = [self.
_get_mock_authenticated_user()]
mock_quip.get_folder.side_effect = [self._get_mock_folder(self.
MOCK_FOLDER_IDS[0])]
mock_quip.get_thread.side_effect = [self._get_mock_thread(self.
MOCK_THREAD_IDS[0]), self._get_mock_thread(self.MOCK_THREAD_IDS[1])]
quip_loader = self._get_mock_quip_loader(mock_quip)
documents = quip_loader.load(include_all_folders=True)
assert mock_quip.get_folder.call_count == 1
assert mock_quip.get_thread.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].metadata.get('source'
) == f'https://example.quip.com/{self.MOCK_THREAD_IDS[0]}'
assert documents[1].metadata.get('source'
) == f'https://example.quip.com/{self.MOCK_THREAD_IDS[1]}'
|
def test_quip_loader_load_data_all_folder(self, mock_quip: MagicMock) ->None:
mock_quip.get_authenticated_user.side_effect = [self.
_get_mock_authenticated_user()]
mock_quip.get_folder.side_effect = [self._get_mock_folder(self.
MOCK_FOLDER_IDS[0])]
mock_quip.get_thread.side_effect = [self._get_mock_thread(self.
MOCK_THREAD_IDS[0]), self._get_mock_thread(self.MOCK_THREAD_IDS[1])]
quip_loader = self._get_mock_quip_loader(mock_quip)
documents = quip_loader.load(include_all_folders=True)
assert mock_quip.get_folder.call_count == 1
assert mock_quip.get_thread.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].metadata.get('source'
) == f'https://example.quip.com/{self.MOCK_THREAD_IDS[0]}'
assert documents[1].metadata.get('source'
) == f'https://example.quip.com/{self.MOCK_THREAD_IDS[1]}'
| null |
test_azure_openai_embedding_query
|
"""Test openai embeddings."""
document = 'foo bar'
embedding = _get_embeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
|
@pytest.mark.scheduled
def test_azure_openai_embedding_query() ->None:
"""Test openai embeddings."""
document = 'foo bar'
embedding = _get_embeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
|
Test openai embeddings.
|
texts
|
return ['foo', 'bar', 'baz']
|
@pytest.fixture
def texts() ->List[str]:
return ['foo', 'bar', 'baz']
| null |
visit_structured_query
|
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
| null |
_import_docarray_hnsw
|
from langchain_community.vectorstores.docarray import DocArrayHnswSearch
return DocArrayHnswSearch
|
def _import_docarray_hnsw() ->Any:
from langchain_community.vectorstores.docarray import DocArrayHnswSearch
return DocArrayHnswSearch
| null |
_import_gmail_GmailSearch
|
from langchain_community.tools.gmail import GmailSearch
return GmailSearch
|
def _import_gmail_GmailSearch() ->Any:
from langchain_community.tools.gmail import GmailSearch
return GmailSearch
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
beam_client_id = get_from_dict_or_env(values, 'beam_client_id',
'BEAM_CLIENT_ID')
beam_client_secret = get_from_dict_or_env(values, 'beam_client_secret',
'BEAM_CLIENT_SECRET')
values['beam_client_id'] = beam_client_id
values['beam_client_secret'] = beam_client_secret
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
beam_client_id = get_from_dict_or_env(values, 'beam_client_id',
'BEAM_CLIENT_ID')
beam_client_secret = get_from_dict_or_env(values, 'beam_client_secret',
'BEAM_CLIENT_SECRET')
values['beam_client_id'] = beam_client_id
values['beam_client_secret'] = beam_client_secret
return values
|
Validate that api key and python package exists in environment.
|
embed_documents
|
"""Compute doc embeddings using a SageMaker Inference Endpoint.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size defines how many input texts will
be grouped together as request. If None, will use the
chunk size specified by the class.
Returns:
List of embeddings, one for each text.
"""
results = []
_chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
for i in range(0, len(texts), _chunk_size):
response = self._embedding_func(texts[i:i + _chunk_size])
results.extend(response)
return results
|
def embed_documents(self, texts: List[str], chunk_size: int=64) ->List[List
[float]]:
"""Compute doc embeddings using a SageMaker Inference Endpoint.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size defines how many input texts will
be grouped together as request. If None, will use the
chunk size specified by the class.
Returns:
List of embeddings, one for each text.
"""
results = []
_chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
for i in range(0, len(texts), _chunk_size):
response = self._embedding_func(texts[i:i + _chunk_size])
results.extend(response)
return results
|
Compute doc embeddings using a SageMaker Inference Endpoint.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size defines how many input texts will
be grouped together as request. If None, will use the
chunk size specified by the class.
Returns:
List of embeddings, one for each text.
|
lazy_load
|
"""Load documents."""
try:
from baidubce.services.bos.bos_client import BosClient
except ImportError:
raise ImportError('Please using `pip install bce-python-sdk`' +
' before import bos related package.')
client = BosClient(self.conf)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f'{temp_dir}/{self.bucket}/{self.key}'
os.makedirs(os.path.dirname(file_path), exist_ok=True)
logger.debug(f'get object key {self.key} to file {file_path}')
client.get_object_to_file(self.bucket, self.key, file_path)
try:
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
return iter(documents)
except Exception as ex:
logger.error(f'load document error = {ex}')
return iter([Document(page_content='')])
|
def lazy_load(self) ->Iterator[Document]:
"""Load documents."""
try:
from baidubce.services.bos.bos_client import BosClient
except ImportError:
raise ImportError('Please using `pip install bce-python-sdk`' +
' before import bos related package.')
client = BosClient(self.conf)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f'{temp_dir}/{self.bucket}/{self.key}'
os.makedirs(os.path.dirname(file_path), exist_ok=True)
logger.debug(f'get object key {self.key} to file {file_path}')
client.get_object_to_file(self.bucket, self.key, file_path)
try:
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
return iter(documents)
except Exception as ex:
logger.error(f'load document error = {ex}')
return iter([Document(page_content='')])
|
Load documents.
|
from_template
|
"""Create a chat prompt template from a template string.
Creates a chat template consisting of a single message assumed to be from
the human.
Args:
template: template string
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
"""
prompt_template = PromptTemplate.from_template(template, **kwargs)
message = HumanMessagePromptTemplate(prompt=prompt_template)
return cls.from_messages([message])
|
@classmethod
def from_template(cls, template: str, **kwargs: Any) ->ChatPromptTemplate:
"""Create a chat prompt template from a template string.
Creates a chat template consisting of a single message assumed to be from
the human.
Args:
template: template string
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
"""
prompt_template = PromptTemplate.from_template(template, **kwargs)
message = HumanMessagePromptTemplate(prompt=prompt_template)
return cls.from_messages([message])
|
Create a chat prompt template from a template string.
Creates a chat template consisting of a single message assumed to be from
the human.
Args:
template: template string
**kwargs: keyword arguments to pass to the constructor.
Returns:
A new instance of this class.
|
validate_environment
|
"""Dont do anything if client provided externally"""
if values.get('client') is not None:
return values
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values['credentials_profile_name'] is not None:
session = boto3.Session(profile_name=values[
'credentials_profile_name'])
else:
session = boto3.Session()
values['client'] = session.client('sagemaker-runtime', region_name=
values['region_name'])
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e
except ImportError:
raise ImportError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Dont do anything if client provided externally"""
if values.get('client') is not None:
return values
"""Validate that AWS credentials to and python package exists in environment."""
try:
import boto3
try:
if values['credentials_profile_name'] is not None:
session = boto3.Session(profile_name=values[
'credentials_profile_name'])
else:
session = boto3.Session()
values['client'] = session.client('sagemaker-runtime',
region_name=values['region_name'])
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e
except ImportError:
raise ImportError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
return values
|
Dont do anything if client provided externally
|
test__convert_dict_to_message_other_role
|
message_dict = {'role': 'system', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role='system', content='foo')
assert result == expected_output
|
def test__convert_dict_to_message_other_role() ->None:
message_dict = {'role': 'system', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = ChatMessage(role='system', content='foo')
assert result == expected_output
| null |
on_retriever_end_common
|
self.ends += 1
self.retriever_ends += 1
|
def on_retriever_end_common(self) ->None:
self.ends += 1
self.retriever_ends += 1
| null |
_import_redis
|
from langchain_community.vectorstores.redis import Redis
return Redis
|
def _import_redis() ->Any:
from langchain_community.vectorstores.redis import Redis
return Redis
| null |
shim_docstore
|
byte_store = values.get('byte_store')
docstore = values.get('docstore')
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
raise Exception('You must pass a `byte_store` parameter.')
values['docstore'] = docstore
return values
|
@root_validator(pre=True)
def shim_docstore(cls, values: Dict) ->Dict:
byte_store = values.get('byte_store')
docstore = values.get('docstore')
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
raise Exception('You must pass a `byte_store` parameter.')
values['docstore'] = docstore
return values
| null |
current_path
|
"""Return the thoughts path."""
return self.stack[:]
|
def current_path(self) ->List[Thought]:
"""Return the thoughts path."""
return self.stack[:]
|
Return the thoughts path.
|
_import_volcengine_maas
|
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
|
def _import_volcengine_maas() ->Any:
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
| null |
BasedOn
|
return _BasedOn(anything)
|
def BasedOn(anything: Any) ->_BasedOn:
return _BasedOn(anything)
| null |
llm
|
return False
|
@property
def llm(self) ->bool:
return False
| null |
fake_erroring_retriever_v2
|
return FakeRetrieverV2(throw_error=True)
|
@pytest.fixture
def fake_erroring_retriever_v2() ->BaseRetriever:
return FakeRetrieverV2(throw_error=True)
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
try:
from pinecone_text.hybrid import hybrid_convex_scale
from pinecone_text.sparse.base_sparse_encoder import BaseSparseEncoder
except ImportError:
raise ImportError(
'Could not import pinecone_text python package. Please install it with `pip install pinecone_text`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
try:
from pinecone_text.hybrid import hybrid_convex_scale
from pinecone_text.sparse.base_sparse_encoder import BaseSparseEncoder
except ImportError:
raise ImportError(
'Could not import pinecone_text python package. Please install it with `pip install pinecone_text`.'
)
return values
|
Validate that api key and python package exists in environment.
|
_generate
|
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, 'stream': False}
response = completion_with_retry(self, messages=message_dicts,
models_priority_list=self.models_priority_list, run_manager=run_manager,
**params)
return self._create_chat_result(response)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
Optional[bool]=None, **kwargs: Any) ->ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, 'stream': False}
response = completion_with_retry(self, messages=message_dicts,
models_priority_list=self.models_priority_list, run_manager=
run_manager, **params)
return self._create_chat_result(response)
| null |
test_pgvector_embeddings
|
"""Test end to end construction with embeddings and search."""
texts = ['foo', 'bar', 'baz']
text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = PGVector.from_embeddings(text_embeddings=text_embedding_pairs,
collection_name='test_collection', embedding=
FakeEmbeddingsWithAdaDimension(), connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_pgvector_embeddings() ->None:
"""Test end to end construction with embeddings and search."""
texts = ['foo', 'bar', 'baz']
text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = PGVector.from_embeddings(text_embeddings=
text_embedding_pairs, collection_name='test_collection', embedding=
FakeEmbeddingsWithAdaDimension(), connection_string=
CONNECTION_STRING, pre_delete_collection=True)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
Test end to end construction with embeddings and search.
|
_import_ctranslate2
|
from langchain_community.llms.ctranslate2 import CTranslate2
return CTranslate2
|
def _import_ctranslate2() ->Any:
from langchain_community.llms.ctranslate2 import CTranslate2
return CTranslate2
| null |
similarity_search_with_score
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding.embed_query(query)
docs = self.similarity_search_with_score_by_vector(embedding=embedding, k=k,
query=query)
return docs
|
def similarity_search_with_score(self, query: str, k: int=4) ->List[Tuple[
Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding.embed_query(query)
docs = self.similarity_search_with_score_by_vector(embedding=embedding,
k=k, query=query)
return docs
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
|
input_keys
|
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ['prediction', 'prediction_b']
|
@property
def input_keys(self) ->List[str]:
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ['prediction', 'prediction_b']
|
Get the input keys.
Returns:
List[str]: The input keys.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.