method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_import_ctransformers
|
from langchain_community.llms.ctransformers import CTransformers
return CTransformers
|
def _import_ctransformers() ->Any:
    """Lazily import and return the ``CTransformers`` LLM class.

    Deferring the import keeps ``langchain_community`` optional until the
    class is actually requested.
    """
    from langchain_community.llms.ctransformers import CTransformers
    return CTransformers
| null |
_get_relevant_documents
|
processed_query = self.preprocess_func(query)
return_docs = self.vectorizer.get_top_n(processed_query, self.docs, n=self.k)
return return_docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Return the top ``self.k`` documents for *query*.

    The raw query is first run through ``self.preprocess_func`` and the
    resulting tokens are handed to the retriever's vectorizer.
    """
    tokens = self.preprocess_func(query)
    return self.vectorizer.get_top_n(tokens, self.docs, n=self.k)
| null |
_on_llm_start
|
crumbs = self.get_breadcrumbs(run)
inputs = {'prompts': [p.strip() for p in run.inputs['prompts']]
} if 'prompts' in run.inputs else run.inputs
self.function_callback(f"{get_colored_text('[llm/start]', color='green')} " +
get_bolded_text(f"""[{crumbs}] Entering LLM run with input:
""") +
f"{try_json_stringify(inputs, '[inputs]')}")
|
def _on_llm_start(self, run: Run) ->None:
    """Print a colored trace line when an LLM run starts."""
    breadcrumbs = self.get_breadcrumbs(run)
    # Strip surrounding whitespace from prompts when present; otherwise
    # pass the raw inputs through untouched.
    if 'prompts' in run.inputs:
        inputs = {'prompts': [prompt.strip() for prompt in run.inputs[
            'prompts']]}
    else:
        inputs = run.inputs
    header = get_colored_text('[llm/start]', color='green')
    title = get_bolded_text(f"""[{breadcrumbs}] Entering LLM run with input:
""")
    payload = try_json_stringify(inputs, '[inputs]')
    self.function_callback(f"{header} " + title + f"{payload}")
| null |
validate_environment
|
"""Validate that credentials and python package exists in environment."""
values['url'] = convert_to_secret_str(get_from_dict_or_env(values, 'url',
'WATSONX_URL'))
if 'cloud.ibm.com' in values.get('url', '').get_secret_value():
values['apikey'] = convert_to_secret_str(get_from_dict_or_env(values,
'apikey', 'WATSONX_APIKEY'))
else:
if not values['token'
] and 'WATSONX_TOKEN' not in os.environ and not values['password'
] and 'WATSONX_PASSWORD' not in os.environ and not values['apikey'
] and 'WATSONX_APIKEY' not in os.environ:
raise ValueError(
"Did not find 'token', 'password' or 'apikey', please add an environment variable `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' which contains it, or pass 'token', 'password' or 'apikey' as a named parameter."
)
elif values['token'] or 'WATSONX_TOKEN' in os.environ:
values['token'] = convert_to_secret_str(get_from_dict_or_env(values,
'token', 'WATSONX_TOKEN'))
elif values['password'] or 'WATSONX_PASSWORD' in os.environ:
values['password'] = convert_to_secret_str(get_from_dict_or_env(
values, 'password', 'WATSONX_PASSWORD'))
values['username'] = convert_to_secret_str(get_from_dict_or_env(
values, 'username', 'WATSONX_USERNAME'))
elif values['apikey'] or 'WATSONX_APIKEY' in os.environ:
values['apikey'] = convert_to_secret_str(get_from_dict_or_env(
values, 'apikey', 'WATSONX_APIKEY'))
values['username'] = convert_to_secret_str(get_from_dict_or_env(
values, 'username', 'WATSONX_USERNAME'))
if not values['instance_id'] or 'WATSONX_INSTANCE_ID' not in os.environ:
values['instance_id'] = convert_to_secret_str(get_from_dict_or_env(
values, 'instance_id', 'WATSONX_INSTANCE_ID'))
try:
from ibm_watsonx_ai.foundation_models import ModelInference
credentials = {'url': values['url'].get_secret_value() if values['url']
else None, 'apikey': values['apikey'].get_secret_value() if values
['apikey'] else None, 'token': values['token'].get_secret_value() if
values['token'] else None, 'password': values['password'].
get_secret_value() if values['password'] else None, 'username':
values['username'].get_secret_value() if values['username'] else
None, 'instance_id': values['instance_id'].get_secret_value() if
values['instance_id'] else None, 'version': values['version'].
get_secret_value() if values['version'] else None}
credentials_without_none_value = {key: value for key, value in
credentials.items() if value is not None}
watsonx_model = ModelInference(model_id=values['model_id'],
deployment_id=values['deployment_id'], credentials=
credentials_without_none_value, params=values['params'], project_id
=values['project_id'], space_id=values['space_id'], verify=values[
'verify'])
values['watsonx_model'] = watsonx_model
except ImportError:
raise ImportError(
'Could not import ibm_watsonx_ai python package. Please install it with `pip install ibm_watsonx_ai`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that credentials and python package exists in environment."""
    # The URL is always required; it also decides which auth scheme applies.
    values['url'] = convert_to_secret_str(get_from_dict_or_env(values,
        'url', 'WATSONX_URL'))
    if 'cloud.ibm.com' in values.get('url', '').get_secret_value():
        # IBM Cloud endpoints: an API key is the only accepted credential.
        values['apikey'] = convert_to_secret_str(get_from_dict_or_env(
            values, 'apikey', 'WATSONX_APIKEY'))
    else:
        # Non-cloud (e.g. software/CPD) endpoints accept one of: token,
        # username+password, or username+apikey — require at least one,
        # from either the values dict or the environment.
        if not values['token'
            ] and 'WATSONX_TOKEN' not in os.environ and not values['password'
            ] and 'WATSONX_PASSWORD' not in os.environ and not values['apikey'
            ] and 'WATSONX_APIKEY' not in os.environ:
            raise ValueError(
                "Did not find 'token', 'password' or 'apikey', please add an environment variable `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or 'WATSONX_APIKEY' which contains it, or pass 'token', 'password' or 'apikey' as a named parameter."
                )
        # Resolve exactly one credential, preferring token, then
        # password, then apikey (password/apikey also need a username).
        elif values['token'] or 'WATSONX_TOKEN' in os.environ:
            values['token'] = convert_to_secret_str(get_from_dict_or_env(
                values, 'token', 'WATSONX_TOKEN'))
        elif values['password'] or 'WATSONX_PASSWORD' in os.environ:
            values['password'] = convert_to_secret_str(get_from_dict_or_env
                (values, 'password', 'WATSONX_PASSWORD'))
            values['username'] = convert_to_secret_str(get_from_dict_or_env
                (values, 'username', 'WATSONX_USERNAME'))
        elif values['apikey'] or 'WATSONX_APIKEY' in os.environ:
            values['apikey'] = convert_to_secret_str(get_from_dict_or_env(
                values, 'apikey', 'WATSONX_APIKEY'))
            values['username'] = convert_to_secret_str(get_from_dict_or_env
                (values, 'username', 'WATSONX_USERNAME'))
        # NOTE(review): the `or` means a caller-supplied instance_id is
        # re-resolved whenever the env var is absent, and skipped only
        # when BOTH are present — confirm this should not be `and`.
        if not values['instance_id'
            ] or 'WATSONX_INSTANCE_ID' not in os.environ:
            values['instance_id'] = convert_to_secret_str(get_from_dict_or_env
                (values, 'instance_id', 'WATSONX_INSTANCE_ID'))
    try:
        from ibm_watsonx_ai.foundation_models import ModelInference
        # Unwrap every SecretStr (None when unset) so the SDK receives
        # plain strings, then drop unset entries entirely.
        credentials = {'url': values['url'].get_secret_value() if values[
            'url'] else None, 'apikey': values['apikey'].get_secret_value() if
            values['apikey'] else None, 'token': values['token'].
            get_secret_value() if values['token'] else None, 'password':
            values['password'].get_secret_value() if values['password'] else
            None, 'username': values['username'].get_secret_value() if
            values['username'] else None, 'instance_id': values[
            'instance_id'].get_secret_value() if values['instance_id'] else
            None, 'version': values['version'].get_secret_value() if values
            ['version'] else None}
        credentials_without_none_value = {key: value for key, value in
            credentials.items() if value is not None}
        # Eagerly construct the SDK client so bad credentials surface at
        # validation time rather than on first use.
        watsonx_model = ModelInference(model_id=values['model_id'],
            deployment_id=values['deployment_id'], credentials=
            credentials_without_none_value, params=values['params'],
            project_id=values['project_id'], space_id=values['space_id'],
            verify=values['verify'])
        values['watsonx_model'] = watsonx_model
    except ImportError:
        raise ImportError(
            'Could not import ibm_watsonx_ai python package. Please install it with `pip install ibm_watsonx_ai`.'
            )
    return values
|
Validate that credentials and python package exists in environment.
|
test_base_blob_parser
|
"""Verify that the eager method is hooked up to the lazy method by default."""
class MyParser(BaseBlobParser):
"""A simple parser that returns a single document."""
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Lazy parsing interface."""
yield Document(page_content='foo')
parser = MyParser()
assert isinstance(parser.lazy_parse(Blob(data='who?')), Iterator)
docs = parser.parse(Blob(data='who?'))
assert len(docs) == 1
assert docs[0].page_content == 'foo'
|
def test_base_blob_parser() ->None:
    """Verify that the eager method is hooked up to the lazy method by default."""

    class MyParser(BaseBlobParser):
        """A simple parser that returns a single document."""

        def lazy_parse(self, blob: Blob) ->Iterator[Document]:
            """Lazy parsing interface."""
            yield Document(page_content='foo')

    parser = MyParser()
    lazy_result = parser.lazy_parse(Blob(data='who?'))
    assert isinstance(lazy_result, Iterator)
    eager_result = parser.parse(Blob(data='who?'))
    assert len(eager_result) == 1
    assert eager_result[0].page_content == 'foo'
|
Verify that the eager method is hooked up to the lazy method by default.
|
process_page
|
if keep_markdown_format:
try:
from markdownify import markdownify
except ImportError:
raise ImportError(
'`markdownify` package not found, please run `pip install markdownify`'
)
if include_comments or not keep_markdown_format:
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
'`beautifulsoup4` package not found, please run `pip install beautifulsoup4`'
)
if include_attachments:
attachment_texts = self.process_attachment(page['id'], ocr_languages)
else:
attachment_texts = []
content = content_format.get_content(page)
if keep_markdown_format:
text = markdownify(content, heading_style='ATX') + ''.join(attachment_texts
)
elif keep_newlines:
text = BeautifulSoup(content.replace('</p>', '\n</p>').replace('<br />',
'\n'), 'lxml').get_text(' ') + ''.join(attachment_texts)
else:
text = BeautifulSoup(content, 'lxml').get_text(' ', strip=True) + ''.join(
attachment_texts)
if include_comments:
comments = self.confluence.get_page_comments(page['id'], expand=
'body.view.value', depth='all')['results']
comment_texts = [BeautifulSoup(comment['body']['view']['value'], 'lxml'
).get_text(' ', strip=True) for comment in comments]
text = text + ''.join(comment_texts)
metadata = {'title': page['title'], 'id': page['id'], 'source': self.
base_url.strip('/') + page['_links']['webui']}
if 'version' in page and 'when' in page['version']:
metadata['when'] = page['version']['when']
return Document(page_content=text, metadata=metadata)
|
def process_page(self, page: dict, include_attachments: bool,
    include_comments: bool, content_format: ContentFormat, ocr_languages:
    Optional[str]=None, keep_markdown_format: Optional[bool]=False,
    keep_newlines: bool=False) ->Document:
    """Convert one raw Confluence page dict into a Document.

    Page body HTML is rendered either as markdown (``markdownify``) or as
    plain text (``BeautifulSoup``); attachment text and comment text are
    appended when requested. Title, id and a web URL go into metadata.
    Raises ImportError when a required optional package is missing.
    """
    # Import the optional converters up front so a missing package fails
    # fast, before any network/API work below.
    if keep_markdown_format:
        try:
            from markdownify import markdownify
        except ImportError:
            raise ImportError(
                '`markdownify` package not found, please run `pip install markdownify`'
                )
    # BeautifulSoup is needed for plain-text extraction and for comments.
    if include_comments or not keep_markdown_format:
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            raise ImportError(
                '`beautifulsoup4` package not found, please run `pip install beautifulsoup4`'
                )
    if include_attachments:
        attachment_texts = self.process_attachment(page['id'], ocr_languages)
    else:
        attachment_texts = []
    content = content_format.get_content(page)
    if keep_markdown_format:
        text = markdownify(content, heading_style='ATX') + ''.join(
            attachment_texts)
    elif keep_newlines:
        # Inject newlines for paragraph/line-break tags before stripping
        # markup, so the plain text keeps the original line structure.
        text = BeautifulSoup(content.replace('</p>', '\n</p>').replace(
            '<br />', '\n'), 'lxml').get_text(' ') + ''.join(attachment_texts)
    else:
        text = BeautifulSoup(content, 'lxml').get_text(' ', strip=True
            ) + ''.join(attachment_texts)
    if include_comments:
        comments = self.confluence.get_page_comments(page['id'], expand=
            'body.view.value', depth='all')['results']
        comment_texts = [BeautifulSoup(comment['body']['view']['value'],
            'lxml').get_text(' ', strip=True) for comment in comments]
        text = text + ''.join(comment_texts)
    metadata = {'title': page['title'], 'id': page['id'], 'source': self.
        base_url.strip('/') + page['_links']['webui']}
    # 'when' (last-modified timestamp) is only present on some payloads.
    if 'version' in page and 'when' in page['version']:
        metadata['when'] = page['version']['when']
    return Document(page_content=text, metadata=metadata)
| null |
visit_structured_query
|
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'expr': structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
    str, dict]:
    """Translate a StructuredQuery into a (query, search-kwargs) pair.

    When a filter is present it is rendered via this visitor and passed
    under the ``expr`` key; otherwise the kwargs are empty.
    """
    query_filter = structured_query.filter
    kwargs = {} if query_filter is None else {'expr': query_filter.accept(
        self)}
    return structured_query.query, kwargs
| null |
start_run
|
"""To start a new run, auto generates the random suffix for name"""
if name.endswith('-%'):
rname = ''.join(random.choices(string.ascii_uppercase + string.digits, k=7)
)
name = name.replace('%', rname)
self.run = self.mlflow.MlflowClient().create_run(self.mlf_expid, run_name=
name, tags=tags)
|
def start_run(self, name: str, tags: Dict[str, str]) ->None:
    """Create a new MLflow run and store it on ``self.run``.

    A name ending in ``-%`` has its ``%`` expanded into a random
    7-character uppercase/digit suffix before the run is created.
    """
    if name.endswith('-%'):
        suffix = ''.join(random.choices(string.ascii_uppercase + string.
            digits, k=7))
        name = name.replace('%', suffix)
    self.run = self.mlflow.MlflowClient().create_run(self.mlf_expid,
        run_name=name, tags=tags)
|
To start a new run, auto generates the random suffix for name
|
full_key_prefix
|
return f'{self.key_prefix}:{self.session_id}'
|
@property
def full_key_prefix(self) ->str:
    """The key prefix joined with the session id, used to namespace keys."""
    return '{}:{}'.format(self.key_prefix, self.session_id)
| null |
_url_to_b64_string
|
b64_template = 'data:image/png;base64,{b64_string}'
try:
if _is_url(image_source):
response = requests.get(image_source)
response.raise_for_status()
encoded = base64.b64encode(response.content).decode('utf-8')
return b64_template.format(b64_string=encoded)
elif _is_b64(image_source):
return image_source
elif os.path.exists(image_source):
with open(image_source, 'rb') as f:
encoded = base64.b64encode(f.read()).decode('utf-8')
return b64_template.format(b64_string=encoded)
else:
raise ValueError(
'The provided string is not a valid URL, base64, or file path.')
except Exception as e:
raise ValueError(f'Unable to process the provided image source: {e}')
|
def _url_to_b64_string(image_source: str) ->str:
    """Normalize an image reference to a base64 PNG data URI.

    Accepts a URL (fetched over HTTP), an already-encoded base64 string
    (returned as-is), or a local file path. Any failure is re-raised as a
    ValueError describing the source.
    """
    template = 'data:image/png;base64,{b64_string}'
    try:
        if _is_url(image_source):
            resp = requests.get(image_source)
            resp.raise_for_status()
            payload = base64.b64encode(resp.content).decode('utf-8')
            return template.format(b64_string=payload)
        if _is_b64(image_source):
            return image_source
        if os.path.exists(image_source):
            with open(image_source, 'rb') as fh:
                payload = base64.b64encode(fh.read()).decode('utf-8')
            return template.format(b64_string=payload)
        # Deliberately raised inside the try so it is re-wrapped below.
        raise ValueError(
            'The provided string is not a valid URL, base64, or file path.')
    except Exception as e:
        raise ValueError(f'Unable to process the provided image source: {e}')
| null |
test_load_converts_dataframe_columns_to_document_metadata
|
loader = DataFrameLoader(sample_data_frame)
docs = loader.load()
for i, doc in enumerate(docs):
assert doc.metadata['author'] == sample_data_frame.loc[i, 'author']
assert doc.metadata['date'] == sample_data_frame.loc[i, 'date']
|
def test_load_converts_dataframe_columns_to_document_metadata(sample_data_frame
    : pd.DataFrame) ->None:
    """Every non-content column of the frame should land in doc metadata."""
    docs = DataFrameLoader(sample_data_frame).load()
    for row_idx, doc in enumerate(docs):
        expected_author = sample_data_frame.loc[row_idx, 'author']
        expected_date = sample_data_frame.loc[row_idx, 'date']
        assert doc.metadata['author'] == expected_author
        assert doc.metadata['date'] == expected_date
| null |
create_conversational_retrieval_agent
|
"""A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
verbose: Whether or not the final AgentExecutor should be verbose or not,
defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
"""
if remember_intermediate_steps:
memory: BaseMemory = AgentTokenBufferMemory(memory_key=memory_key, llm=
llm, max_token_limit=max_token_limit)
else:
memory = ConversationTokenBufferMemory(memory_key=memory_key,
return_messages=True, output_key='output', llm=llm, max_token_limit
=max_token_limit)
_system_message = system_message or _get_default_system_message()
prompt = OpenAIFunctionsAgent.create_prompt(system_message=_system_message,
extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)])
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
return AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=
verbose, return_intermediate_steps=remember_intermediate_steps, **kwargs)
|
def create_conversational_retrieval_agent(llm: BaseLanguageModel, tools:
    List[BaseTool], remember_intermediate_steps: bool=True, memory_key: str
    ='chat_history', system_message: Optional[SystemMessage]=None, verbose:
    bool=False, max_token_limit: int=2000, **kwargs: Any) ->AgentExecutor:
    """A convenience method for creating a conversational retrieval agent.
    Args:
        llm: The language model to use, should be ChatOpenAI
        tools: A list of tools the agent has access to
        remember_intermediate_steps: Whether the agent should remember intermediate
            steps or not. Intermediate steps refer to prior action/observation
            pairs from previous questions. The benefit of remembering these is if
            there is relevant information in there, the agent can use it to answer
            follow up questions. The downside is it will take up more tokens.
        memory_key: The name of the memory key in the prompt.
        system_message: The system message to use. By default, a basic one will
            be used.
        verbose: Whether or not the final AgentExecutor should be verbose or not,
            defaults to False.
        max_token_limit: The max number of tokens to keep around in memory.
            Defaults to 2000.
    Returns:
        An agent executor initialized appropriately
    """
    # Memory flavor follows remember_intermediate_steps: the agent-aware
    # buffer retains action/observation pairs, the plain buffer does not.
    if remember_intermediate_steps:
        memory: BaseMemory = AgentTokenBufferMemory(llm=llm, memory_key=
            memory_key, max_token_limit=max_token_limit)
    else:
        memory = ConversationTokenBufferMemory(llm=llm, memory_key=
            memory_key, max_token_limit=max_token_limit, return_messages=
            True, output_key='output')
    prompt = OpenAIFunctionsAgent.create_prompt(system_message=
        system_message or _get_default_system_message(),
        extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)])
    agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
    return AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=
        verbose, return_intermediate_steps=remember_intermediate_steps, **
        kwargs)
|
A convenience method for creating a conversational retrieval agent.
Args:
llm: The language model to use, should be ChatOpenAI
tools: A list of tools the agent has access to
remember_intermediate_steps: Whether the agent should remember intermediate
steps or not. Intermediate steps refer to prior action/observation
pairs from previous questions. The benefit of remembering these is if
there is relevant information in there, the agent can use it to answer
follow up questions. The downside is it will take up more tokens.
memory_key: The name of the memory key in the prompt.
system_message: The system message to use. By default, a basic one will
be used.
verbose: Whether or not the final AgentExecutor should be verbose or not,
defaults to False.
max_token_limit: The max number of tokens to keep around in memory.
Defaults to 2000.
Returns:
An agent executor initialized appropriately
|
default_params
|
return {'model': self.model, 'temperature': self.temperature, 'top_p': self
.top_p, 'top_k': self.top_k, 'max_tokens': self.max_tokens,
'repetition_penalty': self.repetition_penalty}
|
@property
def default_params(self) ->Dict[str, Any]:
    """Default sampling parameters forwarded with every model request."""
    params = dict(model=self.model, temperature=self.temperature, top_p=
        self.top_p, top_k=self.top_k, max_tokens=self.max_tokens,
        repetition_penalty=self.repetition_penalty)
    return params
| null |
add_performance_evaluation
|
"""
Add a performance evaluation item to the performance_evaluation list.
Args:
evaluation (str): The evaluation item to be added.
"""
self.performance_evaluation.append(evaluation)
|
def add_performance_evaluation(self, evaluation: str) ->None:
    """Append an item to the ``performance_evaluation`` list.

    Args:
        evaluation (str): The evaluation item to be added.
    """
    items = self.performance_evaluation
    items.append(evaluation)
|
Add a performance evaluation item to the performance_evaluation list.
Args:
evaluation (str): The evaluation item to be added.
|
_embed
|
"""Embed a single text entry to either passage or query type"""
response = self.client.get_req(model_name=self.model, payload={'input':
texts, 'model': model_type, 'encoding_format': 'float'})
response.raise_for_status()
result = response.json()
data = result['data']
if not isinstance(data, list):
raise ValueError(f'Expected a list of embeddings. Got: {data}')
embedding_list = [(res['embedding'], res['index']) for res in data]
return [x[0] for x in sorted(embedding_list, key=lambda x: x[1])]
|
def _embed(self, texts: List[str], model_type: Literal['passage', 'query']
) ->List[List[float]]:
"""Embed a single text entry to either passage or query type"""
response = self.client.get_req(model_name=self.model, payload={'input':
texts, 'model': model_type, 'encoding_format': 'float'})
response.raise_for_status()
result = response.json()
data = result['data']
if not isinstance(data, list):
raise ValueError(f'Expected a list of embeddings. Got: {data}')
embedding_list = [(res['embedding'], res['index']) for res in data]
return [x[0] for x in sorted(embedding_list, key=lambda x: x[1])]
|
Embed a single text entry to either passage or query type
|
json_to_md
|
"""Converts a JSON object to a markdown table."""
if len(json_contents) == 0:
return ''
output_md = ''
headers = json_contents[0].keys()
for header in headers:
header.replace('[', '.').replace(']', '')
if table_name:
header.replace(f'{table_name}.', '')
output_md += f'| {header} '
output_md += '|\n'
for row in json_contents:
for value in row.values():
output_md += f'| {value} '
output_md += '|\n'
return output_md
|
def json_to_md(json_contents: List[Dict[str, Union[str, int, float]]],
    table_name: Optional[str]=None) ->str:
    """Converts a JSON object to a markdown table.

    Args:
        json_contents: Rows of the table; header names are taken from the
            keys of the first row.
        table_name: When given, the ``"<table_name>."`` prefix is stripped
            from each header after bracket cleanup.

    Returns:
        A markdown table string, or ``''`` when there are no rows.
    """
    if len(json_contents) == 0:
        return ''
    output_md = ''
    headers = json_contents[0].keys()
    for header in headers:
        # Bug fix: str.replace returns a new string — the original code
        # discarded these results, so bracket cleanup and table-name
        # prefix stripping never took effect.
        header = header.replace('[', '.').replace(']', '')
        if table_name:
            header = header.replace(f'{table_name}.', '')
        output_md += f'| {header} '
    output_md += '|\n'
    for row in json_contents:
        for value in row.values():
            output_md += f'| {value} '
        output_md += '|\n'
    return output_md
|
Converts a JSON object to a markdown table.
|
test_similarity_search_with_score_by_vector_with_score_threshold
|
"""Test vector similarity with score by vector."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content
='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]:
Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_with_score_by_vector(query_vec, k=2,
score_threshold=0.2)
assert len(output) == 1
assert output[0][0] == Document(page_content='foo')
assert output[0][1] < 0.2
|
def test_similarity_search_with_score_by_vector_with_score_threshold() ->None:
    """Test vector similarity with score by vector."""
    texts = ['foo', 'bar', 'baz']
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
    mapping = docsearch.index_to_docstore_id
    expected = InMemoryDocstore({mapping[idx]: Document(page_content=text) for
        idx, text in enumerate(texts)})
    assert docsearch.docstore.__dict__ == expected.__dict__
    query_vec = FakeEmbeddings().embed_query(text='foo')
    hits = docsearch.similarity_search_with_score_by_vector(query_vec, k=2,
        score_threshold=0.2)
    assert len(hits) == 1
    top_doc, top_score = hits[0]
    assert top_doc == Document(page_content='foo')
    assert top_score < 0.2
|
Test vector similarity with score by vector.
|
create_table_if_not_exists
|
self._connection.execute(
f"""
CREATE TABLE IF NOT EXISTS {self._table}
(
rowid INTEGER PRIMARY KEY AUTOINCREMENT,
text TEXT,
metadata BLOB,
text_embedding BLOB
)
;
"""
)
self._connection.execute(
f"""
CREATE VIRTUAL TABLE IF NOT EXISTS vss_{self._table} USING vss0(
text_embedding({self.get_dimensionality()})
);
"""
)
self._connection.execute(
f"""
CREATE TRIGGER IF NOT EXISTS embed_text
AFTER INSERT ON {self._table}
BEGIN
INSERT INTO vss_{self._table}(rowid, text_embedding)
VALUES (new.rowid, new.text_embedding)
;
END;
"""
)
self._connection.commit()
|
def create_table_if_not_exists(self) ->None:
    """Idempotently create the backing table, its vss virtual table, and
    the trigger that mirrors inserted embeddings, then commit."""
    # Base table holding the raw text, metadata and the embedding blob.
    self._connection.execute(
        f"""
            CREATE TABLE IF NOT EXISTS {self._table}
            (
              rowid INTEGER PRIMARY KEY AUTOINCREMENT,
              text TEXT,
              metadata BLOB,
              text_embedding BLOB
            )
            ;
            """
    )
    # Companion vss0 virtual table that indexes embeddings of the
    # model's dimensionality for vector search.
    self._connection.execute(
        f"""
            CREATE VIRTUAL TABLE IF NOT EXISTS vss_{self._table} USING vss0(
                text_embedding({self.get_dimensionality()})
            );
            """
    )
    # Trigger keeps the vss index in sync: every insert into the base
    # table copies its embedding into the virtual table.
    self._connection.execute(
        f"""
            CREATE TRIGGER IF NOT EXISTS embed_text
            AFTER INSERT ON {self._table}
            BEGIN
                INSERT INTO vss_{self._table}(rowid, text_embedding)
                VALUES (new.rowid, new.text_embedding)
                ;
            END;
            """
    )
    self._connection.commit()
| null |
StreamlitCallbackHandler
|
"""Callback Handler that writes to a Streamlit app.
This CallbackHandler is geared towards
use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
inside a series of Streamlit expanders.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
The max number of completed LLM thought containers to show at once. When this
threshold is reached, a new thought will cause the oldest thoughts to be
collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether that
expander is expanded by default. Defaults to True.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to True.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
Returns
-------
A new StreamlitCallbackHandler instance.
Note that this is an "auto-updating" API: if the installed version of Streamlit
has a more recent StreamlitCallbackHandler implementation, an instance of that class
will be used.
"""
try:
from streamlit.external.langchain import StreamlitCallbackHandler as OfficialStreamlitCallbackHandler
return OfficialStreamlitCallbackHandler(parent_container,
max_thought_containers=max_thought_containers, expand_new_thoughts=
expand_new_thoughts, collapse_completed_thoughts=
collapse_completed_thoughts, thought_labeler=thought_labeler)
except ImportError:
return _InternalStreamlitCallbackHandler(parent_container,
max_thought_containers=max_thought_containers, expand_new_thoughts=
expand_new_thoughts, collapse_completed_thoughts=
collapse_completed_thoughts, thought_labeler=thought_labeler)
|
def StreamlitCallbackHandler(parent_container: DeltaGenerator, *,
    max_thought_containers: int=4, expand_new_thoughts: bool=True,
    collapse_completed_thoughts: bool=True, thought_labeler: Optional[
    LLMThoughtLabeler]=None) ->BaseCallbackHandler:
    """Callback Handler that writes to a Streamlit app.
    This CallbackHandler is geared towards
    use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
    inside a series of Streamlit expanders.
    Parameters
    ----------
    parent_container
        The `st.container` that will contain all the Streamlit elements that the
        Handler creates.
    max_thought_containers
        The max number of completed LLM thought containers to show at once. When this
        threshold is reached, a new thought will cause the oldest thoughts to be
        collapsed into a "History" expander. Defaults to 4.
    expand_new_thoughts
        Each LLM "thought" gets its own `st.expander`. This param controls whether that
        expander is expanded by default. Defaults to True.
    collapse_completed_thoughts
        If True, LLM thought expanders will be collapsed when completed.
        Defaults to True.
    thought_labeler
        An optional custom LLMThoughtLabeler instance. If unspecified, the handler
        will use the default thought labeling logic. Defaults to None.
    Returns
    -------
    A new StreamlitCallbackHandler instance.
    Note that this is an "auto-updating" API: if the installed version of Streamlit
    has a more recent StreamlitCallbackHandler implementation, an instance of that class
    will be used.
    """
    # Both the official and internal handlers take the same keyword set.
    handler_kwargs = dict(max_thought_containers=max_thought_containers,
        expand_new_thoughts=expand_new_thoughts,
        collapse_completed_thoughts=collapse_completed_thoughts,
        thought_labeler=thought_labeler)
    try:
        # Prefer Streamlit's bundled implementation when available.
        from streamlit.external.langchain import StreamlitCallbackHandler as OfficialStreamlitCallbackHandler
        return OfficialStreamlitCallbackHandler(parent_container, **
            handler_kwargs)
    except ImportError:
        return _InternalStreamlitCallbackHandler(parent_container, **
            handler_kwargs)
|
Callback Handler that writes to a Streamlit app.
This CallbackHandler is geared towards
use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
inside a series of Streamlit expanders.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
The max number of completed LLM thought containers to show at once. When this
threshold is reached, a new thought will cause the oldest thoughts to be
collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether that
expander is expanded by default. Defaults to True.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to True.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
Returns
-------
A new StreamlitCallbackHandler instance.
Note that this is an "auto-updating" API: if the installed version of Streamlit
has a more recent StreamlitCallbackHandler implementation, an instance of that class
will be used.
|
similarity_search_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filters: Filters to apply to the query. Defaults to None.
Returns:
List of Documents most similar to the embedding.
"""
docs_with_score = self.similarity_search_by_vector_with_score(embedding=
embedding, k=k, filters=filters, **kwargs)
return [doc for doc, _ in docs_with_score]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
    filters: Optional[Any]=None, **kwargs: Any) ->List[Document]:
    """Return docs most similar to embedding vector.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filters: Filters to apply to the query. Defaults to None.
    Returns:
        List of Documents most similar to the embedding.
    """
    scored = self.similarity_search_by_vector_with_score(embedding=
        embedding, k=k, filters=filters, **kwargs)
    return [document for document, _score in scored]
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filters: Filters to apply to the query. Defaults to None.
Returns:
List of Documents most similar to the embedding.
|
search_issues_and_prs
|
"""
Searches issues and pull requests in the repository.
Parameters:
query(str): The search query
Returns:
str: A string containing the first 5 issues and pull requests
"""
search_result = self.github.search_issues(query, repo=self.github_repository)
max_items = min(5, len(search_result))
results = [f'Top {max_items} results:']
for issue in search_result[:max_items]:
results.append(
f'Title: {issue.title}, Number: {issue.number}, State: {issue.state}')
return '\n'.join(results)
|
def search_issues_and_prs(self, query: str) ->str:
    """
    Searches issues and pull requests in the repository.

    Parameters:
        query(str): The search query

    Returns:
        str: A string containing the first 5 issues and pull requests
    """
    matches = self.github.search_issues(query, repo=self.github_repository)
    # Cap the report at the first five hits.
    shown = min(5, len(matches))
    lines = [f'Top {shown} results:']
    for item in matches[:shown]:
        lines.append(
            f'Title: {item.title}, Number: {item.number}, State: {item.state}'
            )
    return '\n'.join(lines)
|
Searches issues and pull requests in the repository.
Parameters:
query(str): The search query
Returns:
str: A string containing the first 5 issues and pull requests
|
_convert_row_as_tuple
|
return tuple(map(str, row.asDict().values()))
|
def _convert_row_as_tuple(self, row: Row) ->tuple:
    """Convert a ``Row`` into a tuple of its values rendered as strings
    (values are taken from ``row.asDict()``)."""
    values = row.asDict().values()
    return tuple(str(value) for value in values)
| null |
run
|
"""Run the Semantic Scholar API."""
results = self.semanticscholar_search(query, limit=self.load_max_docs,
fields=self.returned_fields)
documents = []
for item in results[:self.top_k_results]:
authors = ', '.join(author['name'] for author in getattr(item,
'authors', []))
documents.append(
f"""Published year: {getattr(item, 'year', None)}
Title: {getattr(item, 'title', None)}
Authors: {authors}
Astract: {getattr(item, 'abstract', None)}
"""
)
if documents:
return '\n\n'.join(documents)[:self.doc_content_chars_max]
else:
return 'No results found.'
|
def run(self, query: str) ->str:
    """Run the Semantic Scholar API search and format the results.

    Args:
        query: Free-text search query.

    Returns:
        Up to ``top_k_results`` papers rendered as a single string
        (truncated to ``doc_content_chars_max`` characters), or
        ``'No results found.'`` when the search returns nothing.
    """
    results = self.semanticscholar_search(query, limit=self.load_max_docs,
        fields=self.returned_fields)
    documents = []
    for item in results[:self.top_k_results]:
        # `or []` guards against an explicit `authors=None` on the record,
        # which getattr's default would not catch.
        authors = ', '.join(author['name'] for author in (getattr(item,
            'authors', None) or []))
        # Fixed typo in the user-visible label: "Astract" -> "Abstract".
        documents.append(
            f"""Published year: {getattr(item, 'year', None)}
Title: {getattr(item, 'title', None)}
Authors: {authors}
Abstract: {getattr(item, 'abstract', None)}
"""
            )
    if documents:
        return '\n\n'.join(documents)[:self.doc_content_chars_max]
    else:
        return 'No results found.'
|
Run the Semantic Scholar API.
|
_get_default_llm_chain_factory
|
"""Returns a default LLMChain factory."""
return partial(_get_default_llm_chain, prompt)
|
def _get_default_llm_chain_factory(prompt: BasePromptTemplate) ->Callable[[
    ], Any]:
    """Return a zero-argument factory that builds the default LLMChain
    for the given prompt."""
    factory = partial(_get_default_llm_chain, prompt)
    return factory
|
Returns a default LLMChain factory.
|
test_embedding_documents_2
|
documents = ['foo', 'bar']
embedding = ErnieEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 384
assert len(output[1]) == 384
|
def test_embedding_documents_2() ->None:
    """Embedding two documents yields two 384-dimensional vectors."""
    docs = ['foo', 'bar']
    vectors = ErnieEmbeddings().embed_documents(docs)
    assert len(vectors) == 2
    for vector in vectors:
        assert len(vector) == 384
| null |
from_documents
|
"""
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required
"Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs['connection_string'] = connection_string
return cls.from_texts(texts=texts, pre_delete_collection=
pre_delete_collection, embedding=embedding, distance_strategy=
distance_strategy, metadatas=metadatas, ids=ids, collection_name=
collection_name, **kwargs)
|
@classmethod
def from_documents(cls: Type[PGVector], documents: List[Document],
    embedding: Embeddings, collection_name: str=
    _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy
    =DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]]=None,
    pre_delete_collection: bool=False, **kwargs: Any) ->PGVector:
    """Return a VectorStore initialized from documents and embeddings.

    A Postgres connection string is required: either pass it as a
    parameter or set the PGVECTOR_CONNECTION_STRING environment variable.
    """
    # Split documents into raw texts and their metadata.
    texts = []
    metadatas = []
    for document in documents:
        texts.append(document.page_content)
        metadatas.append(document.metadata)
    # Resolve the connection string and thread it through via kwargs.
    kwargs['connection_string'] = cls.get_connection_string(kwargs)
    return cls.from_texts(texts=texts, pre_delete_collection=
        pre_delete_collection, embedding=embedding, distance_strategy=
        distance_strategy, metadatas=metadatas, ids=ids, collection_name=
        collection_name, **kwargs)
|
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required
"Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
|
_run
|
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
html_content = page.content()
return self.scrape_page(page, html_content, absolute_urls)
|
def _run(self, absolute_urls: bool=False, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Use the tool: scrape the browser's current page.

    Raises:
        ValueError: if no synchronous browser was supplied.
    """
    browser = self.sync_browser
    if browser is None:
        raise ValueError(f'Synchronous browser not provided to {self.name}')
    current_page = get_current_page(browser)
    return self.scrape_page(current_page, current_page.content(),
        absolute_urls)
|
Use the tool.
|
similarity_search_with_relevance_scores
|
"""Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
score_threshold = kwargs.pop('score_threshold', None)
docs_and_similarities = self._similarity_search_with_relevance_scores(query,
k=k, **kwargs)
if any(similarity < 0.0 or similarity > 1.0 for _, similarity in
docs_and_similarities):
warnings.warn(
f'Relevance scores must be between 0 and 1, got {docs_and_similarities}'
)
if score_threshold is not None:
docs_and_similarities = [(doc, similarity) for doc, similarity in
docs_and_similarities if similarity >= score_threshold]
if len(docs_and_similarities) == 0:
warnings.warn(
f'No relevant docs were retrieved using the relevance score threshold {score_threshold}'
)
return docs_and_similarities
|
def similarity_search_with_relevance_scores(self, query: str, k: int=4, **
    kwargs: Any) ->List[Tuple[Document, float]]:
    """Return docs and relevance scores in the range [0, 1].

    0 is dissimilar, 1 is most similar.

    Args:
        query: input text
        k: Number of Documents to return. Defaults to 4.
        **kwargs: kwargs to be passed to similarity search. Should include:
            score_threshold: Optional, a floating point value between 0 to 1
                to filter the resulting set of retrieved docs

    Returns:
        List of Tuples of (doc, similarity_score)
    """
    threshold = kwargs.pop('score_threshold', None)
    scored = self._similarity_search_with_relevance_scores(query, k=k,
        **kwargs)
    # Scores outside [0, 1] indicate a misconfigured relevance function.
    if any(not 0.0 <= score <= 1.0 for _, score in scored):
        warnings.warn(
            f'Relevance scores must be between 0 and 1, got {scored}'
            )
    if threshold is not None:
        scored = [pair for pair in scored if pair[1] >= threshold]
        if not scored:
            warnings.warn(
                f'No relevant docs were retrieved using the relevance score threshold {threshold}'
                )
    return scored
|
Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
|
evaluation_name
|
return 'correctness'
|
@property
def evaluation_name(self) ->str:
    """Name under which this evaluator's results are reported."""
    return 'correctness'
| null |
test_batch
|
"""Test batch tokens from ChatMistralAI"""
llm = ChatMistralAI()
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
|
def test_batch() ->None:
    """Test batch tokens from ChatMistralAI"""
    chat = ChatMistralAI()
    responses = chat.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for response in responses:
        assert isinstance(response.content, str)
|
Test batch tokens from ChatMistralAI
|
_import_google_search_tool_GoogleSearchResults
|
from langchain_community.tools.google_search.tool import GoogleSearchResults
return GoogleSearchResults
|
def _import_google_search_tool_GoogleSearchResults() ->Any:
    """Lazily import the GoogleSearchResults tool class and return it."""
    from langchain_community.tools.google_search.tool import (
        GoogleSearchResults)
    return GoogleSearchResults
| null |
generate_img_summaries
|
"""
Generate summaries for images
:param img_base64_list: Base64 encoded images
:return: List of image summaries and processed images
"""
image_summaries = []
processed_images = []
prompt = (
'You are an assistant tasked with summarizing images for retrieval. These summaries will be embedded and used to retrieve the raw image. Give a concise summary of the image that is well optimized for retrieval.'
)
for i, base64_image in enumerate(img_base64_list):
try:
image_summaries.append(image_summarize(base64_image, prompt))
processed_images.append(base64_image)
except Exception as e:
print(f'Error with image {i + 1}: {e}')
return image_summaries, processed_images
|
def generate_img_summaries(img_base64_list):
    """
    Generate summaries for images
    :param img_base64_list: Base64 encoded images
    :return: List of image summaries and processed images
    """
    prompt = (
        'You are an assistant tasked with summarizing images for retrieval. These summaries will be embedded and used to retrieve the raw image. Give a concise summary of the image that is well optimized for retrieval.'
        )
    summaries = []
    kept_images = []
    for index, encoded in enumerate(img_base64_list):
        try:
            summary = image_summarize(encoded, prompt)
        except Exception as e:
            # Best-effort: report and skip images the model fails on.
            print(f'Error with image {index + 1}: {e}')
            continue
        summaries.append(summary)
        kept_images.append(encoded)
    return summaries, kept_images
|
Generate summaries for images
:param img_base64_list: Base64 encoded images
:return: List of image summaries and processed images
|
visit_Lambda
|
visitor = NonLocals()
visitor.visit(node)
self.nonlocals.update(visitor.loads - visitor.stores)
|
def visit_Lambda(self, node: ast.Lambda) ->Any:
    """Record names the lambda reads but does not bind (its free names)."""
    scope_visitor = NonLocals()
    scope_visitor.visit(node)
    self.nonlocals.update(scope_visitor.loads - scope_visitor.stores)
| null |
load
|
"""Load file."""
from urllib.request import urlopen
elements = urlopen(self.file_path)
text = '\n\n'.join([str(el.decode('utf-8-sig')) for el in elements])
metadata = {'source': self.file_path}
return [Document(page_content=text, metadata=metadata)]
|
def load(self) ->List[Document]:
    """Fetch ``self.file_path`` over a URL and wrap it in one Document."""
    from urllib.request import urlopen
    response = urlopen(self.file_path)
    # Decode each line (utf-8-sig strips any BOM) and join with blank lines.
    lines = [line.decode('utf-8-sig') for line in response]
    text = '\n\n'.join(lines)
    return [Document(page_content=text, metadata={'source': self.file_path})]
|
Load file.
|
update
|
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
f'RedisSemanticCache only supports caching of normal LLM generations, got {type(gen)}'
)
llm_cache = self._get_llm_cache(llm_string)
metadata = {'llm_string': llm_string, 'prompt': prompt, 'return_val': dumps
([g for g in return_val])}
llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
|
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) ->None:
    """Update cache based on prompt and llm_string."""
    # Only plain Generation objects can be serialized into this cache.
    for generation in return_val:
        if isinstance(generation, Generation):
            continue
        raise ValueError(
            f'RedisSemanticCache only supports caching of normal LLM generations, got {type(generation)}'
            )
    cache = self._get_llm_cache(llm_string)
    metadata = {'llm_string': llm_string, 'prompt': prompt, 'return_val':
        dumps(list(return_val))}
    cache.add_texts(texts=[prompt], metadatas=[metadata])
|
Update cache based on prompt and llm_string.
|
_construct_scratchpad
|
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts = ''
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f'\n{self.observation_prefix}{observation}\n{self.llm_prefix}'
return thoughts
|
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction,
    str]]) ->Union[str, List[BaseMessage]]:
    """Construct the scratchpad that lets the agent continue its thought process."""
    parts = []
    for action, observation in intermediate_steps:
        parts.append(action.log)
        parts.append(
            f'\n{self.observation_prefix}{observation}\n{self.llm_prefix}')
    return ''.join(parts)
|
Construct the scratchpad that lets the agent continue its thought process.
|
from_texts
|
"""Construct ScaNN wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the ScaNN database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import ScaNN
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
scann = ScaNN.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=
ids, **kwargs)
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, ids: Optional[List[str]]=None, **kwargs: Any
    ) ->ScaNN:
    """Construct ScaNN wrapper from raw documents.

    This is a user friendly interface that:
        1. Embeds documents.
        2. Creates an in memory docstore
        3. Initializes the ScaNN database

    This is intended to be a quick way to get started.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import ScaNN
            from langchain_community.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            scann = ScaNN.from_texts(texts, embeddings)
    """
    # Embed all texts up front, then hand everything to the private builder.
    vectors = embedding.embed_documents(texts)
    return cls.__from(texts, vectors, embedding, metadatas=metadatas, ids=
        ids, **kwargs)
|
Construct ScaNN wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the ScaNN database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import ScaNN
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
scann = ScaNN.from_texts(texts, embeddings)
|
_get_repo_path
|
ref_str = ref if ref is not None else ''
hashed = hashlib.sha256(f'{gitstring}:{ref_str}'.encode('utf-8')).hexdigest()[:
8]
removed_protocol = gitstring.split('://')[-1]
removed_basename = re.split('[/:]', removed_protocol, 1)[-1]
removed_extras = removed_basename.split('#')[0]
foldername = re.sub('[^a-zA-Z0-9_]', '_', removed_extras)
directory_name = f'{foldername}_{hashed}'
return repo_dir / directory_name
|
def _get_repo_path(gitstring: str, ref: Optional[str], repo_dir: Path) ->Path:
ref_str = ref if ref is not None else ''
hashed = hashlib.sha256(f'{gitstring}:{ref_str}'.encode('utf-8')
).hexdigest()[:8]
removed_protocol = gitstring.split('://')[-1]
removed_basename = re.split('[/:]', removed_protocol, 1)[-1]
removed_extras = removed_basename.split('#')[0]
foldername = re.sub('[^a-zA-Z0-9_]', '_', removed_extras)
directory_name = f'{foldername}_{hashed}'
return repo_dir / directory_name
| null |
save_progress
|
"""
This function should be called to save the state of the learned policy model.
"""
self.active_policy.save()
|
def save_progress(self) ->None:
    """Persist the state of the learned policy model.

    Call this to save the currently active policy.
    """
    policy = self.active_policy
    policy.save()
|
This function should be called to save the state of the learned policy model.
|
get_authorized_teams
|
"""Get all teams for the user."""
url = f'{DEFAULT_URL}/team'
response = requests.get(url, headers=self.get_headers())
data = response.json()
parsed_teams = self.attempt_parse_teams(data)
return parsed_teams
|
def get_authorized_teams(self) ->Dict[Any, Any]:
    """Get all teams for the user."""
    response = requests.get(f'{DEFAULT_URL}/team', headers=self.get_headers())
    payload = response.json()
    return self.attempt_parse_teams(payload)
|
Get all teams for the user.
|
test_simple
|
"""
Given a simple math word problem here we are test and illustrate the
the data structures that are produced by the CPAL chain.
"""
narrative_input = (
'jan has three times the number of pets as marcia.marcia has two more pets than cindy.If cindy has ten pets, how many pets does jan have?'
)
llm = OpenAI(temperature=0, max_tokens=512)
cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
output = cpal_chain(narrative_input)
data = output[Constant.chain_data.value]
expected_output = {'causal_operations': {'attribute': 'pet_count',
'entities': [{'code': 'pass', 'depends_on': [], 'name': 'cindy',
'value': 10.0}, {'code': 'marcia.value = cindy.value + 2', 'depends_on':
['cindy'], 'name': 'marcia', 'value': 12.0}, {'code':
'jan.value = marcia.value * 3', 'depends_on': ['marcia'], 'name': 'jan',
'value': 36.0}]}, 'intervention': {'entity_settings': [{'attribute':
'pet_count', 'name': 'cindy', 'value': 10.0}], 'system_settings': None},
'query': {'expression': "SELECT name, value FROM df WHERE name = 'jan'",
'llm_error_msg': '', 'question': 'how many pets does jan have?'}}
self.assertDictEqual(data.dict(), expected_output)
"""
Illustrate the query model's result table as a printed pandas dataframe
>>> data._outcome_table
name code value depends_on
0 cindy pass 10.0 []
1 marcia marcia.value = cindy.value + 2 12.0 [cindy]
2 jan jan.value = marcia.value * 3 36.0 [marcia]
"""
expected_output = {'code': {(0): 'pass', (1):
'marcia.value = cindy.value + 2', (2): 'jan.value = marcia.value * 3'},
'depends_on': {(0): [], (1): ['cindy'], (2): ['marcia']}, 'name': {(0):
'cindy', (1): 'marcia', (2): 'jan'}, 'value': {(0): 10.0, (1): 12.0, (2
): 36.0}}
self.assertDictEqual(data._outcome_table.to_dict(), expected_output)
expected_output = {'name': {(0): 'jan'}, 'value': {(0): 36.0}}
self.assertDictEqual(data.query._result_table.to_dict(), expected_output)
df = data.query._result_table
expr = "name == 'jan'"
answer = df.query(expr).iloc[0]['value']
self.assertEqual(float(answer), 36.0)
|
def test_simple(self) ->None:
    """
    Given a simple math word problem, test and illustrate the
    data structures that are produced by the CPAL chain.
    """
    narrative_input = (
        'jan has three times the number of pets as marcia.marcia has two more pets than cindy.If cindy has ten pets, how many pets does jan have?'
        )
    llm = OpenAI(temperature=0, max_tokens=512)
    cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
    output = cpal_chain(narrative_input)
    data = output[Constant.chain_data.value]
    # Full chain payload: causal graph, intervention settings, and query.
    expected_output = {'causal_operations': {'attribute': 'pet_count',
        'entities': [{'code': 'pass', 'depends_on': [], 'name': 'cindy',
        'value': 10.0}, {'code': 'marcia.value = cindy.value + 2',
        'depends_on': ['cindy'], 'name': 'marcia', 'value': 12.0}, {'code':
        'jan.value = marcia.value * 3', 'depends_on': ['marcia'], 'name':
        'jan', 'value': 36.0}]}, 'intervention': {'entity_settings': [{
        'attribute': 'pet_count', 'name': 'cindy', 'value': 10.0}],
        'system_settings': None}, 'query': {'expression':
        "SELECT name, value FROM df WHERE name = 'jan'", 'llm_error_msg':
        '', 'question': 'how many pets does jan have?'}}
    self.assertDictEqual(data.dict(), expected_output)
    """
    Illustrate the query model's result table as a printed pandas dataframe
    >>> data._outcome_table
    name code value depends_on
    0 cindy pass 10.0 []
    1 marcia marcia.value = cindy.value + 2 12.0 [cindy]
    2 jan jan.value = marcia.value * 3 36.0 [marcia]
    """
    # Outcome table: one row per entity with its code and computed value.
    expected_output = {'code': {(0): 'pass', (1):
        'marcia.value = cindy.value + 2', (2):
        'jan.value = marcia.value * 3'}, 'depends_on': {(0): [], (1): [
        'cindy'], (2): ['marcia']}, 'name': {(0): 'cindy', (1): 'marcia', (
        2): 'jan'}, 'value': {(0): 10.0, (1): 12.0, (2): 36.0}}
    self.assertDictEqual(data._outcome_table.to_dict(), expected_output)
    expected_output = {'name': {(0): 'jan'}, 'value': {(0): 36.0}}
    self.assertDictEqual(data.query._result_table.to_dict(), expected_output)
    df = data.query._result_table
    expr = "name == 'jan'"
    answer = df.query(expr).iloc[0]['value']
    self.assertEqual(float(answer), 36.0)
|
Given a simple math word problem, we test and illustrate the
data structures that are produced by the CPAL chain.
|
parse_ai_message_to_openai_tool_action
|
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
raise TypeError(f'Expected an AI message got {type(message)}')
if not message.additional_kwargs.get('tool_calls'):
return AgentFinish(return_values={'output': message.content}, log=str(
message.content))
actions: List = []
for tool_call in message.additional_kwargs['tool_calls']:
function = tool_call['function']
function_name = function['name']
try:
_tool_input = json.loads(function['arguments'] or '{}')
except JSONDecodeError:
raise OutputParserException(
f'Could not parse tool input: {function} because the `arguments` is not valid JSON.'
)
if '__arg1' in _tool_input:
tool_input = _tool_input['__arg1']
else:
tool_input = _tool_input
content_msg = (f'responded: {message.content}\n' if message.content else
'\n')
log = f'\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n'
actions.append(OpenAIToolAgentAction(tool=function_name, tool_input=
tool_input, log=log, message_log=[message], tool_call_id=tool_call[
'id']))
return actions
|
def parse_ai_message_to_openai_tool_action(message: BaseMessage) ->Union[
    List[AgentAction], AgentFinish]:
    """Parse an AI message potentially containing tool_calls."""
    if not isinstance(message, AIMessage):
        raise TypeError(f'Expected an AI message got {type(message)}')
    tool_calls = message.additional_kwargs.get('tool_calls')
    if not tool_calls:
        # No tool calls: the model is done, so its content is the answer.
        return AgentFinish(return_values={'output': message.content}, log=
            str(message.content))
    actions: List = []
    for call in tool_calls:
        function = call['function']
        name = function['name']
        try:
            parsed_args = json.loads(function['arguments'] or '{}')
        except JSONDecodeError:
            raise OutputParserException(
                f'Could not parse tool input: {function} because the `arguments` is not valid JSON.'
                )
        # Single-argument tools receive the bare value, not the dict.
        if '__arg1' in parsed_args:
            tool_input = parsed_args['__arg1']
        else:
            tool_input = parsed_args
        content_msg = (f'responded: {message.content}\n' if message.
            content else '\n')
        log = f'\nInvoking: `{name}` with `{tool_input}`\n{content_msg}\n'
        actions.append(OpenAIToolAgentAction(tool=name, tool_input=
            tool_input, log=log, message_log=[message], tool_call_id=call[
            'id']))
    return actions
|
Parse an AI message potentially containing tool_calls.
|
test_get_recommended_games
|
"""Test for getting recommended games on Steam"""
steam = SteamWebAPIWrapper()
output = steam.run('get_recommended_games', '76561198362745711')
output = ast.literal_eval(output)
assert len(output) == 5
|
def test_get_recommended_games() ->None:
    """Test for getting recommended games on Steam"""
    wrapper = SteamWebAPIWrapper()
    raw = wrapper.run('get_recommended_games', '76561198362745711')
    games = ast.literal_eval(raw)
    assert len(games) == 5
|
Test for getting recommended games on Steam
|
__init__
|
"""
Initializes a new instance of the WikipediaLoader class.
Args:
query (str): The query string to search on Wikipedia.
lang (str, optional): The language code for the Wikipedia language edition.
Defaults to "en".
load_max_docs (int, optional): The maximum number of documents to load.
Defaults to 100.
load_all_available_meta (bool, optional): Indicates whether to load all
available metadata for each document. Defaults to False.
doc_content_chars_max (int, optional): The maximum number of characters
for the document content. Defaults to 4000.
"""
self.query = query
self.lang = lang
self.load_max_docs = load_max_docs
self.load_all_available_meta = load_all_available_meta
self.doc_content_chars_max = doc_content_chars_max
|
def __init__(self, query: str, lang: str='en', load_max_docs: Optional[int]
    =25, load_all_available_meta: Optional[bool]=False,
    doc_content_chars_max: Optional[int]=4000):
    """
    Initializes a new instance of the WikipediaLoader class.

    Args:
        query (str): The query string to search on Wikipedia.
        lang (str, optional): The language code for the Wikipedia language edition.
            Defaults to "en".
        load_max_docs (int, optional): The maximum number of documents to load.
            Defaults to 25.
        load_all_available_meta (bool, optional): Indicates whether to load all
            available metadata for each document. Defaults to False.
        doc_content_chars_max (int, optional): The maximum number of characters
            for the document content. Defaults to 4000.
    """
    self.query = query
    self.lang = lang
    self.load_max_docs = load_max_docs
    self.load_all_available_meta = load_all_available_meta
    self.doc_content_chars_max = doc_content_chars_max
|
Initializes a new instance of the WikipediaLoader class.
Args:
query (str): The query string to search on Wikipedia.
lang (str, optional): The language code for the Wikipedia language edition.
Defaults to "en".
load_max_docs (int, optional): The maximum number of documents to load.
Defaults to 100.
load_all_available_meta (bool, optional): Indicates whether to load all
available metadata for each document. Defaults to False.
doc_content_chars_max (int, optional): The maximum number of characters
for the document content. Defaults to 4000.
|
resize_base64_image
|
"""
Resize an image encoded as a Base64 string
:param base64_string: Base64 string
:param size: Image size
:return: Re-sized Base64 string
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode('utf-8')
|
def resize_base64_image(base64_string, size=(128, 128)):
    """
    Resize an image encoded as a Base64 string
    :param base64_string: Base64 string
    :param size: Image size
    :return: Re-sized Base64 string
    """
    image = Image.open(io.BytesIO(base64.b64decode(base64_string)))
    # Re-encode in the source image's own format after resizing.
    output = io.BytesIO()
    image.resize(size, Image.LANCZOS).save(output, format=image.format)
    return base64.b64encode(output.getvalue()).decode('utf-8')
|
Resize an image encoded as a Base64 string
:param base64_string: Base64 string
:param size: Image size
:return: Re-sized Base64 string
|
test_table_info
|
"""Test that table info is constructed properly."""
engine = create_engine('duckdb:///:memory:')
metadata_obj.create_all(engine)
db = SQLDatabase(engine, schema='schema_a', metadata=metadata_obj)
output = db.table_info
expected_output = """
CREATE TABLE schema_a."user" (
user_id INTEGER NOT NULL,
user_name VARCHAR NOT NULL,
PRIMARY KEY (user_id)
)
/*
3 rows from user table:
user_id user_name
*/
"""
assert sorted(' '.join(output.split())) == sorted(' '.join(expected_output.
split()))
|
def test_table_info() ->None:
    """Test that table info is constructed properly."""
    engine = create_engine('duckdb:///:memory:')
    metadata_obj.create_all(engine)
    db = SQLDatabase(engine, schema='schema_a', metadata=metadata_obj)
    actual = db.table_info
    expected_output = """
CREATE TABLE schema_a."user" (
user_id INTEGER NOT NULL,
user_name VARCHAR NOT NULL,
PRIMARY KEY (user_id)
)
/*
3 rows from user table:
user_id user_name
*/
"""
    # Compare whitespace-insensitively (character multiset of the
    # normalized strings), same as the original assertion.
    def normalize(text: str) ->list:
        return sorted(' '.join(text.split()))
    assert normalize(actual) == normalize(expected_output)
|
Test that table info is constructed properly.
|
load
|
"""Load documents."""
try:
from google.cloud import storage
except ImportError:
raise ImportError(
'Could not import google-cloud-storage python package. Please install it with `pip install google-cloud-storage`.'
)
client = storage.Client(project=self.project_name, client_info=
get_client_info(module='google-cloud-storage'))
docs = []
for blob in client.list_blobs(self.bucket, prefix=self.prefix):
if blob.name.endswith('/'):
continue
loader = GCSFileLoader(self.project_name, self.bucket, blob.name,
loader_func=self._loader_func)
docs.extend(loader.load())
return docs
|
def load(self) ->List[Document]:
    """Load a Document from every non-directory blob under the prefix."""
    try:
        from google.cloud import storage
    except ImportError:
        raise ImportError(
            'Could not import google-cloud-storage python package. Please install it with `pip install google-cloud-storage`.'
            )
    client = storage.Client(project=self.project_name, client_info=
        get_client_info(module='google-cloud-storage'))
    documents = []
    for blob in client.list_blobs(self.bucket, prefix=self.prefix):
        # Skip "directory" placeholder objects.
        if blob.name.endswith('/'):
            continue
        file_loader = GCSFileLoader(self.project_name, self.bucket, blob.
            name, loader_func=self._loader_func)
        documents += file_loader.load()
    return documents
|
Load documents.
|
_parse_response
|
result = response[0]
if self.return_type == 'url':
return result['audio_resource_url']
else:
self._download_wav(result['audio_resource_url'], 'audio.wav')
return 'audio.wav'
|
def _parse_response(self, response: list) ->str:
result = response[0]
if self.return_type == 'url':
return result['audio_resource_url']
else:
self._download_wav(result['audio_resource_url'], 'audio.wav')
return 'audio.wav'
| null |
_import_clarifai
|
from langchain_community.vectorstores.clarifai import Clarifai
return Clarifai
|
def _import_clarifai() ->Any:
    """Lazily import the Clarifai vector store class and return it."""
    from langchain_community.vectorstores.clarifai import Clarifai
    return Clarifai
| null |
_import_epsilla
|
from langchain_community.vectorstores.epsilla import Epsilla
return Epsilla
|
def _import_epsilla() ->Any:
    """Lazily import the Epsilla vector store class and return it."""
    from langchain_community.vectorstores.epsilla import Epsilla
    return Epsilla
| null |
parse_result
|
res = super().parse_result(result, partial=partial)
if partial and res is None:
return None
return res.get(self.key_name) if partial else res[self.key_name]
|
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
    """Parse via the parent class, then project out ``self.key_name``.

    In partial mode a missing parse or missing key yields ``None``;
    otherwise a missing key raises ``KeyError``.
    """
    parsed = super().parse_result(result, partial=partial)
    if partial:
        if parsed is None:
            return None
        return parsed.get(self.key_name)
    return parsed[self.key_name]
| null |
_parse_results
|
return ' '.join(self._parse_snippets(results))
|
def _parse_results(self, results: dict) ->str:
return ' '.join(self._parse_snippets(results))
| null |
test_opensearch_painless_scripting
|
"""Test end to end indexing and search using Painless Scripting Search."""
pre_filter_val = {'bool': {'filter': {'term': {'text': 'baz'}}}}
docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL, is_appx_search=False)
output = docsearch.similarity_search('foo', k=1, search_type=
PAINLESS_SCRIPTING_SEARCH, pre_filter=pre_filter_val)
assert output == [Document(page_content='baz')]
|
def test_opensearch_painless_scripting() ->None:
    """Test end to end indexing and search using Painless Scripting Search."""
    pre_filter_val = {'bool': {'filter': {'term': {'text': 'baz'}}}}
    store = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL, is_appx_search=False)
    hits = store.similarity_search('foo', k=1, search_type=
        PAINLESS_SCRIPTING_SEARCH, pre_filter=pre_filter_val)
    assert hits == [Document(page_content='baz')]
|
Test end to end indexing and search using Painless Scripting Search.
|
pairwise_embedding_distance_eval_chain
|
"""Create a PairwiseEmbeddingDistanceEvalChain."""
return PairwiseEmbeddingDistanceEvalChain()
|
@pytest.fixture
def pairwise_embedding_distance_eval_chain(
    ) ->PairwiseEmbeddingDistanceEvalChain:
    """Create a PairwiseEmbeddingDistanceEvalChain."""
    # Fresh chain per test, using the chain's default settings.
    return PairwiseEmbeddingDistanceEvalChain()
|
Create a PairwiseEmbeddingDistanceEvalChain.
|
_import_typesense
|
from langchain_community.vectorstores.typesense import Typesense
return Typesense
|
def _import_typesense() ->Any:
    """Deferred import hook: return the Typesense vector store class."""
    from langchain_community.vectorstores.typesense import Typesense as _cls
    return _cls
| null |
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Collect unrecognized keyword arguments into ``model_kwargs``.

    Raises:
        ValueError: if a key appears both as a top-level value and inside
            an explicitly supplied ``model_kwargs``.
    """
    known_aliases = {field.alias for field in cls.__fields__.values()}
    model_kwargs = values.get('model_kwargs', {})
    for field_name in list(values):
        if field_name in known_aliases:
            continue
        if field_name in model_kwargs:
            raise ValueError(f'Found {field_name} supplied twice.')
        model_kwargs[field_name] = values.pop(field_name)
    values['model_kwargs'] = model_kwargs
    return values
|
Build extra kwargs from additional params that were passed in.
|
_default_params
|
"""Get the default parameters for calling OpenAI API."""
return self.model_kwargs
|
@property
def _default_params(self) ->Dict[str, Any]:
    """Get the default parameters for calling OpenAI API.

    Returns:
        The user-supplied ``model_kwargs`` mapping, passed through verbatim.
    """
    return self.model_kwargs
|
Get the default parameters for calling OpenAI API.
|
execute
|
"""Figure out next browser command to run.
Args:
url: URL of the site currently on.
browser_content: Content of the page as currently displayed by the browser.
Returns:
Next browser command to run.
Example:
.. code-block:: python
browser_content = "...."
llm_command = natbot.run("www.google.com", browser_content)
"""
_inputs = {self.input_url_key: url, self.input_browser_content_key:
browser_content}
return self(_inputs)[self.output_key]
|
def execute(self, url: str, browser_content: str) ->str:
    """Figure out the next browser command to run.

    Args:
        url: URL of the site currently on.
        browser_content: Content of the page as currently displayed by the browser.

    Returns:
        Next browser command to run.

    Example:
        .. code-block:: python
            browser_content = "...."
            llm_command = natbot.run("www.google.com", browser_content)
    """
    chain_inputs = {self.input_url_key: url, self.
        input_browser_content_key: browser_content}
    return self(chain_inputs)[self.output_key]
|
Figure out next browser command to run.
Args:
url: URL of the site currently on.
browser_content: Content of the page as currently displayed by the browser.
Returns:
Next browser command to run.
Example:
.. code-block:: python
browser_content = "...."
llm_command = natbot.run("www.google.com", browser_content)
|
_call
|
"""Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
"""
request = Requests(headers=self._headers())
response = request.post(url=self._url(), data=self._body(prompt, kwargs))
self._handle_status(response.status_code, response.text)
data = response.json()
return data['results'][0]['generated_text']
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call out to DeepInfra's inference API endpoint.

    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.

    Returns:
        The string generated by the model.

    Example:
        .. code-block:: python
            response = di("Tell me a joke.")
    """
    http = Requests(headers=self._headers())
    payload = self._body(prompt, kwargs)
    response = http.post(url=self._url(), data=payload)
    self._handle_status(response.status_code, response.text)
    body = response.json()
    return body['results'][0]['generated_text']
|
Call out to DeepInfra's inference API endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = di("Tell me a joke.")
|
assert_docs
|
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata) == {'Published', 'Title', 'Authors', 'Summary'}
|
def assert_docs(docs: List[Document]) ->None:
    """Check every document has content and exactly the expected metadata keys."""
    expected_keys = {'Published', 'Title', 'Authors', 'Summary'}
    for document in docs:
        assert document.page_content
        assert document.metadata
        assert set(document.metadata) == expected_keys
| null |
create_results_with_score
|
"""Parsing the returned results with scores.
Args:
json_result: Results from OpenSearch query.
Returns:
query_result_list: Results with scores.
"""
items = json_result['result']
query_result_list: List[Tuple[Document, float]] = []
for item in items:
fields = item['fields']
query_result_list.append((Document(page_content=fields[self.config.
field_name_mapping['document']], metadata=self.
create_inverse_metadata(fields)), float(item['score'])))
return query_result_list
|
def create_results_with_score(self, json_result: Dict[str, Any]) ->List[Tuple
    [Document, float]]:
    """Convert an OpenSearch response into (Document, score) pairs.

    Args:
        json_result: Results from OpenSearch query.

    Returns:
        List of documents paired with their float scores.
    """
    doc_field = self.config.field_name_mapping['document']
    scored_docs: List[Tuple[Document, float]] = []
    for item in json_result['result']:
        fields = item['fields']
        document = Document(page_content=fields[doc_field], metadata=self.
            create_inverse_metadata(fields))
        scored_docs.append((document, float(item['score'])))
    return scored_docs
|
Parsing the returned results with scores.
Args:
json_result: Results from OpenSearch query.
Returns:
query_result_list: Results with scores.
|
add_message
|
"""Append the message to the Xata table"""
msg = message_to_dict(message)
r = self._client.records().insert(self._table_name, {'sessionId': self.
_session_id, 'type': msg['type'], 'content': message.content,
'additionalKwargs': json.dumps(message.additional_kwargs), 'role': msg[
'data'].get('role'), 'name': msg['data'].get('name')})
if r.status_code > 299:
raise Exception(f'Error adding message to Xata: {r.status_code} {r}')
|
def add_message(self, message: BaseMessage) ->None:
    """Append the message to the Xata table."""
    msg = message_to_dict(message)
    # Flatten the message into the column layout expected by the table.
    record = {'sessionId': self._session_id, 'type': msg['type'],
        'content': message.content, 'additionalKwargs': json.dumps(message
        .additional_kwargs), 'role': msg['data'].get('role'), 'name': msg[
        'data'].get('name')}
    r = self._client.records().insert(self._table_name, record)
    if r.status_code > 299:
        raise Exception(f'Error adding message to Xata: {r.status_code} {r}')
|
Append the message to the Xata table
|
ds
|
logger.warning(
'this method is deprecated and will be removed, better to use `db.vectorstore.dataset` instead.'
)
return self.vectorstore.dataset
|
def ds(self) ->Any:
    """Deprecated: return the underlying dataset (use ``db.vectorstore.dataset``)."""
    logger.warning(
        'this method is deprecated and will be removed, better to use `db.vectorstore.dataset` instead.'
        )
    return self.vectorstore.dataset
| null |
run_no_throw
|
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
return self.run(command, fetch, include_columns)
except SQLAlchemyError as e:
"""Format the error message"""
return f'Error: {e}'
|
def run_no_throw(self, command: str, fetch: Literal['all', 'one']='all',
    include_columns: bool=False) ->str:
    """Execute a SQL command and return a string representing the results.

    If the statement returns rows, a string of the results is returned.
    If the statement returns no rows, an empty string is returned.
    If the statement throws an error, the error message is returned.
    """
    try:
        return self.run(command, fetch, include_columns)
    except SQLAlchemyError as e:
        # Surface the database error as text so callers never have to catch.
        return f'Error: {e}'
|
Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
|
_run
|
python_code = add_last_line_print(python_code)
if callbacks is not None:
on_artifact = getattr(callbacks.metadata, 'on_artifact', None)
else:
on_artifact = None
stdout, stderr, artifacts = self.session.run_python(python_code,
on_artifact=on_artifact)
out = {'stdout': stdout, 'stderr': stderr, 'artifacts': list(map(lambda
artifact: artifact.name, artifacts))}
return json.dumps(out)
|
def _run(self, python_code: str, run_manager: Optional[
    CallbackManagerForToolRun]=None, callbacks: Optional[CallbackManager]=None
    ) ->str:
    """Run python code in the session; return stdout/stderr/artifact names as JSON."""
    python_code = add_last_line_print(python_code)
    on_artifact = None if callbacks is None else getattr(callbacks.
        metadata, 'on_artifact', None)
    stdout, stderr, artifacts = self.session.run_python(python_code,
        on_artifact=on_artifact)
    artifact_names = [artifact.name for artifact in artifacts]
    return json.dumps({'stdout': stdout, 'stderr': stderr, 'artifacts':
        artifact_names})
| null |
on_llm_start
|
self.on_llm_start_common()
|
def on_llm_start(self, *args: Any, **kwargs: Any) ->Any:
    """Forward the LLM-start callback to the shared handler, ignoring arguments."""
    self.on_llm_start_common()
| null |
test_anonymize_allow_list
|
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = 'Hello, my name is John Doe.'
anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=['John Doe'])
assert ('John Doe' in anonymized_text) == should_contain
|
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
@pytest.mark.parametrize('analyzed_fields,should_contain', [(['PERSON'],
    True), (['PHONE_NUMBER'], True), (None, True)])
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool
    ) ->None:
    """An allow-listed name should survive anonymization of a simple sentence."""
    from langchain_experimental.data_anonymizer import PresidioAnonymizer
    sample = 'Hello, my name is John Doe.'
    anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields)
    result = anonymizer.anonymize(sample, allow_list=['John Doe'])
    assert ('John Doe' in result) == should_contain
|
Test anonymizing a name in a simple sentence
|
__repr__
|
"""Text representation for myscale, prints backends, username and schemas.
Easy to use with `str(Myscale())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f'\x1b[92m\x1b[1m{self.config.database}.{self.config.table} @ '
_repr += f'{self.config.host}:{self.config.port}\x1b[0m\n\n'
_repr += f"""[1musername: {self.config.username}[0m
Table Schema:
"""
_repr += '-' * 51 + '\n'
for r in self.client.query(f'DESC {self.config.database}.{self.config.table}'
).named_results():
_repr += (
f"|\x1b[94m{r['name']:24s}\x1b[0m|\x1b[96m{r['type']:24s}\x1b[0m|\n")
_repr += '-' * 51 + '\n'
return _repr
|
def __repr__(self) ->str:
    """Text representation for myscale, prints backends, username and schemas.
    Easy to use with `str(Myscale())`
    Returns:
        repr: string to show connection info and data schema
    """
    # \x1b[92m / \x1b[1m etc. are ANSI escape codes (green, bold, blue, cyan)
    # used to colorize terminal output; \x1b[0m resets styling.
    _repr = f'\x1b[92m\x1b[1m{self.config.database}.{self.config.table} @ '
    _repr += f'{self.config.host}:{self.config.port}\x1b[0m\n\n'
    _repr += (
        f'\x1b[1musername: {self.config.username}\x1b[0m\n\nTable Schema:\n')
    # 51-dash rule lines frame the two-column (name | type) schema table.
    _repr += '-' * 51 + '\n'
    # DESC <db>.<table> yields one row per column; pad name/type to 24 chars.
    for r in self.client.query(
        f'DESC {self.config.database}.{self.config.table}').named_results():
        _repr += (
            f"|\x1b[94m{r['name']:24s}\x1b[0m|\x1b[96m{r['type']:24s}\x1b[0m|\n"
            )
    _repr += '-' * 51 + '\n'
    return _repr
|
Text representation for myscale, prints backends, username and schemas.
Easy to use with `str(Myscale())`
Returns:
repr: string to show connection info and data schema
|
test_math_prompt
|
"""Test math prompt."""
llm = OpenAI(temperature=0, max_tokens=512)
pal_chain = PALChain.from_math_prompt(llm, timeout=None)
question = (
'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?'
)
output = pal_chain.run(question)
assert output == '28'
|
def test_math_prompt() ->None:
    """Test math prompt."""
    llm = OpenAI(temperature=0, max_tokens=512)
    pal_chain = PALChain.from_math_prompt(llm, timeout=None)
    question = (
        'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?'
        )
    result = pal_chain.run(question)
    assert result == '28'
|
Test math prompt.
|
_copy_run
|
if run.dotted_order:
levels = run.dotted_order.split('.')
processed_levels = []
for level in levels:
timestamp, run_id = level.split('Z')
new_run_id = self._replace_uuid(UUID(run_id))
processed_level = f'{timestamp}Z{new_run_id}'
processed_levels.append(processed_level)
new_dotted_order = '.'.join(processed_levels)
else:
new_dotted_order = None
return run.copy(update={'id': self._replace_uuid(run.id), 'parent_run_id':
self.uuids_map[run.parent_run_id] if run.parent_run_id else None,
'child_runs': [self._copy_run(child) for child in run.child_runs],
'execution_order': None, 'child_execution_order': None, 'trace_id':
self._replace_uuid(run.trace_id) if run.trace_id else None,
'dotted_order': new_dotted_order})
|
def _copy_run(self, run: Run) ->Run:
    """Deep-copy a run tree, remapping every embedded UUID to a fresh one.

    The ``dotted_order`` string encodes ancestry as '.'-separated
    '<timestamp>Z<uuid>' segments; each embedded uuid is remapped so the
    copy stays internally consistent with the remapped id fields.
    """
    if run.dotted_order:
        levels = run.dotted_order.split('.')
        processed_levels = []
        for level in levels:
            # Each level is '<timestamp>Z<run_id>'; swap in the remapped uuid.
            timestamp, run_id = level.split('Z')
            new_run_id = self._replace_uuid(UUID(run_id))
            processed_level = f'{timestamp}Z{new_run_id}'
            processed_levels.append(processed_level)
        new_dotted_order = '.'.join(processed_levels)
    else:
        new_dotted_order = None
    # Recursively copy children; execution-order fields are reset to None.
    return run.copy(update={'id': self._replace_uuid(run.id),
        'parent_run_id': self.uuids_map[run.parent_run_id] if run.
        parent_run_id else None, 'child_runs': [self._copy_run(child) for
        child in run.child_runs], 'execution_order': None,
        'child_execution_order': None, 'trace_id': self._replace_uuid(run.
        trace_id) if run.trace_id else None, 'dotted_order': new_dotted_order})
| null |
test_skipping_errors
|
loader = MWDumpLoader(file_path=(PARENT_DIR / 'mwtest_current_pages.xml').
absolute(), stop_on_error=False)
documents = loader.load()
assert len(documents) == 3
|
@pytest.mark.requires('mwparserfromhell', 'mwxml')
def test_skipping_errors() ->None:
    """Loading the test dump with stop_on_error=False yields 3 documents."""
    dump_path = (PARENT_DIR / 'mwtest_current_pages.xml').absolute()
    loader = MWDumpLoader(file_path=dump_path, stop_on_error=False)
    docs = loader.load()
    assert len(docs) == 3
| null |
clear
|
"""Nothing to clear."""
|
def clear(self) ->None:
    """No-op: there is nothing to clear in this implementation."""
|
Nothing to clear.
|
test_vearch
|
"""
Test end to end create vearch ,store vector into it and search
"""
texts = ['Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用',
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库',
'vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装']
metadatas = [{'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}, {'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}, {'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'}]
vearch_db = Vearch.from_texts(texts=texts, embedding=FakeEmbeddings(),
metadatas=metadatas, table_name='test_vearch', metadata_path='./')
result = vearch_db.similarity_search(
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', 1)
assert result == [Document(page_content=
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', metadata={'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'})
]
|
def test_vearch() ->None:
    """End-to-end test: build a Vearch store from texts and run a similarity search."""
    texts = [
        'Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用',
        'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库',
        'vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装']
    metadatas = [{'source':
        '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
        }, {'source':
        '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
        }, {'source':
        '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
        }]
    vearch_db = Vearch.from_texts(texts=texts, embedding=FakeEmbeddings(),
        metadatas=metadatas, table_name='test_vearch', metadata_path='./')
    # The query is an exact copy of the second text, so it should be the
    # top-1 hit with its original metadata attached.
    result = vearch_db.similarity_search(
        'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', 1)
    assert result == [Document(page_content=
        'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', metadata={
        'source':
        '/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
        })]
|
Test end-to-end: create a Vearch store, store vectors in it, and search.
|
test_multimodal
|
llm = ChatVertexAI(model_name='gemini-ultra-vision')
gcs_url = (
'gs://cloud-samples-data/generative-ai/image/320px-Felis_catus-cat_on_snow.jpg'
)
image_message = {'type': 'image_url', 'image_url': {'url': gcs_url}}
text_message = {'type': 'text', 'text': 'What is shown in this image?'}
message = HumanMessage(content=[text_message, image_message])
output = llm([message])
assert isinstance(output.content, str)
|
def test_multimodal() ->None:
    """Gemini vision model should answer a text+image prompt with a string."""
    llm = ChatVertexAI(model_name='gemini-ultra-vision')
    gcs_url = (
        'gs://cloud-samples-data/generative-ai/image/320px-Felis_catus-cat_on_snow.jpg'
        )
    content = [{'type': 'text', 'text': 'What is shown in this image?'},
        {'type': 'image_url', 'image_url': {'url': gcs_url}}]
    output = llm([HumanMessage(content=content)])
    assert isinstance(output.content, str)
| null |
create_pandas_dataframe_agent
|
"""Construct a pandas agent from an LLM and dataframe."""
agent: BaseSingleActionAgent
base_tools: Sequence[BaseTool]
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
prompt, base_tools = _get_prompt_and_tools(df, prefix=prefix, suffix=
suffix, input_variables=input_variables, include_df_in_prompt=
include_df_in_prompt, number_of_head_rows=number_of_head_rows,
extra_tools=extra_tools)
tools = base_tools
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
callback_manager)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names,
callback_manager=callback_manager, **kwargs)
elif agent_type == AgentType.OPENAI_FUNCTIONS:
_prompt, base_tools = _get_functions_prompt_and_tools(df, prefix=prefix,
suffix=suffix, input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt, number_of_head_rows=
number_of_head_rows)
tools = list(base_tools) + list(extra_tools)
agent = OpenAIFunctionsAgent(llm=llm, prompt=_prompt, tools=tools,
callback_manager=callback_manager, **kwargs)
else:
raise ValueError(f'Agent type {agent_type} not supported at the moment.')
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
callback_manager=callback_manager, verbose=verbose,
return_intermediate_steps=return_intermediate_steps, max_iterations=
max_iterations, max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method, **agent_executor_kwargs or {})
|
def create_pandas_dataframe_agent(llm: BaseLanguageModel, df: Any,
    agent_type: AgentType=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    callback_manager: Optional[BaseCallbackManager]=None, prefix: Optional[
    str]=None, suffix: Optional[str]=None, input_variables: Optional[List[
    str]]=None, verbose: bool=False, return_intermediate_steps: bool=False,
    max_iterations: Optional[int]=15, max_execution_time: Optional[float]=
    None, early_stopping_method: str='force', agent_executor_kwargs:
    Optional[Dict[str, Any]]=None, include_df_in_prompt: Optional[bool]=
    True, number_of_head_rows: int=5, extra_tools: Sequence[BaseTool]=(),
    **kwargs: Dict[str, Any]) ->AgentExecutor:
    """Construct a pandas agent from an LLM and dataframe.

    Only ZERO_SHOT_REACT_DESCRIPTION and OPENAI_FUNCTIONS agent types are
    supported; any other agent_type raises ValueError.
    """
    agent: BaseSingleActionAgent
    base_tools: Sequence[BaseTool]
    if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
        # extra_tools are folded in by the prompt/tool builder here.
        prompt, base_tools = _get_prompt_and_tools(df, prefix=prefix,
            suffix=suffix, input_variables=input_variables,
            include_df_in_prompt=include_df_in_prompt, number_of_head_rows=
            number_of_head_rows, extra_tools=extra_tools)
        tools = base_tools
        llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
            callback_manager)
        tool_names = [tool.name for tool in tools]
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names,
            callback_manager=callback_manager, **kwargs)
    elif agent_type == AgentType.OPENAI_FUNCTIONS:
        # In this branch extra_tools are appended after the base tools instead.
        _prompt, base_tools = _get_functions_prompt_and_tools(df, prefix=
            prefix, suffix=suffix, input_variables=input_variables,
            include_df_in_prompt=include_df_in_prompt, number_of_head_rows=
            number_of_head_rows)
        tools = list(base_tools) + list(extra_tools)
        agent = OpenAIFunctionsAgent(llm=llm, prompt=_prompt, tools=tools,
            callback_manager=callback_manager, **kwargs)
    else:
        raise ValueError(
            f'Agent type {agent_type} not supported at the moment.')
    return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
        callback_manager=callback_manager, verbose=verbose,
        return_intermediate_steps=return_intermediate_steps, max_iterations
        =max_iterations, max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method, **
        agent_executor_kwargs or {})
|
Construct a pandas agent from an LLM and dataframe.
|
create_message_model
|
"""
Create a message model for a given table name.
Args:
table_name: The name of the table to use.
DynamicBase: The base class to use for the model.
Returns:
The model class.
"""
class Message(DynamicBase):
__tablename__ = table_name
id = Column(Integer, primary_key=True)
session_id = Column(Text)
message = Column(Text)
return Message
|
def create_message_model(table_name, DynamicBase):
    """
    Create a message model for a given table name.
    Args:
        table_name: The name of the table to use.
        DynamicBase: The base class to use for the model.
    Returns:
        The model class.
    """

    class Message(DynamicBase):
        # Declarative model: one row per stored chat message.
        __tablename__ = table_name
        # Integer primary key.
        id = Column(Integer, primary_key=True)
        # Session identifier used to group messages — presumably a
        # conversation id; confirm against callers.
        session_id = Column(Text)
        # Message payload stored as text.
        message = Column(Text)
    return Message
|
Create a message model for a given table name.
Args:
table_name: The name of the table to use.
DynamicBase: The base class to use for the model.
Returns:
The model class.
|
_persist_run
|
"""Persist a run."""
self._log_trace_from_run(run)
|
def _persist_run(self, run: 'Run') ->None:
    """Persist a run by logging its trace."""
    self._log_trace_from_run(run)
|
Persist a run.
|
_create_collection
|
enum = guard_import('tcvectordb.model.enum')
vdb_index = guard_import('tcvectordb.model.index')
index_type = None
for k, v in enum.IndexType.__members__.items():
if k == self.index_params.index_type:
index_type = v
if index_type is None:
raise ValueError('unsupported index_type')
metric_type = None
for k, v in enum.MetricType.__members__.items():
if k == self.index_params.metric_type:
metric_type = v
if metric_type is None:
raise ValueError('unsupported metric_type')
if self.index_params.params is None:
params = vdb_index.HNSWParams(m=16, efconstruction=200)
else:
params = vdb_index.HNSWParams(m=self.index_params.params.get('M', 16),
efconstruction=self.index_params.params.get('efConstruction', 200))
index = vdb_index.Index(vdb_index.FilterIndex(self.field_id, enum.FieldType
.String, enum.IndexType.PRIMARY_KEY), vdb_index.VectorIndex(self.
field_vector, self.index_params.dimension, index_type, metric_type,
params), vdb_index.FilterIndex(self.field_text, enum.FieldType.String,
enum.IndexType.FILTER), vdb_index.FilterIndex(self.field_metadata, enum
.FieldType.String, enum.IndexType.FILTER))
self.collection = self.database.create_collection(name=collection_name,
shard=self.index_params.shard, replicas=self.index_params.replicas,
description='Collection for LangChain', index=index)
|
def _create_collection(self, collection_name: str) ->None:
    """Create the collection with primary-key, vector and filter indexes.

    Args:
        collection_name: Name for the new collection.

    Raises:
        ValueError: if the configured ``index_type`` or ``metric_type``
            does not name a member of the tcvectordb enums.
    """
    enum = guard_import('tcvectordb.model.enum')
    vdb_index = guard_import('tcvectordb.model.index')
    # Resolve enum members by name directly instead of scanning __members__.
    index_type = enum.IndexType.__members__.get(self.index_params.index_type)
    if index_type is None:
        raise ValueError('unsupported index_type')
    metric_type = enum.MetricType.__members__.get(self.index_params.
        metric_type)
    if metric_type is None:
        raise ValueError('unsupported metric_type')
    # Build HNSW parameters, falling back to defaults when not configured.
    if self.index_params.params is None:
        params = vdb_index.HNSWParams(m=16, efconstruction=200)
    else:
        params = vdb_index.HNSWParams(m=self.index_params.params.get('M',
            16), efconstruction=self.index_params.params.get(
            'efConstruction', 200))
    # One primary-key index, one vector index, plus filter indexes on the
    # text and metadata fields.
    index = vdb_index.Index(vdb_index.FilterIndex(self.field_id, enum.
        FieldType.String, enum.IndexType.PRIMARY_KEY), vdb_index.
        VectorIndex(self.field_vector, self.index_params.dimension,
        index_type, metric_type, params), vdb_index.FilterIndex(self.
        field_text, enum.FieldType.String, enum.IndexType.FILTER),
        vdb_index.FilterIndex(self.field_metadata, enum.FieldType.String,
        enum.IndexType.FILTER))
    self.collection = self.database.create_collection(name=collection_name,
        shard=self.index_params.shard, replicas=self.index_params.replicas,
        description='Collection for LangChain', index=index)
| null |
_load_stuff_chain
|
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
return StuffDocumentsChain(llm_chain=llm_chain, document_variable_name=
document_variable_name, verbose=verbose, **kwargs)
|
def _load_stuff_chain(llm: BaseLanguageModel, prompt: BasePromptTemplate=
    stuff_prompt.PROMPT, document_variable_name: str='text', verbose:
    Optional[bool]=None, **kwargs: Any) ->StuffDocumentsChain:
    """Build a StuffDocumentsChain wrapping an LLMChain for the given prompt."""
    chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
    return StuffDocumentsChain(llm_chain=chain, document_variable_name=
        document_variable_name, verbose=verbose, **kwargs)
| null |
parse_numbered_list
|
"""Parses a numbered list into a list of dictionaries
Each element having two keys:
'index' for the index in the numbered list, and 'point' for the content.
"""
lines = input_str.split('\n')
parsed_list = []
for line in lines:
parts = line.split('. ', 1)
if len(parts) == 2:
index = int(parts[0])
point = parts[1].strip()
parsed_list.append({'point_index': index, 'point_skeleton': point})
return parsed_list
|
def parse_numbered_list(input_str: str) ->list:
    """Parse a numbered list into a list of dictionaries.

    Each element has two keys: 'point_index' (the number in the list) and
    'point_skeleton' (the content after the number).

    Args:
        input_str: Text containing lines such as ``"1. First point"``.

    Returns:
        A list of ``{'point_index': int, 'point_skeleton': str}`` dicts.
        Lines without a numeric "N. " prefix are skipped instead of
        raising ValueError (the original crashed on e.g. "foo. bar").
    """
    parsed_list = []
    for line in input_str.split('\n'):
        prefix, sep, content = line.partition('. ')
        # Only accept lines whose prefix is purely numeric.
        if sep and prefix.strip().isdigit():
            parsed_list.append({'point_index': int(prefix),
                'point_skeleton': content.strip()})
    return parsed_list
|
Parses a numbered list into a list of dictionaries.
Each element has two keys:
'point_index' for the index in the numbered list, and 'point_skeleton' for the content.
|
test_tencent_vector_db_with_score
|
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _tencent_vector_db_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
|
def test_tencent_vector_db_with_score() ->None:
    """Test end to end construction and search with scores and IDs."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    store = _tencent_vector_db_from_texts(metadatas=metadatas)
    scored = store.similarity_search_with_score('foo', k=3)
    docs = [doc for doc, _score in scored]
    assert docs == [Document(page_content='foo', metadata={'page': 0}),
        Document(page_content='bar', metadata={'page': 1}), Document(
        page_content='baz', metadata={'page': 2})]
|
Test end to end construction and search with scores and IDs.
|
_calculate_fp_distance
|
"""Calculate the distance based on the vector datatype
Two datatypes supported:
- FLOAT32
- FLOAT64
if it's FLOAT32, we need to round the distance to 4 decimal places
otherwise, round to 7 decimal places.
"""
if self._schema.content_vector.datatype == 'FLOAT32':
return round(float(distance), 4)
return round(float(distance), 7)
|
def _calculate_fp_distance(self, distance: str) ->float:
    """Round the distance according to the content vector's datatype.

    FLOAT32 values are rounded to 4 decimal places; anything else
    (e.g. FLOAT64) to 7.
    """
    is_f32 = self._schema.content_vector.datatype == 'FLOAT32'
    precision = 4 if is_f32 else 7
    return round(float(distance), precision)
|
Calculate the distance based on the vector datatype
Two datatypes supported:
- FLOAT32
- FLOAT64
if it's FLOAT32, we need to round the distance to 4 decimal places
otherwise, round to 7 decimal places.
|
validate_schema
|
schema = values['pydantic_schema']
if 'args_only' not in values:
values['args_only'] = isinstance(schema, type) and issubclass(schema,
BaseModel)
elif values['args_only'] and isinstance(schema, Dict):
raise ValueError(
'If multiple pydantic schemas are provided then args_only should be False.'
)
return values
|
@root_validator(pre=True)
def validate_schema(cls, values: Dict) ->Dict:
    """Infer or sanity-check ``args_only`` against ``pydantic_schema``."""
    schema = values['pydantic_schema']
    if 'args_only' not in values:
        # Default: args_only iff a single BaseModel subclass was given.
        values['args_only'] = isinstance(schema, type) and issubclass(
            schema, BaseModel)
    else:
        if values['args_only'] and isinstance(schema, Dict):
            raise ValueError(
                'If multiple pydantic schemas are provided then args_only should be False.'
                )
    return values
| null |
mget
|
"""Get the values associated with the given keys.
Args:
keys: A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be None.
"""
values: List[Optional[bytes]] = []
for key in keys:
full_path = self._get_full_path(key)
if full_path.exists():
value = full_path.read_bytes()
values.append(value)
else:
values.append(None)
return values
|
def mget(self, keys: Sequence[str]) ->List[Optional[bytes]]:
    """Get the values associated with the given keys.

    Args:
        keys: A sequence of keys.

    Returns:
        One optional value per key, in order; ``None`` where a key's file
        does not exist.
    """
    results: List[Optional[bytes]] = []
    for key in keys:
        path = self._get_full_path(key)
        results.append(path.read_bytes() if path.exists() else None)
    return results
|
Get the values associated with the given keys.
Args:
keys: A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be None.
|
test_pgvector_with_filter_no_match
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'5'})
assert output == []
|
def test_pgvector_with_filter_no_match() ->None:
    """Filtering on a page value absent from the corpus returns nothing."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    docsearch = PGVector.from_texts(texts=texts, collection_name=
        'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(
        ), metadatas=metadatas, connection_string=CONNECTION_STRING,
        pre_delete_collection=True)
    results = docsearch.similarity_search_with_score('foo', k=1, filter={
        'page': '5'})
    assert results == []
|
Test end to end construction and search.
|
test_from_documents
|
input_docs = [Document(page_content='I have a pen.', metadata={'foo': 'bar'
}), Document(page_content='Do you have a pen?'), Document(page_content=
'I have a bag.')]
svm_retriever = SVMRetriever.from_documents(documents=input_docs,
embeddings=FakeEmbeddings(size=100))
assert len(svm_retriever.texts) == 3
|
@pytest.mark.requires('sklearn')
def test_from_documents(self) ->None:
    """Building the retriever from documents keeps one text per document."""
    docs = [Document(page_content='I have a pen.', metadata={'foo': 'bar'
        }), Document(page_content='Do you have a pen?'), Document(
        page_content='I have a bag.')]
    retriever = SVMRetriever.from_documents(documents=docs, embeddings=
        FakeEmbeddings(size=100))
    assert len(retriever.texts) == 3
| null |
input_keys
|
"""Use this since so some prompt vars come from history."""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
    """Only the single configured key is required; remaining prompt
    variables come from history."""
    keys = [self.input_key]
    return keys
|
Use this since some prompt vars come from history.
|
format
|
"""Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
"""
text = self.prompt.format(**kwargs)
return AIMessage(content=text, additional_kwargs=self.additional_kwargs)
|
def format(self, **kwargs: Any) ->BaseMessage:
    """Format the prompt template.
    Args:
        **kwargs: Keyword arguments to use for formatting.
    Returns:
        Formatted message.
    """
    # Render the underlying prompt, then wrap the text as an AI message,
    # carrying this template's extra kwargs along unchanged.
    formatted = self.prompt.format(**kwargs)
    message = AIMessage(content=formatted,
        additional_kwargs=self.additional_kwargs)
    return message
|
Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
|
test_suffix_only
|
"""Test prompt works with just a suffix."""
suffix = 'This is a {foo} test.'
input_variables = ['foo']
prompt = FewShotPromptTemplate(input_variables=input_variables, suffix=
suffix, examples=[], example_prompt=EXAMPLE_PROMPT)
output = prompt.format(foo='bar')
expected_output = 'This is a bar test.'
assert output == expected_output
|
def test_suffix_only() ->None:
    """A few-shot prompt with no examples formats its suffix alone."""
    prompt = FewShotPromptTemplate(
        input_variables=['foo'],
        suffix='This is a {foo} test.',
        examples=[],
        example_prompt=EXAMPLE_PROMPT,
    )
    assert prompt.format(foo='bar') == 'This is a bar test.'
|
Test prompt works with just a suffix.
|
args
|
return items
|
def args(self, *items: Any) ->tuple:
    """Pack the given positional arguments into a tuple and return it."""
    return items
| null |
from_llm_and_tools
|
"""Construct an agent from an LLM and tools."""
prompt = cls.create_prompt(extra_prompt_messages=extra_prompt_messages,
system_message=system_message)
return cls(llm=llm, prompt=prompt, tools=tools, callback_manager=
callback_manager, **kwargs)
|
@classmethod
def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[
    BaseTool], callback_manager: Optional[BaseCallbackManager]=None,
    extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]]=None,
    system_message: Optional[SystemMessage]=SystemMessage(content=
    'You are a helpful AI assistant.'), **kwargs: Any) ->BaseMultiActionAgent:
    """Construct an agent from an LLM and tools."""
    # Build the chat prompt first, then assemble the agent around it.
    agent_prompt = cls.create_prompt(
        extra_prompt_messages=extra_prompt_messages,
        system_message=system_message,
    )
    return cls(
        llm=llm,
        prompt=agent_prompt,
        tools=tools,
        callback_manager=callback_manager,
        **kwargs,
    )
|
Construct an agent from an LLM and tools.
|
_is_gemini_model
|
return 'gemini' in model_name
|
def _is_gemini_model(model_name: str) ->bool:
return 'gemini' in model_name
| null |
_convert_message_to_dict
|
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
'Function messages are not supported by the MLflow AI Gateway. Please create a feature request at https://github.com/mlflow/mlflow/issues.'
)
else:
raise ValueError(f'Got unknown message type: {message}')
if 'function_call' in message.additional_kwargs:
ChatMLflowAIGateway._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
'Additional message arguments are unsupported by MLflow AI Gateway and will be ignored: %s'
, message.additional_kwargs)
return message_dict
|
@staticmethod
def _convert_message_to_dict(message: BaseMessage) ->dict:
    """Translate a LangChain message into the role/content dict shape sent
    to the MLflow AI Gateway.

    Raises:
        ValueError: for FunctionMessage (unsupported by the gateway) or any
            unrecognized message type.
    """
    # NOTE(review): the isinstance order looks deliberate — ChatMessage
    # carries its own explicit role and is checked before the role-specific
    # message classes; preserve this ordering.
    if isinstance(message, ChatMessage):
        message_dict = {'role': message.role, 'content': message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {'role': 'user', 'content': message.content}
    elif isinstance(message, AIMessage):
        message_dict = {'role': 'assistant', 'content': message.content}
    elif isinstance(message, SystemMessage):
        message_dict = {'role': 'system', 'content': message.content}
    elif isinstance(message, FunctionMessage):
        raise ValueError(
            'Function messages are not supported by the MLflow AI Gateway. Please create a feature request at https://github.com/mlflow/mlflow/issues.'
            )
    else:
        raise ValueError(f'Got unknown message type: {message}')
    # Function-calling payloads are rejected outright...
    if 'function_call' in message.additional_kwargs:
        ChatMLflowAIGateway._raise_functions_not_supported()
    # ...while any other extra kwargs are merely dropped with a warning.
    if message.additional_kwargs:
        logger.warning(
            'Additional message arguments are unsupported by MLflow AI Gateway and will be ignored: %s'
            , message.additional_kwargs)
    return message_dict
| null |
create_pull_request
|
"""
Makes a pull request from the bot's branch to the base branch
Parameters:
pr_query(str): a string which contains the PR title
and the PR body. The title is the first line
in the string, and the body are the rest of the string.
For example, "Updated README
made changes to add info"
Returns:
str: A success or failure message
"""
if self.github_base_branch == self.active_branch:
return """Cannot make a pull request because
commits are already in the main or master branch."""
else:
try:
title = pr_query.split('\n')[0]
body = pr_query[len(title) + 2:]
pr = self.github_repo_instance.create_pull(title=title, body=body,
head=self.active_branch, base=self.github_base_branch)
return f'Successfully created PR number {pr.number}'
except Exception as e:
return 'Unable to make pull request due to error:\n' + str(e)
|
def create_pull_request(self, pr_query: str) ->str:
    """
    Makes a pull request from the bot's branch to the base branch
    Parameters:
        pr_query(str): a string which contains the PR title
            and the PR body. The title is the first line
            in the string, and the body is the rest of the string.
            For example, "Updated README
            made changes to add info"
    Returns:
        str: A success or failure message
    """
    if self.github_base_branch == self.active_branch:
        return """Cannot make a pull request because
            commits are already in the main or master branch."""
    else:
        try:
            # Split on the FIRST newline: title before it, body after it.
            # The previous `pr_query[len(title) + 2:]` indexed one character
            # past the newline and silently dropped the body's first
            # character for the documented single-newline format.
            title, _, body = pr_query.partition('\n')
            pr = self.github_repo_instance.create_pull(title=title, body=
                body, head=self.active_branch, base=self.github_base_branch)
            return f'Successfully created PR number {pr.number}'
        except Exception as e:
            # Best-effort API call: surface the error text to the caller
            # instead of raising.
            return 'Unable to make pull request due to error:\n' + str(e)
|
Makes a pull request from the bot's branch to the base branch
Parameters:
pr_query(str): a string which contains the PR title
and the PR body. The title is the first line
        in the string, and the body is the rest of the string.
For example, "Updated README
made changes to add info"
Returns:
str: A success or failure message
|
comma_list
|
"""Convert a list to a comma-separated string."""
return ', '.join(str(item) for item in items)
|
def comma_list(items: List[Any]) ->str:
    """Convert a list to a comma-separated string."""
    parts = (str(item) for item in items)
    return ', '.join(parts)
|
Convert a list to a comma-separated string.
|
test_dependency_string_edge_case
|
_assert_dependency_equals(parse_dependency_string('git+ssh://a@b', None,
None, None), git='ssh://a@b', subdirectory=None, ref=None)
_assert_dependency_equals(parse_dependency_string(
'git+https://github.com/efriis/myrepo.git@subdirectory=src', None, None,
None), git='https://github.com/efriis/myrepo.git', subdirectory=None,
ref='subdirectory=src')
|
def test_dependency_string_edge_case() ->None:
    """Edge cases of git dependency-string parsing."""
    # ssh-style URL: the '@' belongs to the authority, not a ref separator.
    ssh_dep = parse_dependency_string('git+ssh://a@b', None, None, None)
    _assert_dependency_equals(ssh_dep, git='ssh://a@b', subdirectory=None,
        ref=None)
    # Everything after the https URL's '@' is taken as the ref verbatim,
    # even text that looks like a 'subdirectory=' option.
    https_dep = parse_dependency_string(
        'git+https://github.com/efriis/myrepo.git@subdirectory=src', None,
        None, None)
    _assert_dependency_equals(https_dep, git=
        'https://github.com/efriis/myrepo.git', subdirectory=None, ref=
        'subdirectory=src')
| null |
embed_query
|
"""Return simple embeddings."""
return [float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text) + 1)]
|
def embed_query(self, text: str) ->List[float]:
    """Return simple embeddings."""
    # Constant prefix plus one distinguishing component derived from the
    # query's position in the module-level `texts` fixture.
    embedding = [1.0] * (OS_TOKEN_COUNT - 1)
    embedding.append(float(texts.index(text) + 1))
    return embedding
|
Return simple embeddings.
|
_llm_type
|
return 'promptlayer-openai-chat'
|
@property
def _llm_type(self) ->str:
return 'promptlayer-openai-chat'
| null |
test_chat_openai_model
|
"""Test ChatOpenAI wrapper handles model_name."""
chat = ChatOpenAI(model='foo')
assert chat.model_name == 'foo'
chat = ChatOpenAI(model_name='bar')
assert chat.model_name == 'bar'
|
def test_chat_openai_model() ->None:
    """Test ChatOpenAI wrapper handles model_name."""
    # Both the `model` alias and `model_name` should populate model_name.
    assert ChatOpenAI(model='foo').model_name == 'foo'
    assert ChatOpenAI(model_name='bar').model_name == 'bar'
|
Test ChatOpenAI wrapper handles model_name.
|
llm_prefix
|
"""Prefix to append the llm call with."""
return 'Thought:'
|
@property
def llm_prefix(self) ->str:
    """Prefix to append the llm call with."""
    prefix = 'Thought:'
    return prefix
|
Prefix to append the llm call with.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.