method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_get_get_session_history
|
chat_history_store = store if store is not None else {}
def get_session_history(session_id: str, **kwargs: Any) ->ChatMessageHistory:
if session_id not in chat_history_store:
chat_history_store[session_id] = ChatMessageHistory()
return chat_history_store[session_id]
return get_session_history
|
def _get_get_session_history(*, store: Optional[Dict[str, Any]]=None
    ) ->Callable[..., ChatMessageHistory]:
    """Build a session-history lookup backed by ``store`` (or a fresh dict).

    The returned callable maps a session id to its ChatMessageHistory,
    creating and caching an empty history on first access.
    """
    histories = {} if store is None else store

    def get_session_history(session_id: str, **kwargs: Any
        ) ->ChatMessageHistory:
        # EAFP: single lookup on the common (existing-session) path.
        try:
            return histories[session_id]
        except KeyError:
            history = ChatMessageHistory()
            histories[session_id] = history
            return history
    return get_session_history
| null |
load
|
"""Load Documents from URLs."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Load Documents from URLs.

    Eagerly materializes everything produced by ``lazy_load``.
    """
    return list(self.lazy_load())
|
Load Documents from URLs.
|
buffer_as_str
|
"""Exposes the buffer as a string in case return_messages is True."""
messages = self.chat_memory.messages[-self.k * 2:] if self.k > 0 else []
return get_buffer_string(messages, human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix)
|
@property
def buffer_as_str(self) ->str:
    """Exposes the buffer as a string in case return_messages is True."""
    if self.k > 0:
        recent = self.chat_memory.messages[-self.k * 2:]
    else:
        recent = []
    return get_buffer_string(recent, human_prefix=self.human_prefix,
        ai_prefix=self.ai_prefix)
|
Exposes the buffer as a string in case return_messages is True.
|
_clean_data
|
from bs4 import BeautifulSoup
soup = BeautifulSoup(data, 'html.parser', **self.bs_kwargs)
html_tags = [('div', {'role': 'main'}), ('main', {'id': 'main-content'})]
if self.custom_html_tag is not None:
html_tags.append(self.custom_html_tag)
element = None
for tag, attrs in html_tags[::-1]:
element = soup.find(tag, attrs)
if element is not None:
break
if element is not None and _get_link_ratio(element
) <= self.exclude_links_ratio:
text = _get_clean_text(element)
else:
text = ''
return '\n'.join([t for t in text.split('\n') if t])
|
def _clean_data(self, data: str) ->str:
    """Extract the main readable text from an HTML page.

    Candidate content containers are tried last-to-first (so a custom tag,
    when configured, wins). The matched element's text is kept only when
    its link-text ratio does not exceed ``self.exclude_links_ratio``;
    blank lines are dropped from the result.
    """
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(data, 'html.parser', **self.bs_kwargs)
    candidates = [('div', {'role': 'main'}), ('main', {'id': 'main-content'})]
    if self.custom_html_tag is not None:
        candidates.append(self.custom_html_tag)
    element = None
    for tag, attrs in reversed(candidates):
        element = soup.find(tag, attrs)
        if element is not None:
            break
    if element is None or _get_link_ratio(element) > self.exclude_links_ratio:
        text = ''
    else:
        text = _get_clean_text(element)
    return '\n'.join(line for line in text.split('\n') if line)
| null |
create_schema
|
"""Create the database schema for the record manager."""
|
@abstractmethod
def create_schema(self) ->None:
    """Create the database schema for the record manager.

    Abstract: concrete record managers perform their backend-specific
    setup here. Returns None.
    """
|
Create the database schema for the record manager.
|
_import_azure_cognitive_services_AzureCogsFormRecognizerTool
|
from langchain_community.tools.azure_cognitive_services import AzureCogsFormRecognizerTool
return AzureCogsFormRecognizerTool
|
def _import_azure_cognitive_services_AzureCogsFormRecognizerTool() ->Any:
    """Lazy-import shim: return the AzureCogsFormRecognizerTool class."""
    from langchain_community.tools.azure_cognitive_services import (
        AzureCogsFormRecognizerTool,
    )
    return AzureCogsFormRecognizerTool
| null |
similarity_search_with_score
|
"""The most k similar documents and scores of the specified query.
Args:
embeddings: embedding vector of the query.
k: The k most similar documents to the text query.
min_score: the score of similar documents to the text query
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
"""
if self.embedding_func is None:
raise ValueError('embedding_func is None!!!')
embeddings = self.embedding_func.embed_query(query)
embed = np.array(embeddings)
if self.flag:
query_data = {'query': {'sum': [{'field': 'text_embedding', 'feature':
(embed / np.linalg.norm(embed)).tolist()}]}, 'size': k, 'fields': [
'text_embedding', 'text', 'metadata']}
query_result = self.vearch.search(self.using_db_name, self.
using_table_name, query_data)
res = query_result['hits']['hits']
else:
query_data = {'vector': [{'field': 'text_embedding', 'feature': embed /
np.linalg.norm(embed)}], 'fields': [], 'is_brute_search': 1,
'retrieval_param': {'metric_type': 'InnerProduct', 'nprobe': 20},
'topn': k}
query_result = self.vearch.search(query_data)
res = query_result[0]['result_items']
results: List[Tuple[Document, float]] = []
for item in res:
content = ''
meta_data = {}
if self.flag:
score = item['_score']
item = item['_source']
for item_key in item:
if item_key == 'text':
content = item[item_key]
continue
if item_key == 'metadata':
meta_data['source'] = item[item_key]
continue
if self.flag != 1 and item_key == 'score':
score = item[item_key]
continue
tmp_res = Document(page_content=content, metadata=meta_data), score
results.append(tmp_res)
return results
|
def similarity_search_with_score(self, query: str, k: int=DEFAULT_TOPN, **
    kwargs: Any) ->List[Tuple[Document, float]]:
    """Return the ``k`` documents most similar to ``query`` with scores.

    Args:
        query: Text query; embedded via ``self.embedding_func``.
        k: Number of most-similar documents to return.
        **kwargs: Unused; accepted for interface compatibility.

    Returns:
        List of ``(Document, score)`` pairs.
        0 is dissimilar, 1 is the most similar.

    Raises:
        ValueError: If no embedding function is configured.
    """
    if self.embedding_func is None:
        raise ValueError('embedding_func is None!!!')
    embeddings = self.embedding_func.embed_query(query)
    embed = np.array(embeddings)
    if self.flag:
        # Named db/table search path; the query embedding is L2-normalized.
        query_data = {'query': {'sum': [{'field': 'text_embedding',
            'feature': (embed / np.linalg.norm(embed)).tolist()}]}, 'size':
            k, 'fields': ['text_embedding', 'text', 'metadata']}
        query_result = self.vearch.search(self.using_db_name, self.
            using_table_name, query_data)
        res = query_result['hits']['hits']
    else:
        # Brute-force inner-product search path.
        query_data = {'vector': [{'field': 'text_embedding', 'feature':
            embed / np.linalg.norm(embed)}], 'fields': [],
            'is_brute_search': 1, 'retrieval_param': {'metric_type':
            'InnerProduct', 'nprobe': 20}, 'topn': k}
        query_result = self.vearch.search(query_data)
        res = query_result[0]['result_items']
    results: List[Tuple[Document, float]] = []
    # NOTE(review): in the non-flag branch, ``score`` is only assigned when
    # an item contains a 'score' key -- if the engine can omit it, the
    # tuple below would reference an unbound name. Confirm.
    for item in res:
        content = ''
        meta_data = {}
        if self.flag:
            score = item['_score']
            item = item['_source']
        for item_key in item:
            if item_key == 'text':
                content = item[item_key]
                continue
            if item_key == 'metadata':
                meta_data['source'] = item[item_key]
                continue
            if self.flag != 1 and item_key == 'score':
                score = item[item_key]
                continue
        tmp_res = Document(page_content=content, metadata=meta_data), score
        results.append(tmp_res)
    return results
|
The most k similar documents and scores of the specified query.
Args:
embeddings: embedding vector of the query.
k: The k most similar documents to the text query.
min_score: the score of similar documents to the text query
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
|
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return True: this class opts in to LangChain serialization."""
    return True
| null |
plan
|
"""Given input, decide what to do."""
|
@abstractmethod
def plan(self, inputs: dict, callbacks: Callbacks=None, **kwargs: Any) ->Plan:
    """Given input, decide what to do.

    Args:
        inputs: Mapping of input values to plan from.
        callbacks: Optional callbacks to run during planning.
        **kwargs: Extra arguments for concrete implementations.

    Returns:
        A Plan describing what to do next.
    """
|
Given input, decide what to do.
|
eval_project_name
|
return f'lcp integration tests - {str(uuid4())[-8:]}'
|
@pytest.fixture
def eval_project_name() ->str:
    """Pytest fixture: a unique project name for LCP integration tests."""
    suffix = str(uuid4())[-8:]
    return 'lcp integration tests - ' + suffix
| null |
create_sync_playwright_browser
|
"""
Create a playwright browser.
Args:
headless: Whether to run the browser in headless mode. Defaults to True.
args: arguments to pass to browser.chromium.launch
Returns:
SyncBrowser: The playwright browser.
"""
from playwright.sync_api import sync_playwright
browser = sync_playwright().start()
return browser.chromium.launch(headless=headless, args=args)
|
def create_sync_playwright_browser(headless: bool=True, args: Optional[List
    [str]]=None) ->SyncBrowser:
    """Launch a synchronous Playwright Chromium browser.

    Args:
        headless: Run without a visible window when True. Defaults to True.
        args: Extra command-line arguments for ``chromium.launch``.

    Returns:
        SyncBrowser: The launched browser.
    """
    from playwright.sync_api import sync_playwright
    playwright = sync_playwright().start()
    return playwright.chromium.launch(headless=headless, args=args)
|
Create a playwright browser.
Args:
headless: Whether to run the browser in headless mode. Defaults to True.
args: arguments to pass to browser.chromium.launch
Returns:
SyncBrowser: The playwright browser.
|
_import_spark_sql_tool_QuerySparkSQLTool
|
from langchain_community.tools.spark_sql.tool import QuerySparkSQLTool
return QuerySparkSQLTool
|
def _import_spark_sql_tool_QuerySparkSQLTool() ->Any:
    """Lazy-import shim: return the QuerySparkSQLTool class."""
    from langchain_community.tools.spark_sql.tool import (
        QuerySparkSQLTool,
    )
    return QuerySparkSQLTool
| null |
test_parse_scores
|
result = output_parser.parse_folder(answer)
assert result['answer'] == 'foo bar answer.'
score = int(result['score'])
assert score == 80
|
@pytest.mark.parametrize('answer', (GOOD_SCORE, SCORE_WITH_EXPLANATION))
def test_parse_scores(answer: str) ->None:
    """Parser extracts the answer text and an integer score of 80."""
    # NOTE(review): 'parse_folder' is an unusual name for an output parser
    # (these typically expose 'parse') -- confirm against the parser's API.
    result = output_parser.parse_folder(answer)
    assert result['answer'] == 'foo bar answer.'
    score = int(result['score'])
    assert score == 80
| null |
format_request_payload
|
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps({'inputs': {'input_string': [f'"{prompt}"']},
'parameters': model_kwargs})
return str.encode(request_payload)
|
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes:
    """Serialize the prompt and model kwargs as a UTF-8 JSON request body."""
    escaped = ContentFormatterBase.escape_special_characters(prompt)
    payload = {'inputs': {'input_string': [f'"{escaped}"']},
        'parameters': model_kwargs}
    return json.dumps(payload).encode()
| null |
handle_event
|
"""Generic event handler for CallbackManager.
Note: This function is used by langserve to handle events.
Args:
handlers: The list of handlers that will handle the event
event_name: The name of the event (e.g., "on_llm_start")
ignore_condition_name: Name of the attribute defined on handler
that if True will cause the handler to be skipped for the given event
*args: The arguments to pass to the event handler
**kwargs: The keyword arguments to pass to the event handler
"""
coros: List[Coroutine[Any, Any, Any]] = []
try:
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(handler,
ignore_condition_name):
event = getattr(handler, event_name)(*args, **kwargs)
if asyncio.iscoroutine(event):
coros.append(event)
except NotImplementedError as e:
if event_name == 'on_chat_model_start':
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
handle_event([handler], 'on_llm_start', 'ignore_llm', args[
0], message_strings, *args[2:], **kwargs)
else:
handler_name = handler.__class__.__name__
logger.warning(
f'NotImplementedError in {handler_name}.{event_name} callback: {repr(e)}'
)
except Exception as e:
logger.warning(
f'Error in {handler.__class__.__name__}.{event_name} callback: {repr(e)}'
)
if handler.raise_error:
raise e
finally:
if coros:
try:
asyncio.get_running_loop()
loop_running = True
except RuntimeError:
loop_running = False
if loop_running:
with ThreadPoolExecutor(1) as executor:
executor.submit(cast(Callable, copy_context().run),
_run_coros, coros).result()
else:
_run_coros(coros)
|
def handle_event(handlers: List[BaseCallbackHandler], event_name: str,
    ignore_condition_name: Optional[str], *args: Any, **kwargs: Any) ->None:
    """Generic event handler for CallbackManager.
    Note: This function is used by langserve to handle events.
    Args:
        handlers: The list of handlers that will handle the event
        event_name: The name of the event (e.g., "on_llm_start")
        ignore_condition_name: Name of the attribute defined on handler
            that if True will cause the handler to be skipped for the given event
        *args: The arguments to pass to the event handler
        **kwargs: The keyword arguments to pass to the event handler
    """
    # Async handlers return coroutines; collect them and run them together
    # in the finally block so sync handlers are not blocked per-handler.
    coros: List[Coroutine[Any, Any, Any]] = []
    try:
        message_strings: Optional[List[str]] = None
        for handler in handlers:
            try:
                if ignore_condition_name is None or not getattr(handler,
                    ignore_condition_name):
                    event = getattr(handler, event_name)(*args, **kwargs)
                    if asyncio.iscoroutine(event):
                        coros.append(event)
            except NotImplementedError as e:
                # Handlers without chat-model support fall back to the
                # equivalent LLM event; messages are flattened to strings
                # once and reused for subsequent handlers.
                if event_name == 'on_chat_model_start':
                    if message_strings is None:
                        message_strings = [get_buffer_string(m) for m in
                            args[1]]
                    handle_event([handler], 'on_llm_start', 'ignore_llm',
                        args[0], message_strings, *args[2:], **kwargs)
                else:
                    handler_name = handler.__class__.__name__
                    logger.warning(
                        f'NotImplementedError in {handler_name}.{event_name} callback: {repr(e)}'
                        )
            except Exception as e:
                logger.warning(
                    f'Error in {handler.__class__.__name__}.{event_name} callback: {repr(e)}'
                    )
                if handler.raise_error:
                    raise e
    finally:
        if coros:
            try:
                # Inside a running loop we must not block it: run the
                # coroutines on a fresh loop in a worker thread instead.
                asyncio.get_running_loop()
                loop_running = True
            except RuntimeError:
                loop_running = False
            if loop_running:
                with ThreadPoolExecutor(1) as executor:
                    executor.submit(cast(Callable, copy_context().run),
                        _run_coros, coros).result()
            else:
                _run_coros(coros)
|
Generic event handler for CallbackManager.
Note: This function is used by langserve to handle events.
Args:
handlers: The list of handlers that will handle the event
event_name: The name of the event (e.g., "on_llm_start")
ignore_condition_name: Name of the attribute defined on handler
that if True will cause the handler to be skipped for the given event
*args: The arguments to pass to the event handler
**kwargs: The keyword arguments to pass to the event handler
|
_default_params
|
params: Dict[str, Any] = {'gateway_uri': self.gateway_uri, 'route': self.
route, **self.params.dict() if self.params else {}}
return params
|
@property
def _default_params(self) ->Dict[str, Any]:
params: Dict[str, Any] = {'gateway_uri': self.gateway_uri, 'route':
self.route, **self.params.dict() if self.params else {}}
return params
| null |
ids
|
prefix = self.prefix + '/' if self.prefix else ''
keys = self.key if isinstance(self.key, list) else [self.key]
return [f'{CONTEXT_CONFIG_PREFIX}{prefix}{k}{CONTEXT_CONFIG_SUFFIX_GET}' for
k in keys]
|
@property
def ids(self) ->List[str]:
    """Config ids for each key, wrapped in the context prefix/suffix."""
    keys = self.key if isinstance(self.key, list) else [self.key]
    prefix = self.prefix + '/' if self.prefix else ''
    out: List[str] = []
    for k in keys:
        out.append(
            f'{CONTEXT_CONFIG_PREFIX}{prefix}{k}{CONTEXT_CONFIG_SUFFIX_GET}')
    return out
| null |
top_parent
|
"""Get the parent of the top of the stack without popping it."""
return self.stack[-2] if len(self.stack) > 1 else None
|
def top_parent(self) ->Optional[Thought]:
    """Get the parent of the top of the stack without popping it."""
    if len(self.stack) > 1:
        return self.stack[-2]
    return None
|
Get the parent of the top of the stack without popping it.
|
test_md_header_text_splitter_2
|
"""Test markdown splitter by header: Case 2."""
markdown_document = """# Foo
## Bar
Hi this is Jim
Hi this is Joe
### Boo
Hi this is Lance
## Baz
Hi this is Molly"""
headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2'), ('###',
'Header 3')]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
headers_to_split_on)
output = markdown_splitter.split_text(markdown_document)
expected_output = [Document(page_content=
"""Hi this is Jim
Hi this is Joe""", metadata={'Header 1': 'Foo',
'Header 2': 'Bar'}), Document(page_content='Hi this is Lance', metadata
={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(
page_content='Hi this is Molly', metadata={'Header 1': 'Foo',
'Header 2': 'Baz'})]
assert output == expected_output
|
def test_md_header_text_splitter_2() ->None:
    """Test markdown splitter by header: Case 2."""
    markdown_document = """# Foo
## Bar
Hi this is Jim
Hi this is Joe
### Boo
Hi this is Lance
## Baz
Hi this is Molly"""
    headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2'), ('###',
        'Header 3')]
    splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
        headers_to_split_on)
    output = splitter.split_text(markdown_document)
    # (content, metadata) pairs in document order.
    expected_cases = [('Hi this is Jim \nHi this is Joe', {'Header 1':
        'Foo', 'Header 2': 'Bar'}), ('Hi this is Lance', {'Header 1':
        'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), ('Hi this is Molly',
        {'Header 1': 'Foo', 'Header 2': 'Baz'})]
    expected_output = [Document(page_content=content, metadata=meta) for
        content, meta in expected_cases]
    assert output == expected_output
|
Test markdown splitter by header: Case 2.
|
on_llm_end
|
"""Run when LLM ends running."""
self.metrics['step'] += 1
self.metrics['llm_ends'] += 1
self.metrics['ends'] += 1
llm_ends = self.metrics['llm_ends']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_llm_end'})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
for generations in response.generations:
for idx, generation in enumerate(generations):
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(analyze_text(generation.text, nlp=self.nlp))
complexity_metrics: Dict[str, float] = generation_resp.pop(
'text_complexity_metrics')
self.mlflg.metrics(complexity_metrics, step=self.metrics['step'])
self.records['on_llm_end_records'].append(generation_resp)
self.records['action_records'].append(generation_resp)
self.mlflg.jsonf(resp, f'llm_end_{llm_ends}_generation_{idx}')
dependency_tree = generation_resp['dependency_tree']
entities = generation_resp['entities']
self.mlflg.html(dependency_tree, 'dep-' + hash_string(generation.text))
self.mlflg.html(entities, 'ent-' + hash_string(generation.text))
|
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
    """Run when LLM ends running.

    Updates step/end counters, logs metrics to MLflow, and records one
    flattened entry per generation, including text-analysis results
    (NOTE(review): 'dependency_tree'/'entities' appear to be rendered HTML
    from ``analyze_text`` -- confirm).
    """
    self.metrics['step'] += 1
    self.metrics['llm_ends'] += 1
    self.metrics['ends'] += 1
    llm_ends = self.metrics['llm_ends']
    resp: Dict[str, Any] = {}
    resp.update({'action': 'on_llm_end'})
    resp.update(flatten_dict(response.llm_output or {}))
    resp.update(self.metrics)
    self.mlflg.metrics(self.metrics, step=self.metrics['step'])
    for generations in response.generations:
        for idx, generation in enumerate(generations):
            generation_resp = deepcopy(resp)
            generation_resp.update(flatten_dict(generation.dict()))
            generation_resp.update(analyze_text(generation.text, nlp=self.nlp))
            # Complexity metrics are logged to MLflow separately, so they
            # are removed from the per-generation record first.
            complexity_metrics: Dict[str, float] = generation_resp.pop(
                'text_complexity_metrics')
            self.mlflg.metrics(complexity_metrics, step=self.metrics['step'])
            self.records['on_llm_end_records'].append(generation_resp)
            self.records['action_records'].append(generation_resp)
            self.mlflg.jsonf(resp, f'llm_end_{llm_ends}_generation_{idx}')
            dependency_tree = generation_resp['dependency_tree']
            entities = generation_resp['entities']
            self.mlflg.html(dependency_tree, 'dep-' + hash_string(
                generation.text))
            self.mlflg.html(entities, 'ent-' + hash_string(generation.text))
|
Run when LLM ends running.
|
thread_target
|
nonlocal result_container
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
result_container.append(new_loop.run_until_complete(self._arun(*args,
**kwargs)))
except Exception as e:
result_container.append(e)
finally:
new_loop.close()
|
def thread_target() ->None:
    """Run ``self._arun`` to completion on a private event loop.

    Appends the coroutine's result -- or the raised exception -- to the
    enclosing ``result_container``. NOTE(review): the spawning thread
    presumably inspects the container and re-raises Exception instances;
    confirm in the caller.
    """
    nonlocal result_container
    # A dedicated loop keeps this thread independent of any loop running
    # in the caller's thread.
    new_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(new_loop)
    try:
        result_container.append(new_loop.run_until_complete(self._arun(*
            args, **kwargs)))
    except Exception as e:
        # Exceptions cannot propagate across the thread boundary, so they
        # are captured instead of raised.
        result_container.append(e)
    finally:
        new_loop.close()
| null |
test_tracer_chain_run_on_error
|
"""Test tracer on a Chain run with an error."""
exception = Exception('test')
uuid = uuid4()
compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time':
datetime.now(timezone.utc)}, {'name': 'error', 'time': datetime.now(
timezone.utc)}], extra={}, execution_order=1, child_execution_order=1,
serialized={'name': 'chain'}, inputs={}, outputs=None, error=repr(
exception), run_type='chain', trace_id=uuid, dotted_order=
f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_chain_start(serialized={'name': 'chain'}, inputs={}, run_id=uuid)
tracer.on_chain_error(exception, run_id=uuid)
_compare_run_with_error(tracer.runs[0], compare_run)
|
@freeze_time('2023-01-01')
def test_tracer_chain_run_on_error() ->None:
    """Test tracer on a Chain run with an error."""
    exception = Exception('test')
    uuid = uuid4()
    # Expected run: run id equals trace id, error stored as repr(exception),
    # no outputs, and start/error events; time is frozen to 2023-01-01.
    compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
        end_time=datetime.now(timezone.utc), events=[{'name': 'start',
        'time': datetime.now(timezone.utc)}, {'name': 'error', 'time':
        datetime.now(timezone.utc)}], extra={}, execution_order=1,
        child_execution_order=1, serialized={'name': 'chain'}, inputs={},
        outputs=None, error=repr(exception), run_type='chain', trace_id=
        uuid, dotted_order=f'20230101T000000000000Z{uuid}')
    tracer = FakeTracer()
    tracer.on_chain_start(serialized={'name': 'chain'}, inputs={}, run_id=uuid)
    tracer.on_chain_error(exception, run_id=uuid)
    _compare_run_with_error(tracer.runs[0], compare_run)
|
Test tracer on a Chain run with an error.
|
test_chat_openai
|
"""Test AzureChatOpenAI wrapper."""
message = HumanMessage(content='Hello')
response = llm([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_chat_openai(llm: AzureChatOpenAI) ->None:
    """Test AzureChatOpenAI wrapper."""
    response = llm([HumanMessage(content='Hello')])
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str)
|
Test AzureChatOpenAI wrapper.
|
process_threads
|
"""Process a list of thread into a list of documents."""
docs = []
for thread_id in thread_ids:
doc = self.process_thread(thread_id, include_images, include_messages)
if doc is not None:
docs.append(doc)
return docs
|
def process_threads(self, thread_ids: Sequence[str], include_images: bool,
    include_messages: bool) ->List[Document]:
    """Process a list of thread into a list of documents."""
    processed = (self.process_thread(tid, include_images, include_messages)
        for tid in thread_ids)
    # Threads that yield no document are dropped.
    return [doc for doc in processed if doc is not None]
|
Process a list of thread into a list of documents.
|
test_visit_structured_query_operation
|
query = 'What is the capital of France?'
op = Operation(operator=Operator.OR, arguments=[Comparison(comparator=
Comparator.EQ, attribute='foo', value=2), Comparison(comparator=
Comparator.CONTAIN, attribute='bar', value='baz')])
structured_query = StructuredQuery(query=query, filter=op)
expected_filter = (RedisNum('foo') == 2) | (RedisText('bar') == 'baz')
actual_query, actual_filter = translator.visit_structured_query(
structured_query)
assert actual_query == query
assert str(actual_filter['filter']) == str(expected_filter)
|
def test_visit_structured_query_operation(translator: RedisTranslator) ->None:
    """An OR of two comparisons translates to the equivalent Redis filter."""
    query = 'What is the capital of France?'
    comparisons = [Comparison(comparator=Comparator.EQ, attribute='foo',
        value=2), Comparison(comparator=Comparator.CONTAIN, attribute=
        'bar', value='baz')]
    op = Operation(operator=Operator.OR, arguments=comparisons)
    structured_query = StructuredQuery(query=query, filter=op)
    expected_filter = (RedisNum('foo') == 2) | (RedisText('bar') == 'baz')
    actual_query, actual_filter = translator.visit_structured_query(
        structured_query)
    assert actual_query == query
    assert str(actual_filter['filter']) == str(expected_filter)
| null |
_get_relevant_documents
|
ctxs = self.client.query(query=query, num_context=self.num_contexts)
docs = []
for ctx in ctxs:
page_content = ctx.pop('chunk_embed_text', None)
if page_content is None:
continue
docs.append(Document(page_content=page_content, metadata={**ctx}))
return docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Query the client and wrap each returned context as a Document.

    Contexts without a 'chunk_embed_text' field are skipped; remaining
    fields become the document metadata.
    """
    contexts = self.client.query(query=query, num_context=self.num_contexts)
    documents = []
    for ctx in contexts:
        text = ctx.pop('chunk_embed_text', None)
        if text is not None:
            documents.append(Document(page_content=text, metadata={**ctx}))
    return documents
| null |
_load_document_from_bytes
|
"""Return a document from a bytes representation."""
obj = loads(serialized.decode('utf-8'))
if not isinstance(obj, Document):
raise TypeError(f'Expected a Document instance. Got {type(obj)}')
return obj
|
def _load_document_from_bytes(serialized: bytes) ->Document:
    """Return a document from a bytes representation."""
    obj = loads(serialized.decode('utf-8'))
    if isinstance(obj, Document):
        return obj
    raise TypeError(f'Expected a Document instance. Got {type(obj)}')
|
Return a document from a bytes representation.
|
_prompt_type
|
"""Return the prompt type key."""
raise NotImplementedError
|
@property
def _prompt_type(self) ->str:
    """Return the prompt type key.

    Not implemented here; subclasses that support prompt-type lookup
    override this property.
    """
    raise NotImplementedError
|
Return the prompt type key.
|
_stream_generate
|
"""
Args:
prompt: The prompt to use for generation.
model: The model used for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Yields:
A string token.
"""
streaming_response = model.generate(prompt=prompt, generate_config=
generate_config)
for chunk in streaming_response:
if isinstance(chunk, dict):
choices = chunk.get('choices', [])
if choices:
choice = choices[0]
if isinstance(choice, dict):
token = choice.get('text', '')
log_probs = choice.get('logprobs')
if run_manager:
run_manager.on_llm_new_token(token=token, verbose=self.
verbose, log_probs=log_probs)
yield token
|
def _stream_generate(self, model: Union['RESTfulGenerateModelHandle',
    'RESTfulChatModelHandle'], prompt: str, run_manager: Optional[
    CallbackManagerForLLMRun]=None, generate_config: Optional[
    'LlamaCppGenerateConfig']=None) ->Generator[str, None, None]:
    """
    Args:
        model: The model used for generation.
        prompt: The prompt to use for generation.
        run_manager: Optional callback manager notified of each new token.
        generate_config: Optional dictionary for the configuration used for
            generation.
    Yields:
        A string token.
    """
    streaming_response = model.generate(prompt=prompt, generate_config=
        generate_config)
    for chunk in streaming_response:
        # Defensive parsing: only dict chunks whose first 'choices' entry
        # is itself a dict yield a token; anything else is ignored.
        if isinstance(chunk, dict):
            choices = chunk.get('choices', [])
            if choices:
                choice = choices[0]
                if isinstance(choice, dict):
                    token = choice.get('text', '')
                    log_probs = choice.get('logprobs')
                    # Callback fires before the token is yielded downstream.
                    if run_manager:
                        run_manager.on_llm_new_token(token=token, verbose=
                            self.verbose, log_probs=log_probs)
                    yield token
|
Args:
prompt: The prompt to use for generation.
model: The model used for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Yields:
A string token.
|
ignore_llm
|
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
|
@property
def ignore_llm(self) ->bool:
    """Whether to ignore LLM callbacks.

    Backed by the ``ignore_llm_`` attribute (trailing underscore avoids
    clashing with this property's name).
    """
    return self.ignore_llm_
|
Whether to ignore LLM callbacks.
|
_default_params
|
"""Return default params."""
return {'host': self.host, 'endpoint_name': self.endpoint_name,
'cluster_id': self.cluster_id, 'cluster_driver_port': self.
cluster_driver_port, 'databricks_uri': self.databricks_uri,
'model_kwargs': self.model_kwargs, 'temperature': self.temperature, 'n':
self.n, 'stop': self.stop, 'max_tokens': self.max_tokens,
'extra_params': self.extra_params, 'task': self.task}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Return default params."""
return {'host': self.host, 'endpoint_name': self.endpoint_name,
'cluster_id': self.cluster_id, 'cluster_driver_port': self.
cluster_driver_port, 'databricks_uri': self.databricks_uri,
'model_kwargs': self.model_kwargs, 'temperature': self.temperature,
'n': self.n, 'stop': self.stop, 'max_tokens': self.max_tokens,
'extra_params': self.extra_params, 'task': self.task}
|
Return default params.
|
test_extract_html
|
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
'<html>Begin of html tag<h1>Header</h1><p>First paragraph.</p>Middle of html tag<p>Second paragraph.</p>End of html tag</html>'
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(documents,
tags_to_extract=['html', 'p'])
assert docs_transformed[0
].page_content == 'Begin of html tag Header First paragraph. Middle of html tag Second paragraph. End of html tag'
|
@pytest.mark.requires('bs4')
def test_extract_html() ->None:
    """Extracting html and p tags keeps all text in document order."""
    bs_transformer = BeautifulSoupTransformer()
    paragraphs_html = (
        '<html>Begin of html tag<h1>Header</h1><p>First paragraph.</p>Middle of html tag<p>Second paragraph.</p>End of html tag</html>'
        )
    docs_transformed = bs_transformer.transform_documents([Document(
        page_content=paragraphs_html)], tags_to_extract=['html', 'p'])
    expected = (
        'Begin of html tag Header First paragraph. Middle of html tag Second paragraph. End of html tag'
        )
    assert docs_transformed[0].page_content == expected
| null |
test_strip_whitespace
|
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
'<html><h1>Header</h1><p><span>First</span> paragraph.</p><p>Second paragraph. </p></html>'
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == 'First paragraph. Second paragraph.'
|
@pytest.mark.requires('bs4')
def test_strip_whitespace() ->None:
    """Default transform drops headers/spans and strips stray whitespace."""
    bs_transformer = BeautifulSoupTransformer()
    paragraphs_html = (
        '<html><h1>Header</h1><p><span>First</span> paragraph.</p><p>Second paragraph. </p></html>'
        )
    docs_transformed = bs_transformer.transform_documents([Document(
        page_content=paragraphs_html)])
    expected = 'First paragraph. Second paragraph.'
    assert docs_transformed[0].page_content == expected
| null |
_identifying_params
|
return {}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Parameters identifying this instance; none by default."""
    return {}
| null |
_construct_json_body
|
return {'inputs': prompt, 'parameters': params}
|
def _construct_json_body(self, prompt: str, params: dict) ->dict:
return {'inputs': prompt, 'parameters': params}
| null |
_Continue
|
self.fill('continue')
|
def _Continue(self, t):
    """Emit a ``continue`` statement for node ``t``.

    NOTE(review): presumably an AST unparser visitor where ``fill`` starts
    a new indented line -- confirm from the enclosing class.
    """
    self.fill('continue')
| null |
deactivate_selection_scorer
|
"""
Deactivates the selection scorer, meaning that the chain will no longer attempt to use the selection scorer to score responses.
"""
self.selection_scorer_activated = False
|
def deactivate_selection_scorer(self) ->None:
    """
    Deactivates the selection scorer, meaning that the chain will no longer attempt to use the selection scorer to score responses.
    """
    # Flag presumably checked elsewhere before scoring is attempted --
    # confirm in the chain's scoring path.
    self.selection_scorer_activated = False
|
Deactivates the selection scorer, meaning that the chain will no longer attempt to use the selection scorer to score responses.
|
_encode
|
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(text,
max_length=self._max_length_equal_32_bit_integer, truncation=
'do_not_truncate')
return token_ids_with_start_and_end_token_ids
|
def _encode(self, text: str) ->List[int]:
token_ids_with_start_and_end_token_ids = self.tokenizer.encode(text,
max_length=self._max_length_equal_32_bit_integer, truncation=
'do_not_truncate')
return token_ids_with_start_and_end_token_ids
| null |
load
|
"""Load from a file path."""
with open(self.file_path, encoding='utf8') as f:
tsv = list(csv.reader(f, delimiter='\t'))
lines = [line for line in tsv if len(line) > 1]
text = ''
for i, line in enumerate(lines):
if line[9] == 'SpaceAfter=No' or i == len(lines) - 1:
text += line[1]
else:
text += line[1] + ' '
metadata = {'source': self.file_path}
return [Document(page_content=text, metadata=metadata)]
|
def load(self) ->List[Document]:
    """Load from a file path."""
    with open(self.file_path, encoding='utf8') as fh:
        rows = [row for row in csv.reader(fh, delimiter='\t') if len(row) > 1]
    # Column 1 is the word form; column 9 carries the SpaceAfter marker.
    pieces = []
    last_index = len(rows) - 1
    for idx, row in enumerate(rows):
        if row[9] == 'SpaceAfter=No' or idx == last_index:
            pieces.append(row[1])
        else:
            pieces.append(row[1] + ' ')
    metadata = {'source': self.file_path}
    return [Document(page_content=''.join(pieces), metadata=metadata)]
|
Load from a file path.
|
_get_parameters
|
"""
Performs sanity check, preparing parameters in format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
"""
if self.stopping_strings and stop is not None:
raise ValueError('`stop` found in both the input and default params.')
if self.preset is None:
params = self._default_params
else:
params = {'preset': self.preset}
params['stopping_strings'] = self.stopping_strings or stop or []
return params
|
def _get_parameters(self, stop: Optional[List[str]]=None) ->Dict[str, Any]:
"""
Performs sanity check, preparing parameters in format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
"""
if self.stopping_strings and stop is not None:
raise ValueError('`stop` found in both the input and default params.')
if self.preset is None:
params = self._default_params
else:
params = {'preset': self.preset}
params['stopping_strings'] = self.stopping_strings or stop or []
return params
|
Performs sanity check, preparing parameters in format needed by textgen.
Args:
stop (Optional[List[str]]): List of stop sequences for textgen.
Returns:
Dictionary containing the combined parameters.
|
head_file
|
"""Get the first n lines of a file."""
try:
with open(path, 'r') as f:
return [str(line) for line in itertools.islice(f, n)]
except Exception:
return []
|
def head_file(path: str, n: int) ->List[str]:
    """Get the first n lines of a file.

    Best-effort: any failure (missing file, permissions, decode error)
    yields an empty list rather than raising.
    """
    try:
        with open(path, 'r') as fh:
            return [str(row) for row in itertools.islice(fh, n)]
    except Exception:
        return []
|
Get the first n lines of a file.
|
test_chat_google_genai_invoke_multimodal_invalid_model
|
messages: list = [HumanMessage(content=[{'type': 'text', 'text':
"I'm doing great! Guess what's in this picture!"}, {'type': 'image_url',
'image_url': 'data:image/png;base64,' + _B64_string}])]
llm = ChatGoogleGenerativeAI(model=_MODEL)
with pytest.raises(ChatGoogleGenerativeAIError):
llm.invoke(messages)
|
def test_chat_google_genai_invoke_multimodal_invalid_model() ->None:
    """A multimodal payload sent to this model must raise the Google GenAI error."""
    content = [{'type': 'text', 'text':
        "I'm doing great! Guess what's in this picture!"}, {'type':
        'image_url', 'image_url': 'data:image/png;base64,' + _B64_string}]
    llm = ChatGoogleGenerativeAI(model=_MODEL)
    with pytest.raises(ChatGoogleGenerativeAIError):
        llm.invoke([HumanMessage(content=content)])
| null |
clear
|
"""Clear session memory from this memory and cosmos."""
self.messages = []
if self._container:
self._container.delete_item(item=self.session_id, partition_key=self.
user_id)
|
def clear(self) ->None:
    """Clear session memory from this memory and cosmos."""
    self.messages = []
    container = self._container
    if container:
        container.delete_item(item=self.session_id, partition_key=self.user_id)
|
Clear session memory from this memory and cosmos.
|
get_format_instructions
|
return FORMAT_INSTRUCTIONS
|
def get_format_instructions(self) ->str:
    """Return the module-level formatting instructions for this parser."""
    instructions = FORMAT_INSTRUCTIONS
    return instructions
| null |
_forward_propagate
|
try:
import pandas as pd
except ImportError as e:
raise ImportError(
'Unable to import pandas, please install with `pip install pandas`.'
) from e
entity_scope = {entity.name: entity for entity in self.causal_operations.
entities}
for entity in self.causal_operations.entities:
if entity.code == 'pass':
continue
else:
exec(entity.code, globals(), entity_scope)
row_values = [entity.dict() for entity in entity_scope.values()]
self._outcome_table = pd.DataFrame(row_values)
|
def _forward_propagate(self) ->None:
    """Execute each entity's code in order and collect results into a DataFrame.

    Requires pandas; raises ImportError with install guidance if missing.
    """
    try:
        import pandas as pd
    except ImportError as e:
        raise ImportError(
            'Unable to import pandas, please install with `pip install pandas`.'
            ) from e
    # Name -> entity mapping doubles as the exec() local namespace, so an
    # entity's code can read and mutate sibling entities by name.
    entity_scope = {entity.name: entity for entity in self.
        causal_operations.entities}
    for entity in self.causal_operations.entities:
        if entity.code == 'pass':
            continue
        else:
            # SECURITY: exec() on entity.code runs arbitrary Python — only
            # safe if causal_operations comes from a trusted source.
            exec(entity.code, globals(), entity_scope)
    # Snapshot every (possibly mutated) entity into the outcome table.
    row_values = [entity.dict() for entity in entity_scope.values()]
    self._outcome_table = pd.DataFrame(row_values)
| null |
test_pairwise_embedding_distance_eval_chain_euclidean_distance
|
"""Test the euclidean distance."""
from scipy.spatial.distance import euclidean
pairwise_embedding_distance_eval_chain.distance_metric = (EmbeddingDistance
.EUCLIDEAN)
result = pairwise_embedding_distance_eval_chain._compute_score(np.array(
vectors))
expected = euclidean(*vectors)
assert np.isclose(result, expected)
|
@pytest.mark.requires('scipy')
def test_pairwise_embedding_distance_eval_chain_euclidean_distance(
    pairwise_embedding_distance_eval_chain:
    PairwiseEmbeddingDistanceEvalChain, vectors: Tuple[np.ndarray, np.ndarray]
    ) ->None:
    """Test the euclidean distance."""
    from scipy.spatial.distance import euclidean
    chain = pairwise_embedding_distance_eval_chain
    chain.distance_metric = EmbeddingDistance.EUCLIDEAN
    computed = chain._compute_score(np.array(vectors))
    assert np.isclose(computed, euclidean(*vectors))
|
Test the euclidean distance.
|
from_params
|
"""Initialize DocArrayHnswSearch store.
Args:
embedding (Embeddings): Embedding function.
work_dir (str): path to the location where all the data will be stored.
n_dim (int): dimension of an embedding.
dist_metric (str): Distance metric for DocArrayHnswSearch can be one of:
"cosine", "ip", and "l2". Defaults to "cosine".
max_elements (int): Maximum number of vectors that can be stored.
Defaults to 1024.
index (bool): Whether an index should be built for this field.
Defaults to True.
ef_construction (int): defines a construction time/accuracy trade-off.
Defaults to 200.
ef (int): parameter controlling query time/accuracy trade-off.
Defaults to 10.
M (int): parameter that defines the maximum number of outgoing
connections in the graph. Defaults to 16.
allow_replace_deleted (bool): Enables replacing of deleted elements
with new added ones. Defaults to True.
num_threads (int): Sets the number of cpu threads to use. Defaults to 1.
**kwargs: Other keyword arguments to be passed to the get_doc_cls method.
"""
_check_docarray_import()
from docarray.index import HnswDocumentIndex
doc_cls = cls._get_doc_cls(dim=n_dim, space=dist_metric, max_elements=
max_elements, index=index, ef_construction=ef_construction, ef=ef, M=M,
allow_replace_deleted=allow_replace_deleted, num_threads=num_threads,
**kwargs)
doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir)
return cls(doc_index, embedding)
|
@classmethod
def from_params(cls, embedding: Embeddings, work_dir: str, n_dim: int,
    dist_metric: Literal['cosine', 'ip', 'l2']='cosine', max_elements: int=
    1024, index: bool=True, ef_construction: int=200, ef: int=10, M: int=16,
    allow_replace_deleted: bool=True, num_threads: int=1, **kwargs: Any
    ) ->DocArrayHnswSearch:
    """Initialize DocArrayHnswSearch store.
    Args:
        embedding (Embeddings): Embedding function.
        work_dir (str): path to the location where all the data will be stored.
        n_dim (int): dimension of an embedding.
        dist_metric (str): Distance metric for DocArrayHnswSearch can be one of:
            "cosine", "ip", and "l2". Defaults to "cosine".
        max_elements (int): Maximum number of vectors that can be stored.
            Defaults to 1024.
        index (bool): Whether an index should be built for this field.
            Defaults to True.
        ef_construction (int): defines a construction time/accuracy trade-off.
            Defaults to 200.
        ef (int): parameter controlling query time/accuracy trade-off.
            Defaults to 10.
        M (int): parameter that defines the maximum number of outgoing
            connections in the graph. Defaults to 16.
        allow_replace_deleted (bool): Enables replacing of deleted elements
            with new added ones. Defaults to True.
        num_threads (int): Sets the number of cpu threads to use. Defaults to 1.
        **kwargs: Other keyword arguments to be passed to the get_doc_cls method.
    """
    # Fail fast with install guidance if the optional docarray dep is absent.
    _check_docarray_import()
    from docarray.index import HnswDocumentIndex
    # Build a dynamic document schema carrying the HNSW index settings.
    doc_cls = cls._get_doc_cls(dim=n_dim, space=dist_metric, max_elements=
        max_elements, index=index, ef_construction=ef_construction, ef=ef,
        M=M, allow_replace_deleted=allow_replace_deleted, num_threads=
        num_threads, **kwargs)
    doc_index = HnswDocumentIndex[doc_cls](work_dir=work_dir)
    return cls(doc_index, embedding)
|
Initialize DocArrayHnswSearch store.
Args:
embedding (Embeddings): Embedding function.
work_dir (str): path to the location where all the data will be stored.
n_dim (int): dimension of an embedding.
dist_metric (str): Distance metric for DocArrayHnswSearch can be one of:
"cosine", "ip", and "l2". Defaults to "cosine".
max_elements (int): Maximum number of vectors that can be stored.
Defaults to 1024.
index (bool): Whether an index should be built for this field.
Defaults to True.
ef_construction (int): defines a construction time/accuracy trade-off.
Defaults to 200.
ef (int): parameter controlling query time/accuracy trade-off.
Defaults to 10.
M (int): parameter that defines the maximum number of outgoing
connections in the graph. Defaults to 16.
allow_replace_deleted (bool): Enables replacing of deleted elements
with new added ones. Defaults to True.
num_threads (int): Sets the number of cpu threads to use. Defaults to 1.
**kwargs: Other keyword arguments to be passed to the get_doc_cls method.
|
__init__
|
from azure.search.documents.indexes.models import SearchableField, SearchField, SearchFieldDataType, SimpleField
"""Initialize with necessary components."""
self.embedding_function = embedding_function
default_fields = [SimpleField(name=FIELDS_ID, type=SearchFieldDataType.
String, key=True, filterable=True), SearchableField(name=FIELDS_CONTENT,
type=SearchFieldDataType.String), SearchField(name=
FIELDS_CONTENT_VECTOR, type=SearchFieldDataType.Collection(
SearchFieldDataType.Single), searchable=True, vector_search_dimensions=
len(embedding_function('Text')), vector_search_configuration='default'),
SearchableField(name=FIELDS_METADATA, type=SearchFieldDataType.String)]
user_agent = 'langchain'
if 'user_agent' in kwargs and kwargs['user_agent']:
user_agent += ' ' + kwargs['user_agent']
self.client = _get_search_client(azure_search_endpoint, azure_search_key,
index_name, semantic_configuration_name=semantic_configuration_name,
fields=fields, vector_search=vector_search, semantic_settings=
semantic_settings, scoring_profiles=scoring_profiles,
default_scoring_profile=default_scoring_profile, default_fields=
default_fields, user_agent=user_agent, cors_options=cors_options)
self.search_type = search_type
self.semantic_configuration_name = semantic_configuration_name
self.semantic_query_language = semantic_query_language
self.fields = fields if fields else default_fields
|
def __init__(self, azure_search_endpoint: str, azure_search_key: str,
    index_name: str, embedding_function: Callable, search_type: str=
    'hybrid', semantic_configuration_name: Optional[str]=None,
    semantic_query_language: str='en-us', fields: Optional[List[SearchField
    ]]=None, vector_search: Optional[VectorSearch]=None, semantic_settings:
    Optional[Union[SemanticSearch, SemanticSettings]]=None,
    scoring_profiles: Optional[List[ScoringProfile]]=None,
    default_scoring_profile: Optional[str]=None, cors_options: Optional[
    CorsOptions]=None, **kwargs: Any):
    """Initialize with necessary components.

    Bug fix: the summary string previously appeared *after* the import
    statement, so it was a discarded expression rather than the method's
    docstring; it is now the first statement (PEP 257).
    """
    from azure.search.documents.indexes.models import SearchableField, SearchField, SearchFieldDataType, SimpleField
    self.embedding_function = embedding_function
    # Embed a probe string once to discover the vector dimensionality used
    # in the default index schema.
    default_fields = [SimpleField(name=FIELDS_ID, type=SearchFieldDataType.
        String, key=True, filterable=True), SearchableField(name=
        FIELDS_CONTENT, type=SearchFieldDataType.String), SearchField(name=
        FIELDS_CONTENT_VECTOR, type=SearchFieldDataType.Collection(
        SearchFieldDataType.Single), searchable=True,
        vector_search_dimensions=len(embedding_function('Text')),
        vector_search_configuration='default'), SearchableField(name=
        FIELDS_METADATA, type=SearchFieldDataType.String)]
    user_agent = 'langchain'
    if 'user_agent' in kwargs and kwargs['user_agent']:
        user_agent += ' ' + kwargs['user_agent']
    self.client = _get_search_client(azure_search_endpoint,
        azure_search_key, index_name, semantic_configuration_name=
        semantic_configuration_name, fields=fields, vector_search=
        vector_search, semantic_settings=semantic_settings,
        scoring_profiles=scoring_profiles, default_scoring_profile=
        default_scoring_profile, default_fields=default_fields, user_agent=
        user_agent, cors_options=cors_options)
    self.search_type = search_type
    self.semantic_configuration_name = semantic_configuration_name
    self.semantic_query_language = semantic_query_language
    # Fall back to the default schema when the caller supplied no fields.
    self.fields = fields if fields else default_fields
| null |
get_vectorstore
|
"""Get the vectorstore used for this example."""
return Chroma(collection_name=collection_name, persist_directory=str(Path(
__file__).parent.parent / 'chroma_db_proposals'), embedding_function=
OpenAIEmbeddings())
|
def get_vectorstore(collection_name: str='proposals'):
    """Get the vectorstore used for this example."""
    persist_dir = Path(__file__).parent.parent / 'chroma_db_proposals'
    return Chroma(collection_name=collection_name, persist_directory=str(
        persist_dir), embedding_function=OpenAIEmbeddings())
|
Get the vectorstore used for this example.
|
get_tools
|
"""Get the tools in the toolkit."""
return [MultionCreateSession(), MultionUpdateSession(), MultionCloseSession()]
|
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit."""
    tools: List[BaseTool] = [MultionCreateSession(), MultionUpdateSession()]
    tools.append(MultionCloseSession())
    return tools
|
Get the tools in the toolkit.
|
_approximate_search_query_with_boolean_filter
|
"""For Approximate k-NN Search, with Boolean Filter."""
return {'size': k, 'query': {'bool': {'filter': boolean_filter,
subquery_clause: [{'knn': {vector_field: {'vector': query_vector, 'k':
k}}}]}}}
|
def _approximate_search_query_with_boolean_filter(query_vector: List[float],
boolean_filter: Dict, k: int=4, vector_field: str='vector_field',
subquery_clause: str='must') ->Dict:
"""For Approximate k-NN Search, with Boolean Filter."""
return {'size': k, 'query': {'bool': {'filter': boolean_filter,
subquery_clause: [{'knn': {vector_field: {'vector': query_vector,
'k': k}}}]}}}
|
For Approximate k-NN Search, with Boolean Filter.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    namespace = ['langchain', 'schema', 'runnable']
    return namespace
|
Get the namespace of the langchain object.
|
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """This class does not participate in LangChain serialization."""
    serializable = False
    return serializable
| null |
_get_python_function_name
|
"""Get the name of a Python function."""
return function.__name__
|
def _get_python_function_name(function: Callable) ->str:
"""Get the name of a Python function."""
return function.__name__
|
Get the name of a Python function.
|
validate_environment
|
"""Validate that api key in environment."""
try:
import fireworks.client
except ImportError as e:
raise ImportError(
'Could not import fireworks-ai python package. Please install it with `pip install fireworks-ai`.'
) from e
fireworks_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'fireworks_api_key', 'FIREWORKS_API_KEY'))
fireworks.client.api_key = fireworks_api_key.get_secret_value()
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key in environment."""
    try:
        import fireworks.client
    except ImportError as e:
        raise ImportError(
            'Could not import fireworks-ai python package. Please install it with `pip install fireworks-ai`.'
            ) from e
    # Wrap the key in a SecretStr so it never appears in reprs/logs.
    secret_key = convert_to_secret_str(get_from_dict_or_env(values,
        'fireworks_api_key', 'FIREWORKS_API_KEY'))
    fireworks.client.api_key = secret_key.get_secret_value()
    return values
|
Validate that api key in environment.
|
_create_retry_decorator
|
multiplier = 1
min_seconds = 1
max_seconds = 4
return retry(reraise=True, stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds),
retry=retry_if_exception_type(HTTPError), before_sleep=before_sleep_log
(logger, logging.WARNING))
|
def _create_retry_decorator(embeddings: DashScopeEmbeddings) ->Callable[[
    Any], Any]:
    """Build a retry decorator for DashScope embedding calls.

    Retries only on HTTPError, with exponential backoff bounded between
    1 and 4 seconds, up to the instance's max_retries attempts.
    """
    backoff = wait_exponential(1, min=1, max=4)
    return retry(reraise=True, stop=stop_after_attempt(embeddings.
        max_retries), wait=backoff, retry=retry_if_exception_type(
        HTTPError), before_sleep=before_sleep_log(logger, logging.WARNING))
| null |
_prepare_batches
|
"""Splits texts in batches based on current maximum batch size
and maximum tokens per request.
"""
text_index = 0
texts_len = len(texts)
batch_token_len = 0
batches: List[List[str]] = []
current_batch: List[str] = []
if texts_len == 0:
return []
while text_index < texts_len:
current_text = texts[text_index]
current_text_token_cnt = len(VertexAIEmbeddings._split_by_punctuation(
current_text)) * 2
end_of_batch = False
if current_text_token_cnt > _MAX_TOKENS_PER_BATCH:
if len(current_batch) > 0:
batches.append(current_batch)
current_batch = [current_text]
text_index += 1
end_of_batch = True
elif batch_token_len + current_text_token_cnt > _MAX_TOKENS_PER_BATCH or len(
current_batch) == batch_size:
end_of_batch = True
else:
if text_index == texts_len - 1:
end_of_batch = True
batch_token_len += current_text_token_cnt
current_batch.append(current_text)
text_index += 1
if end_of_batch:
batches.append(current_batch)
current_batch = []
batch_token_len = 0
return batches
|
@staticmethod
def _prepare_batches(texts: List[str], batch_size: int) ->List[List[str]]:
    """Splits texts in batches based on current maximum batch size
    and maximum tokens per request.
    """
    text_index = 0
    texts_len = len(texts)
    batch_token_len = 0
    batches: List[List[str]] = []
    current_batch: List[str] = []
    if texts_len == 0:
        return []
    while text_index < texts_len:
        current_text = texts[text_index]
        # Rough token estimate: two tokens per punctuation-split piece
        # (heuristic, not an exact tokenizer count).
        current_text_token_cnt = len(VertexAIEmbeddings.
            _split_by_punctuation(current_text)) * 2
        end_of_batch = False
        if current_text_token_cnt > _MAX_TOKENS_PER_BATCH:
            # Oversized text: flush what we have and give it its own batch.
            if len(current_batch) > 0:
                batches.append(current_batch)
            current_batch = [current_text]
            text_index += 1
            end_of_batch = True
        elif batch_token_len + current_text_token_cnt > _MAX_TOKENS_PER_BATCH or len(
            current_batch) == batch_size:
            # Batch is full: close it WITHOUT consuming the current text;
            # it is re-examined on the next loop iteration.
            end_of_batch = True
        else:
            if text_index == texts_len - 1:
                end_of_batch = True
            batch_token_len += current_text_token_cnt
            current_batch.append(current_text)
            text_index += 1
        if end_of_batch:
            batches.append(current_batch)
            current_batch = []
            batch_token_len = 0
    return batches
|
Splits texts in batches based on current maximum batch size
and maximum tokens per request.
|
from_texts
|
vespa = cls(embedding_function=embedding, **kwargs)
vespa.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return vespa
|
@classmethod
def from_texts(cls: Type[VespaStore], texts: List[str], embedding:
    Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[
    str]]=None, **kwargs: Any) ->VespaStore:
    """Construct a VespaStore and immediately index the given texts."""
    store = cls(embedding_function=embedding, **kwargs)
    store.add_texts(texts=texts, metadatas=metadatas, ids=ids)
    return store
| null |
_to_chat_prompt
|
"""Convert a list of messages into a prompt format expected by wrapped LLM."""
if not messages:
raise ValueError('at least one HumanMessage must be provided')
if not isinstance(messages[-1], HumanMessage):
raise ValueError('last message must be a HumanMessage')
messages_dicts = [self._to_chatml_format(m) for m in messages]
return self.tokenizer.apply_chat_template(messages_dicts, tokenize=False,
add_generation_prompt=True)
|
def _to_chat_prompt(self, messages: List[BaseMessage]) ->str:
    """Convert a list of messages into a prompt format expected by wrapped LLM."""
    if not messages:
        raise ValueError('at least one HumanMessage must be provided')
    last_message = messages[-1]
    if not isinstance(last_message, HumanMessage):
        raise ValueError('last message must be a HumanMessage')
    as_dicts = [self._to_chatml_format(message) for message in messages]
    return self.tokenizer.apply_chat_template(as_dicts, tokenize=False,
        add_generation_prompt=True)
|
Convert a list of messages into a prompt format expected by wrapped LLM.
|
_create_thread_and_run
|
params = {k: v for k, v in input.items() if k in ('instructions', 'model',
'tools', 'run_metadata')}
run = self.client.beta.threads.create_and_run(assistant_id=self.
assistant_id, thread=thread, **params)
return run
|
def _create_thread_and_run(self, input: dict, thread: dict) ->Any:
    """Start a run on a new thread, forwarding only the whitelisted keys from input."""
    allowed = ('instructions', 'model', 'tools', 'run_metadata')
    params = {key: value for key, value in input.items() if key in allowed}
    return self.client.beta.threads.create_and_run(assistant_id=self.
        assistant_id, thread=thread, **params)
| null |
test_load_no_content
|
"""Returns a Document without content."""
api_client = PubMedLoader(query='37548971')
docs = api_client.load()
print(docs)
assert len(docs) > 0
assert docs[0].page_content == ''
|
def test_load_no_content() ->None:
    """Returns a Document without content."""
    loader = PubMedLoader(query='37548971')
    documents = loader.load()
    print(documents)
    assert len(documents) > 0
    assert documents[0].page_content == ''
|
Returns a Document without content.
|
list_branches_in_repo
|
"""
Fetches a list of all branches in the repository.
Returns:
str: A plaintext report containing the names of the branches.
"""
try:
branches = [branch.name for branch in self.github_repo_instance.
get_branches()]
if branches:
branches_str = '\n'.join(branches)
return (
f'Found {len(branches)} branches in the repository:\n{branches_str}'
)
else:
return 'No branches found in the repository'
except Exception as e:
return str(e)
|
def list_branches_in_repo(self) ->str:
    """
    Fetches a list of all branches in the repository.
    Returns:
        str: A plaintext report containing the names of the branches.
    """
    try:
        names = [br.name for br in self.github_repo_instance.get_branches()]
        if not names:
            return 'No branches found in the repository'
        listing = '\n'.join(names)
        return (
            f'Found {len(names)} branches in the repository:\n{listing}')
    except Exception as e:
        # Best-effort tool: surface the error text instead of raising.
        return str(e)
|
Fetches a list of all branches in the repository.
Returns:
str: A plaintext report containing the names of the branches.
|
wrapper
|
other = kwargs.get('other') if 'other' in kwargs else None
if not other:
for arg in args:
if isinstance(arg, type(instance)):
other = arg
break
if isinstance(other, type(instance)):
raise ValueError(
'Equality operators are overridden for FilterExpression creation. Use .equals() for equality checks'
)
return func(instance, *args, **kwargs)
|
@wraps(func)
def wrapper(instance: Any, *args: Any, **kwargs: Any) ->Any:
    # Find the comparison operand: the `other` kwarg if given, else the
    # first positional arg whose type matches `instance`'s type.
    other = kwargs.get('other') if 'other' in kwargs else None
    if not other:
        for arg in args:
            if isinstance(arg, type(instance)):
                other = arg
                break
    # Comparing two FilterExpression-like objects with ==/!= is a user
    # error — those operators are reserved for building expressions.
    if isinstance(other, type(instance)):
        raise ValueError(
            'Equality operators are overridden for FilterExpression creation. Use .equals() for equality checks'
            )
    return func(instance, *args, **kwargs)
| null |
_compute_score
|
"""Compute the score based on the distance metric.
Args:
vectors (np.ndarray): The input vectors.
Returns:
float: The computed score.
"""
metric = self._get_metric(self.distance_metric)
score = metric(vectors[0].reshape(1, -1), vectors[1].reshape(1, -1)).item()
return score
|
def _compute_score(self, vectors: np.ndarray) ->float:
"""Compute the score based on the distance metric.
Args:
vectors (np.ndarray): The input vectors.
Returns:
float: The computed score.
"""
metric = self._get_metric(self.distance_metric)
score = metric(vectors[0].reshape(1, -1), vectors[1].reshape(1, -1)).item()
return score
|
Compute the score based on the distance metric.
Args:
vectors (np.ndarray): The input vectors.
Returns:
float: The computed score.
|
similarity_search
|
"""Find the most similar documents to the given query."""
raise NotImplementedError()
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Find the most similar documents to the given query.

    Abstract placeholder: subclasses must override; calling this base
    implementation always raises NotImplementedError.
    """
    raise NotImplementedError()
|
Find the most similar documents to the given query.
|
_select_relevance_score_fn
|
return self.relevance_score_fn if self.relevance_score_fn else _default_score_normalizer
|
def _select_relevance_score_fn(self) ->Callable[[float], float]:
return (self.relevance_score_fn if self.relevance_score_fn else
_default_score_normalizer)
| null |
validate_environment
|
"""Validate that api key exists in environment."""
openweathermap_api_key = get_from_dict_or_env(values,
'openweathermap_api_key', 'OPENWEATHERMAP_API_KEY')
try:
import pyowm
except ImportError:
raise ImportError(
'pyowm is not installed. Please install it with `pip install pyowm`')
owm = pyowm.OWM(openweathermap_api_key)
values['owm'] = owm
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key exists in environment."""
    api_key = get_from_dict_or_env(values, 'openweathermap_api_key',
        'OPENWEATHERMAP_API_KEY')
    try:
        import pyowm
    except ImportError:
        raise ImportError(
            'pyowm is not installed. Please install it with `pip install pyowm`'
            )
    values['owm'] = pyowm.OWM(api_key)
    return values
|
Validate that api key exists in environment.
|
test_show_progress
|
"""Verify that file system loader works with a progress bar."""
loader = FileSystemBlobLoader(toy_dir)
blobs = list(loader.yield_blobs())
assert len(blobs) == loader.count_matching_files()
|
@pytest.mark.requires('tqdm')
def test_show_progress(toy_dir: str) ->None:
    """Verify that file system loader works with a progress bar."""
    loader = FileSystemBlobLoader(toy_dir)
    found = list(loader.yield_blobs())
    # NOTE(review): no show_progress flag is passed here — confirm the
    # loader defaults cover the progress-bar path this test names.
    assert len(found) == loader.count_matching_files()
|
Verify that file system loader works with a progress bar.
|
map
|
"""
Return a new Runnable that maps a list of inputs to a list of outputs,
by calling invoke() with each input.
"""
return RunnableEach(bound=self)
|
def map(self) ->Runnable[List[Input], List[Output]]:
    """
    Return a new Runnable that maps a list of inputs to a list of outputs,
    by calling invoke() with each input.
    """
    mapped = RunnableEach(bound=self)
    return mapped
|
Return a new Runnable that maps a list of inputs to a list of outputs,
by calling invoke() with each input.
|
_upload_to_gcs
|
"""Uploads data to gcs_location.
Args:
data: The data that will be stored.
gcs_location: The location where the data will be stored.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
blob.upload_from_string(data)
|
def _upload_to_gcs(self, data: str, gcs_location: str) ->None:
    """Uploads data to gcs_location.
    Args:
        data: The data that will be stored.
        gcs_location: The location where the data will be stored.
    """
    target_bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
    target_blob = target_bucket.blob(gcs_location)
    target_blob.upload_from_string(data)
|
Uploads data to gcs_location.
Args:
data: The data that will be stored.
gcs_location: The location where the data will be stored.
|
test_video_id_extraction
|
"""Test that the video id is extracted from a youtube url"""
assert YoutubeLoader.extract_video_id(youtube_url) == expected_video_id
|
# Each (url, expected_id) pair covers a distinct YouTube URL shape:
# watch, youtu.be, mobile, shorts, nocookie embeds, and timestamped links.
@pytest.mark.parametrize('youtube_url, expected_video_id', [(
    'http://www.youtube.com/watch?v=-wtIMTCHWuI', '-wtIMTCHWuI'), (
    'http://youtube.com/watch?v=-wtIMTCHWuI', '-wtIMTCHWuI'), (
    'http://m.youtube.com/watch?v=-wtIMTCHWuI', '-wtIMTCHWuI'), (
    'http://youtu.be/-wtIMTCHWuI', '-wtIMTCHWuI'), (
    'https://youtu.be/-wtIMTCHWuI', '-wtIMTCHWuI'), (
    'https://www.youtube.com/watch?v=lalOy8Mbfdc', 'lalOy8Mbfdc'), (
    'https://m.youtube.com/watch?v=lalOy8Mbfdc', 'lalOy8Mbfdc'), (
    'https://youtube.com/watch?v=lalOy8Mbfdc', 'lalOy8Mbfdc'), (
    'http://youtu.be/lalOy8Mbfdc?t=1', 'lalOy8Mbfdc'), (
    'http://youtu.be/lalOy8Mbfdc?t=1s', 'lalOy8Mbfdc'), (
    'https://youtu.be/lalOy8Mbfdc?t=1', 'lalOy8Mbfdc'), (
    'http://www.youtube-nocookie.com/embed/lalOy8Mbfdc?rel=0',
    'lalOy8Mbfdc'), ('https://youtu.be/lalOy8Mbfdc?t=1s', 'lalOy8Mbfdc'), (
    'https://www.youtube.com/shorts/cd0Fy92_w_s', 'cd0Fy92_w_s')])
def test_video_id_extraction(youtube_url: str, expected_video_id: str) ->None:
    """Test that the video id is extracted from a youtube url"""
    assert YoutubeLoader.extract_video_id(youtube_url) == expected_video_id
|
Test that the video id is extracted from a youtube url
|
clear
|
"""Clear cache. This is for all LLMs at once."""
self.kv_cache.clear()
|
def clear(self, **kwargs: Any) ->None:
    """Clear cache. This is for all LLMs at once."""
    cache_store = self.kv_cache
    cache_store.clear()
|
Clear cache. This is for all LLMs at once.
|
_call
|
"""Call out to Titan Takeoff generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
try:
if self.streaming:
text_output = ''
for chunk in self._stream(prompt=prompt, stop=stop, run_manager=
run_manager):
text_output += chunk.text
return text_output
url = f'{self.base_url}/generate'
params = {'text': prompt, **self._default_params}
response = requests.post(url, json=params)
response.raise_for_status()
response.encoding = 'utf-8'
text = ''
if 'message' in response.json():
text = response.json()['message']
else:
raise ValueError('Something went wrong.')
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
except ConnectionError:
raise ConnectionError(
'Could not connect to Titan Takeoff server. Please make sure that the server is running.'
)
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call out to Titan Takeoff generate endpoint.
    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.
    Returns:
        The string generated by the model.
    Example:
        .. code-block:: python
            prompt = "What is the capital of the United Kingdom?"
            response = model(prompt)
    """
    try:
        # Streaming mode: accumulate chunks from the stream iterator.
        if self.streaming:
            text_output = ''
            for chunk in self._stream(prompt=prompt, stop=stop, run_manager
                =run_manager):
                text_output += chunk.text
            return text_output
        url = f'{self.base_url}/generate'
        params = {'text': prompt, **self._default_params}
        response = requests.post(url, json=params)
        response.raise_for_status()
        response.encoding = 'utf-8'
        text = ''
        # NOTE(review): response.json() is parsed twice here; a local
        # variable would avoid re-parsing the body.
        if 'message' in response.json():
            text = response.json()['message']
        else:
            raise ValueError('Something went wrong.')
        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        return text
    except ConnectionError:
        # NOTE(review): this catches the *builtin* ConnectionError;
        # requests raises requests.exceptions.ConnectionError, which is
        # not a subclass of it — confirm the intended exception type.
        raise ConnectionError(
            'Could not connect to Titan Takeoff server. Please make sure that the server is running.'
            )
|
Call out to Titan Takeoff generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
|
_generate
|
params = {**self._default_params, **kwargs}
encoded_prompts = self.tokenizer(prompts)['input_ids']
tokenized_prompts = [self.tokenizer.convert_ids_to_tokens(encoded_prompt) for
encoded_prompt in encoded_prompts]
results = self.client.generate_batch(tokenized_prompts, **params)
sequences = [result.sequences_ids[0] for result in results]
decoded_sequences = [self.tokenizer.decode(seq) for seq in sequences]
generations = []
for text in decoded_sequences:
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
    run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->LLMResult:
    """Tokenize the prompts, run a batched generation, and decode the output.

    Note: ``stop`` and ``run_manager`` are accepted for interface
    compatibility but are not referenced by this implementation.
    """
    params = {**self._default_params, **kwargs}
    # Encode each prompt to ids, then to token strings for the client API.
    encoded_prompts = self.tokenizer(prompts)['input_ids']
    tokenized_prompts = [self.tokenizer.convert_ids_to_tokens(
        encoded_prompt) for encoded_prompt in encoded_prompts]
    results = self.client.generate_batch(tokenized_prompts, **params)
    # Keep only the first returned sequence per prompt.
    sequences = [result.sequences_ids[0] for result in results]
    decoded_sequences = [self.tokenizer.decode(seq) for seq in sequences]
    generations = []
    for text in decoded_sequences:
        generations.append([Generation(text=text)])
    return LLMResult(generations=generations)
| null |
_persist_run
|
pass
|
def _persist_run(self, run: Run) ->None:
    """No-op: this tracer does not persist finished runs anywhere."""
    pass
| null |
load_memory_variables
|
"""Load all vars from sub-memories."""
memory_data: Dict[str, Any] = {}
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
raise ValueError(
f'The variable {key} is repeated in the CombinedMemory.')
memory_data[key] = value
return memory_data
|
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, str]:
    """Collect the memory variables exposed by every sub-memory.

    Raises:
        ValueError: If two sub-memories expose the same variable name.
    """
    combined: Dict[str, Any] = {}
    for sub_memory in self.memories:
        loaded = sub_memory.load_memory_variables(inputs)
        for name, payload in loaded.items():
            # Duplicate keys across sub-memories are ambiguous; refuse them.
            if name in combined:
                raise ValueError(
                    f'The variable {name} is repeated in the CombinedMemory.')
            combined[name] = payload
    return combined
|
Load all vars from sub-memories.
|
decode_to_str
|
return item.numpy().decode('utf-8')
|
def decode_to_str(item: tf.Tensor) ->str:
    """Decode a scalar string tensor into a Python ``str`` (UTF-8)."""
    raw_bytes = item.numpy()
    return raw_bytes.decode('utf-8')
| null |
_format_memory_detail
|
created_time = memory.metadata['created_at'].strftime('%B %d, %Y, %I:%M %p')
return f'{prefix}[{created_time}] {memory.page_content.strip()}'
|
def _format_memory_detail(self, memory: Document, prefix: str='') ->str:
created_time = memory.metadata['created_at'].strftime('%B %d, %Y, %I:%M %p'
)
return f'{prefix}[{created_time}] {memory.page_content.strip()}'
| null |
from_texts
|
"""
Create a new ElasticKnnSearch instance and add a list of texts to the
Elasticsearch index.
Args:
texts (List[str]): The texts to add to the index.
embedding (Embeddings): The embedding model to use for transforming the
texts into vectors.
metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries
to associate with the texts.
**kwargs: Arbitrary keyword arguments.
Returns:
A new ElasticKnnSearch instance.
"""
index_name = kwargs.get('index_name', str(uuid.uuid4()))
es_connection = kwargs.get('es_connection')
es_cloud_id = kwargs.get('es_cloud_id')
es_user = kwargs.get('es_user')
es_password = kwargs.get('es_password')
vector_query_field = kwargs.get('vector_query_field', 'vector')
query_field = kwargs.get('query_field', 'text')
model_id = kwargs.get('model_id')
dims = kwargs.get('dims')
if dims is None:
raise ValueError("ElasticKnnSearch requires 'dims' parameter")
optional_args = {}
if vector_query_field is not None:
optional_args['vector_query_field'] = vector_query_field
if query_field is not None:
optional_args['query_field'] = query_field
knnvectorsearch = cls(index_name=index_name, embedding=embedding,
es_connection=es_connection, es_cloud_id=es_cloud_id, es_user=es_user,
es_password=es_password, **optional_args)
knnvectorsearch.add_texts(texts, model_id=model_id, dims=dims, **optional_args)
return knnvectorsearch
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[Dict[Any, Any]]]=None, **kwargs: Any) ->ElasticKnnSearch:
    """
    Create a new ElasticKnnSearch instance and add a list of texts to the
    Elasticsearch index.
    Args:
        texts (List[str]): The texts to add to the index.
        embedding (Embeddings): The embedding model to use for transforming the
            texts into vectors.
        metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries
            to associate with the texts.
        **kwargs: Arbitrary keyword arguments. Recognized keys: index_name,
            es_connection, es_cloud_id, es_user, es_password,
            vector_query_field, query_field, model_id, dims (required).
    Returns:
        A new ElasticKnnSearch instance.
    Raises:
        ValueError: If 'dims' is not supplied via kwargs.
    """
    # Default to a random index name when the caller did not pick one.
    index_name = kwargs.get('index_name', str(uuid.uuid4()))
    es_connection = kwargs.get('es_connection')
    es_cloud_id = kwargs.get('es_cloud_id')
    es_user = kwargs.get('es_user')
    es_password = kwargs.get('es_password')
    vector_query_field = kwargs.get('vector_query_field', 'vector')
    query_field = kwargs.get('query_field', 'text')
    model_id = kwargs.get('model_id')
    dims = kwargs.get('dims')
    if dims is None:
        raise ValueError("ElasticKnnSearch requires 'dims' parameter")
    optional_args = {}
    if vector_query_field is not None:
        optional_args['vector_query_field'] = vector_query_field
    if query_field is not None:
        optional_args['query_field'] = query_field
    knnvectorsearch = cls(index_name=index_name, embedding=embedding,
        es_connection=es_connection, es_cloud_id=es_cloud_id, es_user=
        es_user, es_password=es_password, **optional_args)
    # NOTE(review): ``metadatas`` is accepted but never forwarded to
    # ``add_texts`` — confirm whether it should be passed through.
    knnvectorsearch.add_texts(texts, model_id=model_id, dims=dims, **
        optional_args)
    return knnvectorsearch
|
Create a new ElasticKnnSearch instance and add a list of texts to the
Elasticsearch index.
Args:
texts (List[str]): The texts to add to the index.
embedding (Embeddings): The embedding model to use for transforming the
texts into vectors.
metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries
to associate with the texts.
**kwargs: Arbitrary keyword arguments.
Returns:
A new ElasticKnnSearch instance.
|
_import_gmail_GmailGetMessage
|
from langchain_community.tools.gmail import GmailGetMessage
return GmailGetMessage
|
def _import_gmail_GmailGetMessage() ->Any:
    """Lazily import and return the ``GmailGetMessage`` tool class.

    The deferred import keeps the optional Gmail dependency out of
    module import time.
    """
    from langchain_community.tools.gmail import GmailGetMessage
    return GmailGetMessage
| null |
delete_collection
|
self.logger.debug('Trying to delete collection')
drop_statement = text(f'DROP TABLE IF EXISTS {self.collection_name};')
with self.engine.connect() as conn:
with conn.begin():
conn.execute(drop_statement)
|
def delete_collection(self) ->None:
    """Drop the table backing this collection, if it exists."""
    self.logger.debug('Trying to delete collection')
    # NOTE(review): the table name is interpolated directly into the SQL
    # (it cannot be a bind parameter); assumes ``collection_name`` is
    # trusted/validated upstream.
    statement = text(f'DROP TABLE IF EXISTS {self.collection_name};')
    with self.engine.connect() as conn, conn.begin():
        conn.execute(statement)
| null |
format_auto_embed_off
|
"""
Converts the `BasedOn` and `ToSelectFrom` into a format that can be used by VW
"""
chosen_action, cost, prob = self.get_label(event)
context_emb, action_embs = self.get_context_and_action_embeddings(event)
example_string = ''
example_string += 'shared '
for context_item in context_emb:
for ns, based_on in context_item.items():
e = ' '.join(based_on) if isinstance(based_on, list) else based_on
example_string += f'|{ns} {e} '
example_string += '\n'
for i, action in enumerate(action_embs):
if cost is not None and chosen_action == i:
example_string += f'{chosen_action}:{cost}:{prob} '
for ns, action_embedding in action.items():
e = ' '.join(action_embedding) if isinstance(action_embedding, list
) else action_embedding
example_string += f'|{ns} {e} '
example_string += '\n'
return example_string[:-1]
|
def format_auto_embed_off(self, event: PickBestEvent) ->str:
    """
    Converts the `BasedOn` and `ToSelectFrom` into a format that can be used by VW
    """
    chosen_action, cost, prob = self.get_label(event)
    context_emb, action_embs = self.get_context_and_action_embeddings(event)
    lines = []
    # Shared (context) features go on a single leading 'shared' line.
    shared_line = 'shared '
    for context_item in context_emb:
        for ns, based_on in context_item.items():
            feature = ' '.join(based_on) if isinstance(based_on, list
                ) else based_on
            shared_line += f'|{ns} {feature} '
    lines.append(shared_line)
    # One line per candidate action; the chosen action carries its label.
    for index, action in enumerate(action_embs):
        action_line = ''
        if cost is not None and chosen_action == index:
            action_line += f'{chosen_action}:{cost}:{prob} '
        for ns, action_embedding in action.items():
            feature = ' '.join(action_embedding) if isinstance(
                action_embedding, list) else action_embedding
            action_line += f'|{ns} {feature} '
        lines.append(action_line)
    return '\n'.join(lines)
|
Converts the `BasedOn` and `ToSelectFrom` into a format that can be used by VW
|
test_final_answer_before_parsable_action
|
llm_output = """Final Answer: The best pizza to eat is margaritta
Action: foo
Action Input: bar
"""
agent_finish: AgentFinish = mrkl_output_parser.parse_folder(llm_output)
assert agent_finish.return_values.get('output'
) == 'The best pizza to eat is margaritta'
|
def test_final_answer_before_parsable_action() ->None:
    """A 'Final Answer:' that precedes an Action block wins: the parser
    must return an AgentFinish carrying the answer text."""
    llm_output = """Final Answer: The best pizza to eat is margaritta
    Action: foo
    Action Input: bar
    """
    # NOTE(review): ``parse_folder`` is an unusual name for an output
    # parser entry point — confirm it is not meant to be ``parse``.
    agent_finish: AgentFinish = mrkl_output_parser.parse_folder(llm_output)
    assert agent_finish.return_values.get('output'
        ) == 'The best pizza to eat is margaritta'
| null |
messages
|
"""Retrieve the messages from PostgreSQL"""
query = (
f'SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;'
)
self.cursor.execute(query, (self.session_id,))
items = [record['message'] for record in self.cursor.fetchall()]
messages = messages_from_dict(items)
return messages
|
@property
def messages(self) ->List[BaseMessage]:
    """Retrieve the messages from PostgreSQL"""
    # The table name is interpolated directly (it cannot be a bind
    # parameter); assumes ``self.table_name`` is trusted. The session id
    # IS passed as a bound parameter.
    query = (
        f'SELECT message FROM {self.table_name} WHERE session_id = %s ORDER BY id;'
    )
    self.cursor.execute(query, (self.session_id,))
    # Each row's ``message`` column holds one serialized message dict.
    items = [record['message'] for record in self.cursor.fetchall()]
    messages = messages_from_dict(items)
    return messages
|
Retrieve the messages from PostgreSQL
|
_get_and_update_kg
|
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
|
def _get_and_update_kg(self, inputs: Dict[str, Any]) ->None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
|
Get and update knowledge graph from the conversation history.
|
_call
|
"""Run text through and get agent response."""
name_to_tool_map = {tool.name: tool for tool in self.tools}
color_mapping = get_color_mapping([tool.name for tool in self.tools],
excluded_colors=['green', 'red'])
intermediate_steps: List[Tuple[AgentAction, str]] = []
iterations = 0
time_elapsed = 0.0
start_time = time.time()
while self._should_continue(iterations, time_elapsed):
next_step_output = self._take_next_step(name_to_tool_map, color_mapping,
inputs, intermediate_steps, run_manager=run_manager)
if isinstance(next_step_output, AgentFinish):
return self._return(next_step_output, intermediate_steps,
run_manager=run_manager)
intermediate_steps.extend(next_step_output)
if len(next_step_output) == 1:
next_step_action = next_step_output[0]
tool_return = self._get_tool_return(next_step_action)
if tool_return is not None:
return self._return(tool_return, intermediate_steps,
run_manager=run_manager)
iterations += 1
time_elapsed = time.time() - start_time
output = self.agent.return_stopped_response(self.early_stopping_method,
intermediate_steps, **inputs)
return self._return(output, intermediate_steps, run_manager=run_manager)
|
def _call(self, inputs: Dict[str, str], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Run text through and get agent response."""
    # Map tool name -> tool for O(1) dispatch inside the loop.
    name_to_tool_map = {tool.name: tool for tool in self.tools}
    # Reserve green/red, which are used for agent and error output.
    color_mapping = get_color_mapping([tool.name for tool in self.tools],
        excluded_colors=['green', 'red'])
    intermediate_steps: List[Tuple[AgentAction, str]] = []
    # Iteration count and wall-clock time are checked against the
    # executor's limits via _should_continue.
    iterations = 0
    time_elapsed = 0.0
    start_time = time.time()
    while self._should_continue(iterations, time_elapsed):
        next_step_output = self._take_next_step(name_to_tool_map,
            color_mapping, inputs, intermediate_steps, run_manager=run_manager)
        if isinstance(next_step_output, AgentFinish):
            return self._return(next_step_output, intermediate_steps,
                run_manager=run_manager)
        intermediate_steps.extend(next_step_output)
        if len(next_step_output) == 1:
            next_step_action = next_step_output[0]
            # A lone tool step may request a direct return.
            tool_return = self._get_tool_return(next_step_action)
            if tool_return is not None:
                return self._return(tool_return, intermediate_steps,
                    run_manager=run_manager)
        iterations += 1
        time_elapsed = time.time() - start_time
    # Limits were hit: ask the agent for a stopped response instead.
    output = self.agent.return_stopped_response(self.early_stopping_method,
        intermediate_steps, **inputs)
    return self._return(output, intermediate_steps, run_manager=run_manager)
|
Run text through and get agent response.
|
test_astradb_vectorstore_similarity_scale
|
"""Scale of the similarity scores."""
store_parseremb.add_texts(texts=[json.dumps([1, 1]), json.dumps([-1, -1])],
ids=['near', 'far'])
res1 = store_parseremb.similarity_search_with_score(json.dumps([0.5, 0.5]), k=2
)
scores = [sco for _, sco in res1]
sco_near, sco_far = scores
assert abs(1 - sco_near) < 0.001 and abs(sco_far) < 0.001
|
def test_astradb_vectorstore_similarity_scale(self, store_parseremb: AstraDB
    ) ->None:
    """Scale of the similarity scores."""
    # Insert two opposite vectors encoded as JSON text; the fixture's
    # embedding presumably parses the JSON back into a vector.
    store_parseremb.add_texts(texts=[json.dumps([1, 1]), json.dumps([-1, -1
        ])], ids=['near', 'far'])
    res1 = store_parseremb.similarity_search_with_score(json.dumps([0.5,
        0.5]), k=2)
    scores = [sco for _, sco in res1]
    sco_near, sco_far = scores
    # Aligned vector must score ~1, opposite vector ~0 on the store's scale.
    assert abs(1 - sco_near) < 0.001 and abs(sco_far) < 0.001
|
Scale of the similarity scores.
|
_get_opensearch_client
|
"""Get OpenSearch client from the opensearch_url, otherwise raise error."""
try:
opensearch = _import_opensearch()
client = opensearch(opensearch_url, **kwargs)
except ValueError as e:
raise ImportError(
f'OpenSearch client string provided is not in proper format. Got error: {e} '
)
return client
|
def _get_opensearch_client(opensearch_url: str, **kwargs: Any) ->Any:
    """Build an OpenSearch client for ``opensearch_url``, else raise."""
    try:
        client_cls = _import_opensearch()
        return client_cls(opensearch_url, **kwargs)
    except ValueError as e:
        # A malformed connection string surfaces as ValueError; re-raise
        # with the library's conventional ImportError wrapper.
        raise ImportError(
            f'OpenSearch client string provided is not in proper format. Got error: {e} '
        )
|
Get OpenSearch client from the opensearch_url, otherwise raise error.
|
on_text
|
pass
|
def on_text(self, text: str, color: Optional[str]=None, end: str='', **
    kwargs: Any) ->None:
    """No-op: this handler ignores free-form text callbacks."""
    pass
| null |
_import_golden_query
|
from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
return GoldenQueryAPIWrapper
|
def _import_golden_query() ->Any:
    """Lazily import and return the ``GoldenQueryAPIWrapper`` class.

    The deferred import keeps the optional dependency out of module
    import time.
    """
    from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
    return GoldenQueryAPIWrapper
| null |
test_runnable_branch_invoke
|
def raise_value_error(x: int) ->int:
"""Raise a value error."""
raise ValueError('x is too large')
branch = RunnableBranch[int, int]((lambda x: x > 100, raise_value_error), (
lambda x: x > 0 and x < 5, lambda x: x + 1), (lambda x: x > 5, lambda x:
x * 10), lambda x: x - 1)
assert branch.invoke(1) == 2
assert branch.invoke(10) == 100
assert branch.invoke(0) == -1
with pytest.raises(ValueError):
branch.invoke(1000)
|
def test_runnable_branch_invoke() ->None:
    """Branches are tried in order, the default runs when none match, and
    exceptions raised inside a branch propagate to the caller."""
    def raise_value_error(x: int) ->int:
        """Raise a value error."""
        raise ValueError('x is too large')
    # Conditions: >100 raises; 0<x<5 increments; >5 multiplies; else decrements.
    branch = RunnableBranch[int, int]((lambda x: x > 100, raise_value_error
        ), (lambda x: x > 0 and x < 5, lambda x: x + 1), (lambda x: x > 5,
        lambda x: x * 10), lambda x: x - 1)
    assert branch.invoke(1) == 2
    assert branch.invoke(10) == 100
    assert branch.invoke(0) == -1
    with pytest.raises(ValueError):
        branch.invoke(1000)
| null |
get_num_tokens
|
"""Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
if self.is_gemini:
raise ValueError('Counting tokens is not yet supported!')
result = self.client.count_text_tokens(model=self.model_name, prompt=text)
return result['token_count']
|
def get_num_tokens(self, text: str) ->int:
    """Get the number of tokens present in the text.

    Useful for checking if an input will fit in a model's context window.

    Args:
        text: The string input to tokenize.

    Returns:
        The integer number of tokens in the text.

    Raises:
        ValueError: Token counting is not supported for Gemini models here.
    """
    if self.is_gemini:
        raise ValueError('Counting tokens is not yet supported!')
    count_response = self.client.count_text_tokens(model=self.model_name,
        prompt=text)
    return count_response['token_count']
|
Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
|
test_prompt_from_jinja2_template_multiple_inputs_with_repeats
|
"""Test with multiple input variables and repeats."""
template = """Hello world
Your variable: {{ foo }}
{# This will not get rendered #}
{% if bar %}
You just set bar boolean variable to true
{% endif %}
{% for i in foo_list %}
{{ i }}
{% endfor %}
{% if bar %}
Your variable again: {{ foo }}
{% endif %}
"""
prompt = PromptTemplate.from_template(template, template_format='jinja2')
expected_prompt = PromptTemplate(template=template, input_variables=['bar',
'foo', 'foo_list'], template_format='jinja2')
assert prompt == expected_prompt
|
@pytest.mark.requires('jinja2')
def test_prompt_from_jinja2_template_multiple_inputs_with_repeats() ->None:
    """Test with multiple input variables and repeats."""
    template = """Hello world
    Your variable: {{ foo }}
    {# This will not get rendered #}
    {% if bar %}
    You just set bar boolean variable to true
    {% endif %}
    {% for i in foo_list %}
    {{ i }}
    {% endfor %}
    {% if bar %}
    Your variable again: {{ foo }}
    {% endif %}
    """
    prompt = PromptTemplate.from_template(template, template_format='jinja2')
    # Repeated variables must be collapsed; the loop-local ``i`` is not an
    # input, and inferred names are expected in sorted order.
    expected_prompt = PromptTemplate(template=template, input_variables=[
        'bar', 'foo', 'foo_list'], template_format='jinja2')
    assert prompt == expected_prompt
|
Test with multiple input variables and repeats.
|
test_vald_search_by_vector
|
"""Test end to end construction and search by vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query('foo')
output = docsearch.similarity_search_by_vector(embedding, k=3)
assert output == [Document(page_content='foo'), Document(page_content='bar'
), Document(page_content='baz')]
|
def test_vald_search_by_vector() ->None:
    """Test end to end construction and search by vector."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = _vald_from_texts(metadatas=metadatas)
    # Give the Vald index time to build before querying — presumably
    # indexing is asynchronous server-side.
    time.sleep(WAIT_TIME)
    embedding = FakeEmbeddings().embed_query('foo')
    # All three documents come back, nearest ('foo') first.
    output = docsearch.similarity_search_by_vector(embedding, k=3)
    assert output == [Document(page_content='foo'), Document(page_content=
        'bar'), Document(page_content='baz')]
|
Test end to end construction and search by vector.
|
test_api_key_masked_when_passed_via_constructor
|
"""Test initialization with an API key provided via the initializer"""
llm = AI21(ai21_api_key='secret-api-key')
print(llm.ai21_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """Test initialization with an API key provided via the initializer"""
    llm = AI21(ai21_api_key='secret-api-key')
    # Printing the stored key must emit the masked form, never the secret.
    print(llm.ai21_api_key, end='')
    captured = capsys.readouterr()
    assert captured.out == '**********'
|
Test initialization with an API key provided via the initializer
|
_iter_next_step
|
"""Take a single step in the thought-action-observation loop.
Override this to take control of how the agent makes and acts on choices.
"""
try:
intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
output = self.agent.plan(intermediate_steps, callbacks=run_manager.
get_child() if run_manager else None, **inputs)
except OutputParserException as e:
if isinstance(self.handle_parsing_errors, bool):
raise_error = not self.handle_parsing_errors
else:
raise_error = False
if raise_error:
raise ValueError(
f'An output parsing error occurred. In order to pass this error back to the agent and have it try again, pass `handle_parsing_errors=True` to the AgentExecutor. This is the error: {str(e)}'
)
text = str(e)
if isinstance(self.handle_parsing_errors, bool):
if e.send_to_llm:
observation = str(e.observation)
text = str(e.llm_output)
else:
observation = 'Invalid or incomplete response'
elif isinstance(self.handle_parsing_errors, str):
observation = self.handle_parsing_errors
elif callable(self.handle_parsing_errors):
observation = self.handle_parsing_errors(e)
else:
raise ValueError('Got unexpected type of `handle_parsing_errors`')
output = AgentAction('_Exception', observation, text)
if run_manager:
run_manager.on_agent_action(output, color='green')
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = ExceptionTool().run(output.tool_input, verbose=self.
verbose, color=None, callbacks=run_manager.get_child() if
run_manager else None, **tool_run_kwargs)
yield AgentStep(action=output, observation=observation)
return
if isinstance(output, AgentFinish):
yield output
return
actions: List[AgentAction]
if isinstance(output, AgentAction):
actions = [output]
else:
actions = output
for agent_action in actions:
yield agent_action
for agent_action in actions:
if run_manager:
run_manager.on_agent_action(agent_action, color='green')
if agent_action.tool in name_to_tool_map:
tool = name_to_tool_map[agent_action.tool]
return_direct = tool.return_direct
color = color_mapping[agent_action.tool]
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
if return_direct:
tool_run_kwargs['llm_prefix'] = ''
observation = tool.run(agent_action.tool_input, verbose=self.
verbose, color=color, callbacks=run_manager.get_child() if
run_manager else None, **tool_run_kwargs)
else:
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = InvalidTool().run({'requested_tool_name':
agent_action.tool, 'available_tool_names': list(
name_to_tool_map.keys())}, verbose=self.verbose, color=None,
callbacks=run_manager.get_child() if run_manager else None, **
tool_run_kwargs)
yield AgentStep(action=agent_action, observation=observation)
|
def _iter_next_step(self, name_to_tool_map: Dict[str, BaseTool],
    color_mapping: Dict[str, str], inputs: Dict[str, str],
    intermediate_steps: List[Tuple[AgentAction, str]], run_manager:
    Optional[CallbackManagerForChainRun]=None) ->Iterator[Union[AgentFinish,
    AgentAction, AgentStep]]:
    """Take a single step in the thought-action-observation loop.
    Override this to take control of how the agent makes and acts on choices.
    """
    try:
        intermediate_steps = self._prepare_intermediate_steps(
            intermediate_steps)
        # Ask the agent to plan the next action(s) given the history so far.
        output = self.agent.plan(intermediate_steps, callbacks=run_manager.
            get_child() if run_manager else None, **inputs)
    except OutputParserException as e:
        # Parsing failures are either re-raised or converted into an
        # '_Exception' pseudo-action, depending on handle_parsing_errors.
        if isinstance(self.handle_parsing_errors, bool):
            raise_error = not self.handle_parsing_errors
        else:
            raise_error = False
        if raise_error:
            raise ValueError(
                f'An output parsing error occurred. In order to pass this error back to the agent and have it try again, pass `handle_parsing_errors=True` to the AgentExecutor. This is the error: {str(e)}'
                )
        text = str(e)
        if isinstance(self.handle_parsing_errors, bool):
            if e.send_to_llm:
                observation = str(e.observation)
                text = str(e.llm_output)
            else:
                observation = 'Invalid or incomplete response'
        elif isinstance(self.handle_parsing_errors, str):
            # A fixed string becomes the observation verbatim.
            observation = self.handle_parsing_errors
        elif callable(self.handle_parsing_errors):
            observation = self.handle_parsing_errors(e)
        else:
            raise ValueError('Got unexpected type of `handle_parsing_errors`')
        output = AgentAction('_Exception', observation, text)
        if run_manager:
            run_manager.on_agent_action(output, color='green')
        tool_run_kwargs = self.agent.tool_run_logging_kwargs()
        observation = ExceptionTool().run(output.tool_input, verbose=self.
            verbose, color=None, callbacks=run_manager.get_child() if
            run_manager else None, **tool_run_kwargs)
        yield AgentStep(action=output, observation=observation)
        return
    if isinstance(output, AgentFinish):
        # The agent decided to finish; surface it and end this step.
        yield output
        return
    actions: List[AgentAction]
    if isinstance(output, AgentAction):
        actions = [output]
    else:
        actions = output
    # First yield every planned action, then execute each and yield its step.
    for agent_action in actions:
        yield agent_action
    for agent_action in actions:
        if run_manager:
            run_manager.on_agent_action(agent_action, color='green')
        if agent_action.tool in name_to_tool_map:
            tool = name_to_tool_map[agent_action.tool]
            return_direct = tool.return_direct
            color = color_mapping[agent_action.tool]
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            if return_direct:
                tool_run_kwargs['llm_prefix'] = ''
            observation = tool.run(agent_action.tool_input, verbose=self.
                verbose, color=color, callbacks=run_manager.get_child() if
                run_manager else None, **tool_run_kwargs)
        else:
            # Unknown tool name: report via InvalidTool instead of raising.
            tool_run_kwargs = self.agent.tool_run_logging_kwargs()
            observation = InvalidTool().run({'requested_tool_name':
                agent_action.tool, 'available_tool_names': list(
                name_to_tool_map.keys())}, verbose=self.verbose, color=None,
                callbacks=run_manager.get_child() if run_manager else None,
                **tool_run_kwargs)
        yield AgentStep(action=agent_action, observation=observation)
|
Take a single step in the thought-action-observation loop.
Override this to take control of how the agent makes and acts on choices.
|
test_l2
|
"""Test Flat L2 distance."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
TEST_REDIS_URL, vector_schema=l2_schema)
output = docsearch.similarity_search_with_score('far', k=2)
_, score = output[1]
assert score == EUCLIDEAN_SCORE
assert drop(docsearch.index_name)
|
def test_l2(texts: List[str]) ->None:
    """Test Flat L2 distance."""
    docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
        TEST_REDIS_URL, vector_schema=l2_schema)
    output = docsearch.similarity_search_with_score('far', k=2)
    # The second-nearest hit must carry the expected Euclidean score.
    _, score = output[1]
    assert score == EUCLIDEAN_SCORE
    # Clean up the index created for this test.
    assert drop(docsearch.index_name)
|
Test Flat L2 distance.
|
test_openai_embedding_documents
|
"""Test openai embeddings."""
documents = ['foo bar']
embedding = OpenAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536
|
@pytest.mark.scheduled
def test_openai_embedding_documents() ->None:
    """Test openai embeddings."""
    documents = ['foo bar']
    embedding = OpenAIEmbeddings()
    output = embedding.embed_documents(documents)
    # One vector per document, each with the expected 1536 dimensions.
    assert len(output) == 1
    assert len(output[0]) == 1536
|
Test openai embeddings.
|
_get_reddit_search
|
return RedditSearchRun(api_wrapper=RedditSearchAPIWrapper(**kwargs))
|
def _get_reddit_search(**kwargs: Any) ->BaseTool:
    """Build a RedditSearchRun tool; kwargs configure its API wrapper."""
    return RedditSearchRun(api_wrapper=RedditSearchAPIWrapper(**kwargs))
| null |
create_metadata
|
"""Create metadata from fields.
Args:
fields: The fields of the document. The fields must be a dict.
Returns:
metadata: The metadata of the document. The metadata must be a dict.
"""
metadata: Dict[str, Any] = {}
for key, value in fields.items():
if key == 'id' or key == 'document' or key == 'embedding':
continue
metadata[key] = value
return metadata
|
def create_metadata(fields: Dict[str, Any]) ->Dict[str, Any]:
    """Create metadata from fields.

    Args:
        fields: The fields of the document. The fields must be a dict.

    Returns:
        metadata: The metadata of the document. The metadata must be a dict.
    """
    # 'id', 'document' and 'embedding' are storage-level fields, not
    # user metadata, so they are excluded from the result.
    excluded = {'id', 'document', 'embedding'}
    return {key: value for key, value in fields.items() if key not in excluded}
|
Create metadata from fields.
Args:
fields: The fields of the document. The fields must be a dict.
Returns:
metadata: The metadata of the document. The metadata must be a dict.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.