method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
is_lc_serializable
|
"""Is this class serializable?"""
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
"""Is this class serializable?"""
return False
|
Is this class serializable?
|
test_visit_comparison_date
|
comp = Comparison(comparator=Comparator.LT, attribute='foo', value={'type':
'date', 'date': '2023-09-13'})
expected = {'operator': 'LessThan', 'path': ['foo'], 'valueDate':
'2023-09-13T00:00:00Z'}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison_date() ->None:
comp = Comparison(comparator=Comparator.LT, attribute='foo', value={
'type': 'date', 'date': '2023-09-13'})
expected = {'operator': 'LessThan', 'path': ['foo'], 'valueDate':
'2023-09-13T00:00:00Z'}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
| null |
_call
|
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
response = completion_with_retry(self, model=self.model, prompt=prompt, **
params)
_stop = params.get('stop_sequences')
return self._process_response(response, _stop)
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
response = completion_with_retry(self, model=self.model, prompt=prompt,
**params)
_stop = params.get('stop_sequences')
return self._process_response(response, _stop)
|
Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
|
__query_collection
|
"""Query the collection."""
with Session(self._bind) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError('Collection not found')
filter_by = self.EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
if isinstance(value, dict):
filter_by_metadata = self._create_filter_clause(key, value)
if filter_by_metadata is not None:
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = self.EmbeddingStore.cmetadata[key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
_type = self.EmbeddingStore
results: List[Any] = session.query(self.EmbeddingStore, self.
distance_strategy(embedding).label('distance')).filter(filter_by
).order_by(sqlalchemy.asc('distance')).join(self.CollectionStore,
self.EmbeddingStore.collection_id == self.CollectionStore.uuid).limit(k
).all()
return results
|
def __query_collection(self, embedding: List[float], k: int=4, filter:
Optional[Dict[str, str]]=None) ->List[Any]:
"""Query the collection."""
with Session(self._bind) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError('Collection not found')
filter_by = self.EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
if isinstance(value, dict):
filter_by_metadata = self._create_filter_clause(key, value)
if filter_by_metadata is not None:
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = self.EmbeddingStore.cmetadata[key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
_type = self.EmbeddingStore
results: List[Any] = session.query(self.EmbeddingStore, self.
distance_strategy(embedding).label('distance')).filter(filter_by
).order_by(sqlalchemy.asc('distance')).join(self.
CollectionStore, self.EmbeddingStore.collection_id == self.
CollectionStore.uuid).limit(k).all()
return results
|
Query the collection.
|
_call
|
"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ''
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.
verbose)
text = ''
params = {**self._default_params(), **kwargs}
for token in self.client.generate(prompt, **params):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
|
from_llm_and_tools
|
"""Construct an agent from an LLM and tools."""
prompt = cls.create_prompt(extra_prompt_messages=extra_prompt_messages,
system_message=system_message)
return cls(llm=llm, prompt=prompt, tools=tools, callback_manager=
callback_manager, **kwargs)
|
@classmethod
def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[
BaseTool], callback_manager: Optional[BaseCallbackManager]=None,
extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]]=None,
system_message: Optional[SystemMessage]=SystemMessage(content=
'You are a helpful AI assistant.'), **kwargs: Any) ->BaseSingleActionAgent:
"""Construct an agent from an LLM and tools."""
prompt = cls.create_prompt(extra_prompt_messages=extra_prompt_messages,
system_message=system_message)
return cls(llm=llm, prompt=prompt, tools=tools, callback_manager=
callback_manager, **kwargs)
|
Construct an agent from an LLM and tools.
|
embed_query
|
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace('\n', ' ')
embedding = self.client.encode(self.query_instruction + text, **self.
encode_kwargs)
return embedding.tolist()
|
def embed_query(self, text: str) ->List[float]:
"""Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace('\n', ' ')
embedding = self.client.encode(self.query_instruction + text, **self.
encode_kwargs)
return embedding.tolist()
|
Compute query embeddings using a HuggingFace transformer model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
_parse_chat_history
|
messages: List[genai.types.MessageDict] = []
raw_system_message: Optional[SystemMessage] = None
for i, message in enumerate(input_messages):
if i == 0 and isinstance(message, SystemMessage
) and not convert_system_message_to_human:
raise ValueError(
"""SystemMessages are not yet supported!
To automatically convert the leading SystemMessage to a HumanMessage,
set `convert_system_message_to_human` to True. Example:
llm = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human=True)
"""
)
elif i == 0 and isinstance(message, SystemMessage):
raw_system_message = message
continue
elif isinstance(message, AIMessage):
role = 'model'
elif isinstance(message, HumanMessage):
role = 'user'
else:
raise ValueError(
f'Unexpected message with type {type(message)} at the position {i}.'
)
parts = _convert_to_parts(message.content)
if raw_system_message:
if role == 'model':
raise ValueError(
'SystemMessage should be followed by a HumanMessage and not by AIMessage.'
)
parts = _convert_to_parts(raw_system_message.content) + parts
raw_system_message = None
messages.append({'role': role, 'parts': parts})
return messages
|
def _parse_chat_history(input_messages: Sequence[BaseMessage],
convert_system_message_to_human: bool=False) ->List[genai.types.ContentDict
]:
messages: List[genai.types.MessageDict] = []
raw_system_message: Optional[SystemMessage] = None
for i, message in enumerate(input_messages):
if i == 0 and isinstance(message, SystemMessage
) and not convert_system_message_to_human:
raise ValueError(
"""SystemMessages are not yet supported!
To automatically convert the leading SystemMessage to a HumanMessage,
set `convert_system_message_to_human` to True. Example:
llm = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human=True)
"""
)
elif i == 0 and isinstance(message, SystemMessage):
raw_system_message = message
continue
elif isinstance(message, AIMessage):
role = 'model'
elif isinstance(message, HumanMessage):
role = 'user'
else:
raise ValueError(
f'Unexpected message with type {type(message)} at the position {i}.'
)
parts = _convert_to_parts(message.content)
if raw_system_message:
if role == 'model':
raise ValueError(
'SystemMessage should be followed by a HumanMessage and not by AIMessage.'
)
parts = _convert_to_parts(raw_system_message.content) + parts
raw_system_message = None
messages.append({'role': role, 'parts': parts})
return messages
| null |
test_maximal_marginal_relevance_lambda_zero
|
query_embedding = np.random.random(size=5)
embedding_list = [query_embedding, query_embedding, np.zeros(5)]
expected = [0, 2]
actual = maximal_marginal_relevance(query_embedding, embedding_list,
lambda_mult=0, k=2)
assert expected == actual
|
def test_maximal_marginal_relevance_lambda_zero() ->None:
query_embedding = np.random.random(size=5)
embedding_list = [query_embedding, query_embedding, np.zeros(5)]
expected = [0, 2]
actual = maximal_marginal_relevance(query_embedding, embedding_list,
lambda_mult=0, k=2)
assert expected == actual
| null |
_on_llm_start
|
"""Process the LLM Run upon start."""
self._process_start_trace(run)
|
def _on_llm_start(self, run: 'Run') ->None:
"""Process the LLM Run upon start."""
self._process_start_trace(run)
|
Process the LLM Run upon start.
|
generate_schema
|
"""
Generates the schema of the ArangoDB Database and returns it
User can specify a **sample_ratio** (0 to 1) to determine the
ratio of documents/edges used (in relation to the Collection size)
to render each Collection Schema.
"""
if not 0 <= sample_ratio <= 1:
raise ValueError('**sample_ratio** value must be in between 0 to 1')
graph_schema: List[Dict[str, Any]] = [{'graph_name': g['name'],
'edge_definitions': g['edge_definitions']} for g in self.db.graphs()]
collection_schema: List[Dict[str, Any]] = []
for collection in self.db.collections():
if collection['system']:
continue
col_name: str = collection['name']
col_type: str = collection['type']
col_size: int = self.db.collection(col_name).count()
if col_size == 0:
continue
limit_amount = ceil(sample_ratio * col_size) or 1
aql = f"""
FOR doc in {col_name}
LIMIT {limit_amount}
RETURN doc
"""
doc: Dict[str, Any]
properties: List[Dict[str, str]] = []
for doc in self.__db.aql.execute(aql):
for key, value in doc.items():
properties.append({'name': key, 'type': type(value).__name__})
collection_schema.append({'collection_name': col_name,
'collection_type': col_type, f'{col_type}_properties': properties,
f'example_{col_type}': doc})
return {'Graph Schema': graph_schema, 'Collection Schema': collection_schema}
|
def generate_schema(self, sample_ratio: float=0) ->Dict[str, List[Dict[str,
Any]]]:
"""
Generates the schema of the ArangoDB Database and returns it
User can specify a **sample_ratio** (0 to 1) to determine the
ratio of documents/edges used (in relation to the Collection size)
to render each Collection Schema.
"""
if not 0 <= sample_ratio <= 1:
raise ValueError('**sample_ratio** value must be in between 0 to 1')
graph_schema: List[Dict[str, Any]] = [{'graph_name': g['name'],
'edge_definitions': g['edge_definitions']} for g in self.db.graphs()]
collection_schema: List[Dict[str, Any]] = []
for collection in self.db.collections():
if collection['system']:
continue
col_name: str = collection['name']
col_type: str = collection['type']
col_size: int = self.db.collection(col_name).count()
if col_size == 0:
continue
limit_amount = ceil(sample_ratio * col_size) or 1
aql = f"""
FOR doc in {col_name}
LIMIT {limit_amount}
RETURN doc
"""
doc: Dict[str, Any]
properties: List[Dict[str, str]] = []
for doc in self.__db.aql.execute(aql):
for key, value in doc.items():
properties.append({'name': key, 'type': type(value).__name__})
collection_schema.append({'collection_name': col_name,
'collection_type': col_type, f'{col_type}_properties':
properties, f'example_{col_type}': doc})
return {'Graph Schema': graph_schema, 'Collection Schema':
collection_schema}
|
Generates the schema of the ArangoDB Database and returns it
User can specify a **sample_ratio** (0 to 1) to determine the
ratio of documents/edges used (in relation to the Collection size)
to render each Collection Schema.
|
input_variables
|
"""
Input variables for this prompt template.
Returns:
List of input variable names.
"""
return self.prompt.input_variables
|
@property
def input_variables(self) ->List[str]:
"""
Input variables for this prompt template.
Returns:
List of input variable names.
"""
return self.prompt.input_variables
|
Input variables for this prompt template.
Returns:
List of input variable names.
|
on_llm_error
|
"""Run when LLM errors."""
self.step += 1
self.errors += 1
|
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
|
Run when LLM errors.
|
_load_map_reduce_chain
|
map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose
)
combine_documents_chain = StuffDocumentsChain(llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name, document_prompt=
document_prompt, verbose=verbose)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
raise ValueError(
'collapse_llm provided, but collapse_prompt was not: please provide one or stop providing collapse_llm.'
)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(llm_chain=LLMChain(llm=
_collapse_llm, prompt=collapse_prompt, verbose=verbose),
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt)
reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain=
combine_documents_chain, collapse_documents_chain=collapse_chain,
token_max=token_max, verbose=verbose)
return MapReduceDocumentsChain(llm_chain=map_chain, reduce_documents_chain=
reduce_documents_chain, document_variable_name=
map_reduce_document_variable_name, verbose=verbose, **kwargs)
|
def _load_map_reduce_chain(llm: BaseLanguageModel, question_prompt:
BasePromptTemplate=map_reduce_prompt.QUESTION_PROMPT, combine_prompt:
BasePromptTemplate=map_reduce_prompt.COMBINE_PROMPT, document_prompt:
BasePromptTemplate=map_reduce_prompt.EXAMPLE_PROMPT,
combine_document_variable_name: str='summaries',
map_reduce_document_variable_name: str='context', collapse_prompt:
Optional[BasePromptTemplate]=None, reduce_llm: Optional[
BaseLanguageModel]=None, collapse_llm: Optional[BaseLanguageModel]=None,
verbose: Optional[bool]=None, token_max: int=3000, **kwargs: Any
) ->MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose
=verbose)
combine_documents_chain = StuffDocumentsChain(llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt, verbose=verbose)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
raise ValueError(
'collapse_llm provided, but collapse_prompt was not: please provide one or stop providing collapse_llm.'
)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(llm_chain=LLMChain(llm=
_collapse_llm, prompt=collapse_prompt, verbose=verbose),
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt)
reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain=
combine_documents_chain, collapse_documents_chain=collapse_chain,
token_max=token_max, verbose=verbose)
return MapReduceDocumentsChain(llm_chain=map_chain,
reduce_documents_chain=reduce_documents_chain,
document_variable_name=map_reduce_document_variable_name, verbose=
verbose, **kwargs)
| null |
rag_chain
|
"""
The RAG chain
:param retriever: A function that retrieves the necessary context for the model.
:return: A chain of functions representing the multi-modal RAG process.
"""
model = ChatOpenAI(temperature=0, model='gpt-4-1106-preview', max_tokens=1024)
prompt = ChatPromptTemplate.from_messages([('system',
"""You are an AI assistant. Answer based on the retrieved documents:
<Documents>
{context}
</Documents>"""
), ('user', '{question}?')])
chain = {'context': retriever | format_docs, 'question': RunnablePassthrough()
} | prompt | model | StrOutputParser()
return chain
|
def rag_chain(retriever):
"""
The RAG chain
:param retriever: A function that retrieves the necessary context for the model.
:return: A chain of functions representing the multi-modal RAG process.
"""
model = ChatOpenAI(temperature=0, model='gpt-4-1106-preview',
max_tokens=1024)
prompt = ChatPromptTemplate.from_messages([('system',
"""You are an AI assistant. Answer based on the retrieved documents:
<Documents>
{context}
</Documents>"""
), ('user', '{question}?')])
chain = {'context': retriever | format_docs, 'question':
RunnablePassthrough()} | prompt | model | StrOutputParser()
return chain
|
The RAG chain
:param retriever: A function that retrieves the necessary context for the model.
:return: A chain of functions representing the multi-modal RAG process.
|
similarity_search
|
"""Return docs most similar to query."""
return []
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
"""Return docs most similar to query."""
return []
|
Return docs most similar to query.
|
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(self) ->bool:
return True
| null |
load_prompt_from_config
|
"""Load prompt from Config Dict."""
if '_type' not in config:
logger.warning('No `_type` key found, defaulting to `prompt`.')
config_type = config.pop('_type', 'prompt')
if config_type not in type_to_loader_dict:
raise ValueError(f'Loading {config_type} prompt not supported')
prompt_loader = type_to_loader_dict[config_type]
return prompt_loader(config)
|
def load_prompt_from_config(config: dict) ->BasePromptTemplate:
"""Load prompt from Config Dict."""
if '_type' not in config:
logger.warning('No `_type` key found, defaulting to `prompt`.')
config_type = config.pop('_type', 'prompt')
if config_type not in type_to_loader_dict:
raise ValueError(f'Loading {config_type} prompt not supported')
prompt_loader = type_to_loader_dict[config_type]
return prompt_loader(config)
|
Load prompt from Config Dict.
|
__add__
|
if isinstance(other, AIMessageChunk):
if self.example != other.example:
raise ValueError(
'Cannot concatenate AIMessageChunks with different example values.'
)
return self.__class__(example=self.example, content=merge_content(self.
content, other.content), additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs))
return super().__add__(other)
|
def __add__(self, other: Any) ->BaseMessageChunk:
if isinstance(other, AIMessageChunk):
if self.example != other.example:
raise ValueError(
'Cannot concatenate AIMessageChunks with different example values.'
)
return self.__class__(example=self.example, content=merge_content(
self.content, other.content), additional_kwargs=self.
_merge_kwargs_dict(self.additional_kwargs, other.additional_kwargs)
)
return super().__add__(other)
| null |
on_llm_new_token
|
self.on_llm_new_token_common()
|
def on_llm_new_token(self, *args: Any, **kwargs: Any) ->Any:
self.on_llm_new_token_common()
| null |
lazy_import_playwright_browsers
|
"""
Lazy import playwright browsers.
Returns:
Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
AsyncBrowser and SyncBrowser classes.
"""
try:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
except ImportError:
raise ImportError(
"The 'playwright' package is required to use the playwright tools. Please install it with 'pip install playwright'."
)
return AsyncBrowser, SyncBrowser
|
def lazy_import_playwright_browsers() ->Tuple[Type[AsyncBrowser], Type[
SyncBrowser]]:
"""
Lazy import playwright browsers.
Returns:
Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
AsyncBrowser and SyncBrowser classes.
"""
try:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
except ImportError:
raise ImportError(
"The 'playwright' package is required to use the playwright tools. Please install it with 'pip install playwright'."
)
return AsyncBrowser, SyncBrowser
|
Lazy import playwright browsers.
Returns:
Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
AsyncBrowser and SyncBrowser classes.
|
_generation_from_qwen_resp
|
return dict(text=resp['output']['text'], generation_info=dict(finish_reason
=resp['output']['finish_reason'], request_id=resp['request_id'],
token_usage=dict(resp['usage'])))
|
@staticmethod
def _generation_from_qwen_resp(resp: Any) ->Dict[str, Any]:
return dict(text=resp['output']['text'], generation_info=dict(
finish_reason=resp['output']['finish_reason'], request_id=resp[
'request_id'], token_usage=dict(resp['usage'])))
| null |
test_deprecated_method_pydantic
|
"""Test deprecated method."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
obj = MyModel()
assert obj.deprecated_method() == 'This is a deprecated method.'
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning
) == 'The function `deprecated_method` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
doc = obj.deprecated_method.__doc__
assert isinstance(doc, str)
assert doc.startswith('[*Deprecated*] original doc')
|
def test_deprecated_method_pydantic() ->None:
"""Test deprecated method."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
obj = MyModel()
assert obj.deprecated_method() == 'This is a deprecated method.'
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning
) == 'The function `deprecated_method` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
doc = obj.deprecated_method.__doc__
assert isinstance(doc, str)
assert doc.startswith('[*Deprecated*] original doc')
|
Test deprecated method.
|
_get_relevant_documents
|
return [Document(page_content=query)]
|
def _get_relevant_documents(self, query: str) ->List[Document]:
return [Document(page_content=query)]
| null |
__init__
|
super().__init__()
self.runnable = evaluator_runnable
|
def __init__(self, evaluator_runnable: Runnable) ->None:
super().__init__()
self.runnable = evaluator_runnable
| null |
_critique
|
"""Critique each of the ideas from ideation stage & select best one."""
llm = self.critique_llm if self.critique_llm else self.llm
prompt = self.critique_prompt().format_prompt(**self.history.
critique_prompt_inputs())
callbacks = run_manager.handlers if run_manager else None
if llm:
critique = self._get_text_from_llm_result(llm.generate_prompt([prompt],
stop, callbacks), step='critique')
_colored_text = get_colored_text(critique, 'yellow')
_text = 'Critique:\n' + _colored_text
if run_manager:
run_manager.on_text(_text, end='\n', verbose=self.verbose)
return critique
else:
raise ValueError('llm is none, which should never happen')
|
def _critique(self, stop: Optional[List[str]]=None, run_manager: Optional[
CallbackManagerForChainRun]=None) ->str:
"""Critique each of the ideas from ideation stage & select best one."""
llm = self.critique_llm if self.critique_llm else self.llm
prompt = self.critique_prompt().format_prompt(**self.history.
critique_prompt_inputs())
callbacks = run_manager.handlers if run_manager else None
if llm:
critique = self._get_text_from_llm_result(llm.generate_prompt([
prompt], stop, callbacks), step='critique')
_colored_text = get_colored_text(critique, 'yellow')
_text = 'Critique:\n' + _colored_text
if run_manager:
run_manager.on_text(_text, end='\n', verbose=self.verbose)
return critique
else:
raise ValueError('llm is none, which should never happen')
|
Critique each of the ideas from ideation stage & select best one.
|
test_load_returns_list_of_documents
|
loader = GeoDataFrameLoader(sample_gdf)
docs = loader.load()
assert isinstance(docs, list)
assert all(isinstance(doc, Document) for doc in docs)
assert len(docs) == 2
|
@pytest.mark.requires('geopandas')
def test_load_returns_list_of_documents(sample_gdf: GeoDataFrame) ->None:
loader = GeoDataFrameLoader(sample_gdf)
docs = loader.load()
assert isinstance(docs, list)
assert all(isinstance(doc, Document) for doc in docs)
assert len(docs) == 2
| null |
__init__
|
"""Initialize with necessary components."""
warnings.warn(
'ElasticVectorSearch will be removed in a future release. SeeElasticsearch integration docs on how to upgrade.'
)
try:
import elasticsearch
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
self.embedding = embedding
self.index_name = index_name
_ssl_verify = ssl_verify or {}
try:
self.client = elasticsearch.Elasticsearch(elasticsearch_url, **
_ssl_verify, headers={'user-agent': self.get_user_agent()})
except ValueError as e:
raise ValueError(
f'Your elasticsearch client string is mis-formatted. Got error: {e} ')
|
def __init__(self, elasticsearch_url: str, index_name: str, embedding:
Embeddings, *, ssl_verify: Optional[Dict[str, Any]]=None):
"""Initialize with necessary components."""
warnings.warn(
'ElasticVectorSearch will be removed in a future release. SeeElasticsearch integration docs on how to upgrade.'
)
try:
import elasticsearch
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
self.embedding = embedding
self.index_name = index_name
_ssl_verify = ssl_verify or {}
try:
self.client = elasticsearch.Elasticsearch(elasticsearch_url, **
_ssl_verify, headers={'user-agent': self.get_user_agent()})
except ValueError as e:
raise ValueError(
f'Your elasticsearch client string is mis-formatted. Got error: {e} '
)
|
Initialize with necessary components.
|
test_importable_all
|
for path in glob.glob('../core/langchain_core/*'):
relative_path = Path(path).parts[-1]
if relative_path.endswith('.typed'):
continue
module_name = relative_path.split('.')[0]
module = importlib.import_module('langchain_core.' + module_name)
all_ = getattr(module, '__all__', [])
for cls_ in all_:
getattr(module, cls_)
|
def test_importable_all() ->None:
for path in glob.glob('../core/langchain_core/*'):
relative_path = Path(path).parts[-1]
if relative_path.endswith('.typed'):
continue
module_name = relative_path.split('.')[0]
module = importlib.import_module('langchain_core.' + module_name)
all_ = getattr(module, '__all__', [])
for cls_ in all_:
getattr(module, cls_)
| null |
__init__
|
"""Initialize with BOS config, bucket and prefix.
:param conf(BosConfig): BOS config.
:param bucket(str): BOS bucket.
:param prefix(str): prefix.
"""
self.conf = conf
self.bucket = bucket
self.prefix = prefix
|
def __init__(self, conf: Any, bucket: str, prefix: str=''):
"""Initialize with BOS config, bucket and prefix.
:param conf(BosConfig): BOS config.
:param bucket(str): BOS bucket.
:param prefix(str): prefix.
"""
self.conf = conf
self.bucket = bucket
self.prefix = prefix
|
Initialize with BOS config, bucket and prefix.
:param conf(BosConfig): BOS config.
:param bucket(str): BOS bucket.
:param prefix(str): prefix.
|
validate_prompt
|
"""Validate that prompt matches format."""
prompt = values['llm_chain'].prompt
if 'agent_scratchpad' not in prompt.input_variables:
logger.warning(
'`agent_scratchpad` should be a variable in prompt.input_variables. Did not find it, so adding it at the end.'
)
prompt.input_variables.append('agent_scratchpad')
if isinstance(prompt, PromptTemplate):
prompt.template += '\n{agent_scratchpad}'
elif isinstance(prompt, FewShotPromptTemplate):
prompt.suffix += '\n{agent_scratchpad}'
else:
raise ValueError(f'Got unexpected prompt type {type(prompt)}')
return values
|
@root_validator()
def validate_prompt(cls, values: Dict) ->Dict:
"""Validate that prompt matches format."""
prompt = values['llm_chain'].prompt
if 'agent_scratchpad' not in prompt.input_variables:
logger.warning(
'`agent_scratchpad` should be a variable in prompt.input_variables. Did not find it, so adding it at the end.'
)
prompt.input_variables.append('agent_scratchpad')
if isinstance(prompt, PromptTemplate):
prompt.template += '\n{agent_scratchpad}'
elif isinstance(prompt, FewShotPromptTemplate):
prompt.suffix += '\n{agent_scratchpad}'
else:
raise ValueError(f'Got unexpected prompt type {type(prompt)}')
return values
|
Validate that prompt matches format.
|
test__convert_message_to_dict_function
|
message = FunctionMessage(name='foo', content='bar')
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert 'Got unknown type' in str(e)
|
def test__convert_message_to_dict_function() ->None:
message = FunctionMessage(name='foo', content='bar')
with pytest.raises(TypeError) as e:
_convert_message_to_dict(message)
assert 'Got unknown type' in str(e)
| null |
__init__
|
"""Initialize with supabase client."""
try:
import supabase
except ImportError:
raise ImportError(
'Could not import supabase python package. Please install it with `pip install supabase`.'
)
self._client = client
self._embedding: Embeddings = embedding
self.table_name = table_name or 'documents'
self.query_name = query_name or 'match_documents'
self.chunk_size = chunk_size or 500
|
def __init__(self, client: supabase.client.Client, embedding: Embeddings,
table_name: str, chunk_size: int=500, query_name: Union[str, None]=None
) ->None:
"""Initialize with supabase client."""
try:
import supabase
except ImportError:
raise ImportError(
'Could not import supabase python package. Please install it with `pip install supabase`.'
)
self._client = client
self._embedding: Embeddings = embedding
self.table_name = table_name or 'documents'
self.query_name = query_name or 'match_documents'
self.chunk_size = chunk_size or 500
|
Initialize with supabase client.
|
test_csv_loader_load_empty_file
|
file_path = self._get_csv_file_path('test_empty.csv')
expected_docs: list = []
loader = CSVLoader(file_path=file_path)
result = loader.load()
assert result == expected_docs
|
def test_csv_loader_load_empty_file(self) ->None:
    """Loading an empty CSV file should yield an empty document list."""
    loader = CSVLoader(file_path=self._get_csv_file_path('test_empty.csv'))
    assert loader.load() == []
| null |
process_attachment
|
try:
from PIL import Image
except ImportError:
raise ImportError(
'`Pillow` package not found, please run `pip install Pillow`')
attachments = self.confluence.get_attachments_from_content(page_id)['results']
texts = []
for attachment in attachments:
media_type = attachment['metadata']['mediaType']
absolute_url = self.base_url + attachment['_links']['download']
title = attachment['title']
try:
if media_type == 'application/pdf':
text = title + self.process_pdf(absolute_url, ocr_languages)
elif media_type == 'image/png' or media_type == 'image/jpg' or media_type == 'image/jpeg':
text = title + self.process_image(absolute_url, ocr_languages)
elif media_type == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
text = title + self.process_doc(absolute_url)
elif media_type == 'application/vnd.ms-excel':
text = title + self.process_xls(absolute_url)
elif media_type == 'image/svg+xml':
text = title + self.process_svg(absolute_url, ocr_languages)
else:
continue
texts.append(text)
except requests.HTTPError as e:
if e.response.status_code == 404:
print(f'Attachment not found at {absolute_url}')
continue
else:
raise
return texts
|
def process_attachment(self, page_id: str, ocr_languages: Optional[str]=None
    ) ->List[str]:
    """Extract text from every supported attachment on a Confluence page.

    Unsupported media types are skipped; a 404 on download is logged and
    skipped, any other HTTP error propagates.
    """
    # Pillow is required by the image-processing helpers used below.
    try:
        from PIL import Image
    except ImportError:
        raise ImportError(
            '`Pillow` package not found, please run `pip install Pillow`')
    results = self.confluence.get_attachments_from_content(page_id)['results']
    extracted: List[str] = []
    for attachment in results:
        media_type = attachment['metadata']['mediaType']
        absolute_url = self.base_url + attachment['_links']['download']
        title = attachment['title']
        try:
            if media_type == 'application/pdf':
                body = self.process_pdf(absolute_url, ocr_languages)
            elif media_type in ('image/png', 'image/jpg', 'image/jpeg'):
                body = self.process_image(absolute_url, ocr_languages)
            elif media_type == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
                body = self.process_doc(absolute_url)
            elif media_type == 'application/vnd.ms-excel':
                body = self.process_xls(absolute_url)
            elif media_type == 'image/svg+xml':
                body = self.process_svg(absolute_url, ocr_languages)
            else:
                # Media type we cannot extract text from; skip it.
                continue
            extracted.append(title + body)
        except requests.HTTPError as e:
            if e.response.status_code == 404:
                print(f'Attachment not found at {absolute_url}')
                continue
            raise
    return extracted
| null |
test_specifying_parser_via_class_attribute
|
class TextLoader(GenericLoader):
"""Parser created for testing purposes."""
@staticmethod
def get_parser(**kwargs: Any) ->BaseBlobParser:
return TextParser()
loader = TextLoader.from_filesystem(toy_dir, suffixes=['.txt'])
docs = loader.load()
assert len(docs) == 3
assert docs[0].page_content == 'This is a test.txt file.'
|
def test_specifying_parser_via_class_attribute(toy_dir: str) ->None:
    """A GenericLoader subclass can hard-wire its parser via get_parser."""

    class TextLoader(GenericLoader):
        """Parser created for testing purposes."""

        @staticmethod
        def get_parser(**kwargs: Any) ->BaseBlobParser:
            return TextParser()
    loader = TextLoader.from_filesystem(toy_dir, suffixes=['.txt'])
    documents = loader.load()
    assert len(documents) == 3
    assert documents[0].page_content == 'This is a test.txt file.'
| null |
_anonymize
|
"""Anonymize text.
Each PII entity is replaced with a fake value.
Each time fake values will be different, as they are generated randomly.
PresidioAnonymizer has no built-in memory -
so it will not remember the effects of anonymizing previous texts.
>>> anonymizer = PresidioAnonymizer()
>>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
'My name is Noah Rhodes. Hi Noah Rhodes!'
>>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
'My name is Brett Russell. Hi Brett Russell!'
Args:
text: text to anonymize
language: language to use for analysis of PII
If None, the first (main) language in the list
of languages specified in the configuration will be used.
"""
if language is None:
language = self.supported_languages[0]
if language not in self.supported_languages:
raise ValueError(
f"Language '{language}' is not supported. Supported languages are: {self.supported_languages}. Change your language configuration file to add more languages."
)
supported_entities = []
for recognizer in self._analyzer.get_recognizers(language):
recognizer_dict = recognizer.to_dict()
supported_entities.extend([recognizer_dict['supported_entity']] if
'supported_entity' in recognizer_dict else recognizer_dict[
'supported_entities'])
entities_to_analyze = list(set(supported_entities).intersection(set(self.
analyzed_fields)))
analyzer_results = self._analyzer.analyze(text, entities=
entities_to_analyze, language=language, allow_list=allow_list)
filtered_analyzer_results = (self._anonymizer.
_remove_conflicts_and_get_text_manipulation_data(analyzer_results))
anonymizer_results = self._anonymizer.anonymize(text, analyzer_results=
analyzer_results, operators=self.operators)
anonymizer_mapping = create_anonymizer_mapping(text,
filtered_analyzer_results, anonymizer_results)
return exact_matching_strategy(text, anonymizer_mapping)
|
def _anonymize(self, text: str, language: Optional[str]=None, allow_list:
    Optional[List[str]]=None) ->str:
    """Anonymize text.
    Each PII entity is replaced with a fake value.
    Each time fake values will be different, as they are generated randomly.
    PresidioAnonymizer has no built-in memory -
    so it will not remember the effects of anonymizing previous texts.
    >>> anonymizer = PresidioAnonymizer()
    >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
    'My name is Noah Rhodes. Hi Noah Rhodes!'
    >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
    'My name is Brett Russell. Hi Brett Russell!'
    Args:
        text: text to anonymize
        language: language to use for analysis of PII
            If None, the first (main) language in the list
            of languages specified in the configuration will be used.
        allow_list: tokens the analyzer must leave untouched.
    """
    # Default to the first (main) configured language.
    if language is None:
        language = self.supported_languages[0]
    if language not in self.supported_languages:
        raise ValueError(
            f"Language '{language}' is not supported. Supported languages are: {self.supported_languages}. Change your language configuration file to add more languages."
            )
    # Collect every entity type recognizable for this language; a recognizer
    # exposes either a single 'supported_entity' or a 'supported_entities'
    # list depending on its implementation.
    supported_entities = []
    for recognizer in self._analyzer.get_recognizers(language):
        recognizer_dict = recognizer.to_dict()
        supported_entities.extend([recognizer_dict['supported_entity']] if
            'supported_entity' in recognizer_dict else recognizer_dict[
            'supported_entities'])
    # Only analyze the entity types the user actually asked for.
    entities_to_analyze = list(set(supported_entities).intersection(set(
        self.analyzed_fields)))
    analyzer_results = self._analyzer.analyze(text, entities=
        entities_to_analyze, language=language, allow_list=allow_list)
    # NOTE(review): relies on a private presidio method to deduplicate
    # overlapping analyzer hits before building the mapping — confirm this
    # still exists on presidio upgrades.
    filtered_analyzer_results = (self._anonymizer.
        _remove_conflicts_and_get_text_manipulation_data(analyzer_results))
    anonymizer_results = self._anonymizer.anonymize(text, analyzer_results=
        analyzer_results, operators=self.operators)
    # Map each original PII span to its fake replacement, then apply the
    # substitutions using exact string matching.
    anonymizer_mapping = create_anonymizer_mapping(text,
        filtered_analyzer_results, anonymizer_results)
    return exact_matching_strategy(text, anonymizer_mapping)
|
Anonymize text.
Each PII entity is replaced with a fake value.
Each time fake values will be different, as they are generated randomly.
PresidioAnonymizer has no built-in memory -
so it will not remember the effects of anonymizing previous texts.
>>> anonymizer = PresidioAnonymizer()
>>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
'My name is Noah Rhodes. Hi Noah Rhodes!'
>>> anonymizer.anonymize("My name is John Doe. Hi John Doe!")
'My name is Brett Russell. Hi Brett Russell!'
Args:
text: text to anonymize
language: language to use for analysis of PII
If None, the first (main) language in the list
of languages specified in the configuration will be used.
|
import_jsonformer
|
"""Lazily import jsonformer."""
try:
import jsonformer
except ImportError:
raise ImportError(
'Could not import jsonformer python package. Please install it with `pip install jsonformer`.'
)
return jsonformer
|
def import_jsonformer() ->jsonformer:
    """Lazily import the ``jsonformer`` package.

    Raises:
        ImportError: If the jsonformer package is not installed.
    """
    try:
        import jsonformer
    except ImportError:
        raise ImportError(
            'Could not import jsonformer python package. Please install it with `pip install jsonformer`.'
            )
    return jsonformer
|
Lazily import jsonformer.
|
test_tracing_sequential
|
from langchain.agents import AgentType, initialize_agent, load_tools
os.environ['LANGCHAIN_TRACING'] = 'true'
for q in questions[:3]:
llm = OpenAI(temperature=0)
tools = load_tools(['llm-math', 'serpapi'], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run(q)
|
def test_tracing_sequential() ->None:
    """Smoke-test tracing across several sequential agent runs."""
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ['LANGCHAIN_TRACING'] = 'true'
    for question in questions[:3]:
        llm = OpenAI(temperature=0)
        tools = load_tools(['llm-math', 'serpapi'], llm=llm)
        agent = initialize_agent(tools, llm, agent=AgentType.
            ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
        agent.run(question)
| null |
test_cohere_embedding_documents
|
"""Test cohere embeddings."""
documents = ['foo bar']
embedding = CohereEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 2048
|
def test_cohere_embedding_documents() ->None:
    """Embedding one document returns a single 2048-dim vector."""
    embedding = CohereEmbeddings()
    output = embedding.embed_documents(['foo bar'])
    assert len(output) == 1
    assert len(output[0]) == 2048
|
Test cohere embeddings.
|
__str__
|
return str(self.value)
|
def __str__(self) ->str:
    """Render the wrapped value as its string form."""
    value = self.value
    return str(value)
| null |
random_name
|
"""Generate a random name."""
adjective = random.choice(adjectives)
noun = random.choice(nouns)
number = random.randint(1, 100)
return f'{adjective}-{noun}-{number}'
|
def random_name() ->str:
    """Generate a random adjective-noun-number name."""
    # Compose from the module-level word lists plus a 1-100 suffix.
    parts = random.choice(adjectives), random.choice(nouns), random.randint(
        1, 100)
    return '{}-{}-{}'.format(*parts)
|
Generate a random name.
|
_call
|
outputs = {}
for var in self.output_variables:
variables = [inputs[k] for k in self.input_variables]
outputs[var] = f"{' '.join(variables)}foo"
return outputs
|
def _call(self, inputs: Dict[str, str], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, str]:
    """Fake chain body: join the input values and append 'foo' per output key."""
    joined = ' '.join(inputs[key] for key in self.input_variables)
    return {name: f'{joined}foo' for name in self.output_variables}
| null |
consolidate_updates
|
self.vector_index = self.vector_index.consolidate_updates(**kwargs)
|
def consolidate_updates(self, **kwargs: Any) ->None:
    """Compact pending updates by delegating to the underlying vector index."""
    compacted = self.vector_index.consolidate_updates(**kwargs)
    self.vector_index = compacted
| null |
test_watsonxllm_call
|
watsonxllm = WatsonxLLM(model_id='google/flan-ul2', url=
'https://us-south.ml.cloud.ibm.com', apikey='***', project_id='***')
response = watsonxllm('What color sunflower is?')
assert isinstance(response, str)
|
def test_watsonxllm_call() ->None:
    """The WatsonX LLM should answer a simple prompt with a string."""
    watsonxllm = WatsonxLLM(model_id='google/flan-ul2', url=
        'https://us-south.ml.cloud.ibm.com', apikey='***', project_id='***')
    answer = watsonxllm('What color sunflower is?')
    assert isinstance(answer, str)
| null |
run
|
"""Run the tool."""
parsed_input = self._parse_input(tool_input)
if not self.verbose and verbose is not None:
verbose_ = verbose
else:
verbose_ = self.verbose
callback_manager = CallbackManager.configure(callbacks, self.callbacks,
verbose_, tags, self.tags, metadata, self.metadata)
new_arg_supported = signature(self._run).parameters.get('run_manager')
run_manager = callback_manager.on_tool_start({'name': self.name,
'description': self.description}, tool_input if isinstance(tool_input,
str) else str(tool_input), color=start_color, name=run_name, **kwargs)
try:
tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
observation = self._run(*tool_args, run_manager=run_manager, **tool_kwargs
) if new_arg_supported else self._run(*tool_args, **tool_kwargs)
except ToolException as e:
if not self.handle_tool_error:
run_manager.on_tool_error(e)
raise e
elif isinstance(self.handle_tool_error, bool):
if e.args:
observation = e.args[0]
else:
observation = 'Tool execution error'
elif isinstance(self.handle_tool_error, str):
observation = self.handle_tool_error
elif callable(self.handle_tool_error):
observation = self.handle_tool_error(e)
else:
raise ValueError(
f'Got unexpected type of `handle_tool_error`. Expected bool, str or callable. Received: {self.handle_tool_error}'
)
run_manager.on_tool_end(str(observation), color='red', name=self.name,
**kwargs)
return observation
except (Exception, KeyboardInterrupt) as e:
run_manager.on_tool_error(e)
raise e
else:
run_manager.on_tool_end(str(observation), color=color, name=self.name,
**kwargs)
return observation
|
def run(self, tool_input: Union[str, Dict], verbose: Optional[bool]=None,
    start_color: Optional[str]='green', color: Optional[str]='green',
    callbacks: Callbacks=None, *, tags: Optional[List[str]]=None, metadata:
    Optional[Dict[str, Any]]=None, run_name: Optional[str]=None, **kwargs: Any
    ) ->Any:
    """Run the tool.

    Parses the raw input, wraps the actual ``_run`` call in callback-manager
    start/end/error events, and applies the configured ``handle_tool_error``
    policy when the tool raises :class:`ToolException`.

    Args:
        tool_input: Raw input for the tool, a plain string or a dict.
        verbose: Per-call verbosity override; only honored when the tool
            instance itself is not verbose.
        start_color: Color for the on_tool_start callback output.
        color: Color for the on_tool_end callback output on success.
        callbacks: Callbacks to run alongside the tool's own callbacks.
        tags: Extra tags passed to the callback manager.
        metadata: Extra metadata passed to the callback manager.
        run_name: Optional name for this run.
        **kwargs: Forwarded to the callback start/end events.

    Returns:
        The observation produced by ``_run``, or the substituted observation
        when a ``ToolException`` was handled.

    Raises:
        ToolException: Re-raised when ``handle_tool_error`` is falsy.
        ValueError: When ``handle_tool_error`` has an unsupported type.
    """
    parsed_input = self._parse_input(tool_input)
    # A per-call `verbose` flag only wins when the instance is not verbose.
    if not self.verbose and verbose is not None:
        verbose_ = verbose
    else:
        verbose_ = self.verbose
    callback_manager = CallbackManager.configure(callbacks, self.callbacks,
        verbose_, tags, self.tags, metadata, self.metadata)
    # Older _run implementations may not accept `run_manager`; detect it.
    new_arg_supported = signature(self._run).parameters.get('run_manager')
    run_manager = callback_manager.on_tool_start({'name': self.name,
        'description': self.description}, tool_input if isinstance(
        tool_input, str) else str(tool_input), color=start_color, name=
        run_name, **kwargs)
    try:
        tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
        observation = self._run(*tool_args, run_manager=run_manager, **
            tool_kwargs) if new_arg_supported else self._run(*tool_args, **
            tool_kwargs)
    except ToolException as e:
        # Apply the configured error-handling policy for tool errors.
        if not self.handle_tool_error:
            run_manager.on_tool_error(e)
            raise e
        elif isinstance(self.handle_tool_error, bool):
            if e.args:
                observation = e.args[0]
            else:
                observation = 'Tool execution error'
        elif isinstance(self.handle_tool_error, str):
            observation = self.handle_tool_error
        elif callable(self.handle_tool_error):
            observation = self.handle_tool_error(e)
        else:
            raise ValueError(
                f'Got unexpected type of `handle_tool_error`. Expected bool, str or callable. Received: {self.handle_tool_error}'
                )
        # Handled tool errors are reported in red and still returned.
        run_manager.on_tool_end(str(observation), color='red', name=self.
            name, **kwargs)
        return observation
    except (Exception, KeyboardInterrupt) as e:
        run_manager.on_tool_error(e)
        raise e
    else:
        run_manager.on_tool_end(str(observation), color=color, name=self.
            name, **kwargs)
        return observation
|
Run the tool.
|
test_json_equality_evaluator_requires_input
|
assert json_equality_evaluator.requires_input is False
|
def test_json_equality_evaluator_requires_input(json_equality_evaluator:
    JsonEqualityEvaluator) ->None:
    """JSON equality evaluation works on predictions alone; no input needed."""
    assert json_equality_evaluator.requires_input is False
| null |
embeddings
|
return self.embedding_function
|
@property
def embeddings(self) ->Embeddings:
    """Expose the embedding function backing this store."""
    return self.embedding_function
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The public API surface must match the expected export list exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
__init__
|
"""
Initializes the AssemblyAI AudioTranscriptLoader.
Args:
file_path: An URL or a local file path.
transcript_format: Transcript format to use.
See class ``TranscriptFormat`` for more info.
config: Transcription options and features. If ``None`` is given,
the Transcriber's default configuration will be used.
api_key: AssemblyAI API key.
"""
try:
import assemblyai
except ImportError:
raise ImportError(
'Could not import assemblyai python package. Please install it with `pip install assemblyai`.'
)
if api_key is not None:
assemblyai.settings.api_key = api_key
self.file_path = file_path
self.transcript_format = transcript_format
self.transcriber = assemblyai.Transcriber(config=config)
|
def __init__(self, file_path: str, *, transcript_format: TranscriptFormat=
    TranscriptFormat.TEXT, config: Optional[assemblyai.TranscriptionConfig]
    =None, api_key: Optional[str]=None):
    """Initialize the AssemblyAI AudioTranscriptLoader.

    Args:
        file_path: A URL or a local file path.
        transcript_format: Transcript format to use; see ``TranscriptFormat``.
        config: Transcription options and features. When ``None``, the
            Transcriber's default configuration is used.
        api_key: AssemblyAI API key; when given it is stored in the global
            assemblyai settings.
    """
    # Fail fast if the optional dependency is missing.
    try:
        import assemblyai
    except ImportError:
        raise ImportError(
            'Could not import assemblyai python package. Please install it with `pip install assemblyai`.'
            )
    # Set the key globally before constructing the Transcriber.
    if api_key is not None:
        assemblyai.settings.api_key = api_key
    self.file_path = file_path
    self.transcript_format = transcript_format
    self.transcriber = assemblyai.Transcriber(config=config)
|
Initializes the AssemblyAI AudioTranscriptLoader.
Args:
file_path: An URL or a local file path.
transcript_format: Transcript format to use.
See class ``TranscriptFormat`` for more info.
config: Transcription options and features. If ``None`` is given,
the Transcriber's default configuration will be used.
api_key: AssemblyAI API key.
|
test_ToSelectFrom_not_a_list_throws
|
llm, PROMPT = setup()
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=
False, model=MockEncoder()))
actions = {'actions': ['0', '1', '2']}
with pytest.raises(ValueError):
chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain.
ToSelectFrom(actions))
|
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_ToSelectFrom_not_a_list_throws() ->None:
    """Passing a dict (not a list) to ToSelectFrom must raise ValueError."""
    llm, PROMPT = setup()
    embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
        model=MockEncoder())
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
        feature_embedder=embedder)
    with pytest.raises(ValueError):
        chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain.
            ToSelectFrom({'actions': ['0', '1', '2']}))
| null |
_prepare_query
|
params_dict: Dict[str, Union[str, bytes, float]] = {'vector':
_array_to_buffer(query_embedding, self._schema.vector_dtype)}
return_fields = [self._schema.content_key]
if with_distance:
return_fields.append('distance')
if with_metadata:
return_fields.extend(self._schema.metadata_keys)
if distance_threshold:
params_dict['distance_threshold'] = distance_threshold
return self._prepare_range_query(k, filter=filter, return_fields=
return_fields), params_dict
return self._prepare_vector_query(k, filter=filter, return_fields=return_fields
), params_dict
|
def _prepare_query(self, query_embedding: List[float], k: int=4, filter:
    Optional[RedisFilterExpression]=None, distance_threshold: Optional[
    float]=None, with_metadata: bool=True, with_distance: bool=False) ->Tuple[
    'Query', Dict[str, Any]]:
    """Build a Redis search query plus its parameter dict for a vector search.

    Args:
        query_embedding: Embedding vector to search with.
        k: Number of results to return.
        filter: Optional Redis filter expression to narrow the search.
        distance_threshold: When given, issue a range query bounded by this
            distance instead of a plain top-k vector query.
        with_metadata: Whether to include the metadata fields in results.
        with_distance: Whether to include the computed distance field.

    Returns:
        Tuple of the prepared query object and its query parameters.
    """
    params_dict: Dict[str, Union[str, bytes, float]] = {'vector':
        _array_to_buffer(query_embedding, self._schema.vector_dtype)}
    return_fields = [self._schema.content_key]
    if with_distance:
        return_fields.append('distance')
    if with_metadata:
        return_fields.extend(self._schema.metadata_keys)
    # Compare against None explicitly so a threshold of 0.0 still triggers
    # a range query (a plain truthiness check silently ignored it).
    if distance_threshold is not None:
        params_dict['distance_threshold'] = distance_threshold
        return self._prepare_range_query(k, filter=filter, return_fields=
            return_fields), params_dict
    return self._prepare_vector_query(k, filter=filter, return_fields=
        return_fields), params_dict
| null |
get_schema
|
"""
Returns the schema of the graph database.
"""
return self.schema
|
@property
def get_schema(self) ->str:
    """Return the cached schema string of the graph database."""
    return self.schema
|
Returns the schema of the graph database.
|
_import_azure_cognitive_services_AzureCogsFormRecognizerTool
|
from langchain_community.tools.azure_cognitive_services import AzureCogsFormRecognizerTool
return AzureCogsFormRecognizerTool
|
def _import_azure_cognitive_services_AzureCogsFormRecognizerTool() ->Any:
    """Lazily import AzureCogsFormRecognizerTool to keep module import light."""
    from langchain_community.tools.azure_cognitive_services import AzureCogsFormRecognizerTool
    return AzureCogsFormRecognizerTool
| null |
embed_image
|
try:
from PIL import Image as _PILImage
except ImportError:
raise ImportError('Please install the PIL library: pip install pillow')
pil_images = [_PILImage.open(uri) for uri in uris]
image_features = []
for pil_image in pil_images:
preprocessed_image = self.preprocess(pil_image).unsqueeze(0)
embeddings_tensor = self.model.encode_image(preprocessed_image)
norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True)
normalized_embeddings_tensor = embeddings_tensor.div(norm)
embeddings_list = normalized_embeddings_tensor.squeeze(0).tolist()
image_features.append(embeddings_list)
return image_features
|
def embed_image(self, uris: List[str]) ->List[List[float]]:
    """Embed images at the given paths into L2-normalized feature vectors."""
    try:
        from PIL import Image as _PILImage
    except ImportError:
        raise ImportError('Please install the PIL library: pip install pillow')
    # Open every image up front, then encode one at a time.
    images = [_PILImage.open(uri) for uri in uris]
    features = []
    for image in images:
        batch = self.preprocess(image).unsqueeze(0)
        raw = self.model.encode_image(batch)
        # Scale each embedding to unit L2 norm before returning it.
        unit = raw.div(raw.norm(p=2, dim=1, keepdim=True))
        features.append(unit.squeeze(0).tolist())
    return features
| null |
test_update_with_delayed_score
|
llm, PROMPT = setup()
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
selection_scorer=None, feature_embedder=pick_best_chain.
PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()))
actions = ['0', '1', '2']
response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain.
ToSelectFrom(actions))
assert response['response'] == 'hey'
selection_metadata = response['selection_metadata']
assert selection_metadata.selected.score is None
chain.update_with_delayed_score(chain_response=response, score=100)
assert selection_metadata.selected.score == 100.0
|
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_update_with_delayed_score() ->None:
    """A delayed score must be written back onto the selection metadata."""
    llm, PROMPT = setup()
    embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
        model=MockEncoder())
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
        selection_scorer=None, feature_embedder=embedder)
    response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain.
        ToSelectFrom(['0', '1', '2']))
    assert response['response'] == 'hey'
    metadata = response['selection_metadata']
    assert metadata.selected.score is None
    chain.update_with_delayed_score(chain_response=response, score=100)
    assert metadata.selected.score == 100.0
| null |
_llm_type
|
"""Return type of llm."""
return 'gooseai'
|
@property
def _llm_type(self) ->str:
    """Identifier string for this LLM provider."""
    return 'gooseai'
|
Return type of llm.
|
from_documents
|
"""Create an Epsilla vectorstore from a list of documents.
Args:
texts (List[str]): List of text data to be inserted.
embeddings (Embeddings): Embedding function.
client (pyepsilla.vectordb.Client): Epsilla client to connect to.
metadatas (Optional[List[dict]]): Metadata for each text.
Defaults to None.
db_path (Optional[str]): The path where the database will be persisted.
Defaults to "/tmp/langchain-epsilla".
db_name (Optional[str]): Give a name to the loaded database.
Defaults to "langchain_store".
collection_name (Optional[str]): Which collection to use.
Defaults to "langchain_collection".
If provided, default collection name will be set as well.
drop_old (Optional[bool]): Whether to drop the previous collection
and create a new one. Defaults to False.
Returns:
Epsilla: Epsilla vector store.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(texts, embedding, metadatas=metadatas, client=client,
db_path=db_path, db_name=db_name, collection_name=collection_name,
drop_old=drop_old, **kwargs)
|
@classmethod
def from_documents(cls: Type[Epsilla], documents: List[Document], embedding:
    Embeddings, client: Any=None, db_path: Optional[str]=
    _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str]=
    _LANGCHAIN_DEFAULT_DB_NAME, collection_name: Optional[str]=
    _LANGCHAIN_DEFAULT_TABLE_NAME, drop_old: Optional[bool]=False, **kwargs:
    Any) ->Epsilla:
    """Create an Epsilla vectorstore from a list of documents.

    Splits each document into its text and metadata and delegates to
    ``from_texts``.

    Args:
        documents: Documents whose contents/metadata are inserted.
        embedding: Embedding function.
        client (pyepsilla.vectordb.Client): Epsilla client to connect to.
        db_path: Where the database is persisted.
            Defaults to "/tmp/langchain-epsilla".
        db_name: Name for the loaded database. Defaults to "langchain_store".
        collection_name: Collection to use; set as default when provided.
            Defaults to "langchain_collection".
        drop_old: Whether to drop any previous collection and create a new
            one. Defaults to False.

    Returns:
        Epsilla: Epsilla vector store.
    """
    texts = [document.page_content for document in documents]
    metadatas = [document.metadata for document in documents]
    return cls.from_texts(texts, embedding, metadatas=metadatas, client=
        client, db_path=db_path, db_name=db_name, collection_name=
        collection_name, drop_old=drop_old, **kwargs)
|
Create an Epsilla vectorstore from a list of documents.
Args:
texts (List[str]): List of text data to be inserted.
embeddings (Embeddings): Embedding function.
client (pyepsilla.vectordb.Client): Epsilla client to connect to.
metadatas (Optional[List[dict]]): Metadata for each text.
Defaults to None.
db_path (Optional[str]): The path where the database will be persisted.
Defaults to "/tmp/langchain-epsilla".
db_name (Optional[str]): Give a name to the loaded database.
Defaults to "langchain_store".
collection_name (Optional[str]): Which collection to use.
Defaults to "langchain_collection".
If provided, default collection name will be set as well.
drop_old (Optional[bool]): Whether to drop the previous collection
and create a new one. Defaults to False.
Returns:
Epsilla: Epsilla vector store.
|
_import_docarray_inmemory
|
from langchain_community.vectorstores.docarray import DocArrayInMemorySearch
return DocArrayInMemorySearch
|
def _import_docarray_inmemory() ->Any:
    """Lazily import DocArrayInMemorySearch to keep module import light."""
    from langchain_community.vectorstores.docarray import DocArrayInMemorySearch
    return DocArrayInMemorySearch
| null |
combine_docs
|
"""Combine by mapping first chain over all, then stuffing into final chain.
Args:
docs: List of documents to combine
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._construct_initial_inputs(docs, **kwargs)
res = self.initial_llm_chain.predict(callbacks=callbacks, **inputs)
refine_steps = [res]
for doc in docs[1:]:
base_inputs = self._construct_refine_inputs(doc, res)
inputs = {**base_inputs, **kwargs}
res = self.refine_llm_chain.predict(callbacks=callbacks, **inputs)
refine_steps.append(res)
return self._construct_result(refine_steps, res)
|
def combine_docs(self, docs: List[Document], callbacks: Callbacks=None, **
    kwargs: Any) ->Tuple[str, dict]:
    """Combine documents by iteratively refining an initial answer.

    The first document(s) seed ``initial_llm_chain``; each remaining
    document is folded in through ``refine_llm_chain``, refining the
    previous result.

    Args:
        docs: List of documents to combine
        callbacks: Callbacks to be passed through
        **kwargs: additional parameters to be passed to LLM calls (like other
            input variables besides the documents)

    Returns:
        The first element returned is the single string output. The second
        element returned is a dictionary of other keys to return.
    """
    inputs = self._construct_initial_inputs(docs, **kwargs)
    res = self.initial_llm_chain.predict(callbacks=callbacks, **inputs)
    # Keep every intermediate refinement so callers can inspect the steps.
    refine_steps = [res]
    for doc in docs[1:]:
        base_inputs = self._construct_refine_inputs(doc, res)
        inputs = {**base_inputs, **kwargs}
        res = self.refine_llm_chain.predict(callbacks=callbacks, **inputs)
        refine_steps.append(res)
    return self._construct_result(refine_steps, res)
|
Combine by mapping first chain over all, then stuffing into final chain.
Args:
docs: List of documents to combine
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
|
test_parse_input
|
input_text = '{"url": "https://example.com", "data": {"key": "value"}}'
expected_output = {'url': 'https://example.com', 'data': {'key': 'value'}}
assert _parse_input(input_text) == expected_output
|
def test_parse_input() ->None:
    """A JSON request payload string is parsed into the matching dict."""
    raw = '{"url": "https://example.com", "data": {"key": "value"}}'
    parsed = _parse_input(raw)
    assert parsed == {'url': 'https://example.com', 'data': {'key': 'value'}}
| null |
add_texts
|
"""Insert more texts through the embeddings and add to the VectorStore.
Args:
texts: Iterable of strings to add to the VectorStore.
ids: Optional list of ids to associate with the texts.
batch_size: Batch size of insertion
metadata: Optional column data to be inserted
Returns:
List of ids from adding the texts into the VectorStore.
"""
ids = ids or [sha1(t.encode('utf-8')).hexdigest() for t in texts]
colmap_ = self.config.column_map
transac = []
column_names = {colmap_['id']: ids, colmap_['document']: texts, colmap_[
'embedding']: self.embedding_function.embed_documents(list(texts))}
metadatas = metadatas or [{} for _ in texts]
column_names[colmap_['metadata']] = map(json.dumps, metadatas)
assert len(set(colmap_) - set(column_names)) >= 0
keys, values = zip(*column_names.items())
try:
t = None
for v in self.pgbar(zip(*values), desc='Inserting data...', total=len(
metadatas)):
assert len(v[keys.index(self.config.column_map['embedding'])]
) == self.dim
transac.append(v)
if len(transac) == batch_size:
if t:
t.join()
t = Thread(target=self._insert, args=[transac, keys])
t.start()
transac = []
if len(transac) > 0:
if t:
t.join()
self._insert(transac, keys)
return [i for i in ids]
except Exception as e:
logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m')
return []
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, batch_size: int=32, ids: Optional[Iterable[str]]=None, **kwargs: Any
    ) ->List[str]:
    """Insert more texts through the embeddings and add to the VectorStore.
    Args:
        texts: Iterable of strings to add to the VectorStore.
        metadatas: Optional per-text metadata dicts to be inserted.
        batch_size: Batch size of insertion
        ids: Optional list of ids to associate with the texts.
    Returns:
        List of ids from adding the texts into the VectorStore.
        On any insertion error the failure is logged and an empty list
        is returned.
    """
    # Default ids are SHA-1 content hashes, making repeated inserts of the
    # same text map to the same row id.
    ids = ids or [sha1(t.encode('utf-8')).hexdigest() for t in texts]
    colmap_ = self.config.column_map
    transac = []
    column_names = {colmap_['id']: ids, colmap_['document']: texts, colmap_
        ['embedding']: self.embedding_function.embed_documents(list(texts))}
    metadatas = metadatas or [{} for _ in texts]
    column_names[colmap_['metadata']] = map(json.dumps, metadatas)
    assert len(set(colmap_) - set(column_names)) >= 0
    keys, values = zip(*column_names.items())
    try:
        # Insert in batches on a background thread; join the previous thread
        # before starting the next batch so at most one insert is in flight.
        t = None
        for v in self.pgbar(zip(*values), desc='Inserting data...', total=
            len(metadatas)):
            assert len(v[keys.index(self.config.column_map['embedding'])]
                ) == self.dim
            transac.append(v)
            if len(transac) == batch_size:
                if t:
                    t.join()
                t = Thread(target=self._insert, args=[transac, keys])
                t.start()
                transac = []
        if len(transac) > 0:
            # Flush the final partial batch synchronously.
            if t:
                t.join()
            self._insert(transac, keys)
        return [i for i in ids]
    except Exception as e:
        logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m'
            )
        return []
|
Insert more texts through the embeddings and add to the VectorStore.
Args:
texts: Iterable of strings to add to the VectorStore.
ids: Optional list of ids to associate with the texts.
batch_size: Batch size of insertion
metadata: Optional column data to be inserted
Returns:
List of ids from adding the texts into the VectorStore.
|
__add__
|
"""Combine two prompt templates.
Args:
other: Another prompt template.
Returns:
Combined prompt template.
"""
if isinstance(other, ChatPromptTemplate):
return ChatPromptTemplate(messages=self.messages + other.messages)
elif isinstance(other, (BaseMessagePromptTemplate, BaseMessage,
BaseChatPromptTemplate)):
return ChatPromptTemplate(messages=self.messages + [other])
elif isinstance(other, (list, tuple)):
_other = ChatPromptTemplate.from_messages(other)
return ChatPromptTemplate(messages=self.messages + _other.messages)
elif isinstance(other, str):
prompt = HumanMessagePromptTemplate.from_template(other)
return ChatPromptTemplate(messages=self.messages + [prompt])
else:
raise NotImplementedError(f'Unsupported operand type for +: {type(other)}')
|
def __add__(self, other: Any) ->ChatPromptTemplate:
    """Combine two prompt templates.
    Args:
        other: Another prompt template.
    Returns:
        Combined prompt template.
    """
    # Normalize `other` into a list of messages, then build the combined
    # template in a single place.
    if isinstance(other, ChatPromptTemplate):
        extra_messages = other.messages
    elif isinstance(other, (BaseMessagePromptTemplate, BaseMessage,
        BaseChatPromptTemplate)):
        extra_messages = [other]
    elif isinstance(other, (list, tuple)):
        extra_messages = ChatPromptTemplate.from_messages(other).messages
    elif isinstance(other, str):
        extra_messages = [HumanMessagePromptTemplate.from_template(other)]
    else:
        raise NotImplementedError(
            f'Unsupported operand type for +: {type(other)}')
    return ChatPromptTemplate(messages=self.messages + extra_messages)
|
Combine two prompt templates.
Args:
other: Another prompt template.
Returns:
Combined prompt template.
|
_iterate_files
|
"""Iterate over files in a directory or zip file.
Args:
path (str): Path to the directory or zip file.
Yields:
str: Path to each file.
"""
if os.path.isfile(path) and path.endswith(('.html', '.json')):
yield path
elif os.path.isdir(path):
for root, _, files in os.walk(path):
for file in files:
if file.endswith(('.html', '.json')):
yield os.path.join(root, file)
elif zipfile.is_zipfile(path):
with zipfile.ZipFile(path) as zip_file:
for file in zip_file.namelist():
if file.endswith(('.html', '.json')):
with tempfile.TemporaryDirectory() as temp_dir:
yield zip_file.extract(file, path=temp_dir)
|
def _iterate_files(self, path: str) ->Iterator[str]:
"""Iterate over files in a directory or zip file.
Args:
path (str): Path to the directory or zip file.
Yields:
str: Path to each file.
"""
if os.path.isfile(path) and path.endswith(('.html', '.json')):
yield path
elif os.path.isdir(path):
for root, _, files in os.walk(path):
for file in files:
if file.endswith(('.html', '.json')):
yield os.path.join(root, file)
elif zipfile.is_zipfile(path):
with zipfile.ZipFile(path) as zip_file:
for file in zip_file.namelist():
if file.endswith(('.html', '.json')):
with tempfile.TemporaryDirectory() as temp_dir:
yield zip_file.extract(file, path=temp_dir)
|
Iterate over files in a directory or zip file.
Args:
path (str): Path to the directory or zip file.
Yields:
str: Path to each file.
|
test_invalid_initialization
|
with pytest.raises(ValueError):
GitHubIssuesLoader(invalid='parameter')
with pytest.raises(ValueError):
GitHubIssuesLoader(state='invalid_state')
with pytest.raises(ValueError):
GitHubIssuesLoader(labels='not_a_list')
with pytest.raises(ValueError):
GitHubIssuesLoader(since='not_a_date')
|
def test_invalid_initialization() ->None:
    # Every invalid keyword set below must make the loader's validation
    # raise ValueError.
    invalid_kwarg_sets = [{'invalid': 'parameter'}, {'state':
        'invalid_state'}, {'labels': 'not_a_list'}, {'since': 'not_a_date'}]
    for bad_kwargs in invalid_kwarg_sets:
        with pytest.raises(ValueError):
            GitHubIssuesLoader(**bad_kwargs)
| null |
setup
|
collection = prepare_collection()
collection.delete_many({})
|
@pytest.fixture(autouse=True)
def setup(self) ->None:
    # Auto-applied before each test in the class: wipe the shared
    # collection so every test starts from an empty state.
    # (`delete_many` suggests a pymongo-style collection — confirm
    # against prepare_collection().)
    collection = prepare_collection()
    collection.delete_many({})
| null |
_import_anthropic
|
from langchain_community.llms.anthropic import Anthropic
return Anthropic
|
def _import_anthropic() ->Any:
    # Deferred import keeps module import time low; returns the class
    # object itself, not an instance.
    from langchain_community.llms.anthropic import Anthropic
    return Anthropic
| null |
setup_class
|
import rockset
import rockset.models
assert os.environ.get('ROCKSET_API_KEY') is not None
assert os.environ.get('ROCKSET_REGION') is not None
api_key = os.environ.get('ROCKSET_API_KEY')
region = os.environ.get('ROCKSET_REGION')
if region == 'use1a1':
host = rockset.Regions.use1a1
elif region == 'usw2a1':
host = rockset.Regions.usw2a1
elif region == 'euc1a1':
host = rockset.Regions.euc1a1
elif region == 'dev':
host = rockset.DevRegions.usw2a1
else:
logger.warn(
"Using ROCKSET_REGION:%s as it is.. You should know what you're doing..."
, region)
host = region
client = rockset.RocksetClient(host, api_key)
if os.environ.get('ROCKSET_DELETE_DOCS_ON_START') == '1':
logger.info(
'Deleting all existing documents from the Rockset collection %s',
collection_name)
query = f'select _id from {workspace}.{collection_name}'
query_response = client.Queries.query(sql={'query': query})
ids = [str(r['_id']) for r in getattr(query_response, query_response.
attribute_map['results'])]
logger.info('Existing ids in collection: %s', ids)
client.Documents.delete_documents(collection=collection_name, data=[
rockset.models.DeleteDocumentsRequestData(id=i) for i in ids],
workspace=workspace)
embeddings = ConsistentFakeEmbeddings()
embeddings.embed_documents(fake_texts)
cls.rockset_vectorstore = Rockset(client, embeddings, collection_name,
text_key, embedding_key, workspace)
|
@classmethod
def setup_class(cls) ->None:
    """Set up a shared Rockset vectorstore for the whole test class.

    Requires the ROCKSET_API_KEY and ROCKSET_REGION environment
    variables. When ROCKSET_DELETE_DOCS_ON_START=1, all existing
    documents in the target collection are deleted first.
    """
    import rockset
    import rockset.models
    assert os.environ.get('ROCKSET_API_KEY') is not None
    assert os.environ.get('ROCKSET_REGION') is not None
    api_key = os.environ.get('ROCKSET_API_KEY')
    region = os.environ.get('ROCKSET_REGION')
    # Map the region shorthand to the corresponding Rockset host constant.
    if region == 'use1a1':
        host = rockset.Regions.use1a1
    elif region == 'usw2a1':
        host = rockset.Regions.usw2a1
    elif region == 'euc1a1':
        host = rockset.Regions.euc1a1
    elif region == 'dev':
        host = rockset.DevRegions.usw2a1
    else:
        # BUG FIX: Logger.warn is a deprecated alias; use warning().
        logger.warning(
            "Using ROCKSET_REGION:%s as it is.. You should know what you're doing..."
            , region)
        host = region
    client = rockset.RocksetClient(host, api_key)
    if os.environ.get('ROCKSET_DELETE_DOCS_ON_START') == '1':
        logger.info(
            'Deleting all existing documents from the Rockset collection %s',
            collection_name)
        query = f'select _id from {workspace}.{collection_name}'
        query_response = client.Queries.query(sql={'query': query})
        ids = [str(r['_id']) for r in getattr(query_response,
            query_response.attribute_map['results'])]
        logger.info('Existing ids in collection: %s', ids)
        client.Documents.delete_documents(collection=collection_name, data=
            [rockset.models.DeleteDocumentsRequestData(id=i) for i in ids],
            workspace=workspace)
    # Embed the shared fixture texts once so later queries are consistent.
    embeddings = ConsistentFakeEmbeddings()
    embeddings.embed_documents(fake_texts)
    cls.rockset_vectorstore = Rockset(client, embeddings, collection_name,
        text_key, embedding_key, workspace)
| null |
test_cosine
|
"""Test cosine distance."""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
TEST_REDIS_URL, vector_schema=cosine_schema)
output = docsearch.similarity_search_with_score('far', k=2)
_, score = output[1]
assert score == COSINE_SCORE
assert drop(docsearch.index_name)
|
def test_cosine(texts: List[str]) ->None:
    """Test cosine distance."""
    # Build a Redis vectorstore configured with the cosine-distance schema.
    docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
        TEST_REDIS_URL, vector_schema=cosine_schema)
    output = docsearch.similarity_search_with_score('far', k=2)
    # Check the second-closest hit's score against the expected constant.
    _, score = output[1]
    assert score == COSINE_SCORE
    # Clean up the index; drop() is expected to return True on success.
    assert drop(docsearch.index_name)
|
Test cosine distance.
|
__init__
|
"""Initialize with necessary components."""
try:
import pyepsilla
except ImportError as e:
raise ImportError(
'Could not import pyepsilla python package. Please install pyepsilla package with `pip install pyepsilla`.'
) from e
if not isinstance(client, pyepsilla.vectordb.Client):
raise TypeError(
f'client should be an instance of pyepsilla.vectordb.Client, got {type(client)}'
)
self._client: vectordb.Client = client
self._db_name = db_name
self._embeddings = embeddings
self._collection_name = Epsilla._LANGCHAIN_DEFAULT_TABLE_NAME
self._client.load_db(db_name=db_name, db_path=db_path)
self._client.use_db(db_name=db_name)
|
def __init__(self, client: Any, embeddings: Embeddings, db_path: Optional[
    str]=_LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str]=
    _LANGCHAIN_DEFAULT_DB_NAME):
    """Initialize with necessary components.

    Args:
        client: A connected ``pyepsilla.vectordb.Client`` instance.
        embeddings: Embedding function used to vectorize texts.
        db_path: Filesystem path where the database is stored/loaded.
        db_name: Name of the database to load and activate.

    Raises:
        ImportError: If the ``pyepsilla`` package is not installed.
        TypeError: If ``client`` is not a ``pyepsilla.vectordb.Client``.
    """
    try:
        import pyepsilla
    except ImportError as e:
        raise ImportError(
            'Could not import pyepsilla python package. Please install pyepsilla package with `pip install pyepsilla`.'
            ) from e
    if not isinstance(client, pyepsilla.vectordb.Client):
        raise TypeError(
            f'client should be an instance of pyepsilla.vectordb.Client, got {type(client)}'
            )
    self._client: vectordb.Client = client
    self._db_name = db_name
    self._embeddings = embeddings
    # Table name is fixed to the class-level default.
    self._collection_name = Epsilla._LANGCHAIN_DEFAULT_TABLE_NAME
    # Load the database from disk (if present) and make it the active one.
    self._client.load_db(db_name=db_name, db_path=db_path)
    self._client.use_db(db_name=db_name)
|
Initialize with necessary components.
|
_import_nasa_tool
|
from langchain_community.tools.nasa.tool import NasaAction
return NasaAction
|
def _import_nasa_tool() ->Any:
    # Deferred import keeps module import time low; returns the class
    # object itself, not an instance.
    from langchain_community.tools.nasa.tool import NasaAction
    return NasaAction
| null |
_import_spark_sql_tool_QuerySparkSQLTool
|
from langchain_community.tools.spark_sql.tool import QuerySparkSQLTool
return QuerySparkSQLTool
|
def _import_spark_sql_tool_QuerySparkSQLTool() ->Any:
    # Deferred import keeps module import time low; returns the class
    # object itself, not an instance.
    from langchain_community.tools.spark_sql.tool import QuerySparkSQLTool
    return QuerySparkSQLTool
| null |
get_format_instructions
|
return f"Select one of the following options: {', '.join(self._valid_values)}"
|
def get_format_instructions(self) ->str:
    # Render the allowed choices as a comma-separated list for the prompt.
    options = ', '.join(self._valid_values)
    return 'Select one of the following options: ' + options
| null |
requires_reference
|
return True
|
@property
def requires_reference(self) ->bool:
    """Whether this evaluator requires a reference label (always True)."""
    return True
| null |
embed_documents
|
"""Call out to Aleph Alpha's Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
document_embeddings = []
for text in texts:
document_embeddings.append(self._embed(text))
return document_embeddings
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Call out to Aleph Alpha's Document endpoint.
    Args:
        texts: The list of texts to embed.
    Returns:
        List of embeddings, one for each text.
    """
    # One embedding request per text, in input order.
    return [self._embed(text) for text in texts]
|
Call out to Aleph Alpha's Document endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
chain
|
...
|
@overload
def chain(func: Callable[[Input], Output]) ->Runnable[Input, Output]:
    # Typing-only overload; the implementation lives in the
    # non-overloaded `chain` definition. `...` is the conventional body.
    ...
| null |
_Lambda
|
self.write('(')
self.write('lambda ')
self.dispatch(t.args)
self.write(': ')
self.dispatch(t.body)
self.write(')')
|
def _Lambda(self, t):
    # Unparse a lambda AST node as "(lambda <args>: <body>)".
    # The surrounding parentheses keep the expression unambiguous in
    # any context it is embedded in.
    self.write('(')
    self.write('lambda ')
    self.dispatch(t.args)
    self.write(': ')
    self.dispatch(t.body)
    self.write(')')
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    # Namespace path components used when (de)serializing this object.
    return ['langchain', 'schema', 'messages']
|
Get the namespace of the langchain object.
|
_skip_input_warning
|
"""Warning to show when input is ignored."""
return f'Ignoring input in {self.__class__.__name__}, as it is not expected.'
|
@property
def _skip_input_warning(self) ->str:
    """Warning to show when input is ignored."""
    # Includes the concrete subclass name for clearer diagnostics.
    return (
        f'Ignoring input in {self.__class__.__name__}, as it is not expected.')
|
Warning to show when input is ignored.
|
get_req_stream
|
invoke_url = self._get_invoke_url(model, invoke_url)
if payload.get('stream', True) is False:
payload = {**payload, 'stream': True}
last_inputs = {'url': invoke_url, 'headers': self.headers['stream'], 'json':
payload, 'stream': True}
response = self.get_session_fn().post(**last_inputs)
self._try_raise(response)
call = self.copy()
def out_gen() ->Generator[dict, Any, Any]:
for line in response.iter_lines():
if line and line.strip() != b'data: [DONE]':
line = line.decode('utf-8')
msg, final_line = call.postprocess(line, stop=stop)
yield msg
if final_line:
break
self._try_raise(response)
return (r for r in out_gen())
|
def get_req_stream(self, model: Optional[str]=None, payload: Optional[dict
    ]=None, invoke_url: Optional[str]=None, stop: Optional[Sequence[str]]=None
    ) ->Iterator:
    """Issue a streaming POST request and yield postprocessed messages.

    Args:
        model: Optional model name used to resolve the invoke URL.
        payload: JSON body for the request; 'stream' is forced to True.
        invoke_url: Explicit endpoint URL; overrides model resolution.
        stop: Optional stop sequences forwarded to postprocess().

    Returns:
        An iterator of message dicts parsed from the event stream.
    """
    invoke_url = self._get_invoke_url(model, invoke_url)
    # BUG FIX: avoid a mutable default argument; None stands in for {}.
    payload = {} if payload is None else payload
    if payload.get('stream', True) is False:
        payload = {**payload, 'stream': True}
    last_inputs = {'url': invoke_url, 'headers': self.headers['stream'],
        'json': payload, 'stream': True}
    response = self.get_session_fn().post(**last_inputs)
    self._try_raise(response)
    # Copy self so the generator keeps working even if self mutates later.
    call = self.copy()

    def out_gen() ->Generator[dict, Any, Any]:
        # Decode server-sent-event lines until the final chunk or [DONE].
        for line in response.iter_lines():
            if line and line.strip() != b'data: [DONE]':
                line = line.decode('utf-8')
                msg, final_line = call.postprocess(line, stop=stop)
                yield msg
                if final_line:
                    break
        self._try_raise(response)
    return (r for r in out_gen())
| null |
_get_relevant_documents
|
try:
from tavily import Client
except ImportError:
raise ImportError(
'Tavily python package not found. Please install it with `pip install tavily-python`.'
)
tavily = Client(api_key=self.api_key or os.environ['TAVILY_API_KEY'])
max_results = self.k if not self.include_generated_answer else self.k - 1
response = tavily.search(query=query, max_results=max_results, search_depth
=self.search_depth.value, include_answer=self.include_generated_answer,
include_domains=self.include_domains, exclude_domains=self.
exclude_domains, include_raw_content=self.include_raw_content,
include_images=self.include_images, **self.kwargs)
docs = [Document(page_content=result.get('content', '') if not self.
include_raw_content else result.get('raw_content', ''), metadata={
'title': result.get('title', ''), 'source': result.get('url', ''), **{k:
v for k, v in result.items() if k not in ('content', 'title', 'url',
'raw_content')}, 'images': response.get('images')}) for result in
response.get('results')]
if self.include_generated_answer:
docs = [Document(page_content=response.get('answer', ''), metadata={
'title': 'Suggested Answer', 'source': 'https://tavily.com/'}), *docs]
return docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Query the Tavily search API and convert results to Documents.

    Raises:
        ImportError: If the ``tavily-python`` package is not installed.
    """
    try:
        from tavily import Client
    except ImportError:
        raise ImportError(
            'Tavily python package not found. Please install it with `pip install tavily-python`.'
            )
    # Explicit api_key wins; otherwise fall back to the environment.
    tavily = Client(api_key=self.api_key or os.environ['TAVILY_API_KEY'])
    # Reserve one result slot for the generated answer when enabled.
    max_results = self.k if not self.include_generated_answer else self.k - 1
    response = tavily.search(query=query, max_results=max_results,
        search_depth=self.search_depth.value, include_answer=self.
        include_generated_answer, include_domains=self.include_domains,
        exclude_domains=self.exclude_domains, include_raw_content=self.
        include_raw_content, include_images=self.include_images, **self.kwargs)
    # One Document per search result; remaining result fields are copied
    # into metadata alongside title/source/images.
    docs = [Document(page_content=result.get('content', '') if not self.
        include_raw_content else result.get('raw_content', ''), metadata={
        'title': result.get('title', ''), 'source': result.get('url', ''),
        **{k: v for k, v in result.items() if k not in ('content', 'title',
        'url', 'raw_content')}, 'images': response.get('images')}) for
        result in response.get('results')]
    if self.include_generated_answer:
        # Prepend Tavily's suggested answer as a synthetic document.
        docs = [Document(page_content=response.get('answer', ''), metadata=
            {'title': 'Suggested Answer', 'source': 'https://tavily.com/'}),
            *docs]
    return docs
| null |
ApproxRetrievalStrategy
|
"""Used to perform approximate nearest neighbor search
using the HNSW algorithm.
At build index time, this strategy will create a
dense vector field in the index and store the
embedding vectors in the index.
At query time, the text will either be embedded using the
provided embedding function or the query_model_id
will be used to embed the text using the model
deployed to Elasticsearch.
if query_model_id is used, do not provide an embedding function.
Args:
query_model_id: Optional. ID of the model to use to
embed the query text within the stack. Requires
embedding model to be deployed to Elasticsearch.
hybrid: Optional. If True, will perform a hybrid search
using both the knn query and a text query.
Defaults to False.
rrf: Optional. rrf is Reciprocal Rank Fusion.
When `hybrid` is True,
and `rrf` is True, then rrf: {}.
and `rrf` is False, then rrf is omitted.
and isinstance(rrf, dict) is True, then pass in the dict values.
rrf could be passed for adjusting 'rank_constant' and 'window_size'.
"""
return ApproxRetrievalStrategy(query_model_id=query_model_id, hybrid=hybrid,
rrf=rrf)
|
@staticmethod
def ApproxRetrievalStrategy(query_model_id: Optional[str]=None, hybrid:
    Optional[bool]=False, rrf: Optional[Union[dict, bool]]=True
    ) ->'ApproxRetrievalStrategy':
    """Used to perform approximate nearest neighbor search
    using the HNSW algorithm.
    At build index time, this strategy will create a
    dense vector field in the index and store the
    embedding vectors in the index.
    At query time, the text will either be embedded using the
    provided embedding function or the query_model_id
    will be used to embed the text using the model
    deployed to Elasticsearch.
    if query_model_id is used, do not provide an embedding function.
    Args:
        query_model_id: Optional. ID of the model to use to
            embed the query text within the stack. Requires
            embedding model to be deployed to Elasticsearch.
        hybrid: Optional. If True, will perform a hybrid search
            using both the knn query and a text query.
            Defaults to False.
        rrf: Optional. rrf is Reciprocal Rank Fusion.
            When `hybrid` is True,
                and `rrf` is True, then rrf: {}.
                and `rrf` is False, then rrf is omitted.
                and isinstance(rrf, dict) is True, then pass in the dict values.
            rrf could be passed for adjusting 'rank_constant' and 'window_size'.
    """
    # Thin factory: forwards straight to the ApproxRetrievalStrategy
    # constructor defined elsewhere in the module.
    return ApproxRetrievalStrategy(query_model_id=query_model_id, hybrid=
        hybrid, rrf=rrf)
|
Used to perform approximate nearest neighbor search
using the HNSW algorithm.
At build index time, this strategy will create a
dense vector field in the index and store the
embedding vectors in the index.
At query time, the text will either be embedded using the
provided embedding function or the query_model_id
will be used to embed the text using the model
deployed to Elasticsearch.
if query_model_id is used, do not provide an embedding function.
Args:
query_model_id: Optional. ID of the model to use to
embed the query text within the stack. Requires
embedding model to be deployed to Elasticsearch.
hybrid: Optional. If True, will perform a hybrid search
using both the knn query and a text query.
Defaults to False.
rrf: Optional. rrf is Reciprocal Rank Fusion.
When `hybrid` is True,
and `rrf` is True, then rrf: {}.
and `rrf` is False, then rrf is omitted.
and isinstance(rrf, dict) is True, then pass in the dict values.
rrf could be passed for adjusting 'rank_constant' and 'window_size'.
|
lookup
|
"""Look up the cache data.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then retrieve the data from the cache based on the `prompt`.
"""
from gptcache.adapter.api import get
_gptcache = self._get_gptcache(llm_string)
res = get(prompt, cache_obj=_gptcache)
if res:
return [Generation(**generation_dict) for generation_dict in json.loads
(res)]
return None
|
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
    """Look up the cache data.
    First, retrieve the corresponding cache object using the `llm_string` parameter,
    and then retrieve the data from the cache based on the `prompt`.
    """
    from gptcache.adapter.api import get
    _gptcache = self._get_gptcache(llm_string)
    res = get(prompt, cache_obj=_gptcache)
    if res:
        # Cached value is a JSON-encoded list of Generation kwargs dicts.
        return [Generation(**generation_dict) for generation_dict in json.
            loads(res)]
    # Cache miss.
    return None
|
Look up the cache data.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then retrieve the data from the cache based on the `prompt`.
|
test_normal_output_parsing
|
_test_convo_output("""
Action: my_action
Action Input: my action input
""",
'my_action', 'my action input')
|
def test_normal_output_parsing() ->None:
    # Well-formed agent output: both Action and Action Input lines present
    # and parsed out verbatim.
    _test_convo_output('\nAction: my_action\nAction Input: my action input\n',
        'my_action', 'my action input')
| null |
from_documents
|
"""Create a Cassandra vectorstore from a document list.
No support for specifying text IDs
Returns:
a Cassandra vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
session: Session = kwargs['session']
keyspace: str = kwargs['keyspace']
table_name: str = kwargs['table_name']
return cls.from_texts(texts=texts, metadatas=metadatas, embedding=embedding,
session=session, keyspace=keyspace, table_name=table_name)
|
@classmethod
def from_documents(cls: Type[CVST], documents: List[Document], embedding:
    Embeddings, batch_size: int=16, **kwargs: Any) ->CVST:
    """Create a Cassandra vectorstore from a document list.
    No support for specifying text IDs
    Returns:
        a Cassandra vectorstore.
    """
    # NOTE(review): batch_size is accepted but never forwarded to
    # from_texts — confirm whether it should be passed through.
    texts = [doc.page_content for doc in documents]
    metadatas = [doc.metadata for doc in documents]
    # session, keyspace and table_name are required kwargs; missing keys
    # raise KeyError here.
    session: Session = kwargs['session']
    keyspace: str = kwargs['keyspace']
    table_name: str = kwargs['table_name']
    return cls.from_texts(texts=texts, metadatas=metadatas, embedding=
        embedding, session=session, keyspace=keyspace, table_name=table_name)
|
Create a Cassandra vectorstore from a document list.
No support for specifying text IDs
Returns:
a Cassandra vectorstore.
|
test_exact_matching_with_ignore_case
|
prediction = 'Mindy is the CTO'
reference = 'mindy is the cto'
result = exact_match_string_evaluator_ignore_case.evaluate_strings(prediction
=prediction, reference=reference)
assert result['score'] == 1.0
reference = 'mindy is the CEO'
result = exact_match_string_evaluator_ignore_case.evaluate_strings(prediction
=prediction, reference=reference)
assert result['score'] == 0.0
|
def test_exact_matching_with_ignore_case(
    exact_match_string_evaluator_ignore_case: ExactMatchStringEvaluator
    ) ->None:
    # Same text differing only in case must match when case is ignored.
    prediction = 'Mindy is the CTO'
    reference = 'mindy is the cto'
    result = exact_match_string_evaluator_ignore_case.evaluate_strings(
        prediction=prediction, reference=reference)
    assert result['score'] == 1.0
    # Different wording still fails even with case-insensitive matching.
    reference = 'mindy is the CEO'
    result = exact_match_string_evaluator_ignore_case.evaluate_strings(
        prediction=prediction, reference=reference)
    assert result['score'] == 0.0
| null |
_llm_type
|
"""Return type of llm."""
return 'writer'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    # Stable identifier string for this LLM implementation.
    return 'writer'
|
Return type of llm.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'runnable']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    # Namespace path components used when (de)serializing this object.
    return ['langchain', 'schema', 'runnable']
|
Get the namespace of the langchain object.
|
on_chain_start
|
"""Run when chain starts running."""
self.metrics['step'] += 1
self.metrics['chain_starts'] += 1
self.metrics['starts'] += 1
chain_starts = self.metrics['chain_starts']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_chain_start'})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
chain_input = ','.join([f'{k}={v}' for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp['inputs'] = chain_input
self.jsonf(input_resp, self.temp_dir, f'chain_start_{chain_starts}')
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
    **kwargs: Any) ->None:
    """Run when chain starts running."""
    # Bump the step/start counters for this callback.
    self.metrics['step'] += 1
    self.metrics['chain_starts'] += 1
    self.metrics['starts'] += 1
    chain_starts = self.metrics['chain_starts']
    # Assemble the record: action tag, flattened serialized payload,
    # then the current metric snapshot (later updates win on key clash).
    resp: Dict[str, Any] = {'action': 'on_chain_start'}
    resp.update(flatten_dict(serialized))
    resp.update(self.metrics)
    input_resp = deepcopy(resp)
    input_resp['inputs'] = ','.join(f'{k}={v}' for k, v in inputs.items())
    # Persist one JSON file per chain start.
    self.jsonf(input_resp, self.temp_dir, f'chain_start_{chain_starts}')
|
Run when chain starts running.
|
test_failure_no_ticker
|
"""Test that the tool fails."""
tool = YahooFinanceNewsTool()
query = ''
result = tool.run(query)
assert f'Company ticker {query} not found.' in result
|
def test_failure_no_ticker() ->None:
    """Test that the tool fails."""
    tool = YahooFinanceNewsTool()
    # An empty query cannot resolve to any ticker symbol.
    query = ''
    result = tool.run(query)
    assert f'Company ticker {query} not found.' in result
|
Test that the tool fails.
|
_create_session_analysis_df
|
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_start_records_df = pd.DataFrame(self.records['on_llm_start_records'])
on_llm_end_records_df = pd.DataFrame(self.records['on_llm_end_records'])
llm_input_columns = ['step', 'prompt']
if 'name' in on_llm_start_records_df.columns:
llm_input_columns.append('name')
elif 'id' in on_llm_start_records_df.columns:
on_llm_start_records_df['name'] = on_llm_start_records_df['id'].apply(
lambda id_: id_[-1])
llm_input_columns.append('name')
llm_input_prompts_df = on_llm_start_records_df[llm_input_columns].dropna(axis=1
).rename({'step': 'prompt_step'}, axis=1)
complexity_metrics_columns = []
visualizations_columns = []
complexity_metrics_columns = ['flesch_reading_ease', 'flesch_kincaid_grade',
'smog_index', 'coleman_liau_index', 'automated_readability_index',
'dale_chall_readability_score', 'difficult_words',
'linsear_write_formula', 'gunning_fog', 'fernandez_huerta',
'szigriszt_pazos', 'gutierrez_polini', 'crawford', 'gulpease_index',
'osman']
visualizations_columns = ['dependency_tree', 'entities']
llm_outputs_df = on_llm_end_records_df[['step', 'text',
'token_usage_total_tokens', 'token_usage_prompt_tokens',
'token_usage_completion_tokens'] + complexity_metrics_columns +
visualizations_columns].dropna(axis=1).rename({'step': 'output_step',
'text': 'output'}, axis=1)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
session_analysis_df['chat_html'] = session_analysis_df[['prompt', 'output']
].apply(lambda row: construct_html_from_prompt_and_generation(row[
'prompt'], row['output']), axis=1)
return session_analysis_df
|
def _create_session_analysis_df(self) ->Any:
    """Create a dataframe with all the information from the session."""
    pd = import_pandas()
    on_llm_start_records_df = pd.DataFrame(self.records['on_llm_start_records']
        )
    on_llm_end_records_df = pd.DataFrame(self.records['on_llm_end_records'])
    llm_input_columns = ['step', 'prompt']
    # Prefer an explicit 'name' column; otherwise derive it from the last
    # element of the 'id' path when present.
    if 'name' in on_llm_start_records_df.columns:
        llm_input_columns.append('name')
    elif 'id' in on_llm_start_records_df.columns:
        on_llm_start_records_df['name'] = on_llm_start_records_df['id'].apply(
            lambda id_: id_[-1])
        llm_input_columns.append('name')
    # Keep only populated input columns; disambiguate 'step'.
    llm_input_prompts_df = on_llm_start_records_df[llm_input_columns].dropna(
        axis=1).rename({'step': 'prompt_step'}, axis=1)
    complexity_metrics_columns = []
    visualizations_columns = []
    # Readability/complexity metric columns expected on the end records.
    complexity_metrics_columns = ['flesch_reading_ease',
        'flesch_kincaid_grade', 'smog_index', 'coleman_liau_index',
        'automated_readability_index', 'dale_chall_readability_score',
        'difficult_words', 'linsear_write_formula', 'gunning_fog',
        'fernandez_huerta', 'szigriszt_pazos', 'gutierrez_polini',
        'crawford', 'gulpease_index', 'osman']
    visualizations_columns = ['dependency_tree', 'entities']
    llm_outputs_df = on_llm_end_records_df[['step', 'text',
        'token_usage_total_tokens', 'token_usage_prompt_tokens',
        'token_usage_completion_tokens'] + complexity_metrics_columns +
        visualizations_columns].dropna(axis=1).rename({'step':
        'output_step', 'text': 'output'}, axis=1)
    # Align prompts and outputs side by side (column-wise concat).
    session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df],
        axis=1)
    # Render each prompt/output pair into an HTML snippet for display.
    session_analysis_df['chat_html'] = session_analysis_df[['prompt', 'output']
        ].apply(lambda row: construct_html_from_prompt_and_generation(row[
        'prompt'], row['output']), axis=1)
    return session_analysis_df
|
Create a dataframe with all the information from the session.
|
test_timescalevector_relevance_score
|
"""Test to make sure the relevance score is scaled to 0-1."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True)
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
1.0), (Document(page_content='bar', metadata={'page': '1'}),
0.9996744261675065), (Document(page_content='baz', metadata={'page':
'2'}), 0.9986996093328621)]
|
def test_timescalevector_relevance_score() ->None:
    """Test to make sure the relevance score is scaled to 0-1."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    # Fresh collection each run; pre_delete_collection avoids stale rows.
    docsearch = TimescaleVector.from_texts(texts=texts, collection_name=
        'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection
        =True)
    output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
    # Exact match scores 1.0; the rest fall just below, all within [0, 1].
    assert output == [(Document(page_content='foo', metadata={'page': '0'}),
        1.0), (Document(page_content='bar', metadata={'page': '1'}),
        0.9996744261675065), (Document(page_content='baz', metadata={'page':
        '2'}), 0.9986996093328621)]
|
Test to make sure the relevance score is scaled to 0-1.
|
_get_encoding_model
|
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
try:
encoding = tiktoken_.encoding_for_model('gpt-3.5-turbo-0301')
except KeyError:
logger.warning('Warning: model not found. Using cl100k_base encoding.')
model = 'cl100k_base'
encoding = tiktoken_.get_encoding(model)
return model, encoding
|
def _get_encoding_model(self) ->tuple[str, tiktoken.Encoding]:
    """Resolve the tiktoken model name and its encoding.

    Returns:
        A (model_name, encoding) tuple. Falls back to the cl100k_base
        encoding when the model is unknown to tiktoken.
    """
    tiktoken_ = _import_tiktoken()
    # An explicit tiktoken_model_name overrides the chat model name.
    if self.tiktoken_model_name is not None:
        model = self.tiktoken_model_name
    else:
        model = self.model_name
    try:
        # BUG FIX: look up the encoding for the configured model instead
        # of a hard-coded 'gpt-3.5-turbo-0301', which silently ignored
        # both tiktoken_model_name and model_name.
        encoding = tiktoken_.encoding_for_model(model)
    except KeyError:
        logger.warning('Warning: model not found. Using cl100k_base encoding.')
        model = 'cl100k_base'
        encoding = tiktoken_.get_encoding(model)
    return model, encoding
| null |
_bulk_ingest_embeddings
|
"""Bulk Ingest Embeddings into given index."""
if not mapping:
mapping = dict()
bulk = _import_bulk()
not_found_error = _import_not_found_error()
requests = []
return_ids = []
mapping = mapping
try:
client.indices.get(index=index_name)
except not_found_error:
client.indices.create(index=index_name, body=mapping)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {'_op_type': 'index', '_index': index_name, vector_field:
embeddings[i], text_field: text, 'metadata': metadata}
if is_aoss:
request['id'] = _id
else:
request['_id'] = _id
requests.append(request)
return_ids.append(_id)
bulk(client, requests, max_chunk_bytes=max_chunk_bytes)
if not is_aoss:
client.indices.refresh(index=index_name)
return return_ids
|
def _bulk_ingest_embeddings(client: Any, index_name: str, embeddings: List[
    List[float]], texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, vector_field: str='vector_field',
    text_field: str='text', mapping: Optional[Dict]=None, max_chunk_bytes:
    Optional[int]=1 * 1024 * 1024, is_aoss: bool=False) ->List[str]:
    """Bulk Ingest Embeddings into given index.

    Args:
        client: OpenSearch-style client used for indexing.
        index_name: Target index; created with ``mapping`` if missing.
        embeddings: One embedding vector per text, index-aligned.
        texts: Documents to store alongside their embeddings.
        metadatas: Optional per-text metadata dicts (defaults to empty).
        ids: Optional document ids; random UUID4s are generated otherwise.
        vector_field: Field name that stores the embedding vector.
        text_field: Field name that stores the raw text.
        mapping: Optional index mapping, used only when creating the index.
        max_chunk_bytes: Maximum bulk request size passed to the helper.
        is_aoss: Whether the target is Amazon OpenSearch Serverless,
            which requires 'id' instead of '_id' and has no refresh.

    Returns:
        The list of document ids that were ingested.
    """
    if not mapping:
        mapping = dict()
    bulk = _import_bulk()
    not_found_error = _import_not_found_error()
    requests = []
    return_ids = []
    # (Removed a redundant `mapping = mapping` self-assignment.)
    # Create the index on demand if it does not exist yet.
    try:
        client.indices.get(index=index_name)
    except not_found_error:
        client.indices.create(index=index_name, body=mapping)
    for i, text in enumerate(texts):
        metadata = metadatas[i] if metadatas else {}
        _id = ids[i] if ids else str(uuid.uuid4())
        request = {'_op_type': 'index', '_index': index_name, vector_field:
            embeddings[i], text_field: text, 'metadata': metadata}
        if is_aoss:
            request['id'] = _id
        else:
            request['_id'] = _id
        requests.append(request)
        return_ids.append(_id)
    bulk(client, requests, max_chunk_bytes=max_chunk_bytes)
    if not is_aoss:
        # Serverless collections do not support an explicit refresh.
        client.indices.refresh(index=index_name)
    return return_ids
|
Bulk Ingest Embeddings into given index.
|
test_loading_flawed_xml
|
loader = MWDumpLoader((PARENT_DIR / 'mwtest_current_pages.xml').absolute())
with pytest.raises(TypeError):
loader.load()
|
@pytest.mark.requires('mwparserfromhell', 'mwxml')
def test_loading_flawed_xml() ->None:
    # A malformed MediaWiki dump should surface a TypeError from load().
    loader = MWDumpLoader((PARENT_DIR / 'mwtest_current_pages.xml').absolute())
    with pytest.raises(TypeError):
        loader.load()
| null |
test_readonly_memory
|
read_only_memory = ReadOnlySharedMemory(memory=memory)
memory.save_context({'input': 'bar'}, {'output': 'foo'})
assert read_only_memory.load_memory_variables({}
) == memory.load_memory_variables({})
|
@pytest.mark.parametrize('memory', [ConversationBufferMemory(memory_key=
    'baz'), ConversationSummaryMemory(llm=FakeLLM(), memory_key='baz'),
    ConversationBufferWindowMemory(memory_key='baz')])
def test_readonly_memory(memory: BaseMemory) ->None:
    """A read-only wrapper must mirror the wrapped memory's variables."""
    wrapper = ReadOnlySharedMemory(memory=memory)
    # Mutate the underlying memory; the wrapper should reflect the change.
    memory.save_context({'input': 'bar'}, {'output': 'foo'})
    assert wrapper.load_memory_variables({}) == memory.load_memory_variables({})
| null |
load_memory_variables
|
return self.memories
|
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, str]:
    """Return the stored memory mapping; ``inputs`` is ignored."""
    return self.memories
| null |
_stream_callback
|
"""Add streamed result to queue."""
if error:
result_queue.put(error)
else:
response_raw: dict = result.get_response(as_json=True)
if 'outputs' in response_raw:
response = self._process_result(response_raw)
if response in stop_words:
result_queue.put(None)
else:
result_queue.put(response)
if response_raw['parameters']['triton_final_response']['bool_param']:
result_queue.put(None)
|
def _stream_callback(self, result_queue: queue.Queue[Union[Optional[Dict[
    str, str]], str]], result: grpcclient.InferResult, error: str,
    stop_words: List[str]) ->None:
    """Push a streamed inference result (or its error string) onto the queue."""
    if error:
        result_queue.put(error)
        return
    raw: dict = result.get_response(as_json=True)
    if 'outputs' in raw:
        text = self._process_result(raw)
        # A stop word terminates the stream: enqueue the None sentinel
        # instead of the token text.
        result_queue.put(None if text in stop_words else text)
    # Triton flags the last message of a stream; signal completion with None.
    if raw['parameters']['triton_final_response']['bool_param']:
        result_queue.put(None)
|
Add streamed result to queue.
|
_default_params
|
"""Get the default parameters for calling Cohere API."""
return {'max_new_tokens': self.max_new_tokens, 'temperature': self.
temperature, 'top_k': self.top_k, 'top_p': self.top_p, 'stop_sequences':
[], 'do_sample': self.do_sample, 'use_cache': self.use_cache}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {'max_new_tokens': self.max_new_tokens, 'temperature': self.
temperature, 'top_k': self.top_k, 'top_p': self.top_p,
'stop_sequences': [], 'do_sample': self.do_sample, 'use_cache':
self.use_cache}
|
Get the default parameters for calling Cohere API.
|
_import_edenai
|
from langchain_community.llms.edenai import EdenAI
return EdenAI
|
def _import_edenai() ->Any:
    """Import the EdenAI LLM class lazily to avoid a hard import dependency."""
    from langchain_community.llms.edenai import EdenAI
    return EdenAI
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.