method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
parse_result
|
if not isinstance(result[0], ChatGeneration):
raise ValueError('This output parser only works on ChatGeneration output')
message = result[0].message
return parse_ai_message_to_openai_tool_action(message)
|
def parse_result(self, result: List[Generation], *, partial: bool=False
) ->Union[List[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError(
'This output parser only works on ChatGeneration output')
message = result[0].message
return parse_ai_message_to_openai_tool_action(message)
| null |
_import_aviary
|
from langchain_community.llms.aviary import Aviary
return Aviary
|
def _import_aviary() ->Any:
from langchain_community.llms.aviary import Aviary
return Aviary
| null |
azure_installed
|
try:
from azure.core.credentials import TokenCredential
from azure.identity import DefaultAzureCredential
return True
except Exception as e:
print(f'azure not installed, skipping test {e}')
return False
|
def azure_installed() ->bool:
try:
from azure.core.credentials import TokenCredential
from azure.identity import DefaultAzureCredential
return True
except Exception as e:
print(f'azure not installed, skipping test {e}')
return False
| null |
__init__
|
"""Initialize the record manager.
Args:
namespace (str): The namespace for the record manager.
"""
self.namespace = namespace
|
def __init__(self, namespace: str) ->None:
"""Initialize the record manager.
Args:
namespace (str): The namespace for the record manager.
"""
self.namespace = namespace
|
Initialize the record manager.
Args:
namespace (str): The namespace for the record manager.
|
test_from_texts_with_tfidf_params
|
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
tfidf_retriever = TFIDFRetriever.from_texts(texts=input_texts, tfidf_params
={'min_df': 2})
assert tfidf_retriever.tfidf_array.toarray().shape == (3, 2)
|
@pytest.mark.requires('sklearn')
def test_from_texts_with_tfidf_params() ->None:
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
tfidf_retriever = TFIDFRetriever.from_texts(texts=input_texts,
tfidf_params={'min_df': 2})
assert tfidf_retriever.tfidf_array.toarray().shape == (3, 2)
| null |
format_log_to_str
|
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts = ''
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f'\n{observation_prefix}{observation}\n{llm_prefix}'
return thoughts
|
def format_log_to_str(intermediate_steps: List[Tuple[AgentAction, str]],
observation_prefix: str='Observation: ', llm_prefix: str='Thought: '
) ->str:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts = ''
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f'\n{observation_prefix}{observation}\n{llm_prefix}'
return thoughts
|
Construct the scratchpad that lets the agent continue its thought process.
|
transform_documents
|
"""Filter down documents."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(self.embeddings,
stateful_documents)
included_idxs = _filter_cluster_embeddings(embedded_documents, self.
num_clusters, self.num_closest, self.random_state, self.remove_duplicates)
results = sorted(included_idxs) if self.sorted else included_idxs
return [stateful_documents[i] for i in results]
|
def transform_documents(self, documents: Sequence[Document], **kwargs: Any
) ->Sequence[Document]:
"""Filter down documents."""
stateful_documents = get_stateful_documents(documents)
embedded_documents = _get_embeddings_from_stateful_docs(self.embeddings,
stateful_documents)
included_idxs = _filter_cluster_embeddings(embedded_documents, self.
num_clusters, self.num_closest, self.random_state, self.
remove_duplicates)
results = sorted(included_idxs) if self.sorted else included_idxs
return [stateful_documents[i] for i in results]
|
Filter down documents.
|
max_marginal_relevance_search
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector_field = kwargs.get('vector_field', 'vector_field')
text_field = kwargs.get('text_field', 'text')
metadata_field = kwargs.get('metadata_field', 'metadata')
embedding = self.embedding_function.embed_query(query)
results = self._raw_similarity_search_with_score(query, fetch_k, **kwargs)
embeddings = [result['_source'][vector_field] for result in results]
mmr_selected = maximal_marginal_relevance(np.array(embedding), embeddings,
k=k, lambda_mult=lambda_mult)
return [Document(page_content=results[i]['_source'][text_field], metadata=
results[i]['_source'][metadata_field]) for i in mmr_selected]
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
20, lambda_mult: float=0.5, **kwargs: Any) ->list[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
vector_field = kwargs.get('vector_field', 'vector_field')
text_field = kwargs.get('text_field', 'text')
metadata_field = kwargs.get('metadata_field', 'metadata')
embedding = self.embedding_function.embed_query(query)
results = self._raw_similarity_search_with_score(query, fetch_k, **kwargs)
embeddings = [result['_source'][vector_field] for result in results]
mmr_selected = maximal_marginal_relevance(np.array(embedding),
embeddings, k=k, lambda_mult=lambda_mult)
return [Document(page_content=results[i]['_source'][text_field],
metadata=results[i]['_source'][metadata_field]) for i in mmr_selected]
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
|
test_chat_model_on_chat_dataset
|
llm = ChatOpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.
CRITERIA])
run_on_dataset(dataset_name=chat_dataset_name, llm_or_chain_factory=llm,
evaluation=eval_config, client=client, project_name=eval_project_name)
_check_all_feedback_passed(eval_project_name, client)
|
def test_chat_model_on_chat_dataset(chat_dataset_name: str,
eval_project_name: str, client: Client) ->None:
llm = ChatOpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType
.CRITERIA])
run_on_dataset(dataset_name=chat_dataset_name, llm_or_chain_factory=llm,
evaluation=eval_config, client=client, project_name=eval_project_name)
_check_all_feedback_passed(eval_project_name, client)
| null |
test_vearch_add_texts
|
"""Test end to end adding of texts."""
texts = ['Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用',
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库',
'vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装']
metadatas = [{'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}, {'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}, {'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'}]
vearch_db = Vearch.from_texts(texts=texts, embedding=FakeEmbeddings(),
metadatas=metadatas, table_name='test_vearch', metadata_path='./')
vearch_db.add_texts(texts=[
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库'], metadatas=[{
'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'}]
)
result = vearch_db.similarity_search(
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', 2)
assert result == [Document(page_content=
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', metadata={'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}), Document(page_content=
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', metadata={'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'})
]
|
def test_vearch_add_texts() ->None:
"""Test end to end adding of texts."""
texts = [
'Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用',
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库',
'vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装']
metadatas = [{'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}, {'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}, {'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}]
vearch_db = Vearch.from_texts(texts=texts, embedding=FakeEmbeddings(),
metadatas=metadatas, table_name='test_vearch', metadata_path='./')
vearch_db.add_texts(texts=[
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库'], metadatas=[{
'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}])
result = vearch_db.similarity_search(
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', 2)
assert result == [Document(page_content=
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', metadata={
'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
}), Document(page_content=
'Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库', metadata={
'source':
'/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt'
})]
|
Test end to end adding of texts.
|
test_seq_prompt_dict
|
passthrough = mocker.Mock(side_effect=lambda x: x)
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.'
) + '{question}'
chat = FakeListChatModel(responses=["i'm a chatbot"])
llm = FakeListLLM(responses=["i'm a textbot"])
chain = prompt | passthrough | {'chat': chat, 'llm': llm}
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [RunnableLambda(passthrough)]
assert isinstance(chain.last, RunnableParallel)
assert dumps(chain, pretty=True) == snapshot
prompt_spy = mocker.spy(prompt.__class__, 'invoke')
chat_spy = mocker.spy(chat.__class__, 'invoke')
llm_spy = mocker.spy(llm.__class__, 'invoke')
tracer = FakeTracer()
assert chain.invoke({'question': 'What is your name?'}, dict(callbacks=[
tracer])) == {'chat': AIMessage(content="i'm a chatbot"), 'llm':
"i'm a textbot"}
assert prompt_spy.call_args.args[1] == {'question': 'What is your name?'}
assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')])
assert llm_spy.call_args.args[1] == ChatPromptValue(messages=[SystemMessage
(content='You are a nice assistant.'), HumanMessage(content=
'What is your name?')])
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 3
map_run = parent_run.child_runs[2]
assert map_run.name == 'RunnableParallel<chat,llm>'
assert len(map_run.child_runs) == 2
|
@freeze_time('2023-01-01')
def test_seq_prompt_dict(mocker: MockerFixture, snapshot: SnapshotAssertion
) ->None:
passthrough = mocker.Mock(side_effect=lambda x: x)
prompt = SystemMessagePromptTemplate.from_template(
'You are a nice assistant.') + '{question}'
chat = FakeListChatModel(responses=["i'm a chatbot"])
llm = FakeListLLM(responses=["i'm a textbot"])
chain = prompt | passthrough | {'chat': chat, 'llm': llm}
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [RunnableLambda(passthrough)]
assert isinstance(chain.last, RunnableParallel)
assert dumps(chain, pretty=True) == snapshot
prompt_spy = mocker.spy(prompt.__class__, 'invoke')
chat_spy = mocker.spy(chat.__class__, 'invoke')
llm_spy = mocker.spy(llm.__class__, 'invoke')
tracer = FakeTracer()
assert chain.invoke({'question': 'What is your name?'}, dict(callbacks=
[tracer])) == {'chat': AIMessage(content="i'm a chatbot"), 'llm':
"i'm a textbot"}
assert prompt_spy.call_args.args[1] == {'question': 'What is your name?'}
assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')])
assert llm_spy.call_args.args[1] == ChatPromptValue(messages=[
SystemMessage(content='You are a nice assistant.'), HumanMessage(
content='What is your name?')])
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 3
map_run = parent_run.child_runs[2]
assert map_run.name == 'RunnableParallel<chat,llm>'
assert len(map_run.child_runs) == 2
| null |
test_schema_complex_seq
|
prompt1 = ChatPromptTemplate.from_template('what is the city {person} is from?'
)
prompt2 = ChatPromptTemplate.from_template(
'what country is the city {city} in? respond in {language}')
model = FakeListChatModel(responses=[''])
chain1: Runnable = RunnableSequence(prompt1, model, StrOutputParser(), name
='city_chain')
assert chain1.name == 'city_chain'
chain2: Runnable = {'city': chain1, 'language': itemgetter('language')
} | prompt2 | model | StrOutputParser()
assert chain2.input_schema.schema() == {'title':
'RunnableParallel<city,language>Input', 'type': 'object', 'properties':
{'person': {'title': 'Person', 'type': 'string'}, 'language': {'title':
'Language'}}}
assert chain2.output_schema.schema() == {'title': 'StrOutputParserOutput',
'type': 'string'}
assert chain2.with_types(input_type=str).input_schema.schema() == {'title':
'RunnableSequenceInput', 'type': 'string'}
assert chain2.with_types(input_type=int).output_schema.schema() == {'title':
'StrOutputParserOutput', 'type': 'string'}
class InputType(BaseModel):
person: str
assert chain2.with_types(input_type=InputType).input_schema.schema() == {
'title': 'InputType', 'type': 'object', 'properties': {'person': {
'title': 'Person', 'type': 'string'}}, 'required': ['person']}
|
def test_schema_complex_seq() ->None:
prompt1 = ChatPromptTemplate.from_template(
'what is the city {person} is from?')
prompt2 = ChatPromptTemplate.from_template(
'what country is the city {city} in? respond in {language}')
model = FakeListChatModel(responses=[''])
chain1: Runnable = RunnableSequence(prompt1, model, StrOutputParser(),
name='city_chain')
assert chain1.name == 'city_chain'
chain2: Runnable = {'city': chain1, 'language': itemgetter('language')
} | prompt2 | model | StrOutputParser()
assert chain2.input_schema.schema() == {'title':
'RunnableParallel<city,language>Input', 'type': 'object',
'properties': {'person': {'title': 'Person', 'type': 'string'},
'language': {'title': 'Language'}}}
assert chain2.output_schema.schema() == {'title':
'StrOutputParserOutput', 'type': 'string'}
assert chain2.with_types(input_type=str).input_schema.schema() == {'title':
'RunnableSequenceInput', 'type': 'string'}
assert chain2.with_types(input_type=int).output_schema.schema() == {'title'
: 'StrOutputParserOutput', 'type': 'string'}
class InputType(BaseModel):
person: str
assert chain2.with_types(input_type=InputType).input_schema.schema() == {
'title': 'InputType', 'type': 'object', 'properties': {'person': {
'title': 'Person', 'type': 'string'}}, 'required': ['person']}
| null |
_safe_next
|
try:
return next(self.generator)
except StopIteration:
return None
|
def _safe_next(self) ->Any:
try:
return next(self.generator)
except StopIteration:
return None
| null |
input_keys
|
"""Return the input keys.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
|
Return the input keys.
:meta private:
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
chat
|
return BedrockChat(model_id='anthropic.claude-v2', model_kwargs={
'temperature': 0})
|
@pytest.fixture
def chat() ->BedrockChat:
return BedrockChat(model_id='anthropic.claude-v2', model_kwargs={
'temperature': 0})
| null |
_batch
|
outputs: List[Any] = []
for input in inputs:
if input.startswith(self.fail_starts_with):
outputs.append(ValueError())
else:
outputs.append(input + 'a')
return outputs
|
def _batch(self, inputs: List[str]) ->List:
outputs: List[Any] = []
for input in inputs:
if input.startswith(self.fail_starts_with):
outputs.append(ValueError())
else:
outputs.append(input + 'a')
return outputs
| null |
__init__
|
"""
Initializes with default settings
"""
self.strip_newlines = strip_newlines
self.return_err_output = return_err_output
self.prompt = ''
self.process = None
if persistent:
self.prompt = str(uuid4())
self.process = self._initialize_persistent_process(self, self.prompt)
|
def __init__(self, strip_newlines: bool=False, return_err_output: bool=
False, persistent: bool=False):
"""
Initializes with default settings
"""
self.strip_newlines = strip_newlines
self.return_err_output = return_err_output
self.prompt = ''
self.process = None
if persistent:
self.prompt = str(uuid4())
self.process = self._initialize_persistent_process(self, self.prompt)
|
Initializes with default settings
|
requires_input
|
return False
|
@property
def requires_input(self) ->bool:
return False
| null |
_process_array_schema
|
from openapi_pydantic import Reference, Schema
items = schema.items
if items is not None:
if isinstance(items, Reference):
ref_name = items.ref.split('/')[-1]
if ref_name not in references_used:
references_used.append(ref_name)
items = spec.get_referenced_schema(items)
else:
pass
return f'Array<{ref_name}>'
else:
pass
if isinstance(items, Schema):
array_type = cls.from_schema(schema=items, name=f'{name}Item',
required=True, spec=spec, references_used=references_used)
return f'Array<{array_type.type}>'
return 'array'
|
@classmethod
def _process_array_schema(cls, schema: Schema, name: str, spec: OpenAPISpec,
references_used: List[str]) ->str:
from openapi_pydantic import Reference, Schema
items = schema.items
if items is not None:
if isinstance(items, Reference):
ref_name = items.ref.split('/')[-1]
if ref_name not in references_used:
references_used.append(ref_name)
items = spec.get_referenced_schema(items)
else:
pass
return f'Array<{ref_name}>'
else:
pass
if isinstance(items, Schema):
array_type = cls.from_schema(schema=items, name=f'{name}Item',
required=True, spec=spec, references_used=references_used)
return f'Array<{array_type.type}>'
return 'array'
| null |
similarity_search_with_score
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(embedding=embedding, k=k,
filter=filter)
return docs
|
def similarity_search_with_score(self, query: str, k: int=4, filter:
Optional[dict]=None) ->List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(embedding=embedding,
k=k, filter=filter)
return docs
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
|
_format_chat_history
|
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
|
def _format_chat_history(chat_history: List[Tuple[str, str]]):
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
| null |
__init__
|
super().__init__(code)
self.source_lines = self.code.splitlines()
|
def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
| null |
on_chat_model_start
|
assert all(isinstance(m, BaseMessage) for m in chain(*messages))
self.on_chat_model_start_common()
|
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[
List[BaseMessage]], *, run_id: UUID, parent_run_id: Optional[UUID]=None,
**kwargs: Any) ->Any:
assert all(isinstance(m, BaseMessage) for m in chain(*messages))
self.on_chat_model_start_common()
| null |
_call
|
"""Call the API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import KoboldApiLLM
llm = KoboldApiLLM(endpoint="http://localhost:5000")
llm("Write a story about dragons.")
"""
data: Dict[str, Any] = {'prompt': prompt, 'use_story': self.use_story,
'use_authors_note': self.use_authors_note, 'use_world_info': self.
use_world_info, 'use_memory': self.use_memory, 'max_context_length':
self.max_context_length, 'max_length': self.max_length, 'rep_pen': self
.rep_pen, 'rep_pen_range': self.rep_pen_range, 'rep_pen_slope': self.
rep_pen_slope, 'temperature': self.temperature, 'tfs': self.tfs,
'top_a': self.top_a, 'top_p': self.top_p, 'top_k': self.top_k,
'typical': self.typical}
if stop is not None:
data['stop_sequence'] = stop
response = requests.post(f'{clean_url(self.endpoint)}/api/v1/generate',
json=data)
response.raise_for_status()
json_response = response.json()
if 'results' in json_response and len(json_response['results']
) > 0 and 'text' in json_response['results'][0]:
text = json_response['results'][0]['text'].strip()
if stop is not None:
for sequence in stop:
if text.endswith(sequence):
text = text[:-len(sequence)].rstrip()
return text
else:
raise ValueError(
f'Unexpected response format from Kobold API: {json_response}')
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call the API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import KoboldApiLLM
llm = KoboldApiLLM(endpoint="http://localhost:5000")
llm("Write a story about dragons.")
"""
data: Dict[str, Any] = {'prompt': prompt, 'use_story': self.use_story,
'use_authors_note': self.use_authors_note, 'use_world_info': self.
use_world_info, 'use_memory': self.use_memory, 'max_context_length':
self.max_context_length, 'max_length': self.max_length, 'rep_pen':
self.rep_pen, 'rep_pen_range': self.rep_pen_range, 'rep_pen_slope':
self.rep_pen_slope, 'temperature': self.temperature, 'tfs': self.
tfs, 'top_a': self.top_a, 'top_p': self.top_p, 'top_k': self.top_k,
'typical': self.typical}
if stop is not None:
data['stop_sequence'] = stop
response = requests.post(f'{clean_url(self.endpoint)}/api/v1/generate',
json=data)
response.raise_for_status()
json_response = response.json()
if 'results' in json_response and len(json_response['results']
) > 0 and 'text' in json_response['results'][0]:
text = json_response['results'][0]['text'].strip()
if stop is not None:
for sequence in stop:
if text.endswith(sequence):
text = text[:-len(sequence)].rstrip()
return text
else:
raise ValueError(
f'Unexpected response format from Kobold API: {json_response}')
|
Call the API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import KoboldApiLLM
llm = KoboldApiLLM(endpoint="http://localhost:5000")
llm("Write a story about dragons.")
|
lazy_load
|
"""Lazy loads the chat data from the file.
Yields:
ChatSession: A chat session containing the loaded messages.
"""
with open(self.file_path) as f:
data = json.load(f)
sorted_data = sorted(data['messages'], key=lambda x: x['timestamp_ms'])
messages = []
for index, m in enumerate(sorted_data):
if 'content' not in m:
logger.info(
f"""Skipping Message No.
{index + 1} as no content is present in the message"""
)
continue
messages.append(HumanMessage(content=m['content'], additional_kwargs={
'sender': m['sender_name']}))
yield ChatSession(messages=messages)
|
def lazy_load(self) ->Iterator[ChatSession]:
"""Lazy loads the chat data from the file.
Yields:
ChatSession: A chat session containing the loaded messages.
"""
with open(self.file_path) as f:
data = json.load(f)
sorted_data = sorted(data['messages'], key=lambda x: x['timestamp_ms'])
messages = []
for index, m in enumerate(sorted_data):
if 'content' not in m:
logger.info(
f"""Skipping Message No.
{index + 1} as no content is present in the message"""
)
continue
messages.append(HumanMessage(content=m['content'],
additional_kwargs={'sender': m['sender_name']}))
yield ChatSession(messages=messages)
|
Lazy loads the chat data from the file.
Yields:
ChatSession: A chat session containing the loaded messages.
|
_generate
|
raise NotImplementedError
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->LLMResult:
raise NotImplementedError
| null |
_import_nasa_tool
|
from langchain_community.tools.nasa.tool import NasaAction
return NasaAction
|
def _import_nasa_tool() ->Any:
from langchain_community.tools.nasa.tool import NasaAction
return NasaAction
| null |
from_openapi_url
|
"""Create an APIOperation from an OpenAPI URL."""
spec = OpenAPISpec.from_url(spec_url)
return cls.from_openapi_spec(spec, path, method)
|
@classmethod
def from_openapi_url(cls, spec_url: str, path: str, method: str
) ->'APIOperation':
"""Create an APIOperation from an OpenAPI URL."""
spec = OpenAPISpec.from_url(spec_url)
return cls.from_openapi_spec(spec, path, method)
|
Create an APIOperation from an OpenAPI URL.
|
convert_to_secret_str
|
"""Convert a string to a SecretStr if needed."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
|
def convert_to_secret_str(value: Union[SecretStr, str]) ->SecretStr:
"""Convert a string to a SecretStr if needed."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
|
Convert a string to a SecretStr if needed.
|
_import_spark_sql_tool_InfoSparkSQLTool
|
from langchain_community.tools.spark_sql.tool import InfoSparkSQLTool
return InfoSparkSQLTool
|
def _import_spark_sql_tool_InfoSparkSQLTool() ->Any:
from langchain_community.tools.spark_sql.tool import InfoSparkSQLTool
return InfoSparkSQLTool
| null |
on_llm_new_token
|
"""Run when LLM generates a new token."""
self.metrics['step'] += 1
self.metrics['llm_streams'] += 1
llm_streams = self.metrics['llm_streams']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_llm_new_token', 'token': token})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
self.records['on_llm_token_records'].append(resp)
self.records['action_records'].append(resp)
self.mlflg.jsonf(resp, f'llm_new_tokens_{llm_streams}')
|
def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
"""Run when LLM generates a new token."""
self.metrics['step'] += 1
self.metrics['llm_streams'] += 1
llm_streams = self.metrics['llm_streams']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_llm_new_token', 'token': token})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
self.records['on_llm_token_records'].append(resp)
self.records['action_records'].append(resp)
self.mlflg.jsonf(resp, f'llm_new_tokens_{llm_streams}')
|
Run when LLM generates a new token.
|
test_custom_index_from_documents
|
"""This test checks the construction of a custom
ElasticSearch index using the 'from_documents'."""
from elasticsearch import Elasticsearch
index_name = f'custom_index_{uuid.uuid4().hex}'
elastic_vector_search = ElasticVectorSearch.from_documents(documents=
documents, embedding=embedding_openai, elasticsearch_url=
elasticsearch_url, index_name=index_name)
es = Elasticsearch(hosts=elasticsearch_url)
index_names = es.indices.get(index='_all').keys()
assert index_name in index_names
search_result = elastic_vector_search.similarity_search('sharks')
assert len(search_result) != 0
|
@pytest.mark.vcr(ignore_localhost=True)
def test_custom_index_from_documents(self, documents: List[Document],
embedding_openai: OpenAIEmbeddings, elasticsearch_url: str) ->None:
"""This test checks the construction of a custom
ElasticSearch index using the 'from_documents'."""
from elasticsearch import Elasticsearch
index_name = f'custom_index_{uuid.uuid4().hex}'
elastic_vector_search = ElasticVectorSearch.from_documents(documents=
documents, embedding=embedding_openai, elasticsearch_url=
elasticsearch_url, index_name=index_name)
es = Elasticsearch(hosts=elasticsearch_url)
index_names = es.indices.get(index='_all').keys()
assert index_name in index_names
search_result = elastic_vector_search.similarity_search('sharks')
assert len(search_result) != 0
|
This test checks the construction of a custom
ElasticSearch index using the 'from_documents'.
|
_run
|
return f'{arg1} {arg2} {arg3}'
|
def _run(self, arg1: int, arg2: bool, arg3: Optional[dict]=None) ->str:
return f'{arg1} {arg2} {arg3}'
| null |
__init__
|
"""Create a RunnableBinding from a runnable and kwargs.
Args:
bound: The underlying runnable that this runnable delegates calls to.
kwargs: optional kwargs to pass to the underlying runnable, when running
the underlying runnable (e.g., via `invoke`, `batch`,
`transform`, or `stream` or async variants)
config: config_factories:
config_factories: optional list of config factories to apply to the
custom_input_type: Specify to override the input type of the underlying
runnable with a custom type.
custom_output_type: Specify to override the output type of the underlying
runnable with a custom type.
**other_kwargs: Unpacked into the base class.
"""
config = config or {}
if (configurable := config.get('configurable', None)):
allowed_keys = set(s.id for s in bound.config_specs)
for key in configurable:
if key not in allowed_keys:
raise ValueError(
f"Configurable key '{key}' not found in runnable with config keys: {allowed_keys}"
)
super().__init__(bound=bound, kwargs=kwargs or {}, config=config or {},
config_factories=config_factories or [], custom_input_type=
custom_input_type, custom_output_type=custom_output_type, **other_kwargs)
|
def __init__(self, *, bound: Runnable[Input, Output], kwargs: Optional[
Mapping[str, Any]]=None, config: Optional[RunnableConfig]=None,
config_factories: Optional[List[Callable[[RunnableConfig],
RunnableConfig]]]=None, custom_input_type: Optional[Union[Type[Input],
BaseModel]]=None, custom_output_type: Optional[Union[Type[Output],
BaseModel]]=None, **other_kwargs: Any) ->None:
"""Create a RunnableBinding from a runnable and kwargs.
Args:
bound: The underlying runnable that this runnable delegates calls to.
kwargs: optional kwargs to pass to the underlying runnable, when running
the underlying runnable (e.g., via `invoke`, `batch`,
`transform`, or `stream` or async variants)
config: config_factories:
config_factories: optional list of config factories to apply to the
custom_input_type: Specify to override the input type of the underlying
runnable with a custom type.
custom_output_type: Specify to override the output type of the underlying
runnable with a custom type.
**other_kwargs: Unpacked into the base class.
"""
config = config or {}
if (configurable := config.get('configurable', None)):
allowed_keys = set(s.id for s in bound.config_specs)
for key in configurable:
if key not in allowed_keys:
raise ValueError(
f"Configurable key '{key}' not found in runnable with config keys: {allowed_keys}"
)
super().__init__(bound=bound, kwargs=kwargs or {}, config=config or {},
config_factories=config_factories or [], custom_input_type=
custom_input_type, custom_output_type=custom_output_type, **
other_kwargs)
|
Create a RunnableBinding from a runnable and kwargs.
Args:
bound: The underlying runnable that this runnable delegates calls to.
kwargs: optional kwargs to pass to the underlying runnable, when running
the underlying runnable (e.g., via `invoke`, `batch`,
`transform`, or `stream` or async variants)
config: config_factories:
config_factories: optional list of config factories to apply to the
custom_input_type: Specify to override the input type of the underlying
runnable with a custom type.
custom_output_type: Specify to override the output type of the underlying
runnable with a custom type.
**other_kwargs: Unpacked into the base class.
|
from_documents
|
"""
Return VectorStore initialized from documents and embeddings.
Hologres connection string is required
"Either pass it as a parameter
or set the HOLOGRES_CONNECTION_STRING environment variable.
Create the connection string by calling
HologresVector.connection_string_from_db_params
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs['connection_string'] = connection_string
return cls.from_texts(texts=texts, pre_delete_collection=
pre_delete_collection, embedding=embedding, metadatas=metadatas, ids=
ids, ndims=ndims, table_name=table_name, **kwargs)
|
@classmethod
def from_documents(cls: Type[Hologres], documents: List[Document],
embedding: Embeddings, ndims: int=ADA_TOKEN_COUNT, table_name: str=
_LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]]=None,
pre_delete_collection: bool=False, **kwargs: Any) ->Hologres:
"""
Return VectorStore initialized from documents and embeddings.
Hologres connection string is required
"Either pass it as a parameter
or set the HOLOGRES_CONNECTION_STRING environment variable.
Create the connection string by calling
HologresVector.connection_string_from_db_params
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs['connection_string'] = connection_string
return cls.from_texts(texts=texts, pre_delete_collection=
pre_delete_collection, embedding=embedding, metadatas=metadatas,
ids=ids, ndims=ndims, table_name=table_name, **kwargs)
|
Return VectorStore initialized from documents and embeddings.
Hologres connection string is required
"Either pass it as a parameter
or set the HOLOGRES_CONNECTION_STRING environment variable.
Create the connection string by calling
HologresVector.connection_string_from_db_params
|
max_marginal_relevance_search
|
emb = self._embedding.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(emb, k=k, fetch_k=
fetch_k, radius=radius, epsilon=epsilon, timeout=timeout, lambda_mult=
lambda_mult, grpc_metadata=grpc_metadata)
return docs
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
20, lambda_mult: float=0.5, radius: float=-1.0, epsilon: float=0.01,
timeout: int=3000000000, grpc_metadata: Optional[Any]=None, **kwargs: Any
) ->List[Document]:
emb = self._embedding.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(emb, k=k, fetch_k=
fetch_k, radius=radius, epsilon=epsilon, timeout=timeout,
lambda_mult=lambda_mult, grpc_metadata=grpc_metadata)
return docs
| null |
test_neo4jvector
|
"""Test end to end construction and search."""
docsearch = Neo4jVector.from_texts(texts=texts, embedding=
FakeEmbeddingsWithOsDimension(), url=url, username=username, password=
password, pre_delete_collection=True)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
drop_vector_indexes(docsearch)
|
def test_neo4jvector() ->None:
"""Test end to end construction and search."""
docsearch = Neo4jVector.from_texts(texts=texts, embedding=
FakeEmbeddingsWithOsDimension(), url=url, username=username,
password=password, pre_delete_collection=True)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
drop_vector_indexes(docsearch)
|
Test end to end construction and search.
|
test_doc_intelligence
|
endpoint = 'endpoint'
key = 'key'
parser = AzureAIDocumentIntelligenceParser(api_endpoint=endpoint, api_key=key)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(endpoint=endpoint, credential=
mock_credential(), headers={'x-ms-useragent': 'langchain-parser/1.0.0'})
assert parser.client == mock_client()
assert parser.api_model == 'prebuilt-layout'
assert parser.mode == 'markdown'
|
@pytest.mark.requires('azure', 'azure.ai', 'azure.ai.documentintelligence')
@patch('azure.ai.documentintelligence.DocumentIntelligenceClient')
@patch('azure.core.credentials.AzureKeyCredential')
def test_doc_intelligence(mock_credential: MagicMock, mock_client: MagicMock
) ->None:
endpoint = 'endpoint'
key = 'key'
parser = AzureAIDocumentIntelligenceParser(api_endpoint=endpoint,
api_key=key)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(endpoint=endpoint, credential=
mock_credential(), headers={'x-ms-useragent': 'langchain-parser/1.0.0'}
)
assert parser.client == mock_client()
assert parser.api_model == 'prebuilt-layout'
assert parser.mode == 'markdown'
| null |
evaluation_name
|
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return 'regex_match'
|
@property
def evaluation_name(self) ->str:
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return 'regex_match'
|
Get the evaluation name.
Returns:
str: The evaluation name.
|
add_user_message
|
"""Convenience method for adding a human message string to the store.
Args:
message: The human message to add
"""
if isinstance(message, HumanMessage):
self.add_message(message)
else:
self.add_message(HumanMessage(content=message))
|
def add_user_message(self, message: Union[HumanMessage, str]) ->None:
"""Convenience method for adding a human message string to the store.
Args:
message: The human message to add
"""
if isinstance(message, HumanMessage):
self.add_message(message)
else:
self.add_message(HumanMessage(content=message))
|
Convenience method for adding a human message string to the store.
Args:
message: The human message to add
|
test_meilisearch_with_client
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
vectorstore = Meilisearch.from_texts(texts=texts, embedding=FakeEmbeddings(
), client=self.client(), index_name=INDEX_NAME)
self._wait_last_task()
output = vectorstore.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_meilisearch_with_client(self) ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
vectorstore = Meilisearch.from_texts(texts=texts, embedding=
FakeEmbeddings(), client=self.client(), index_name=INDEX_NAME)
self._wait_last_task()
output = vectorstore.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
Test end to end construction and search.
|
invoke
|
return self._call_with_config(self._format_prompt_with_error_handling,
input, config, run_type='prompt')
|
def invoke(self, input: Dict, config: Optional[RunnableConfig]=None
) ->PromptValue:
return self._call_with_config(self._format_prompt_with_error_handling,
input, config, run_type='prompt')
| null |
__init__
|
"""Initiate the rolling logger."""
super().__init__()
if hasattr(handler, 'init'):
handler.init(self)
if hasattr(handler, '_get_callbacks'):
self._callbacks = handler._get_callbacks()
else:
self._callbacks = dict()
diagnostic_logger.warning('initialized handler without callbacks.')
self._logger = logger
|
def __init__(self, logger: Logger, handler: Any):
"""Initiate the rolling logger."""
super().__init__()
if hasattr(handler, 'init'):
handler.init(self)
if hasattr(handler, '_get_callbacks'):
self._callbacks = handler._get_callbacks()
else:
self._callbacks = dict()
diagnostic_logger.warning('initialized handler without callbacks.')
self._logger = logger
|
Initiate the rolling logger.
|
visit_structured_query
|
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
| null |
_llm_type
|
"""Return type of chat model."""
return 'anthropic-chat'
|
@property
def _llm_type(self) ->str:
"""Return type of chat model."""
return 'anthropic-chat'
|
Return type of chat model.
|
_import_edenai_EdenAiObjectDetectionTool
|
from langchain_community.tools.edenai import EdenAiObjectDetectionTool
return EdenAiObjectDetectionTool
|
def _import_edenai_EdenAiObjectDetectionTool() ->Any:
from langchain_community.tools.edenai import EdenAiObjectDetectionTool
return EdenAiObjectDetectionTool
| null |
_import_file_management_ReadFileTool
|
from langchain_community.tools.file_management import ReadFileTool
return ReadFileTool
|
def _import_file_management_ReadFileTool() ->Any:
from langchain_community.tools.file_management import ReadFileTool
return ReadFileTool
| null |
test_load_returns_limited_docs
|
"""Test that returns several docs"""
expected_docs = 2
api_client = PubMedAPIWrapper(top_k_results=expected_docs)
docs = api_client.load_docs('ChatGPT')
assert len(docs) == expected_docs
assert_docs(docs)
|
def test_load_returns_limited_docs() ->None:
"""Test that returns several docs"""
expected_docs = 2
api_client = PubMedAPIWrapper(top_k_results=expected_docs)
docs = api_client.load_docs('ChatGPT')
assert len(docs) == expected_docs
assert_docs(docs)
|
Test that returns several docs
|
delete
|
if not ids:
return None
from nuclia.sdk import NucliaResource
factory = NucliaResource()
results: List[bool] = []
for id in ids:
try:
factory.delete(rid=id, url=self.kb_url, api_key=self._config['TOKEN'])
results.append(True)
except ValueError:
results.append(False)
return all(results)
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool
]:
if not ids:
return None
from nuclia.sdk import NucliaResource
factory = NucliaResource()
results: List[bool] = []
for id in ids:
try:
factory.delete(rid=id, url=self.kb_url, api_key=self._config[
'TOKEN'])
results.append(True)
except ValueError:
results.append(False)
return all(results)
| null |
test_batch
|
"""Test batch tokens from TritonTensorRTLLM."""
llm = TritonTensorRTLLM(model_name=_MODEL_NAME)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
|
@pytest.mark.skip(reason='Need a working Triton server')
def test_batch() ->None:
"""Test batch tokens from TritonTensorRTLLM."""
llm = TritonTensorRTLLM(model_name=_MODEL_NAME)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
|
Test batch tokens from TritonTensorRTLLM.
|
on_text
|
"""
Run when agent is ending.
"""
self.metrics['step'] += 1
self.metrics['text_ctr'] += 1
text_ctr = self.metrics['text_ctr']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_text', 'text': text})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
self.records['on_text_records'].append(resp)
self.records['action_records'].append(resp)
self.mlflg.jsonf(resp, f'on_text_{text_ctr}')
|
def on_text(self, text: str, **kwargs: Any) ->None:
"""
Run when agent is ending.
"""
self.metrics['step'] += 1
self.metrics['text_ctr'] += 1
text_ctr = self.metrics['text_ctr']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_text', 'text': text})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
self.records['on_text_records'].append(resp)
self.records['action_records'].append(resp)
self.mlflg.jsonf(resp, f'on_text_{text_ctr}')
|
Run when agent is ending.
|
_prepare_output
|
parsed_result = _parse_string_eval_output(result[self.output_key])
if RUN_KEY in result:
parsed_result[RUN_KEY] = result[RUN_KEY]
return parsed_result
|
def _prepare_output(self, result: dict) ->dict:
parsed_result = _parse_string_eval_output(result[self.output_key])
if RUN_KEY in result:
parsed_result[RUN_KEY] = result[RUN_KEY]
return parsed_result
| null |
__init__
|
self.fail_starts_with = fail_starts_with
|
def __init__(self, fail_starts_with: str) ->None:
self.fail_starts_with = fail_starts_with
| null |
learn
|
pass
|
def learn(self, event: PickBestEvent) ->None:
pass
| null |
similarity_search_with_score_by_vector
|
"""Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
"""
from momento.requests.vector_index import ALL_METADATA
from momento.responses.vector_index import Search
if 'top_k' in kwargs:
k = kwargs['k']
filter_expression = kwargs.get('filter_expression', None)
response = self._client.search(self.index_name, embedding, top_k=k,
metadata_fields=ALL_METADATA, filter_expression=filter_expression)
if not isinstance(response, Search.Success):
return []
results = []
for hit in response.hits:
text = cast(str, hit.metadata.pop(self.text_field))
doc = Document(page_content=text, metadata=hit.metadata)
pair = doc, hit.score
results.append(pair)
return results
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
int=4, **kwargs: Any) ->List[Tuple[Document, float]]:
"""Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
"""
from momento.requests.vector_index import ALL_METADATA
from momento.responses.vector_index import Search
if 'top_k' in kwargs:
k = kwargs['k']
filter_expression = kwargs.get('filter_expression', None)
response = self._client.search(self.index_name, embedding, top_k=k,
metadata_fields=ALL_METADATA, filter_expression=filter_expression)
if not isinstance(response, Search.Success):
return []
results = []
for hit in response.hits:
text = cast(str, hit.metadata.pop(self.text_field))
doc = Document(page_content=text, metadata=hit.metadata)
pair = doc, hit.score
results.append(pair)
return results
|
Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
|
output_keys
|
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
|
Return the singular output key.
:meta private:
|
init_qdrant
|
from docarray import BaseDoc
from docarray.index import QdrantDocumentIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32]
other_emb: NdArray[32]
year: int
embeddings = FakeEmbeddings(size=32)
qdrant_config = QdrantDocumentIndex.DBConfig(path=':memory:')
qdrant_db = QdrantDocumentIndex[MyDoc](qdrant_config)
qdrant_db.index([MyDoc(title=f'My document {i}', title_embedding=np.array(
embeddings.embed_query(f'fake emb {i}')), other_emb=np.array(embeddings
.embed_query(f'other fake emb {i}')), year=i) for i in range(100)])
filter_query = rest.Filter(must=[rest.FieldCondition(key='year', range=rest
.Range(gte=10, lt=90))])
return qdrant_db, filter_query, embeddings
|
@pytest.fixture
def init_qdrant() ->Tuple[QdrantDocumentIndex, rest.Filter, FakeEmbeddings]:
from docarray import BaseDoc
from docarray.index import QdrantDocumentIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32]
other_emb: NdArray[32]
year: int
embeddings = FakeEmbeddings(size=32)
qdrant_config = QdrantDocumentIndex.DBConfig(path=':memory:')
qdrant_db = QdrantDocumentIndex[MyDoc](qdrant_config)
qdrant_db.index([MyDoc(title=f'My document {i}', title_embedding=np.
array(embeddings.embed_query(f'fake emb {i}')), other_emb=np.array(
embeddings.embed_query(f'other fake emb {i}')), year=i) for i in
range(100)])
filter_query = rest.Filter(must=[rest.FieldCondition(key='year', range=
rest.Range(gte=10, lt=90))])
return qdrant_db, filter_query, embeddings
| null |
input_keys
|
return ['run', 'example']
|
@property
def input_keys(self) ->List[str]:
return ['run', 'example']
| null |
test_visit_operation
|
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.EQ, attribute='tag', value='high')])
expected = (RedisNum('foo') < 2) & ((RedisText('bar') == 'baz') & (RedisTag
('tag') == 'high'))
actual = translator.visit_operation(op)
assert str(expected) == str(actual)
|
def test_visit_operation(translator: RedisTranslator) ->None:
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator
=Comparator.EQ, attribute='tag', value='high')])
expected = (RedisNum('foo') < 2) & ((RedisText('bar') == 'baz') & (
RedisTag('tag') == 'high'))
actual = translator.visit_operation(op)
assert str(expected) == str(actual)
| null |
train_unsupervised
|
try:
response = requests.post(**self._kwargs_post_fine_tune_request(inputs,
kwargs))
if response.status_code != 200:
raise Exception(
f'Gradient returned an unexpected response with status {response.status_code}: {response.text}'
)
except requests.exceptions.RequestException as e:
raise Exception(f'RequestException while calling Gradient Endpoint: {e}')
response_json = response.json()
loss = response_json['sumLoss'] / response_json['numberOfTrainableTokens']
return TrainResult(loss=loss)
|
def train_unsupervised(self, inputs: Sequence[str], **kwargs: Any
) ->TrainResult:
try:
response = requests.post(**self._kwargs_post_fine_tune_request(
inputs, kwargs))
if response.status_code != 200:
raise Exception(
f'Gradient returned an unexpected response with status {response.status_code}: {response.text}'
)
except requests.exceptions.RequestException as e:
raise Exception(
f'RequestException while calling Gradient Endpoint: {e}')
response_json = response.json()
loss = response_json['sumLoss'] / response_json['numberOfTrainableTokens']
return TrainResult(loss=loss)
| null |
stringify_embedding
|
return ' '.join([f'{i}:{e}' for i, e in enumerate(embedding)])
|
def stringify_embedding(embedding: List) ->str:
return ' '.join([f'{i}:{e}' for i, e in enumerate(embedding)])
| null |
_get_output_messages
|
from langchain_core.messages import BaseMessage
if isinstance(output_val, dict):
output_val = output_val[self.output_messages_key or 'output']
if isinstance(output_val, str):
from langchain_core.messages import AIMessage
return [AIMessage(content=output_val)]
elif isinstance(output_val, BaseMessage):
return [output_val]
elif isinstance(output_val, (list, tuple)):
return list(output_val)
else:
raise ValueError()
|
def _get_output_messages(self, output_val: Union[str, BaseMessage, Sequence
[BaseMessage], dict]) ->List[BaseMessage]:
from langchain_core.messages import BaseMessage
if isinstance(output_val, dict):
output_val = output_val[self.output_messages_key or 'output']
if isinstance(output_val, str):
from langchain_core.messages import AIMessage
return [AIMessage(content=output_val)]
elif isinstance(output_val, BaseMessage):
return [output_val]
elif isinstance(output_val, (list, tuple)):
return list(output_val)
else:
raise ValueError()
| null |
add_texts
|
"""
Add texts to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = None
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(texts))
if ids is None:
ids = [str(f'{i + 1}') for i, _ in enumerate(texts)]
batch = []
for i, text in enumerate(texts):
fields: Dict[str, Union[str, List[float]]] = {}
if self._page_content_field is not None:
fields[self._page_content_field] = text
if self._embedding_field is not None and embeddings is not None:
fields[self._embedding_field] = embeddings[i]
if metadatas is not None and self._metadata_fields is not None:
for metadata_field in self._metadata_fields:
if metadata_field in metadatas[i]:
fields[metadata_field] = metadatas[i][metadata_field]
batch.append({'id': ids[i], 'fields': fields})
results = self._vespa_app.feed_batch(batch)
for result in results:
if not str(result.status_code).startswith('2'):
raise RuntimeError(
f"Could not add document to Vespa. Error code: {result.status_code}. Message: {result.json['message']}"
)
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
"""
Add texts to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = None
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(texts))
if ids is None:
ids = [str(f'{i + 1}') for i, _ in enumerate(texts)]
batch = []
for i, text in enumerate(texts):
fields: Dict[str, Union[str, List[float]]] = {}
if self._page_content_field is not None:
fields[self._page_content_field] = text
if self._embedding_field is not None and embeddings is not None:
fields[self._embedding_field] = embeddings[i]
if metadatas is not None and self._metadata_fields is not None:
for metadata_field in self._metadata_fields:
if metadata_field in metadatas[i]:
fields[metadata_field] = metadatas[i][metadata_field]
batch.append({'id': ids[i], 'fields': fields})
results = self._vespa_app.feed_batch(batch)
for result in results:
if not str(result.status_code).startswith('2'):
raise RuntimeError(
f"Could not add document to Vespa. Error code: {result.status_code}. Message: {result.json['message']}"
)
return ids
|
Add texts to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
|
patch
|
return f'patch {str(data)}'
|
@staticmethod
def patch(url: str, data: Dict[str, Any], **kwargs: Any) ->str:
return f'patch {str(data)}'
| null |
_get_invocation_params
|
"""Get the parameters used to invoke the model."""
return {'model': self.model_name, **super()._get_invocation_params(stop=
stop), **self._default_params, **kwargs}
|
def _get_invocation_params(self, stop: Optional[List[str]]=None, **kwargs: Any
) ->Dict[str, Any]:
"""Get the parameters used to invoke the model."""
return {'model': self.model_name, **super()._get_invocation_params(stop
=stop), **self._default_params, **kwargs}
|
Get the parameters used to invoke the model.
|
add_dependencies_to_pyproject_toml
|
"""Add dependencies to pyproject.toml."""
with open(pyproject_toml, encoding='utf-8') as f:
pyproject: Dict[str, Any] = load(f)
pyproject['tool']['poetry']['dependencies'].update({name:
_get_dep_inline_table(loc.relative_to(pyproject_toml.parent)) for
name, loc in local_editable_dependencies})
with open(pyproject_toml, 'w', encoding='utf-8') as f:
dump(pyproject, f)
|
def add_dependencies_to_pyproject_toml(pyproject_toml: Path,
local_editable_dependencies: Iterable[tuple[str, Path]]) ->None:
"""Add dependencies to pyproject.toml."""
with open(pyproject_toml, encoding='utf-8') as f:
pyproject: Dict[str, Any] = load(f)
pyproject['tool']['poetry']['dependencies'].update({name:
_get_dep_inline_table(loc.relative_to(pyproject_toml.parent)) for
name, loc in local_editable_dependencies})
with open(pyproject_toml, 'w', encoding='utf-8') as f:
dump(pyproject, f)
|
Add dependencies to pyproject.toml.
|
setup
|
collection = get_collection()
collection.delete_many({})
|
@pytest.fixture(autouse=True)
def setup(self) ->None:
collection = get_collection()
collection.delete_many({})
| null |
delete
|
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
assert not (ids is None and where_str is None
), 'You need to specify where to be deleted! Either with `ids` or `where_str`'
conds = []
if ids:
conds.extend([f"{self.config.column_map['id']} = '{id}'" for id in ids])
if where_str:
conds.append(where_str)
assert len(conds) > 0
where_str_final = ' AND '.join(conds)
qstr = (
f'DELETE FROM {self.config.database}.{self.config.table} WHERE {where_str_final}'
)
try:
self.client.command(qstr)
return True
except Exception as e:
logger.error(str(e))
return False
|
def delete(self, ids: Optional[List[str]]=None, where_str: Optional[str]=
None, **kwargs: Any) ->Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
assert not (ids is None and where_str is None
), 'You need to specify where to be deleted! Either with `ids` or `where_str`'
conds = []
if ids:
conds.extend([f"{self.config.column_map['id']} = '{id}'" for id in ids]
)
if where_str:
conds.append(where_str)
assert len(conds) > 0
where_str_final = ' AND '.join(conds)
qstr = (
f'DELETE FROM {self.config.database}.{self.config.table} WHERE {where_str_final}'
)
try:
self.client.command(qstr)
return True
except Exception as e:
logger.error(str(e))
return False
|
Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
|
test_init_with_pipeline_path
|
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
import runhouse as rh
rh.blob(pickle.dumps(pipeline), path='models/pipeline.pkl').save().to(gpu,
path='models')
llm = SelfHostedPipeline.from_pipeline(pipeline='models/pipeline.pkl',
hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_init_with_pipeline_path() ->None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
import runhouse as rh
rh.blob(pickle.dumps(pipeline), path='models/pipeline.pkl').save().to(gpu,
path='models')
llm = SelfHostedPipeline.from_pipeline(pipeline='models/pipeline.pkl',
hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn)
output = llm('Say foo:')
assert isinstance(output, str)
|
Test initialization with a self-hosted HF pipeline.
|
_execute_query
|
"""Execute a GraphQL query and return the results."""
document_node = self.gql_function(query)
result = self.gql_client.execute(document_node)
return result
|
def _execute_query(self, query: str) ->Dict[str, Any]:
"""Execute a GraphQL query and return the results."""
document_node = self.gql_function(query)
result = self.gql_client.execute(document_node)
return result
|
Execute a GraphQL query and return the results.
|
__init__
|
self.path = Path(path) if path else None
if self.path:
self.path.parent.mkdir(parents=True, exist_ok=True)
|
def __init__(self, path: Optional[Union[str, PathLike]]):
self.path = Path(path) if path else None
if self.path:
self.path.parent.mkdir(parents=True, exist_ok=True)
| null |
similarity_search_by_vector
|
"""Perform a similarity search against the query string.
Args:
embedding (List[float]): The embedding vector to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug('No existing collection to search.')
return []
res = self.similarity_search_with_score_by_vector(embedding=embedding, k=k,
param=param, expr=expr, timeout=timeout, **kwargs)
return [doc for doc, _ in res]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
param: Optional[dict]=None, expr: Optional[str]=None, timeout: Optional
[int]=None, **kwargs: Any) ->List[Document]:
"""Perform a similarity search against the query string.
Args:
embedding (List[float]): The embedding vector to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug('No existing collection to search.')
return []
res = self.similarity_search_with_score_by_vector(embedding=embedding,
k=k, param=param, expr=expr, timeout=timeout, **kwargs)
return [doc for doc, _ in res]
|
Perform a similarity search against the query string.
Args:
embedding (List[float]): The embedding vector to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
|
test_manifest_wrapper
|
"""Test manifest wrapper."""
from manifest import Manifest
manifest = Manifest(client_name='openai')
llm = ManifestWrapper(client=manifest, llm_kwargs={'temperature': 0})
output = llm('The capital of New York is:')
assert output == 'Albany'
|
def test_manifest_wrapper() ->None:
"""Test manifest wrapper."""
from manifest import Manifest
manifest = Manifest(client_name='openai')
llm = ManifestWrapper(client=manifest, llm_kwargs={'temperature': 0})
output = llm('The capital of New York is:')
assert output == 'Albany'
|
Test manifest wrapper.
|
_import_google_places_api
|
from langchain_community.utilities.google_places_api import GooglePlacesAPIWrapper
return GooglePlacesAPIWrapper
|
def _import_google_places_api() ->Any:
from langchain_community.utilities.google_places_api import GooglePlacesAPIWrapper
return GooglePlacesAPIWrapper
| null |
__init__
|
self.prefix = prefix
|
def __init__(self, prefix: str=''):
self.prefix = prefix
| null |
test_bes_vector_db
|
"""Test end to end construction and search."""
docsearch = _bes_vector_db_from_texts()
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_bes_vector_db() ->None:
"""Test end to end construction and search."""
docsearch = _bes_vector_db_from_texts()
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
Test end to end construction and search.
|
embeddings
|
return self.embedding_function
|
@property
def embeddings(self) ->Embeddings:
return self.embedding_function
| null |
test_bookend_embedding_query
|
"""Test Bookend AI embeddings for query."""
document = 'foo bar'
embedding = BookendEmbeddings(domain='<bookend_domain>', api_token=
'<bookend_api_token>', model_id='<bookend_embeddings_model_id>')
output = embedding.embed_query(document)
assert len(output) == 768
|
def test_bookend_embedding_query() ->None:
"""Test Bookend AI embeddings for query."""
document = 'foo bar'
embedding = BookendEmbeddings(domain='<bookend_domain>', api_token=
'<bookend_api_token>', model_id='<bookend_embeddings_model_id>')
output = embedding.embed_query(document)
assert len(output) == 768
|
Test Bookend AI embeddings for query.
|
output_keys
|
"""Will always return text key.
:meta private:
"""
return []
|
@property
def output_keys(self) ->List[str]:
"""Will always return text key.
:meta private:
"""
return []
|
Will always return text key.
:meta private:
|
test_scann_with_metadatas_and_filter
|
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]:
Document(page_content='foo', metadata={'page': 0}), docsearch.
index_to_docstore_id[1]: Document(page_content='bar', metadata={'page':
1}), docsearch.index_to_docstore_id[2]: Document(page_content='baz',
metadata={'page': 2})})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search('foo', k=1, filter={'page': 1})
assert output == [Document(page_content='bar', metadata={'page': 1})]
|
def test_scann_with_metadatas_and_filter() ->None:
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]:
Document(page_content='foo', metadata={'page': 0}), docsearch.
index_to_docstore_id[1]: Document(page_content='bar', metadata={
'page': 1}), docsearch.index_to_docstore_id[2]: Document(
page_content='baz', metadata={'page': 2})})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search('foo', k=1, filter={'page': 1})
assert output == [Document(page_content='bar', metadata={'page': 1})]
| null |
from_documents
|
"""Create an Astra DB vectorstore from a document list.
Utility method that defers to 'from_texts' (see that one).
Args: see 'from_texts', except here you have to supply 'documents'
in place of 'texts' and 'metadatas'.
Returns:
an `AstraDB` vectorstore.
"""
return super().from_documents(documents, embedding, **kwargs)
|
@classmethod
def from_documents(cls: Type[ADBVST], documents: List[Document], embedding:
Embeddings, **kwargs: Any) ->ADBVST:
"""Create an Astra DB vectorstore from a document list.
Utility method that defers to 'from_texts' (see that one).
Args: see 'from_texts', except here you have to supply 'documents'
in place of 'texts' and 'metadatas'.
Returns:
an `AstraDB` vectorstore.
"""
return super().from_documents(documents, embedding, **kwargs)
|
Create an Astra DB vectorstore from a document list.
Utility method that defers to 'from_texts' (see that one).
Args: see 'from_texts', except here you have to supply 'documents'
in place of 'texts' and 'metadatas'.
Returns:
an `AstraDB` vectorstore.
|
_create_message_dicts
|
params = self._client_params
if stop is not None:
if 'stop' in params:
raise ValueError('`stop` found in both the input and default params.')
params['stop'] = stop
message_dicts = [_convert_message_to_mistral_chat_message(m) for m in messages]
return message_dicts, params
|
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional
[List[str]]) ->Tuple[List[MistralChatMessage], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if 'stop' in params:
raise ValueError(
'`stop` found in both the input and default params.')
params['stop'] = stop
message_dicts = [_convert_message_to_mistral_chat_message(m) for m in
messages]
return message_dicts, params
| null |
InputType
|
return Union[str, AnyMessage]
|
@property
def InputType(self) ->Any:
return Union[str, AnyMessage]
| null |
test_summary_buffer_memory_buffer_only
|
"""Test ConversationSummaryBufferMemory when only buffer."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key='baz')
memory.save_context({'input': 'bar'}, {'output': 'foo'})
assert memory.buffer == ["""Human: bar
AI: foo"""]
output = memory.load_memory_variables({})
assert output == {'baz': """Human: bar
AI: foo"""}
|
def test_summary_buffer_memory_buffer_only() ->None:
"""Test ConversationSummaryBufferMemory when only buffer."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key='baz')
memory.save_context({'input': 'bar'}, {'output': 'foo'})
assert memory.buffer == ['Human: bar\nAI: foo']
output = memory.load_memory_variables({})
assert output == {'baz': 'Human: bar\nAI: foo'}
|
Test ConversationSummaryBufferMemory when only buffer.
|
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
return False
| null |
parse
|
text = text.strip()
start = text.upper().find('SELECT')
if start >= 0:
end = text.upper().find('FROM')
text = text.replace(text[start + len('SELECT') + 1:end - 1], '*')
return super().parse(text)
|
def parse(self, text: str) ->str:
text = text.strip()
start = text.upper().find('SELECT')
if start >= 0:
end = text.upper().find('FROM')
text = text.replace(text[start + len('SELECT') + 1:end - 1], '*')
return super().parse(text)
| null |
_import_azure_cosmos_db
|
from langchain_community.vectorstores.azure_cosmos_db import AzureCosmosDBVectorSearch
return AzureCosmosDBVectorSearch
|
def _import_azure_cosmos_db() ->Any:
from langchain_community.vectorstores.azure_cosmos_db import AzureCosmosDBVectorSearch
return AzureCosmosDBVectorSearch
| null |
test_api_key_masked_when_passed_from_env
|
monkeypatch.setenv('ALEPH_ALPHA_API_KEY', 'secret-api-key')
llm = AlephAlpha()
print(llm.aleph_alpha_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
@pytest.mark.requires('aleph_alpha_client')
def test_api_key_masked_when_passed_from_env(monkeypatch: MonkeyPatch,
capsys: CaptureFixture) ->None:
monkeypatch.setenv('ALEPH_ALPHA_API_KEY', 'secret-api-key')
llm = AlephAlpha()
print(llm.aleph_alpha_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
| null |
_form_documents
|
"""Format context from this conversation to buffer."""
exclude = set(self.exclude_input_keys)
exclude.add(self.memory_key)
filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude}
texts = [f'{k}: {v}' for k, v in list(filtered_inputs.items()) + list(
outputs.items())]
page_content = '\n'.join(texts)
return [Document(page_content=page_content)]
|
def _form_documents(self, inputs: Dict[str, Any], outputs: Dict[str, str]
) ->List[Document]:
"""Format context from this conversation to buffer."""
exclude = set(self.exclude_input_keys)
exclude.add(self.memory_key)
filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude}
texts = [f'{k}: {v}' for k, v in list(filtered_inputs.items()) + list(
outputs.items())]
page_content = '\n'.join(texts)
return [Document(page_content=page_content)]
|
Format context from this conversation to buffer.
|
load
|
return list(self.lazy_load())
|
def load(self) ->List[Document]:
return list(self.lazy_load())
| null |
test_pickbest_textembedder_more_namespaces_w_full_label_no_emb
|
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
model=MockEncoder())
named_actions = {'action1': [{'a': '0', 'b': '0'}, '1', '2']}
context = {'context1': 'context1', 'context2': 'context2'}
expected = """shared |context1 context1 |context2 context2
0:-0.0:1.0 |a 0 |b 0
|action1 1
|action1 2 """
selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0
)
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context, selected=selected)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb() ->None:
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
False, model=MockEncoder())
named_actions = {'action1': [{'a': '0', 'b': '0'}, '1', '2']}
context = {'context1': 'context1', 'context2': 'context2'}
expected = """shared |context1 context1 |context2 context2
0:-0.0:1.0 |a 0 |b 0
|action1 1
|action1 2 """
selected = pick_best_chain.PickBestSelected(index=0, probability=1.0,
score=0.0)
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context, selected=selected)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected
| null |
_convert_delta_to_message_chunk
|
"""Convert a delta response to a message chunk."""
role = _dict.role
content = _dict.content or ''
additional_kwargs: Dict = {}
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == 'system' or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == 'function' or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict.name)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
|
def _convert_delta_to_message_chunk(_dict: Any, default_class: Type[
BaseMessageChunk]) ->BaseMessageChunk:
"""Convert a delta response to a message chunk."""
role = _dict.role
content = _dict.content or ''
additional_kwargs: Dict = {}
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=
additional_kwargs)
elif role == 'system' or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == 'function' or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict.name)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
|
Convert a delta response to a message chunk.
|
test_konko_key_masked_when_passed_via_constructor
|
"""Test initialization with an API key provided via the initializer"""
chat = ChatKonko(openai_api_key='test-openai-key', konko_api_key=
'test-konko-key')
print(chat.konko_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
print(chat.konko_secret_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_konko_key_masked_when_passed_via_constructor(capsys: CaptureFixture
) ->None:
"""Test initialization with an API key provided via the initializer"""
chat = ChatKonko(openai_api_key='test-openai-key', konko_api_key=
'test-konko-key')
print(chat.konko_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
print(chat.konko_secret_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
Test initialization with an API key provided via the initializer
|
manager
|
"""Initialize the test database and yield the TimestampedSet instance."""
record_manager = SQLRecordManager('kittens', db_url='sqlite:///:memory:')
record_manager.create_schema()
return record_manager
|
@pytest.fixture()
def manager() ->SQLRecordManager:
"""Initialize the test database and yield the TimestampedSet instance."""
record_manager = SQLRecordManager('kittens', db_url='sqlite:///:memory:')
record_manager.create_schema()
return record_manager
|
Initialize the test database and yield the TimestampedSet instance.
|
type
|
self.click(id)
self.page.keyboard.type(text)
|
def type(self, id: Union[str, int], text: str) ->None:
self.click(id)
self.page.keyboard.type(text)
| null |
invoke
|
config = ensure_config(config)
return cast(ChatGeneration, self.generate_prompt([self._convert_input(input
)], stop=stop, callbacks=config.get('callbacks'), tags=config.get(
'tags'), metadata=config.get('metadata'), run_name=config.get(
'run_name'), **kwargs).generations[0][0]).message
|
def invoke(self, input: LanguageModelInput, config: Optional[RunnableConfig
]=None, *, stop: Optional[List[str]]=None, **kwargs: Any) ->BaseMessage:
config = ensure_config(config)
return cast(ChatGeneration, self.generate_prompt([self._convert_input(
input)], stop=stop, callbacks=config.get('callbacks'), tags=config.
get('tags'), metadata=config.get('metadata'), run_name=config.get(
'run_name'), **kwargs).generations[0][0]).message
| null |
test_public_api
|
"""Test for changes in the public API."""
expected_all = ['Run']
assert sorted(schemas_all) == expected_all
for module_name in expected_all:
assert hasattr(schemas, module_name) and getattr(schemas, module_name
) is not None
|
def test_public_api() ->None:
"""Test for changes in the public API."""
expected_all = ['Run']
assert sorted(schemas_all) == expected_all
for module_name in expected_all:
assert hasattr(schemas, module_name) and getattr(schemas, module_name
) is not None
|
Test for changes in the public API.
|
__init__
|
"""A generic document loader.
Args:
blob_loader: A blob loader which knows how to yield blobs
blob_parser: A blob parser which knows how to parse blobs into documents
"""
self.blob_loader = blob_loader
self.blob_parser = blob_parser
|
def __init__(self, blob_loader: BlobLoader, blob_parser: BaseBlobParser
) ->None:
"""A generic document loader.
Args:
blob_loader: A blob loader which knows how to yield blobs
blob_parser: A blob parser which knows how to parse blobs into documents
"""
self.blob_loader = blob_loader
self.blob_parser = blob_parser
|
A generic document loader.
Args:
blob_loader: A blob loader which knows how to yield blobs
blob_parser: A blob parser which knows how to parse blobs into documents
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.