method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_get_schema
|
from openapi_pydantic import Reference, Schema
schema = parameter.param_schema
if isinstance(schema, Reference):
schema = spec.get_referenced_schema(schema)
elif schema is None:
return None
elif not isinstance(schema, Schema):
raise ValueError(f'Error dereferencing schema: {schema}')
return schema
|
@staticmethod
def _get_schema(parameter: Parameter, spec: OpenAPISpec) ->Optional[Schema]:
from openapi_pydantic import Reference, Schema
schema = parameter.param_schema
if isinstance(schema, Reference):
schema = spec.get_referenced_schema(schema)
elif schema is None:
return None
elif not isinstance(schema, Schema):
raise ValueError(f'Error dereferencing schema: {schema}')
return schema
| null |
test_search
|
"""
Test that `foo` is closest to `foo`
Here k is 1
"""
output = self.vectorstore.similarity_search(query='foo', k=1, metadatas=[
'author', 'category'])
assert output[0].page_content == 'foo'
assert output[0].metadata['author'] == 'Adam'
assert output[0].metadata['category'] == 'Music'
assert len(output) == 1
|
def test_search(self) ->None:
"""
Test that `foo` is closest to `foo`
Here k is 1
"""
output = self.vectorstore.similarity_search(query='foo', k=1, metadatas
=['author', 'category'])
assert output[0].page_content == 'foo'
assert output[0].metadata['author'] == 'Adam'
assert output[0].metadata['category'] == 'Music'
assert len(output) == 1
|
Test that `foo` is closest to `foo`
Here k is 1
|
test_pypdf_loader
|
"""Test PyPDFLoader."""
file_path = Path(__file__).parent.parent / 'examples/hello.pdf'
loader = PyPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__).parent.parent / 'examples/layout-parser-paper.pdf'
loader = PyPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 16
|
def test_pypdf_loader() ->None:
"""Test PyPDFLoader."""
file_path = Path(__file__).parent.parent / 'examples/hello.pdf'
loader = PyPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__
).parent.parent / 'examples/layout-parser-paper.pdf'
loader = PyPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 16
|
Test PyPDFLoader.
|
output_keys
|
"""
Get the output keys.
Returns:
List[str]: The output keys.
"""
return ['score']
|
@property
def output_keys(self) ->List[str]:
"""
Get the output keys.
Returns:
List[str]: The output keys.
"""
return ['score']
|
Get the output keys.
Returns:
List[str]: The output keys.
|
__init__
|
self.message = message
super().__init__(self.message)
|
def __init__(self, message: str=
'The prompt contains toxic content and cannot be processed'):
self.message = message
super().__init__(self.message)
| null |
test_myscale_with_metadatas
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
config = MyScaleSettings()
config.table = 'test_myscale_with_metadatas'
docsearch = MyScale.from_texts(texts=texts, embedding=FakeEmbeddings(),
config=config, metadatas=metadatas)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
docsearch.drop()
|
def test_myscale_with_metadatas() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
config = MyScaleSettings()
config.table = 'test_myscale_with_metadatas'
docsearch = MyScale.from_texts(texts=texts, embedding=FakeEmbeddings(),
config=config, metadatas=metadatas)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
docsearch.drop()
|
Test end to end construction and search.
|
__read_file
|
docs = []
csv_reader = csv.DictReader(csvfile, **self.csv_args)
for i, row in enumerate(csv_reader):
try:
source = row[self.source_column
] if self.source_column is not None else self.file_path
except KeyError:
raise ValueError(
f"Source column '{self.source_column}' not found in CSV file.")
content = '\n'.join(f'{k.strip()}: {v.strip() if v is not None else v}' for
k, v in row.items() if k not in self.metadata_columns)
metadata = {'source': source, 'row': i}
for col in self.metadata_columns:
try:
metadata[col] = row[col]
except KeyError:
raise ValueError(f"Metadata column '{col}' not found in CSV file.")
doc = Document(page_content=content, metadata=metadata)
docs.append(doc)
return docs
|
def __read_file(self, csvfile: TextIOWrapper) ->List[Document]:
docs = []
csv_reader = csv.DictReader(csvfile, **self.csv_args)
for i, row in enumerate(csv_reader):
try:
source = row[self.source_column
] if self.source_column is not None else self.file_path
except KeyError:
raise ValueError(
f"Source column '{self.source_column}' not found in CSV file.")
content = '\n'.join(
f'{k.strip()}: {v.strip() if v is not None else v}' for k, v in
row.items() if k not in self.metadata_columns)
metadata = {'source': source, 'row': i}
for col in self.metadata_columns:
try:
metadata[col] = row[col]
except KeyError:
raise ValueError(
f"Metadata column '{col}' not found in CSV file.")
doc = Document(page_content=content, metadata=metadata)
docs.append(doc)
return docs
| null |
embed_query
|
"""Embed query text."""
embedding = embed_with_retry(self, self.model_name, text)
return embedding['embedding']
|
def embed_query(self, text: str) ->List[float]:
"""Embed query text."""
embedding = embed_with_retry(self, self.model_name, text)
return embedding['embedding']
|
Embed query text.
|
lazy_parse
|
"""Iterates over the Blob pages and returns an Iterator with a Document
for each page, like the other parsers If multi-page document, blob.path
has to be set to the S3 URI and for single page docs
the blob.data is taken
"""
url_parse_result = urlparse(str(blob.path)) if blob.path else None
if url_parse_result and url_parse_result.scheme == 's3' and url_parse_result.netloc:
textract_response_json = self.tc.call_textract(input_document=str(blob.
path), features=self.textract_features, boto3_textract_client=self.
boto3_textract_client)
else:
textract_response_json = self.tc.call_textract(input_document=blob.
as_bytes(), features=self.textract_features, call_mode=self.tc.
Textract_Call_Mode.FORCE_SYNC, boto3_textract_client=self.
boto3_textract_client)
document = self.textractor.Document.open(textract_response_json)
linearizer_config = self.textractor.TextLinearizationConfig(hide_figure_layout
=True, title_prefix='# ', section_header_prefix='## ',
list_element_prefix='*')
for idx, page in enumerate(document.pages):
yield Document(page_content=page.get_text(config=linearizer_config),
metadata={'source': blob.source, 'page': idx + 1})
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Iterates over the Blob pages and returns an Iterator with a Document
for each page, like the other parsers If multi-page document, blob.path
has to be set to the S3 URI and for single page docs
the blob.data is taken
"""
url_parse_result = urlparse(str(blob.path)) if blob.path else None
if (url_parse_result and url_parse_result.scheme == 's3' and
url_parse_result.netloc):
textract_response_json = self.tc.call_textract(input_document=str(
blob.path), features=self.textract_features,
boto3_textract_client=self.boto3_textract_client)
else:
textract_response_json = self.tc.call_textract(input_document=blob.
as_bytes(), features=self.textract_features, call_mode=self.tc.
Textract_Call_Mode.FORCE_SYNC, boto3_textract_client=self.
boto3_textract_client)
document = self.textractor.Document.open(textract_response_json)
linearizer_config = self.textractor.TextLinearizationConfig(
hide_figure_layout=True, title_prefix='# ', section_header_prefix=
'## ', list_element_prefix='*')
for idx, page in enumerate(document.pages):
yield Document(page_content=page.get_text(config=linearizer_config),
metadata={'source': blob.source, 'page': idx + 1})
|
Iterates over the Blob pages and returns an Iterator with a Document
for each page, like the other parsers If multi-page document, blob.path
has to be set to the S3 URI and for single page docs
the blob.data is taken
|
on_llm_end
|
"""Log records to deepeval when an LLM ends."""
from deepeval.metrics.answer_relevancy import AnswerRelevancy
from deepeval.metrics.bias_classifier import UnBiasedMetric
from deepeval.metrics.metric import Metric
from deepeval.metrics.toxic_classifier import NonToxicMetric
for metric in self.metrics:
for i, generation in enumerate(response.generations):
output = generation[0].text
query = self.prompts[i]
if isinstance(metric, AnswerRelevancy):
result = metric.measure(output=output, query=query)
print(f'Answer Relevancy: {result}')
elif isinstance(metric, UnBiasedMetric):
score = metric.measure(output)
print(f'Bias Score: {score}')
elif isinstance(metric, NonToxicMetric):
score = metric.measure(output)
print(f'Toxic Score: {score}')
else:
raise ValueError(
f"""Metric {metric.__name__} is not supported by deepeval
callbacks."""
)
|
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
"""Log records to deepeval when an LLM ends."""
from deepeval.metrics.answer_relevancy import AnswerRelevancy
from deepeval.metrics.bias_classifier import UnBiasedMetric
from deepeval.metrics.metric import Metric
from deepeval.metrics.toxic_classifier import NonToxicMetric
for metric in self.metrics:
for i, generation in enumerate(response.generations):
output = generation[0].text
query = self.prompts[i]
if isinstance(metric, AnswerRelevancy):
result = metric.measure(output=output, query=query)
print(f'Answer Relevancy: {result}')
elif isinstance(metric, UnBiasedMetric):
score = metric.measure(output)
print(f'Bias Score: {score}')
elif isinstance(metric, NonToxicMetric):
score = metric.measure(output)
print(f'Toxic Score: {score}')
else:
raise ValueError(
f"""Metric {metric.__name__} is not supported by deepeval
callbacks."""
)
|
Log records to deepeval when an LLM ends.
|
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
return True
| null |
from_texts
|
"""Create Myscale wrapper with existing texts
Args:
texts (Iterable[str]): List or tuple of strings to be added
embedding (Embeddings): Function to extract text embedding
config (MyScaleSettings, Optional): Myscale configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
batch_size (int, optional): Batchsize when transmitting data to MyScale.
Defaults to 32.
metadata (List[dict], optional): metadata to texts. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
MyScale Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
return ctx
|
@classmethod
def from_texts(cls, texts: Iterable[str], embedding: Embeddings, metadatas:
Optional[List[Dict[Any, Any]]]=None, config: Optional[MyScaleSettings]=
None, text_ids: Optional[Iterable[str]]=None, batch_size: int=32, **
kwargs: Any) ->MyScale:
"""Create Myscale wrapper with existing texts
Args:
texts (Iterable[str]): List or tuple of strings to be added
embedding (Embeddings): Function to extract text embedding
config (MyScaleSettings, Optional): Myscale configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
batch_size (int, optional): Batchsize when transmitting data to MyScale.
Defaults to 32.
metadata (List[dict], optional): metadata to texts. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
MyScale Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=
metadatas)
return ctx
|
Create Myscale wrapper with existing texts
Args:
texts (Iterable[str]): List or tuple of strings to be added
embedding (Embeddings): Function to extract text embedding
config (MyScaleSettings, Optional): Myscale configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
batch_size (int, optional): Batchsize when transmitting data to MyScale.
Defaults to 32.
metadata (List[dict], optional): metadata to texts. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
MyScale Index
|
_stream
|
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {'prompt': prompt, **self._default_params, **kwargs}
if stop:
params['stop_sequences'] = stop
stream_resp = self.client.completions.create(**params, stream=True)
for data in stream_resp:
delta = data.completion
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta)
|
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->Iterator[ChatGenerationChunk]:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {'prompt': prompt, **self._default_params, **
kwargs}
if stop:
params['stop_sequences'] = stop
stream_resp = self.client.completions.create(**params, stream=True)
for data in stream_resp:
delta = data.completion
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta)
| null |
example_notebook_path
|
current_dir = pathlib.Path(__file__).parent
return os.path.join(current_dir, 'sample_documents', notebook_name)
|
@staticmethod
def example_notebook_path(notebook_name: str) ->str:
current_dir = pathlib.Path(__file__).parent
return os.path.join(current_dir, 'sample_documents', notebook_name)
| null |
parse
|
"""Parse the output of the language model."""
text = text.upper()
if 'INVALID' in text:
return ThoughtValidity.INVALID
elif 'INTERMEDIATE' in text:
return ThoughtValidity.VALID_INTERMEDIATE
elif 'VALID' in text:
return ThoughtValidity.VALID_FINAL
else:
return ThoughtValidity.INVALID
|
def parse(self, text: str) ->ThoughtValidity:
"""Parse the output of the language model."""
text = text.upper()
if 'INVALID' in text:
return ThoughtValidity.INVALID
elif 'INTERMEDIATE' in text:
return ThoughtValidity.VALID_INTERMEDIATE
elif 'VALID' in text:
return ThoughtValidity.VALID_FINAL
else:
return ThoughtValidity.INVALID
|
Parse the output of the language model.
|
create_multi_vector_retriever
|
"""
Create retriever that indexes summaries, but returns raw images or texts
:param vectorstore: Vectorstore to store embedded image sumamries
:param image_summaries: Image summaries
:param images: Base64 encoded images
:return: Retriever
"""
store = LocalFileStore(str(Path(__file__).parent /
'multi_vector_retriever_metadata'))
id_key = 'doc_id'
retriever = MultiVectorRetriever(vectorstore=vectorstore, byte_store=store,
id_key=id_key)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [Document(page_content=s, metadata={id_key: doc_ids[i]}) for
i, s in enumerate(doc_summaries)]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
add_documents(retriever, image_summaries, images)
return retriever
|
def create_multi_vector_retriever(vectorstore, image_summaries, images):
"""
Create retriever that indexes summaries, but returns raw images or texts
:param vectorstore: Vectorstore to store embedded image sumamries
:param image_summaries: Image summaries
:param images: Base64 encoded images
:return: Retriever
"""
store = LocalFileStore(str(Path(__file__).parent /
'multi_vector_retriever_metadata'))
id_key = 'doc_id'
retriever = MultiVectorRetriever(vectorstore=vectorstore, byte_store=
store, id_key=id_key)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [Document(page_content=s, metadata={id_key: doc_ids[
i]}) for i, s in enumerate(doc_summaries)]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
add_documents(retriever, image_summaries, images)
return retriever
|
Create retriever that indexes summaries, but returns raw images or texts
:param vectorstore: Vectorstore to store embedded image sumamries
:param image_summaries: Image summaries
:param images: Base64 encoded images
:return: Retriever
|
test_tracer_tool_run
|
"""Test tracer on a Tool run."""
uuid = uuid4()
compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time':
datetime.now(timezone.utc)}, {'name': 'end', 'time': datetime.now(
timezone.utc)}], extra={}, execution_order=1, child_execution_order=1,
serialized={'name': 'tool'}, inputs={'input': 'test'}, outputs={
'output': 'test'}, error=None, run_type='tool', trace_id=uuid,
dotted_order=f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_tool_start(serialized={'name': 'tool'}, input_str='test', run_id=uuid
)
tracer.on_tool_end('test', run_id=uuid)
assert tracer.runs == [compare_run]
|
@freeze_time('2023-01-01')
def test_tracer_tool_run() ->None:
"""Test tracer on a Tool run."""
uuid = uuid4()
compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc), events=[{'name': 'start',
'time': datetime.now(timezone.utc)}, {'name': 'end', 'time':
datetime.now(timezone.utc)}], extra={}, execution_order=1,
child_execution_order=1, serialized={'name': 'tool'}, inputs={
'input': 'test'}, outputs={'output': 'test'}, error=None, run_type=
'tool', trace_id=uuid, dotted_order=f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_tool_start(serialized={'name': 'tool'}, input_str='test',
run_id=uuid)
tracer.on_tool_end('test', run_id=uuid)
assert tracer.runs == [compare_run]
|
Test tracer on a Tool run.
|
get_token_ids_anthropic
|
"""Get the token ids for a string of text."""
client = _get_anthropic_client()
tokenizer = client.get_tokenizer()
encoded_text = tokenizer.encode(text)
return encoded_text.ids
|
def get_token_ids_anthropic(text: str) ->List[int]:
"""Get the token ids for a string of text."""
client = _get_anthropic_client()
tokenizer = client.get_tokenizer()
encoded_text = tokenizer.encode(text)
return encoded_text.ids
|
Get the token ids for a string of text.
|
_llm_type
|
"""Return type of llm."""
return 'nebula'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'nebula'
|
Return type of llm.
|
exists
|
return key in self.store
|
def exists(self, key: str) ->bool:
return key in self.store
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
embaas_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'embaas_api_key', 'EMBAAS_API_KEY'))
values['embaas_api_key'] = embaas_api_key
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
embaas_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'embaas_api_key', 'EMBAAS_API_KEY'))
values['embaas_api_key'] = embaas_api_key
return values
|
Validate that api key and python package exists in environment.
|
_call
|
"""Call to Pipeline Cloud endpoint."""
try:
from pipeline import PipelineCloud
except ImportError:
raise ImportError(
'Could not import pipeline-ai python package. Please install it with `pip install pipeline-ai`.'
)
client = PipelineCloud(token=self.pipeline_api_key.get_secret_value())
params = self.pipeline_kwargs or {}
params = {**params, **kwargs}
run = client.run_pipeline(self.pipeline_key, [prompt, params])
try:
text = run.result_preview[0][0]
except AttributeError:
raise AttributeError(
f'A pipeline run should have a `result_preview` attribute.Run was: {run}'
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call to Pipeline Cloud endpoint."""
try:
from pipeline import PipelineCloud
except ImportError:
raise ImportError(
'Could not import pipeline-ai python package. Please install it with `pip install pipeline-ai`.'
)
client = PipelineCloud(token=self.pipeline_api_key.get_secret_value())
params = self.pipeline_kwargs or {}
params = {**params, **kwargs}
run = client.run_pipeline(self.pipeline_key, [prompt, params])
try:
text = run.result_preview[0][0]
except AttributeError:
raise AttributeError(
f'A pipeline run should have a `result_preview` attribute.Run was: {run}'
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
Call to Pipeline Cloud endpoint.
|
texts
|
documents = TextLoader(os.path.join(os.path.dirname(__file__), 'fixtures',
'sharks.txt')).load()
yield [doc.page_content for doc in documents]
|
@pytest.fixture(scope='function')
def texts() ->Generator[List[str], None, None]:
documents = TextLoader(os.path.join(os.path.dirname(__file__),
'fixtures', 'sharks.txt')).load()
yield [doc.page_content for doc in documents]
| null |
test_vertexai_single_call_fails_no_message
|
chat = ChatVertexAI()
with pytest.raises(ValueError) as exc_info:
_ = chat([])
assert str(exc_info.value
) == 'You should provide at least one message to start the chat!'
|
def test_vertexai_single_call_fails_no_message() ->None:
chat = ChatVertexAI()
with pytest.raises(ValueError) as exc_info:
_ = chat([])
assert str(exc_info.value
) == 'You should provide at least one message to start the chat!'
| null |
_call
|
"""Call the xinference model and return the output.
Args:
prompt: The prompt to use for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Returns:
The generated string by the model.
"""
model = self.client.get_model(self.model_uid)
generate_config: 'LlamaCppGenerateConfig' = kwargs.get('generate_config', {})
generate_config = {**self.model_kwargs, **generate_config}
if stop:
generate_config['stop'] = stop
if generate_config and generate_config.get('stream'):
combined_text_output = ''
for token in self._stream_generate(model=model, prompt=prompt,
run_manager=run_manager, generate_config=generate_config):
combined_text_output += token
return combined_text_output
else:
completion = model.generate(prompt=prompt, generate_config=generate_config)
return completion['choices'][0]['text']
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call the xinference model and return the output.
Args:
prompt: The prompt to use for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Returns:
The generated string by the model.
"""
model = self.client.get_model(self.model_uid)
generate_config: 'LlamaCppGenerateConfig' = kwargs.get('generate_config',
{})
generate_config = {**self.model_kwargs, **generate_config}
if stop:
generate_config['stop'] = stop
if generate_config and generate_config.get('stream'):
combined_text_output = ''
for token in self._stream_generate(model=model, prompt=prompt,
run_manager=run_manager, generate_config=generate_config):
combined_text_output += token
return combined_text_output
else:
completion = model.generate(prompt=prompt, generate_config=
generate_config)
return completion['choices'][0]['text']
|
Call the xinference model and return the output.
Args:
prompt: The prompt to use for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Returns:
The generated string by the model.
|
max_marginal_relevance_search
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Zep determines this automatically and this parameter is
ignored.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
metadata: Optional, metadata to filter the resulting set of retrieved docs
Returns:
List of Documents selected by maximal marginal relevance.
"""
if not self._collection:
raise ValueError(
'collection should be an instance of a Zep DocumentCollection')
if not self._collection.is_auto_embedded and self._embedding:
query_vector = self._embedding.embed_query(query)
results = self._collection.search(embedding=query_vector, limit=k,
metadata=metadata, search_type='mmr', mmr_lambda=lambda_mult, **kwargs)
else:
results, query_vector = self._collection.search_return_query_vector(query,
limit=k, metadata=metadata, search_type='mmr', mmr_lambda=
lambda_mult, **kwargs)
return [Document(page_content=d.content, metadata=d.metadata) for d in results]
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
20, lambda_mult: float=0.5, metadata: Optional[Dict[str, Any]]=None, **
kwargs: Any) ->List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Zep determines this automatically and this parameter is
ignored.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
metadata: Optional, metadata to filter the resulting set of retrieved docs
Returns:
List of Documents selected by maximal marginal relevance.
"""
if not self._collection:
raise ValueError(
'collection should be an instance of a Zep DocumentCollection')
if not self._collection.is_auto_embedded and self._embedding:
query_vector = self._embedding.embed_query(query)
results = self._collection.search(embedding=query_vector, limit=k,
metadata=metadata, search_type='mmr', mmr_lambda=lambda_mult,
**kwargs)
else:
results, query_vector = self._collection.search_return_query_vector(
query, limit=k, metadata=metadata, search_type='mmr',
mmr_lambda=lambda_mult, **kwargs)
return [Document(page_content=d.content, metadata=d.metadata) for d in
results]
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Zep determines this automatically and this parameter is
ignored.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
metadata: Optional, metadata to filter the resulting set of retrieved docs
Returns:
List of Documents selected by maximal marginal relevance.
|
input_keys
|
"""Return the input keys.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
|
Return the input keys.
:meta private:
|
_import_semadb
|
from langchain_community.vectorstores.semadb import SemaDB
return SemaDB
|
def _import_semadb() ->Any:
from langchain_community.vectorstores.semadb import SemaDB
return SemaDB
| null |
_Index
|
self.dispatch(t.value)
|
def _Index(self, t):
self.dispatch(t.value)
| null |
input_keys
|
"""Input keys.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
"""Input keys.
:meta private:
"""
return [self.input_key]
|
Input keys.
:meta private:
|
_get_mock_psychic_loader
|
psychic_loader = PsychicLoader(api_key=self.MOCK_API_KEY, connector_id=self
.MOCK_CONNECTOR_ID, account_id=self.MOCK_ACCOUNT_ID)
psychic_loader.psychic = mock_psychic
return psychic_loader
|
def _get_mock_psychic_loader(self, mock_psychic: MagicMock) ->PsychicLoader:
psychic_loader = PsychicLoader(api_key=self.MOCK_API_KEY, connector_id=
self.MOCK_CONNECTOR_ID, account_id=self.MOCK_ACCOUNT_ID)
psychic_loader.psychic = mock_psychic
return psychic_loader
| null |
to_string
|
"""Return prompt as string."""
return self.text
|
def to_string(self) ->str:
"""Return prompt as string."""
return self.text
|
Return prompt as string.
|
test_scann_with_metadatas
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]:
Document(page_content='foo', metadata={'page': 0}), docsearch.
index_to_docstore_id[1]: Document(page_content='bar', metadata={'page':
1}), docsearch.index_to_docstore_id[2]: Document(page_content='baz',
metadata={'page': 2})})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
def test_scann_with_metadatas() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]:
Document(page_content='foo', metadata={'page': 0}), docsearch.
index_to_docstore_id[1]: Document(page_content='bar', metadata={
'page': 1}), docsearch.index_to_docstore_id[2]: Document(
page_content='baz', metadata={'page': 2})})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
Test end to end construction and search.
|
__init__
|
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index_name = index_name
http_auth = kwargs.get('http_auth')
self.is_aoss = _is_aoss_enabled(http_auth=http_auth)
self.client = _get_opensearch_client(opensearch_url, **kwargs)
self.engine = kwargs.get('engine')
|
def __init__(self, opensearch_url: str, index_name: str, embedding_function:
Embeddings, **kwargs: Any):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index_name = index_name
http_auth = kwargs.get('http_auth')
self.is_aoss = _is_aoss_enabled(http_auth=http_auth)
self.client = _get_opensearch_client(opensearch_url, **kwargs)
self.engine = kwargs.get('engine')
|
Initialize with necessary components.
|
_to_args_and_kwargs
|
if isinstance(tool_input, str):
return (tool_input,), {}
else:
return (), tool_input
|
def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) ->Tuple[Tuple, Dict
]:
if isinstance(tool_input, str):
return (tool_input,), {}
else:
return (), tool_input
| null |
_import_tencentvectordb
|
from langchain_community.vectorstores.tencentvectordb import TencentVectorDB
return TencentVectorDB
|
def _import_tencentvectordb() ->Any:
from langchain_community.vectorstores.tencentvectordb import TencentVectorDB
return TencentVectorDB
| null |
raise_deprecation
|
if 'llm' in values:
warnings.warn(
'Directly instantiating an LLMBashChain with an llm is deprecated. Please instantiate with llm_chain or using the from_llm class method.'
)
if 'llm_chain' not in values and values['llm'] is not None:
prompt = values.get('prompt', PROMPT)
values['llm_chain'] = LLMChain(llm=values['llm'], prompt=prompt)
return values
|
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) ->Dict:
if 'llm' in values:
warnings.warn(
'Directly instantiating an LLMBashChain with an llm is deprecated. Please instantiate with llm_chain or using the from_llm class method.'
)
if 'llm_chain' not in values and values['llm'] is not None:
prompt = values.get('prompt', PROMPT)
values['llm_chain'] = LLMChain(llm=values['llm'], prompt=prompt)
return values
| null |
_get_board
|
board = next((b for b in self.client.list_boards() if b.name == self.
board_name), None)
if not board:
raise ValueError(f'Board `{self.board_name}` not found.')
return board
|
def _get_board(self) ->Board:
board = next((b for b in self.client.list_boards() if b.name == self.
board_name), None)
if not board:
raise ValueError(f'Board `{self.board_name}` not found.')
return board
| null |
from_texts
|
"""Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Weaviate instance.
3. Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Args:
texts: Texts to add to vector store.
embedding: Text embedding model to use.
metadatas: Metadata associated with each text.
client: weaviate.Client to use.
weaviate_url: The Weaviate URL. If using Weaviate Cloud Services get it
from the ``Details`` tab. Can be passed in as a named param or by
setting the environment variable ``WEAVIATE_URL``. Should not be
specified if client is provided.
weaviate_api_key: The Weaviate API key. If enabled and using Weaviate Cloud
Services, get it from ``Details`` tab. Can be passed in as a named param
or by setting the environment variable ``WEAVIATE_API_KEY``. Should
not be specified if client is provided.
batch_size: Size of batch operations.
index_name: Index name.
text_key: Key to use for uploading/retrieving text to/from vectorstore.
by_text: Whether to search by text or by embedding.
relevance_score_fn: Function for converting whatever distance function the
vector store uses to a relevance score, which is a normalized similarity
score (0 means dissimilar, 1 means similar).
**kwargs: Additional named parameters to pass to ``Weaviate.__init__()``.
Example:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Weaviate
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
"""
try:
from weaviate.util import get_valid_uuid
except ImportError as e:
raise ImportError(
'Could not import weaviate python package. Please install it with `pip install weaviate-client`'
) from e
client = client or _create_weaviate_client(url=weaviate_url, api_key=
weaviate_api_key)
if batch_size:
client.batch.configure(batch_size=batch_size)
index_name = index_name or f'LangChain_{uuid4().hex}'
schema = _default_schema(index_name)
if not client.schema.exists(index_name):
client.schema.create_class(schema)
embeddings = embedding.embed_documents(texts) if embedding else None
attributes = list(metadatas[0].keys()) if metadatas else None
if 'uuids' in kwargs:
uuids = kwargs.pop('uuids')
else:
uuids = [get_valid_uuid(uuid4()) for _ in range(len(texts))]
with client.batch as batch:
for i, text in enumerate(texts):
data_properties = {text_key: text}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = uuids[i]
params = {'uuid': _id, 'data_object': data_properties, 'class_name':
index_name}
if embeddings is not None:
params['vector'] = embeddings[i]
batch.add_data_object(**params)
batch.flush()
return cls(client, index_name, text_key, embedding=embedding, attributes=
attributes, relevance_score_fn=relevance_score_fn, by_text=by_text, **
kwargs)
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[dict]]=None, *, client: Optional[weaviate.Client]=None,
weaviate_url: Optional[str]=None, weaviate_api_key: Optional[str]=None,
batch_size: Optional[int]=None, index_name: Optional[str]=None,
text_key: str='text', by_text: bool=False, relevance_score_fn: Optional
[Callable[[float], float]]=_default_score_normalizer, **kwargs: Any
) ->Weaviate:
"""Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Weaviate instance.
3. Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Args:
texts: Texts to add to vector store.
embedding: Text embedding model to use.
metadatas: Metadata associated with each text.
client: weaviate.Client to use.
weaviate_url: The Weaviate URL. If using Weaviate Cloud Services get it
from the ``Details`` tab. Can be passed in as a named param or by
setting the environment variable ``WEAVIATE_URL``. Should not be
specified if client is provided.
weaviate_api_key: The Weaviate API key. If enabled and using Weaviate Cloud
Services, get it from ``Details`` tab. Can be passed in as a named param
or by setting the environment variable ``WEAVIATE_API_KEY``. Should
not be specified if client is provided.
batch_size: Size of batch operations.
index_name: Index name.
text_key: Key to use for uploading/retrieving text to/from vectorstore.
by_text: Whether to search by text or by embedding.
relevance_score_fn: Function for converting whatever distance function the
vector store uses to a relevance score, which is a normalized similarity
score (0 means dissimilar, 1 means similar).
**kwargs: Additional named parameters to pass to ``Weaviate.__init__()``.
Example:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Weaviate
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
"""
try:
from weaviate.util import get_valid_uuid
except ImportError as e:
raise ImportError(
'Could not import weaviate python package. Please install it with `pip install weaviate-client`'
) from e
client = client or _create_weaviate_client(url=weaviate_url, api_key=
weaviate_api_key)
if batch_size:
client.batch.configure(batch_size=batch_size)
index_name = index_name or f'LangChain_{uuid4().hex}'
schema = _default_schema(index_name)
if not client.schema.exists(index_name):
client.schema.create_class(schema)
embeddings = embedding.embed_documents(texts) if embedding else None
attributes = list(metadatas[0].keys()) if metadatas else None
if 'uuids' in kwargs:
uuids = kwargs.pop('uuids')
else:
uuids = [get_valid_uuid(uuid4()) for _ in range(len(texts))]
with client.batch as batch:
for i, text in enumerate(texts):
data_properties = {text_key: text}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = uuids[i]
params = {'uuid': _id, 'data_object': data_properties,
'class_name': index_name}
if embeddings is not None:
params['vector'] = embeddings[i]
batch.add_data_object(**params)
batch.flush()
return cls(client, index_name, text_key, embedding=embedding,
attributes=attributes, relevance_score_fn=relevance_score_fn,
by_text=by_text, **kwargs)
|
Construct Weaviate wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Weaviate instance.
3. Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Args:
texts: Texts to add to vector store.
embedding: Text embedding model to use.
metadatas: Metadata associated with each text.
client: weaviate.Client to use.
weaviate_url: The Weaviate URL. If using Weaviate Cloud Services get it
from the ``Details`` tab. Can be passed in as a named param or by
setting the environment variable ``WEAVIATE_URL``. Should not be
specified if client is provided.
weaviate_api_key: The Weaviate API key. If enabled and using Weaviate Cloud
Services, get it from ``Details`` tab. Can be passed in as a named param
or by setting the environment variable ``WEAVIATE_API_KEY``. Should
not be specified if client is provided.
batch_size: Size of batch operations.
index_name: Index name.
text_key: Key to use for uploading/retrieving text to/from vectorstore.
by_text: Whether to search by text or by embedding.
relevance_score_fn: Function for converting whatever distance function the
vector store uses to a relevance score, which is a normalized similarity
score (0 means dissimilar, 1 means similar).
**kwargs: Additional named parameters to pass to ``Weaviate.__init__()``.
Example:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Weaviate
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
|
from_names_and_descriptions
|
"""Convenience constructor."""
documents = []
for name, descriptions in names_and_descriptions:
for description in descriptions:
documents.append(Document(page_content=description, metadata={
'name': name}))
vectorstore = vectorstore_cls.from_documents(documents, embeddings)
return cls(vectorstore=vectorstore, **kwargs)
|
@classmethod
def from_names_and_descriptions(cls, names_and_descriptions: Sequence[Tuple
[str, Sequence[str]]], vectorstore_cls: Type[VectorStore], embeddings:
Embeddings, **kwargs: Any) ->EmbeddingRouterChain:
"""Convenience constructor."""
documents = []
for name, descriptions in names_and_descriptions:
for description in descriptions:
documents.append(Document(page_content=description, metadata={
'name': name}))
vectorstore = vectorstore_cls.from_documents(documents, embeddings)
return cls(vectorstore=vectorstore, **kwargs)
|
Convenience constructor.
|
_block_back_door_paths
|
intervention_entities = [entity_setting.name for entity_setting in self.
intervention.entity_settings]
for entity in self.causal_operations.entities:
if entity.name in intervention_entities:
entity.depends_on = []
entity.code = 'pass'
|
def _block_back_door_paths(self) ->None:
intervention_entities = [entity_setting.name for entity_setting in self
.intervention.entity_settings]
for entity in self.causal_operations.entities:
if entity.name in intervention_entities:
entity.depends_on = []
entity.code = 'pass'
| null |
format_tool_to_openai_tool
|
"""Format tool into the OpenAI function API."""
function = format_tool_to_openai_function(tool)
return {'type': 'function', 'function': function}
|
def format_tool_to_openai_tool(tool: BaseTool) ->ToolDescription:
"""Format tool into the OpenAI function API."""
function = format_tool_to_openai_function(tool)
return {'type': 'function', 'function': function}
|
Format tool into the OpenAI function API.
|
search_index
|
return self._vector_store
|
@property
def search_index(self) ->TigrisVectorStore:
return self._vector_store
| null |
get_all_tool_names
|
"""Get a list of all possible tool names."""
return list(_BASE_TOOLS) + list(_EXTRA_OPTIONAL_TOOLS) + list(_EXTRA_LLM_TOOLS
) + list(_LLM_TOOLS)
|
def get_all_tool_names() ->List[str]:
"""Get a list of all possible tool names."""
return list(_BASE_TOOLS) + list(_EXTRA_OPTIONAL_TOOLS) + list(
_EXTRA_LLM_TOOLS) + list(_LLM_TOOLS)
|
Get a list of all possible tool names.
|
_call
|
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
params = {**_model_kwargs, **kwargs}
response = self.client(inputs=prompt, params=params)
if 'error' in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
if self.client.task == 'text-generation':
text = response[0]['generated_text'][len(prompt):]
elif self.client.task == 'text2text-generation':
text = response[0]['generated_text']
elif self.client.task == 'summarization':
text = response[0]['summary_text']
else:
raise ValueError(
f'Got invalid task {self.client.task}, currently only {VALID_TASKS} are supported'
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
params = {**_model_kwargs, **kwargs}
response = self.client(inputs=prompt, params=params)
if 'error' in response:
raise ValueError(f"Error raised by inference API: {response['error']}")
if self.client.task == 'text-generation':
text = response[0]['generated_text'][len(prompt):]
elif self.client.task == 'text2text-generation':
text = response[0]['generated_text']
elif self.client.task == 'summarization':
text = response[0]['summary_text']
else:
raise ValueError(
f'Got invalid task {self.client.task}, currently only {VALID_TASKS} are supported'
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
|
similarity_search_by_vector
|
"""Return docs most similar to the embedding.
Args:
embedding: Embedding to look up documents similar to.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
"""
docs_and_scores = self.similarity_search_by_vector_with_score(embedding, k=
k, filter=filter, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
filter: Optional[List[Namespace]]=None, **kwargs: Any) ->List[Document]:
"""Return docs most similar to the embedding.
Args:
embedding: Embedding to look up documents similar to.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
"""
docs_and_scores = self.similarity_search_by_vector_with_score(embedding,
k=k, filter=filter, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
Return docs most similar to the embedding.
Args:
embedding: Embedding to look up documents similar to.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
|
config_specs
|
return get_unique_config_specs(spec for step in self.runnables.values() for
spec in step.config_specs)
|
@property
def config_specs(self) ->List[ConfigurableFieldSpec]:
return get_unique_config_specs(spec for step in self.runnables.values() for
spec in step.config_specs)
| null |
test_qdrant_from_texts_recreates_collection_on_force_recreate
|
"""Test if Qdrant.from_texts recreates the collection even if config mismatches"""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet'
], ConsistentFakeEmbeddings(dimensionality=10), collection_name=
collection_name, path=str(tmpdir), vector_name=vector_name)
del vec_store
vec_store = Qdrant.from_texts(['foo', 'bar'], ConsistentFakeEmbeddings(
dimensionality=5), collection_name=collection_name, path=str(tmpdir
), vector_name=vector_name, force_recreate=True)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
|
@pytest.mark.parametrize('vector_name', [None, 'custom-vector'])
def test_qdrant_from_texts_recreates_collection_on_force_recreate(vector_name:
Optional[str]) ->None:
"""Test if Qdrant.from_texts recreates the collection even if config mismatches"""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(['lorem', 'ipsum', 'dolor', 'sit',
'amet'], ConsistentFakeEmbeddings(dimensionality=10),
collection_name=collection_name, path=str(tmpdir), vector_name=
vector_name)
del vec_store
vec_store = Qdrant.from_texts(['foo', 'bar'],
ConsistentFakeEmbeddings(dimensionality=5), collection_name=
collection_name, path=str(tmpdir), vector_name=vector_name,
force_recreate=True)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
|
Test if Qdrant.from_texts recreates the collection even if config mismatches
|
get_methods_for_path
|
"""Return a list of valid methods for the specified path."""
from openapi_pydantic import Operation
path_item = self._get_path_strict(path)
results = []
for method in HTTPVerb:
operation = getattr(path_item, method.value, None)
if isinstance(operation, Operation):
results.append(method.value)
return results
|
def get_methods_for_path(self, path: str) ->List[str]:
"""Return a list of valid methods for the specified path."""
from openapi_pydantic import Operation
path_item = self._get_path_strict(path)
results = []
for method in HTTPVerb:
operation = getattr(path_item, method.value, None)
if isinstance(operation, Operation):
results.append(method.value)
return results
|
Return a list of valid methods for the specified path.
|
_invocation_params
|
"""Combines the invocation parameters with default parameters."""
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError('`stop` found in both the input and default params.')
elif self.stop is not None:
params['stop'] = self.stop
elif stop is not None:
params['stop'] = stop
else:
params['stop'] = []
return {**params, **kwargs}
|
def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) ->dict:
"""Combines the invocation parameters with default parameters."""
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError('`stop` found in both the input and default params.')
elif self.stop is not None:
params['stop'] = self.stop
elif stop is not None:
params['stop'] = stop
else:
params['stop'] = []
return {**params, **kwargs}
|
Combines the invocation parameters with default parameters.
|
_create_collection
|
from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, MilvusException
from pymilvus.orm.types import infer_dtype_bydata
dim = len(embeddings[0])
fields = []
if self._metadata_field is not None:
fields.append(FieldSchema(self._metadata_field, DataType.JSON))
elif metadatas:
for key, value in metadatas[0].items():
dtype = infer_dtype_bydata(value)
if dtype == DataType.UNKNOWN or dtype == DataType.NONE:
logger.error(
'Failure to create collection, unrecognized dtype for key: %s',
key)
raise ValueError(f'Unrecognized datatype for {key}.')
elif dtype == DataType.VARCHAR:
fields.append(FieldSchema(key, DataType.VARCHAR, max_length=65535))
else:
fields.append(FieldSchema(key, dtype))
fields.append(FieldSchema(self._text_field, DataType.VARCHAR, max_length=65535)
)
fields.append(FieldSchema(self._primary_field, DataType.INT64, is_primary=
True, auto_id=True))
fields.append(FieldSchema(self._vector_field, infer_dtype_bydata(embeddings
[0]), dim=dim))
schema = CollectionSchema(fields, description=self.collection_description)
try:
self.col = Collection(name=self.collection_name, schema=schema,
consistency_level=self.consistency_level, using=self.alias)
except MilvusException as e:
logger.error('Failed to create collection: %s error: %s', self.
collection_name, e)
raise e
|
def _create_collection(self, embeddings: list, metadatas: Optional[list[
dict]]=None) ->None:
from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, MilvusException
from pymilvus.orm.types import infer_dtype_bydata
dim = len(embeddings[0])
fields = []
if self._metadata_field is not None:
fields.append(FieldSchema(self._metadata_field, DataType.JSON))
elif metadatas:
for key, value in metadatas[0].items():
dtype = infer_dtype_bydata(value)
if dtype == DataType.UNKNOWN or dtype == DataType.NONE:
logger.error(
'Failure to create collection, unrecognized dtype for key: %s'
, key)
raise ValueError(f'Unrecognized datatype for {key}.')
elif dtype == DataType.VARCHAR:
fields.append(FieldSchema(key, DataType.VARCHAR, max_length
=65535))
else:
fields.append(FieldSchema(key, dtype))
fields.append(FieldSchema(self._text_field, DataType.VARCHAR,
max_length=65535))
fields.append(FieldSchema(self._primary_field, DataType.INT64,
is_primary=True, auto_id=True))
fields.append(FieldSchema(self._vector_field, infer_dtype_bydata(
embeddings[0]), dim=dim))
schema = CollectionSchema(fields, description=self.collection_description)
try:
self.col = Collection(name=self.collection_name, schema=schema,
consistency_level=self.consistency_level, using=self.alias)
except MilvusException as e:
logger.error('Failed to create collection: %s error: %s', self.
collection_name, e)
raise e
| null |
test_context_w_namespace_w_emb2
|
str1 = 'test'
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{'test_namespace': encoded_str1}]
assert base.embed(base.Embed({'test_namespace': str1}), MockEncoder()
) == expected
expected_embed_and_keep = [{'test_namespace': str1 + ' ' + encoded_str1}]
assert base.embed(base.EmbedAndKeep({'test_namespace': str1}), MockEncoder()
) == expected_embed_and_keep
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_context_w_namespace_w_emb2() ->None:
str1 = 'test'
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{'test_namespace': encoded_str1}]
assert base.embed(base.Embed({'test_namespace': str1}), MockEncoder()
) == expected
expected_embed_and_keep = [{'test_namespace': str1 + ' ' + encoded_str1}]
assert base.embed(base.EmbedAndKeep({'test_namespace': str1}),
MockEncoder()) == expected_embed_and_keep
| null |
test_hologres_with_filter_no_match
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(texts=texts, table_name='test_table_filter',
embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas,
connection_string=CONNECTION_STRING, pre_delete_table=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'5'})
assert output == []
|
def test_hologres_with_filter_no_match() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(texts=texts, table_name=
'test_table_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_table=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={
'page': '5'})
assert output == []
|
Test end to end construction and search.
|
exists
|
"""Check if the given keys exist in the SQLite database."""
with self._make_session() as session:
records = session.query(UpsertionRecord.key).filter(and_(
UpsertionRecord.key.in_(keys), UpsertionRecord.namespace == self.
namespace)).all()
found_keys = set(r.key for r in records)
return [(k in found_keys) for k in keys]
|
def exists(self, keys: Sequence[str]) ->List[bool]:
"""Check if the given keys exist in the SQLite database."""
with self._make_session() as session:
records = session.query(UpsertionRecord.key).filter(and_(
UpsertionRecord.key.in_(keys), UpsertionRecord.namespace ==
self.namespace)).all()
found_keys = set(r.key for r in records)
return [(k in found_keys) for k in keys]
|
Check if the given keys exist in the SQLite database.
|
test_chat_google_palm_system_message
|
"""Test Google PaLM Chat API wrapper with system message."""
chat = ChatGooglePalm()
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_chat_google_palm_system_message() ->None:
"""Test Google PaLM Chat API wrapper with system message."""
chat = ChatGooglePalm()
system_message = SystemMessage(content='You are to chat with the user.')
human_message = HumanMessage(content='Hello')
response = chat([system_message, human_message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
Test Google PaLM Chat API wrapper with system message.
|
_build_payloads
|
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
'At least one of the texts is None. Please remove it before calling .from_texts or .add_texts on Qdrant instance.'
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append({content_payload_key: text, metadata_payload_key: metadata}
)
return payloads
|
@classmethod
def _build_payloads(cls, texts: Iterable[str], metadatas: Optional[List[
dict]], content_payload_key: str, metadata_payload_key: str) ->List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
'At least one of the texts is None. Please remove it before calling .from_texts or .add_texts on Qdrant instance.'
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append({content_payload_key: text, metadata_payload_key:
metadata})
return payloads
| null |
delete
|
"""Delete documents from the index.
Args:
ids: List of ids of documents to delete
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
body = []
if ids is None:
raise ValueError('ids must be provided.')
for _id in ids:
body.append({'_op_type': 'delete', '_index': self.index_name, '_id': _id})
if len(body) > 0:
try:
bulk(self.client, body, refresh=kwargs.get('refresh_indices', True),
ignore_status=404)
logger.debug(f'Deleted {len(body)} texts from index')
return True
except BulkIndexError as e:
logger.error(f'Error deleting texts: {e}')
raise e
else:
logger.info('No documents to delete')
return False
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool
]:
"""Delete documents from the index.
Args:
ids: List of ids of documents to delete
"""
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
body = []
if ids is None:
raise ValueError('ids must be provided.')
for _id in ids:
body.append({'_op_type': 'delete', '_index': self.index_name, '_id':
_id})
if len(body) > 0:
try:
bulk(self.client, body, refresh=kwargs.get('refresh_indices',
True), ignore_status=404)
logger.debug(f'Deleted {len(body)} texts from index')
return True
except BulkIndexError as e:
logger.error(f'Error deleting texts: {e}')
raise e
else:
logger.info('No documents to delete')
return False
|
Delete documents from the index.
Args:
ids: List of ids of documents to delete
|
__init__
|
"""Initialize the tensorflow_hub and tensorflow_text."""
super().__init__(**kwargs)
try:
import tensorflow_hub
except ImportError:
raise ImportError(
'Could not import tensorflow-hub python package. Please install it with `pip install tensorflow-hub``.'
)
try:
import tensorflow_text
except ImportError:
raise ImportError(
'Could not import tensorflow_text python package. Please install it with `pip install tensorflow_text``.'
)
self.embed = tensorflow_hub.load(self.model_url)
|
def __init__(self, **kwargs: Any):
"""Initialize the tensorflow_hub and tensorflow_text."""
super().__init__(**kwargs)
try:
import tensorflow_hub
except ImportError:
raise ImportError(
'Could not import tensorflow-hub python package. Please install it with `pip install tensorflow-hub``.'
)
try:
import tensorflow_text
except ImportError:
raise ImportError(
'Could not import tensorflow_text python package. Please install it with `pip install tensorflow_text``.'
)
self.embed = tensorflow_hub.load(self.model_url)
|
Initialize the tensorflow_hub and tensorflow_text.
|
enforce_stop_tokens
|
"""Cut off the text as soon as any stop words occur."""
return re.split('|'.join(stop), text, maxsplit=1)[0]
|
def enforce_stop_tokens(text: str, stop: List[str]) ->str:
"""Cut off the text as soon as any stop words occur."""
return re.split('|'.join(stop), text, maxsplit=1)[0]
|
Cut off the text as soon as any stop words occur.
|
batch_parse
|
"""Parses a list of blobs lazily.
Args:
blobs: a list of blobs to parse.
gcs_output_path: a path on Google Cloud Storage to store parsing results.
timeout_sec: a timeout to wait for Document AI to complete, in seconds.
check_in_interval_sec: an interval to wait until next check
whether parsing operations have been completed, in seconds
This is a long-running operation. A recommended way is to decouple
parsing from creating LangChain Documents:
>>> operations = parser.docai_parse(blobs, gcs_path)
>>> parser.is_running(operations)
You can get operations names and save them:
>>> names = [op.operation.name for op in operations]
And when all operations are finished, you can use their results:
>>> operations = parser.operations_from_names(operation_names)
>>> results = parser.get_results(operations)
>>> docs = parser.parse_from_results(results)
"""
output_path = gcs_output_path or self._gcs_output_path
if not output_path:
raise ValueError(
'An output path on Google Cloud Storage should be provided.')
operations = self.docai_parse(blobs, gcs_output_path=output_path)
operation_names = [op.operation.name for op in operations]
logger.debug('Started parsing with Document AI, submitted operations %s',
operation_names)
time_elapsed = 0
while self.is_running(operations):
time.sleep(check_in_interval_sec)
time_elapsed += check_in_interval_sec
if time_elapsed > timeout_sec:
raise TimeoutError(
f'Timeout exceeded! Check operations {operation_names} later!')
logger.debug('.')
results = self.get_results(operations=operations)
yield from self.parse_from_results(results)
|
def batch_parse(self, blobs: Sequence[Blob], gcs_output_path: Optional[str]
=None, timeout_sec: int=3600, check_in_interval_sec: int=60) ->Iterator[
Document]:
"""Parses a list of blobs lazily.
Args:
blobs: a list of blobs to parse.
gcs_output_path: a path on Google Cloud Storage to store parsing results.
timeout_sec: a timeout to wait for Document AI to complete, in seconds.
check_in_interval_sec: an interval to wait until next check
whether parsing operations have been completed, in seconds
This is a long-running operation. A recommended way is to decouple
parsing from creating LangChain Documents:
>>> operations = parser.docai_parse(blobs, gcs_path)
>>> parser.is_running(operations)
You can get operations names and save them:
>>> names = [op.operation.name for op in operations]
And when all operations are finished, you can use their results:
>>> operations = parser.operations_from_names(operation_names)
>>> results = parser.get_results(operations)
>>> docs = parser.parse_from_results(results)
"""
output_path = gcs_output_path or self._gcs_output_path
if not output_path:
raise ValueError(
'An output path on Google Cloud Storage should be provided.')
operations = self.docai_parse(blobs, gcs_output_path=output_path)
operation_names = [op.operation.name for op in operations]
logger.debug('Started parsing with Document AI, submitted operations %s',
operation_names)
time_elapsed = 0
while self.is_running(operations):
time.sleep(check_in_interval_sec)
time_elapsed += check_in_interval_sec
if time_elapsed > timeout_sec:
raise TimeoutError(
f'Timeout exceeded! Check operations {operation_names} later!')
logger.debug('.')
results = self.get_results(operations=operations)
yield from self.parse_from_results(results)
|
Parses a list of blobs lazily.
Args:
blobs: a list of blobs to parse.
gcs_output_path: a path on Google Cloud Storage to store parsing results.
timeout_sec: a timeout to wait for Document AI to complete, in seconds.
check_in_interval_sec: an interval to wait until next check
whether parsing operations have been completed, in seconds
This is a long-running operation. A recommended way is to decouple
parsing from creating LangChain Documents:
>>> operations = parser.docai_parse(blobs, gcs_path)
>>> parser.is_running(operations)
You can get operations names and save them:
>>> names = [op.operation.name for op in operations]
And when all operations are finished, you can use their results:
>>> operations = parser.operations_from_names(operation_names)
>>> results = parser.get_results(operations)
>>> docs = parser.parse_from_results(results)
|
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
return False
| null |
__repr__
|
"""Text representation for StarRocks Vector Store, prints backends, username
and schemas. Easy to use with `str(StarRocks())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f'\x1b[92m\x1b[1m{self.config.database}.{self.config.table} @ '
_repr += f'{self.config.host}:{self.config.port}\x1b[0m\n\n'
_repr += f"""[1musername: {self.config.username}[0m
Table Schema:
"""
width = 25
fields = 3
_repr += '-' * (width * fields + 1) + '\n'
columns = ['name', 'type', 'key']
_repr += f'|\x1b[94m{columns[0]:24s}\x1b[0m|\x1b[96m{columns[1]:24s}'
_repr += f'\x1b[0m|\x1b[96m{columns[2]:24s}\x1b[0m|\n'
_repr += '-' * (width * fields + 1) + '\n'
q_str = f'DESC {self.config.database}.{self.config.table}'
debug_output(q_str)
rs = get_named_result(self.connection, q_str)
for r in rs:
_repr += f"|\x1b[94m{r['Field']:24s}\x1b[0m|\x1b[96m{r['Type']:24s}"
_repr += f"\x1b[0m|\x1b[96m{r['Key']:24s}\x1b[0m|\n"
_repr += '-' * (width * fields + 1) + '\n'
return _repr
|
def __repr__(self) ->str:
"""Text representation for StarRocks Vector Store, prints backends, username
and schemas. Easy to use with `str(StarRocks())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f'\x1b[92m\x1b[1m{self.config.database}.{self.config.table} @ '
_repr += f'{self.config.host}:{self.config.port}\x1b[0m\n\n'
_repr += (
f'\x1b[1musername: {self.config.username}\x1b[0m\n\nTable Schema:\n')
width = 25
fields = 3
_repr += '-' * (width * fields + 1) + '\n'
columns = ['name', 'type', 'key']
_repr += f'|\x1b[94m{columns[0]:24s}\x1b[0m|\x1b[96m{columns[1]:24s}'
_repr += f'\x1b[0m|\x1b[96m{columns[2]:24s}\x1b[0m|\n'
_repr += '-' * (width * fields + 1) + '\n'
q_str = f'DESC {self.config.database}.{self.config.table}'
debug_output(q_str)
rs = get_named_result(self.connection, q_str)
for r in rs:
_repr += f"|\x1b[94m{r['Field']:24s}\x1b[0m|\x1b[96m{r['Type']:24s}"
_repr += f"\x1b[0m|\x1b[96m{r['Key']:24s}\x1b[0m|\n"
_repr += '-' * (width * fields + 1) + '\n'
return _repr
|
Text representation for StarRocks Vector Store, prints backends, username
and schemas. Easy to use with `str(StarRocks())`
Returns:
repr: string to show connection info and data schema
|
marshal_spec
|
"""Convert the yaml or json serialized spec to a dict.
Args:
txt: The yaml or json serialized spec.
Returns:
dict: The spec as a dict.
"""
try:
return json.loads(txt)
except json.JSONDecodeError:
return yaml.safe_load(txt)
|
def marshal_spec(txt: str) ->dict:
"""Convert the yaml or json serialized spec to a dict.
Args:
txt: The yaml or json serialized spec.
Returns:
dict: The spec as a dict.
"""
try:
return json.loads(txt)
except json.JSONDecodeError:
return yaml.safe_load(txt)
|
Convert the yaml or json serialized spec to a dict.
Args:
txt: The yaml or json serialized spec.
Returns:
dict: The spec as a dict.
|
test_fastembed_embedding_query
|
"""Test fastembed embeddings for query."""
document = 'foo bar'
embedding = FastEmbedEmbeddings(model_name=model_name, max_length=max_length)
output = embedding.embed_query(document)
assert len(output) == 384
|
@pytest.mark.parametrize('model_name', [
'sentence-transformers/all-MiniLM-L6-v2', 'BAAI/bge-small-en-v1.5'])
@pytest.mark.parametrize('max_length', [50, 512])
def test_fastembed_embedding_query(model_name: str, max_length: int) ->None:
"""Test fastembed embeddings for query."""
document = 'foo bar'
embedding = FastEmbedEmbeddings(model_name=model_name, max_length=
max_length)
output = embedding.embed_query(document)
assert len(output) == 384
|
Test fastembed embeddings for query.
|
test_passthrough_assign_schema
|
retriever = FakeRetriever()
prompt = PromptTemplate.from_template('{context} {question}')
fake_llm = FakeListLLM(responses=['a'])
seq_w_assign: Runnable = RunnablePassthrough.assign(context=itemgetter(
'question') | retriever) | prompt | fake_llm
assert seq_w_assign.input_schema.schema() == {'properties': {'question': {
'title': 'Question', 'type': 'string'}}, 'title':
'RunnableSequenceInput', 'type': 'object'}
assert seq_w_assign.output_schema.schema() == {'title': 'FakeListLLMOutput',
'type': 'string'}
invalid_seq_w_assign: Runnable = RunnablePassthrough.assign(context=
itemgetter('question') | retriever) | fake_llm
assert invalid_seq_w_assign.input_schema.schema() == {'properties': {
'question': {'title': 'Question'}}, 'title':
'RunnableParallel<context>Input', 'type': 'object'}
|
def test_passthrough_assign_schema() ->None:
retriever = FakeRetriever()
prompt = PromptTemplate.from_template('{context} {question}')
fake_llm = FakeListLLM(responses=['a'])
seq_w_assign: Runnable = RunnablePassthrough.assign(context=itemgetter(
'question') | retriever) | prompt | fake_llm
assert seq_w_assign.input_schema.schema() == {'properties': {'question':
{'title': 'Question', 'type': 'string'}}, 'title':
'RunnableSequenceInput', 'type': 'object'}
assert seq_w_assign.output_schema.schema() == {'title':
'FakeListLLMOutput', 'type': 'string'}
invalid_seq_w_assign: Runnable = RunnablePassthrough.assign(context=
itemgetter('question') | retriever) | fake_llm
assert invalid_seq_w_assign.input_schema.schema() == {'properties': {
'question': {'title': 'Question'}}, 'title':
'RunnableParallel<context>Input', 'type': 'object'}
| null |
pii_callback
|
return self.on_after_pii.__func__ is not BaseModerationCallbackHandler.on_after_pii
|
@property
def pii_callback(self) ->bool:
return (self.on_after_pii.__func__ is not BaseModerationCallbackHandler
.on_after_pii)
| null |
test_api_key_is_readable
|
"""Test that the real secret value of the API key can be read"""
azure_chat = request.getfixturevalue(fixture_name)
assert azure_chat.endpoint_api_key.get_secret_value() == 'my-api-key'
|
def test_api_key_is_readable(self, fixture_name: str, request: FixtureRequest
) ->None:
"""Test that the real secret value of the API key can be read"""
azure_chat = request.getfixturevalue(fixture_name)
assert azure_chat.endpoint_api_key.get_secret_value() == 'my-api-key'
|
Test that the real secret value of the API key can be read
|
input_keys
|
"""Get input keys. Input refers to user input here."""
return ['input']
|
@property
def input_keys(self) ->List[str]:
"""Get input keys. Input refers to user input here."""
return ['input']
|
Get input keys. Input refers to user input here.
|
__For_helper
|
self.fill(fill)
self.dispatch(t.target)
self.write(' in ')
self.dispatch(t.iter)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill('else')
self.enter()
self.dispatch(t.orelse)
self.leave()
|
def __For_helper(self, fill, t):
self.fill(fill)
self.dispatch(t.target)
self.write(' in ')
self.dispatch(t.iter)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill('else')
self.enter()
self.dispatch(t.orelse)
self.leave()
| null |
visit_operation
|
args = [arg.accept(self) for arg in operation.arguments]
return {'bool': {self._format_func(operation.operator): args}}
|
def visit_operation(self, operation: Operation) ->Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {'bool': {self._format_func(operation.operator): args}}
| null |
default_call_api
|
method = _name_to_call_map[name]['method']
url = _name_to_call_map[name]['url']
path_params = fn_args.pop('path_params', {})
url = _format_url(url, path_params)
if 'data' in fn_args and isinstance(fn_args['data'], dict):
fn_args['data'] = json.dumps(fn_args['data'])
_kwargs = {**fn_args, **kwargs}
if headers is not None:
if 'headers' in _kwargs:
_kwargs['headers'].update(headers)
else:
_kwargs['headers'] = headers
if params is not None:
if 'params' in _kwargs:
_kwargs['params'].update(params)
else:
_kwargs['params'] = params
return requests.request(method, url, **_kwargs)
|
def default_call_api(name: str, fn_args: dict, headers: Optional[dict]=None,
params: Optional[dict]=None, **kwargs: Any) ->Any:
method = _name_to_call_map[name]['method']
url = _name_to_call_map[name]['url']
path_params = fn_args.pop('path_params', {})
url = _format_url(url, path_params)
if 'data' in fn_args and isinstance(fn_args['data'], dict):
fn_args['data'] = json.dumps(fn_args['data'])
_kwargs = {**fn_args, **kwargs}
if headers is not None:
if 'headers' in _kwargs:
_kwargs['headers'].update(headers)
else:
_kwargs['headers'] = headers
if params is not None:
if 'params' in _kwargs:
_kwargs['params'].update(params)
else:
_kwargs['params'] = params
return requests.request(method, url, **_kwargs)
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'chat_models', 'anthropic']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'chat_models', 'anthropic']
|
Get the namespace of the langchain object.
|
from_llm_and_tools
|
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(tools, ai_prefix=ai_prefix, human_prefix=
human_prefix, prefix=prefix, suffix=suffix, format_instructions=
format_instructions, input_variables=input_variables)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser(ai_prefix=
ai_prefix)
return cls(llm_chain=llm_chain, allowed_tools=tool_names, ai_prefix=
ai_prefix, output_parser=_output_parser, **kwargs)
|
@classmethod
def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[
BaseTool], callback_manager: Optional[BaseCallbackManager]=None,
output_parser: Optional[AgentOutputParser]=None, prefix: str=PREFIX,
suffix: str=SUFFIX, format_instructions: str=FORMAT_INSTRUCTIONS,
ai_prefix: str='AI', human_prefix: str='Human', input_variables:
Optional[List[str]]=None, **kwargs: Any) ->Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(tools, ai_prefix=ai_prefix, human_prefix=
human_prefix, prefix=prefix, suffix=suffix, format_instructions=
format_instructions, input_variables=input_variables)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
callback_manager)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser(ai_prefix
=ai_prefix)
return cls(llm_chain=llm_chain, allowed_tools=tool_names, ai_prefix=
ai_prefix, output_parser=_output_parser, **kwargs)
|
Construct an agent from an LLM and tools.
|
similarity_search_with_score
|
"""Return typesense documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 10.
Minimum 10 results would be returned.
filter: typesense filter_by expression to filter documents on
Returns:
List of Documents most similar to the query and score for each
"""
embedded_query = [str(x) for x in self._embedding.embed_query(query)]
query_obj = {'q': '*', 'vector_query':
f"vec:([{','.join(embedded_query)}], k:{k})", 'filter_by': filter,
'collection': self._typesense_collection_name}
docs = []
response = self._typesense_client.multi_search.perform({'searches': [
query_obj]}, {})
for hit in response['results'][0]['hits']:
document = hit['document']
metadata = document['metadata']
text = document[self._text_key]
score = hit['vector_distance']
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
|
def similarity_search_with_score(self, query: str, k: int=10, filter:
Optional[str]='') ->List[Tuple[Document, float]]:
"""Return typesense documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 10.
Minimum 10 results would be returned.
filter: typesense filter_by expression to filter documents on
Returns:
List of Documents most similar to the query and score for each
"""
embedded_query = [str(x) for x in self._embedding.embed_query(query)]
query_obj = {'q': '*', 'vector_query':
f"vec:([{','.join(embedded_query)}], k:{k})", 'filter_by': filter,
'collection': self._typesense_collection_name}
docs = []
response = self._typesense_client.multi_search.perform({'searches': [
query_obj]}, {})
for hit in response['results'][0]['hits']:
document = hit['document']
metadata = document['metadata']
text = document[self._text_key]
score = hit['vector_distance']
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
|
Return typesense documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 10.
Minimum 10 results would be returned.
filter: typesense filter_by expression to filter documents on
Returns:
List of Documents most similar to the query and score for each
|
_call
|
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
if self.streaming:
completion = ''
for chunk in self._stream(prompt=prompt, stop=stop, run_manager=
run_manager, **kwargs):
completion += chunk.text
return completion
return self._prepare_input_and_invoke(prompt=prompt, stop=stop, **kwargs)
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
if self.streaming:
completion = ''
for chunk in self._stream(prompt=prompt, stop=stop, run_manager=
run_manager, **kwargs):
completion += chunk.text
return completion
return self._prepare_input_and_invoke(prompt=prompt, stop=stop, **kwargs)
|
Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
|
_load_refine_documents_chain
|
if 'initial_llm_chain' in config:
initial_llm_chain_config = config.pop('initial_llm_chain')
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif 'initial_llm_chain_path' in config:
initial_llm_chain = load_chain(config.pop('initial_llm_chain_path'))
else:
raise ValueError(
'One of `initial_llm_chain` or `initial_llm_chain_path` must be present.'
)
if 'refine_llm_chain' in config:
refine_llm_chain_config = config.pop('refine_llm_chain')
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif 'refine_llm_chain_path' in config:
refine_llm_chain = load_chain(config.pop('refine_llm_chain_path'))
else:
raise ValueError(
'One of `refine_llm_chain` or `refine_llm_chain_path` must be present.'
)
if 'document_prompt' in config:
prompt_config = config.pop('document_prompt')
document_prompt = load_prompt_from_config(prompt_config)
elif 'document_prompt_path' in config:
document_prompt = load_prompt(config.pop('document_prompt_path'))
return RefineDocumentsChain(initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain, document_prompt=document_prompt, **
config)
|
def _load_refine_documents_chain(config: dict, **kwargs: Any
) ->RefineDocumentsChain:
if 'initial_llm_chain' in config:
initial_llm_chain_config = config.pop('initial_llm_chain')
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif 'initial_llm_chain_path' in config:
initial_llm_chain = load_chain(config.pop('initial_llm_chain_path'))
else:
raise ValueError(
'One of `initial_llm_chain` or `initial_llm_chain_path` must be present.'
)
if 'refine_llm_chain' in config:
refine_llm_chain_config = config.pop('refine_llm_chain')
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif 'refine_llm_chain_path' in config:
refine_llm_chain = load_chain(config.pop('refine_llm_chain_path'))
else:
raise ValueError(
'One of `refine_llm_chain` or `refine_llm_chain_path` must be present.'
)
if 'document_prompt' in config:
prompt_config = config.pop('document_prompt')
document_prompt = load_prompt_from_config(prompt_config)
elif 'document_prompt_path' in config:
document_prompt = load_prompt(config.pop('document_prompt_path'))
return RefineDocumentsChain(initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain, document_prompt=document_prompt,
**config)
| null |
test_pairwise_string_comparison_chain
|
llm = FakeLLM(queries={'a': """The values are the same.
[[C]]""", 'b':
"""A is clearly better than b.
[[A]]""", 'c':
"""B is clearly better than a.
[[B]]"""}, sequential_responses=True)
chain = PairwiseStringEvalChain.from_llm(llm=llm)
res = chain.evaluate_string_pairs(prediction='I like pie.', prediction_b=
'I love pie.', input='What is your favorite food?')
assert res['value'] is None
assert res['score'] == 0.5
assert res['reasoning'] == """The values are the same.
[[C]]"""
res = chain.evaluate_string_pairs(prediction='I like pie.', prediction_b=
'I like pie.', input='What is your favorite food?')
assert res['value'] == 'A'
assert res['score'] == 1
with pytest.warns(UserWarning, match=re.escape(chain._skip_reference_warning)):
res = chain.evaluate_string_pairs(prediction='I like pie.',
prediction_b='I hate pie.', input='What is your favorite food?',
reference='I enjoy pie.')
assert res['value'] == 'B'
assert res['score'] == 0
|
def test_pairwise_string_comparison_chain() ->None:
llm = FakeLLM(queries={'a': 'The values are the same.\n[[C]]', 'b':
"""A is clearly better than b.
[[A]]""", 'c':
"""B is clearly better than a.
[[B]]"""}, sequential_responses=True)
chain = PairwiseStringEvalChain.from_llm(llm=llm)
res = chain.evaluate_string_pairs(prediction='I like pie.',
prediction_b='I love pie.', input='What is your favorite food?')
assert res['value'] is None
assert res['score'] == 0.5
assert res['reasoning'] == 'The values are the same.\n[[C]]'
res = chain.evaluate_string_pairs(prediction='I like pie.',
prediction_b='I like pie.', input='What is your favorite food?')
assert res['value'] == 'A'
assert res['score'] == 1
with pytest.warns(UserWarning, match=re.escape(chain.
_skip_reference_warning)):
res = chain.evaluate_string_pairs(prediction='I like pie.',
prediction_b='I hate pie.', input='What is your favorite food?',
reference='I enjoy pie.')
assert res['value'] == 'B'
assert res['score'] == 0
| null |
model
|
return Vicuna(llm=FakeLLM())
|
@pytest.fixture
def model() ->Vicuna:
return Vicuna(llm=FakeLLM())
| null |
on_retry
|
"""Run on a retry event."""
|
def on_retry(self, retry_state: RetryCallState, *, run_id: UUID,
parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
"""Run on a retry event."""
|
Run on a retry event.
|
test_import_class
|
"""Test that the class can be imported."""
module_name = 'langchain_community.chat_models.huggingface'
class_name = 'ChatHuggingFace'
module = import_module(module_name)
assert hasattr(module, class_name)
|
def test_import_class() ->None:
"""Test that the class can be imported."""
module_name = 'langchain_community.chat_models.huggingface'
class_name = 'ChatHuggingFace'
module = import_module(module_name)
assert hasattr(module, class_name)
|
Test that the class can be imported.
|
_api_url
|
return self.api_url or self._default_api_url
|
@property
def _api_url(self) ->str:
return self.api_url or self._default_api_url
| null |
fake_vectorstore
|
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents([Document(page_content='test', metadata={'foo':
'bar'})], ids=['test'])
return vectorstore
|
@pytest.fixture()
def fake_vectorstore() ->InMemoryVectorstoreWithSearch:
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents([Document(page_content='test', metadata={
'foo': 'bar'})], ids=['test'])
return vectorstore
| null |
test_promptlayer_openai_stop_valid
|
"""Test promptlayer openai stop logic on valid configuration."""
query = 'write an ordered list of five items'
first_llm = PromptLayerOpenAI(stop='3', temperature=0)
first_output = first_llm(query)
second_llm = PromptLayerOpenAI(temperature=0)
second_output = second_llm(query, stop=['3'])
assert first_output == second_output
|
def test_promptlayer_openai_stop_valid() ->None:
"""Test promptlayer openai stop logic on valid configuration."""
query = 'write an ordered list of five items'
first_llm = PromptLayerOpenAI(stop='3', temperature=0)
first_output = first_llm(query)
second_llm = PromptLayerOpenAI(temperature=0)
second_output = second_llm(query, stop=['3'])
assert first_output == second_output
|
Test promptlayer openai stop logic on valid configuration.
|
invoke
|
return self._call_with_config(self._invoke, input, config, **kwargs)
|
def invoke(self, input: Dict[str, Any], config: Optional[RunnableConfig]=
None, **kwargs: Any) ->Dict[str, Any]:
return self._call_with_config(self._invoke, input, config, **kwargs)
| null |
run
|
if mode == 'get_games_details':
return self.details_of_games(game)
elif mode == 'get_recommended_games':
return self.recommended_games(game)
else:
raise ValueError(f'Invalid mode {mode} for Steam API.')
|
def run(self, mode: str, game: str) ->str:
if mode == 'get_games_details':
return self.details_of_games(game)
elif mode == 'get_recommended_games':
return self.recommended_games(game)
else:
raise ValueError(f'Invalid mode {mode} for Steam API.')
| null |
_process_response
|
if version == '1.0':
text = response
else:
text = response['response']
if stop:
text = enforce_stop_tokens(text, stop)
return ''.join(text)
|
@staticmethod
def _process_response(response: Any, stop: Optional[List[str]], version:
Optional[str]) ->str:
if version == '1.0':
text = response
else:
text = response['response']
if stop:
text = enforce_stop_tokens(text, stop)
return ''.join(text)
| null |
update
|
"""Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
f'Momento only supports caching of normal LLM generations, got {type(gen)}'
)
key = self.__key(prompt, llm_string)
value = _dump_generations_to_json(return_val)
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f'Unexpected response: {set_response}')
|
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
) ->None:
"""Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
f'Momento only supports caching of normal LLM generations, got {type(gen)}'
)
key = self.__key(prompt, llm_string)
value = _dump_generations_to_json(return_val)
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f'Unexpected response: {set_response}')
|
Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
|
__init__
|
import psycopg
from psycopg.rows import dict_row
try:
self.connection = psycopg.connect(connection_string)
self.cursor = self.connection.cursor(row_factory=dict_row)
except psycopg.OperationalError as error:
logger.error(error)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
|
def __init__(self, session_id: str, connection_string: str=
DEFAULT_CONNECTION_STRING, table_name: str='message_store'):
import psycopg
from psycopg.rows import dict_row
try:
self.connection = psycopg.connect(connection_string)
self.cursor = self.connection.cursor(row_factory=dict_row)
except psycopg.OperationalError as error:
logger.error(error)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
| null |
get_gmail_credentials
|
"""Get credentials."""
Request, Credentials = import_google()
InstalledAppFlow = import_installed_app_flow()
creds = None
scopes = scopes or DEFAULT_SCOPES
token_file = token_file or DEFAULT_CREDS_TOKEN_FILE
client_secrets_file = client_secrets_file or DEFAULT_CLIENT_SECRETS_FILE
if os.path.exists(token_file):
creds = Credentials.from_authorized_user_file(token_file, scopes)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file,
scopes)
creds = flow.run_local_server(port=0)
with open(token_file, 'w') as token:
token.write(creds.to_json())
return creds
|
def get_gmail_credentials(token_file: Optional[str]=None,
client_secrets_file: Optional[str]=None, scopes: Optional[List[str]]=None
) ->Credentials:
"""Get credentials."""
Request, Credentials = import_google()
InstalledAppFlow = import_installed_app_flow()
creds = None
scopes = scopes or DEFAULT_SCOPES
token_file = token_file or DEFAULT_CREDS_TOKEN_FILE
client_secrets_file = client_secrets_file or DEFAULT_CLIENT_SECRETS_FILE
if os.path.exists(token_file):
creds = Credentials.from_authorized_user_file(token_file, scopes)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
client_secrets_file, scopes)
creds = flow.run_local_server(port=0)
with open(token_file, 'w') as token:
token.write(creds.to_json())
return creds
|
Get credentials.
|
similarity_search
|
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(query, k=k,
search_params=search_params)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search(self, query: str, k: int=4, search_params: Optional[
dict]=None, timeout: Optional[int]=None, **kwargs: Any) ->List[Document]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(query, k=k,
search_params=search_params)
return [doc for doc, _ in docs_and_scores]
|
Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
|
_get_filtered_args
|
"""Get the arguments from a function's signature."""
schema = inferred_model.schema()['properties']
valid_keys = signature(func).parameters
return {k: schema[k] for k in valid_keys if k not in ('run_manager',
'callbacks')}
|
def _get_filtered_args(inferred_model: Type[BaseModel], func: Callable) ->dict:
"""Get the arguments from a function's signature."""
schema = inferred_model.schema()['properties']
valid_keys = signature(func).parameters
return {k: schema[k] for k in valid_keys if k not in ('run_manager',
'callbacks')}
|
Get the arguments from a function's signature.
|
_completion_with_retry
|
return self.client.create(**kwargs)
|
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
return self.client.create(**kwargs)
| null |
on_tool_start
|
"""Do nothing when tool starts."""
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
kwargs: Any) ->None:
"""Do nothing when tool starts."""
|
Do nothing when tool starts.
|
on_tool_end
|
"""Run when tool ends running."""
aim = import_aim()
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = {'action': 'on_tool_end'}
resp.update(self.get_custom_callback_meta())
self._run.track(aim.Text(output), name='on_tool_end', context=resp)
|
def on_tool_end(self, output: str, **kwargs: Any) ->None:
"""Run when tool ends running."""
aim = import_aim()
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = {'action': 'on_tool_end'}
resp.update(self.get_custom_callback_meta())
self._run.track(aim.Text(output), name='on_tool_end', context=resp)
|
Run when tool ends running.
|
_import_myscale_settings
|
from langchain_community.vectorstores.myscale import MyScaleSettings
return MyScaleSettings
|
def _import_myscale_settings() ->Any:
from langchain_community.vectorstores.myscale import MyScaleSettings
return MyScaleSettings
| null |
test_api_key_masked_when_passed_from_env
|
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv('ANYSCALE_API_KEY', 'secret-api-key')
llm = Anyscale(anyscale_api_base='test', model_name='test')
print(llm.anyscale_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
@pytest.mark.requires('openai')
def test_api_key_masked_when_passed_from_env(monkeypatch: MonkeyPatch,
capsys: CaptureFixture) ->None:
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv('ANYSCALE_API_KEY', 'secret-api-key')
llm = Anyscale(anyscale_api_base='test', model_name='test')
print(llm.anyscale_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
Test initialization with an API key provided via an env variable
|
set
|
"""Set entity value in store."""
pass
|
@abstractmethod
def set(self, key: str, value: Optional[str]) ->None:
"""Set entity value in store."""
pass
|
Set entity value in store.
|
__init__
|
if key is not None:
kwargs[key] = value
super().__init__(keys={k: (_coerce_set_value(v) if v is not None else None) for
k, v in kwargs.items()}, prefix=prefix)
|
def __init__(self, key: Optional[str]=None, value: Optional[SetValue]=None,
prefix: str='', **kwargs: SetValue):
if key is not None:
kwargs[key] = value
super().__init__(keys={k: (_coerce_set_value(v) if v is not None else
None) for k, v in kwargs.items()}, prefix=prefix)
| null |
test_api_key_masked_when_passed_via_constructor
|
chat = ChatGoogleGenerativeAI(model='gemini-nano', google_api_key=
'secret-api-key')
print(chat.google_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
) ->None:
chat = ChatGoogleGenerativeAI(model='gemini-nano', google_api_key=
'secret-api-key')
print(chat.google_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
| null |
test_refresh_schema
|
mock_client.return_value = MagicMock()
huge_graph = HugeGraph(self.username, self.password, self.address, self.
port, self.graph)
huge_graph.refresh_schema()
self.assertNotEqual(huge_graph.get_schema, '')
|
@patch('hugegraph.connection.PyHugeGraph')
def test_refresh_schema(self, mock_client: Any) ->None:
mock_client.return_value = MagicMock()
huge_graph = HugeGraph(self.username, self.password, self.address, self
.port, self.graph)
huge_graph.refresh_schema()
self.assertNotEqual(huge_graph.get_schema, '')
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.