method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
config_specs
|
from langchain_core.beta.runnables.context import CONTEXT_CONFIG_PREFIX, CONTEXT_CONFIG_SUFFIX_SET
specs = get_unique_config_specs(spec for step in [self.default] + [r for _,
r in self.branches] + [r for r, _ in self.branches] for spec in step.
config_specs)
if any(s.id.startswith(CONTEXT_CONFIG_PREFIX) and s.id.endswith(
CONTEXT_CONFIG_SUFFIX_SET) for s in specs):
raise ValueError('RunnableBranch cannot contain context setters.')
return specs
|
@property
def config_specs(self) ->List[ConfigurableFieldSpec]:
    """Collect the unique config specs of the default branch, every branch
    runnable, and every branch condition; reject context setters."""
    from langchain_core.beta.runnables.context import CONTEXT_CONFIG_PREFIX, CONTEXT_CONFIG_SUFFIX_SET
    steps = [self.default]
    steps.extend(runnable for _, runnable in self.branches)
    steps.extend(condition for condition, _ in self.branches)
    specs = get_unique_config_specs(
        spec for step in steps for spec in step.config_specs)
    # Context setters cannot be scoped correctly inside a branch.
    has_setter = any(
        spec.id.startswith(CONTEXT_CONFIG_PREFIX)
        and spec.id.endswith(CONTEXT_CONFIG_SUFFIX_SET)
        for spec in specs)
    if has_setter:
        raise ValueError('RunnableBranch cannot contain context setters.')
    return specs
| null |
print_ascii
|
print(self.draw_ascii())
|
def print_ascii(self) ->None:
    """Render the graph as ASCII art and write it to stdout."""
    ascii_art = self.draw_ascii()
    print(ascii_art)
| null |
_prepare_tensor
|
"""Prepare an input data structure."""
t = grpcclient.InferInput(name, input_data.shape, np_to_triton_dtype(
input_data.dtype))
t.set_data_from_numpy(input_data)
return t
|
def _prepare_tensor(self, name: str, input_data: np.ndarray
    ) ->grpcclient.InferInput:
    """Wrap a numpy array in a named Triton ``InferInput`` tensor."""
    tensor = grpcclient.InferInput(
        name, input_data.shape, np_to_triton_dtype(input_data.dtype))
    tensor.set_data_from_numpy(input_data)
    return tensor
|
Prepare an input data structure.
|
test_get_all
|
start_time = time.time()
contract_address = '0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9'
result = BlockchainDocumentLoader(contract_address=contract_address,
blockchainType=BlockchainType.POLYGON_MAINNET, api_key=os.environ[
'ALCHEMY_API_KEY'], startToken='100', get_all_tokens=True).load()
end_time = time.time()
print(
f'Tokens returned for {contract_address} contract: {len(result)} in {end_time - start_time} seconds'
)
assert len(result) > 0, 'No NFTs returned'
|
@pytest.mark.skipif(not alchemyKeySet, reason='Alchemy API key not provided.')
def test_get_all() ->None:
    """Fetch every NFT of a Polygon contract and check something came back."""
    contract_address = '0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9'
    started = time.time()
    result = BlockchainDocumentLoader(
        contract_address=contract_address,
        blockchainType=BlockchainType.POLYGON_MAINNET,
        api_key=os.environ['ALCHEMY_API_KEY'],
        startToken='100',
        get_all_tokens=True,
    ).load()
    elapsed = time.time() - started
    print(
        f'Tokens returned for {contract_address} contract: '
        f'{len(result)} in {elapsed} seconds'
    )
    assert len(result) > 0, 'No NFTs returned'
| null |
test_cassandra_with_score
|
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vectorstore_from_texts(texts, metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [Document(page_content='foo', metadata={'page': '0.0'}),
Document(page_content='bar', metadata={'page': '1.0'}), Document(
page_content='baz', metadata={'page': '2.0'})]
assert scores[0] > scores[1] > scores[2]
|
def test_cassandra_with_score() ->None:
    """Test end to end construction and search with scores and IDs."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': index} for index in range(len(texts))]
    docsearch = _vectorstore_from_texts(texts, metadatas=metadatas)
    output = docsearch.similarity_search_with_score('foo', k=3)
    docs = [doc for doc, _ in output]
    scores = [score for _, score in output]
    expected_docs = [
        Document(page_content='foo', metadata={'page': '0.0'}),
        Document(page_content='bar', metadata={'page': '1.0'}),
        Document(page_content='baz', metadata={'page': '2.0'}),
    ]
    assert docs == expected_docs
    # Scores must be strictly descending: 'foo' is the exact query match.
    assert scores[0] > scores[1] > scores[2]
|
Test end to end construction and search with scores and IDs.
|
_llm_type
|
"""Return type of llm."""
return 'xinference'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    llm_type = 'xinference'
    return llm_type
|
Return type of llm.
|
__getitem__
|
"""Use to index into the chat template."""
if isinstance(index, slice):
start, stop, step = index.indices(len(self.messages))
messages = self.messages[start:stop:step]
return ChatPromptTemplate.from_messages(messages)
else:
return self.messages[index]
|
def __getitem__(self, index: Union[int, slice]) ->Union[MessageLike,
    ChatPromptTemplate]:
    """Index into the chat template: an int returns one message, a slice
    returns a new ChatPromptTemplate over the selected messages."""
    if not isinstance(index, slice):
        return self.messages[index]
    start, stop, step = index.indices(len(self.messages))
    selected = self.messages[start:stop:step]
    return ChatPromptTemplate.from_messages(selected)
|
Use to index into the chat template.
|
test_exclude_types
|
"""Test exclude types from schema."""
url = os.environ.get('NEO4J_URI')
username = os.environ.get('NEO4J_USERNAME')
password = os.environ.get('NEO4J_PASSWORD')
assert url is not None
assert username is not None
assert password is not None
graph = Neo4jGraph(url=url, username=username, password=password)
graph.query('MATCH (n) DETACH DELETE n')
graph.query(
"CREATE (a:Actor {name:'Bruce Willis'})-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})<-[:DIRECTED]-(p:Person {name:'John'})"
)
graph.refresh_schema()
chain = GraphCypherQAChain.from_llm(OpenAI(temperature=0), graph=graph,
exclude_types=['Person', 'DIRECTED'])
expected_schema = """Node properties are the following:
Movie {title: STRING},Actor {name: STRING}
Relationship properties are the following:
The relationships are the following:
(:Actor)-[:ACTED_IN]->(:Movie)"""
assert chain.graph_schema == expected_schema
|
def test_exclude_types() ->None:
    """Test exclude types from schema."""
    url = os.environ.get('NEO4J_URI')
    username = os.environ.get('NEO4J_USERNAME')
    password = os.environ.get('NEO4J_PASSWORD')
    assert url is not None
    assert username is not None
    assert password is not None
    graph = Neo4jGraph(url=url, username=username, password=password)
    # Start from an empty database, then seed one small graph.
    graph.query('MATCH (n) DETACH DELETE n')
    graph.query(
        "CREATE (a:Actor {name:'Bruce Willis'})-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})<-[:DIRECTED]-(p:Person {name:'John'})"
    )
    graph.refresh_schema()
    chain = GraphCypherQAChain.from_llm(
        OpenAI(temperature=0),
        graph=graph,
        exclude_types=['Person', 'DIRECTED'],
    )
    # Person nodes and DIRECTED relationships must be absent from the schema.
    expected_schema = """Node properties are the following:
Movie {title: STRING},Actor {name: STRING}
Relationship properties are the following:
The relationships are the following:
(:Actor)-[:ACTED_IN]->(:Movie)"""
    assert chain.graph_schema == expected_schema
|
Test exclude types from schema.
|
_create_index_if_not_exists
|
"""Create index if it does not exist."""
from momento.requests.vector_index import SimilarityMetric
from momento.responses.vector_index import CreateIndex
similarity_metric = None
if self.distance_strategy == DistanceStrategy.COSINE:
similarity_metric = SimilarityMetric.COSINE_SIMILARITY
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
similarity_metric = SimilarityMetric.INNER_PRODUCT
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
similarity_metric = SimilarityMetric.EUCLIDEAN_SIMILARITY
else:
logger.error(f'Distance strategy {self.distance_strategy} not implemented.'
)
raise ValueError(
f'Distance strategy {self.distance_strategy} not implemented.')
response = self._client.create_index(self.index_name, num_dimensions,
similarity_metric)
if isinstance(response, CreateIndex.Success):
return True
elif isinstance(response, CreateIndex.IndexAlreadyExists):
return False
elif isinstance(response, CreateIndex.Error):
logger.error(f'Error creating index: {response.inner_exception}')
raise response.inner_exception
else:
logger.error(f'Unexpected response: {response}')
raise Exception(f'Unexpected response: {response}')
|
def _create_index_if_not_exists(self, num_dimensions: int) ->bool:
    """Create the vector index if missing.

    Returns True when the index was created, False when it already existed.
    Raises ValueError for an unsupported distance strategy, and re-raises
    the service error for a failed creation.
    """
    from momento.requests.vector_index import SimilarityMetric
    from momento.responses.vector_index import CreateIndex
    metric_by_strategy = {
        DistanceStrategy.COSINE: SimilarityMetric.COSINE_SIMILARITY,
        DistanceStrategy.MAX_INNER_PRODUCT: SimilarityMetric.INNER_PRODUCT,
        DistanceStrategy.EUCLIDEAN_DISTANCE:
            SimilarityMetric.EUCLIDEAN_SIMILARITY,
    }
    similarity_metric = metric_by_strategy.get(self.distance_strategy)
    if similarity_metric is None:
        logger.error(
            f'Distance strategy {self.distance_strategy} not implemented.')
        raise ValueError(
            f'Distance strategy {self.distance_strategy} not implemented.')
    response = self._client.create_index(
        self.index_name, num_dimensions, similarity_metric)
    if isinstance(response, CreateIndex.Success):
        return True
    if isinstance(response, CreateIndex.IndexAlreadyExists):
        return False
    if isinstance(response, CreateIndex.Error):
        logger.error(f'Error creating index: {response.inner_exception}')
        raise response.inner_exception
    logger.error(f'Unexpected response: {response}')
    raise Exception(f'Unexpected response: {response}')
|
Create index if it does not exist.
|
test_telegram_channel_loader_parsing
|
"""Test TelegramChatApiLoader."""
file_path = Path(__file__).parent / 'test_docs' / 'telegram_channel.json'
loader = TelegramChatApiLoader(file_path=str(file_path))
docs = loader.load()
assert len(docs) == 1
print(docs[0].page_content)
assert docs[0].page_content == """Hello, world!.
LLMs are awesome! Langchain is great. Telegram is the best!."""
|
@pytest.mark.requires('telethon', 'pandas')
def test_telegram_channel_loader_parsing() ->None:
"""Test TelegramChatApiLoader."""
file_path = Path(__file__).parent / 'test_docs' / 'telegram_channel.json'
loader = TelegramChatApiLoader(file_path=str(file_path))
docs = loader.load()
assert len(docs) == 1
print(docs[0].page_content)
assert docs[0].page_content == """Hello, world!.
LLMs are awesome! Langchain is great. Telegram is the best!."""
|
Test TelegramChatApiLoader.
|
test_load_success
|
docs = api_client.load('HUNTER X HUNTER')
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=False)
|
def test_load_success(api_client: WikipediaAPIWrapper) ->None:
docs = api_client.load('HUNTER X HUNTER')
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=False)
| null |
_create_index
|
"""Create a index on the collection"""
from pymilvus import Collection, MilvusException
if isinstance(self.col, Collection) and self._get_index() is None:
try:
if self.index_params is None:
self.index_params = {'metric_type': 'L2', 'index_type': 'HNSW',
'params': {'M': 8, 'efConstruction': 64}}
try:
self.col.create_index(self._vector_field, index_params=self.
index_params, using=self.alias)
except MilvusException:
self.index_params = {'metric_type': 'L2', 'index_type':
'AUTOINDEX', 'params': {}}
self.col.create_index(self._vector_field, index_params=self.
index_params, using=self.alias)
logger.debug('Successfully created an index on collection: %s',
self.collection_name)
except MilvusException as e:
logger.error('Failed to create an index on collection: %s', self.
collection_name)
raise e
|
def _create_index(self) ->None:
    """Create a index on the collection"""
    from pymilvus import Collection, MilvusException
    # Nothing to do without a live collection, or if an index already exists.
    if not isinstance(self.col, Collection) or self._get_index() is not None:
        return
    try:
        if self.index_params is None:
            self.index_params = {
                'metric_type': 'L2',
                'index_type': 'HNSW',
                'params': {'M': 8, 'efConstruction': 64},
            }
        try:
            self.col.create_index(self._vector_field,
                index_params=self.index_params, using=self.alias)
        except MilvusException:
            # The chosen index type was rejected; fall back to AUTOINDEX.
            self.index_params = {'metric_type': 'L2', 'index_type':
                'AUTOINDEX', 'params': {}}
            self.col.create_index(self._vector_field,
                index_params=self.index_params, using=self.alias)
        logger.debug('Successfully created an index on collection: %s',
            self.collection_name)
    except MilvusException as e:
        logger.error('Failed to create an index on collection: %s',
            self.collection_name)
        raise e
|
Create a index on the collection
|
embed_query
|
try:
vals = json.loads(text)
assert len(vals) == self.dimension
return vals
except Exception:
print(f'[ParserEmbeddings] Returning a moot vector for "{text}"')
return [0.0] * self.dimension
|
def embed_query(self, text: str) ->List[float]:
    """Parse *text* as a JSON vector of length ``self.dimension``; on any
    failure print a notice and return an all-zero vector instead."""
    try:
        parsed = json.loads(text)
        assert len(parsed) == self.dimension
    except Exception:
        print(f'[ParserEmbeddings] Returning a moot vector for "{text}"')
        return [0.0] * self.dimension
    return parsed
| null |
add_documents
|
doc_ids = [str(uuid.uuid5(uuid.NAMESPACE_DNS, doc.metadata['source'])) for
doc in docs]
prop_docs = [Document(page_content=prop, metadata={id_key: doc_ids[i]}) for
i, props in enumerate(propositions) for prop in props if prop]
retriever.vectorstore.add_documents(prop_docs)
retriever.docstore.mset(list(zip(doc_ids, docs)))
|
def add_documents(retriever, propositions: Sequence[Sequence[str]], docs:
    Sequence[Document], id_key: str=DOCSTORE_ID_KEY):
    """Index proposition snippets in the vectorstore while storing the full
    source documents, keyed by a deterministic UUID, in the docstore."""
    doc_ids = []
    for doc in docs:
        # uuid5 of the source path makes re-ingestion idempotent per source.
        doc_ids.append(str(uuid.uuid5(uuid.NAMESPACE_DNS,
            doc.metadata['source'])))
    prop_docs = []
    for position, props in enumerate(propositions):
        for prop in props:
            if prop:
                prop_docs.append(Document(page_content=prop,
                    metadata={id_key: doc_ids[position]}))
    retriever.vectorstore.add_documents(prop_docs)
    retriever.docstore.mset(list(zip(doc_ids, docs)))
| null |
load_local
|
try:
import joblib
except ImportError:
raise ImportError(
'Could not import joblib, please install with `pip install joblib`.')
path = Path(folder_path)
vectorizer = joblib.load(path / f'{file_name}.joblib')
with open(path / f'{file_name}.pkl', 'rb') as f:
docs, tfidf_array = pickle.load(f)
return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array)
|
@classmethod
def load_local(cls, folder_path: str, file_name: str='tfidf_vectorizer'
    ) ->TFIDFRetriever:
    """Load a retriever from the joblib vectorizer and pickled docs/matrix
    files found under *folder_path*."""
    try:
        import joblib
    except ImportError:
        raise ImportError(
            'Could not import joblib, please install with `pip install joblib`.'
            )
    base = Path(folder_path)
    vectorizer = joblib.load(base / f'{file_name}.joblib')
    # NOTE(review): pickle.load can execute arbitrary code — only load files
    # previously written by save_local from a trusted location.
    with open(base / f'{file_name}.pkl', 'rb') as f:
        docs, tfidf_array = pickle.load(f)
    return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array)
| null |
convert_message_to_dict
|
"""Convert a message to a dict."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
else:
raise TypeError(f'Got unknown type {message}')
return message_dict
|
def convert_message_to_dict(message: BaseMessage) ->dict:
    """Convert a message to a dict."""
    # Resolve the role first; ChatMessage carries its own role string.
    if isinstance(message, ChatMessage):
        role = message.role
    elif isinstance(message, HumanMessage):
        role = 'user'
    elif isinstance(message, AIMessage):
        role = 'assistant'
    elif isinstance(message, SystemMessage):
        role = 'system'
    else:
        raise TypeError(f'Got unknown type {message}')
    message_dict: Dict[str, Any] = {'role': role, 'content': message.content}
    return message_dict
|
Convert a message to a dict.
|
_generate
|
functions = kwargs.get('functions', [])
if 'function_call' in kwargs:
functions = [fn for fn in functions if fn['name'] == kwargs[
'function_call']['name']]
if not functions:
raise ValueError(
'If "function_call" is specified, you must also pass a matching function in "functions".'
)
del kwargs['function_call']
elif not functions:
functions.append(DEFAULT_RESPONSE_FUNCTION)
system_message_prompt_template = SystemMessagePromptTemplate.from_template(self
.tool_system_prompt_template)
system_message = system_message_prompt_template.format(tools=json.dumps(
functions, indent=2))
if 'functions' in kwargs:
del kwargs['functions']
response_message = self.llm.predict_messages([system_message] + messages,
stop=stop, callbacks=run_manager, **kwargs)
chat_generation_content = response_message.content
if not isinstance(chat_generation_content, str):
raise ValueError('OllamaFunctions does not support non-string output.')
try:
parsed_chat_result = json.loads(chat_generation_content)
except json.JSONDecodeError:
raise ValueError(
f'"{self.llm.model}" did not respond with valid JSON. Please try again.'
)
called_tool_name = parsed_chat_result['tool']
called_tool_arguments = parsed_chat_result['tool_input']
called_tool = next((fn for fn in functions if fn['name'] ==
called_tool_name), None)
if called_tool is None:
raise ValueError(
f'Failed to parse a function call from {self.llm.model} output: {chat_generation_content}'
)
if called_tool['name'] == DEFAULT_RESPONSE_FUNCTION['name']:
return ChatResult(generations=[ChatGeneration(message=AIMessage(content
=called_tool_arguments['response']))])
response_message_with_functions = AIMessage(content='', additional_kwargs={
'function_call': {'name': called_tool_name, 'arguments': json.dumps(
called_tool_arguments) if called_tool_arguments else ''}})
return ChatResult(generations=[ChatGeneration(message=
response_message_with_functions)])
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Ask the wrapped LLM to pick and call one of the supplied functions.

    The available functions are injected into a system prompt as JSON; the
    model is expected to answer with a JSON object of the form
    ``{"tool": <name>, "tool_input": <args>}``.

    Raises:
        ValueError: if ``function_call`` names a function missing from
            ``functions``, if the model output is not a string or not valid
            JSON, or if the named tool is not among the supplied functions.
    """
    functions = kwargs.get('functions', [])
    # When the caller forces a specific function, keep only that one.
    if 'function_call' in kwargs:
        functions = [fn for fn in functions if fn['name'] == kwargs[
            'function_call']['name']]
        if not functions:
            raise ValueError(
                'If "function_call" is specified, you must also pass a matching function in "functions".'
                )
        del kwargs['function_call']
    elif not functions:
        # No tools supplied: fall back to the plain-response pseudo-function.
        functions.append(DEFAULT_RESPONSE_FUNCTION)
    system_message_prompt_template = SystemMessagePromptTemplate.from_template(
        self.tool_system_prompt_template)
    system_message = system_message_prompt_template.format(tools=json.dumps
        (functions, indent=2))
    # 'functions' is consumed by the system prompt, not by the LLM call itself.
    if 'functions' in kwargs:
        del kwargs['functions']
    response_message = self.llm.predict_messages([system_message] +
        messages, stop=stop, callbacks=run_manager, **kwargs)
    chat_generation_content = response_message.content
    if not isinstance(chat_generation_content, str):
        raise ValueError('OllamaFunctions does not support non-string output.')
    try:
        parsed_chat_result = json.loads(chat_generation_content)
    except json.JSONDecodeError:
        raise ValueError(
            f'"{self.llm.model}" did not respond with valid JSON. Please try again.'
            )
    called_tool_name = parsed_chat_result['tool']
    called_tool_arguments = parsed_chat_result['tool_input']
    called_tool = next((fn for fn in functions if fn['name'] ==
        called_tool_name), None)
    if called_tool is None:
        raise ValueError(
            f'Failed to parse a function call from {self.llm.model} output: {chat_generation_content}'
            )
    # The default response function means "answer directly, no tool call".
    if called_tool['name'] == DEFAULT_RESPONSE_FUNCTION['name']:
        return ChatResult(generations=[ChatGeneration(message=AIMessage(
            content=called_tool_arguments['response']))])
    # Otherwise surface the call via additional_kwargs in OpenAI style.
    response_message_with_functions = AIMessage(content='',
        additional_kwargs={'function_call': {'name': called_tool_name,
        'arguments': json.dumps(called_tool_arguments) if
        called_tool_arguments else ''}})
    return ChatResult(generations=[ChatGeneration(message=
        response_message_with_functions)])
| null |
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Signal that this class is not serializable by LangChain."""
    return False
| null |
extract_op_field
|
return x['output_text']
|
def extract_op_field(x):
    """Pull the 'output_text' field out of a chain result mapping."""
    output_text = x['output_text']
    return output_text
| null |
handler
|
return OpenAICallbackHandler()
|
@pytest.fixture
def handler() ->OpenAICallbackHandler:
    """Provide a fresh OpenAICallbackHandler for each test."""
    callback_handler = OpenAICallbackHandler()
    return callback_handler
| null |
_chat_stream_response_to_chat_generation_chunk
|
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get('done'
) is True else None
return ChatGenerationChunk(message=AIMessageChunk(content=parsed_response.
get('message', {}).get('content', '')), generation_info=generation_info)
|
def _chat_stream_response_to_chat_generation_chunk(stream_response: str
    ) ->ChatGenerationChunk:
    """Convert a stream response to a generation chunk."""
    parsed = json.loads(stream_response)
    content = parsed.get('message', {}).get('content', '')
    # Attach the whole payload as generation_info only on the final chunk.
    generation_info = parsed if parsed.get('done') is True else None
    return ChatGenerationChunk(
        message=AIMessageChunk(content=content),
        generation_info=generation_info)
|
Convert a stream response to a generation chunk.
|
max_marginal_relevance_search_with_score_by_vector
|
"""Return docs and their similarity scores selected using the maximal marginal
relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents and similarity scores selected by maximal marginal
relevance and score for each.
"""
scores, indices = self.index.search(np.array([embedding], dtype=np.float32),
fetch_k if filter is None else fetch_k * 2)
if filter is not None:
filtered_indices = []
for i in indices[0]:
if i == -1:
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f'Could not find document for id {_id}, got {doc}'
)
if all(doc.metadata.get(key) in value if isinstance(value, list) else
doc.metadata.get(key) == value for key, value in filter.items()):
filtered_indices.append(i)
indices = np.array([filtered_indices])
embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1]
mmr_selected = maximal_marginal_relevance(np.array([embedding], dtype=np.
float32), embeddings, k=k, lambda_mult=lambda_mult)
selected_indices = [indices[0][i] for i in mmr_selected]
selected_scores = [scores[0][i] for i in mmr_selected]
docs_and_scores = []
for i, score in zip(selected_indices, selected_scores):
if i == -1:
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f'Could not find document for id {_id}, got {doc}')
docs_and_scores.append((doc, score))
return docs_and_scores
|
def max_marginal_relevance_search_with_score_by_vector(self, embedding:
    List[float], *, k: int=4, fetch_k: int=20, lambda_mult: float=0.5,
    filter: Optional[Dict[str, Any]]=None) ->List[Tuple[Document, float]]:
    """Return docs and their similarity scores selected using the maximal marginal
    relevance.
    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch before filtering to
            pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
        filter: Optional metadata filter; a doc passes when every key's
            metadata value equals the filter value (or is contained in it
            when the filter value is a list).
    Returns:
        List of Documents and similarity scores selected by maximal marginal
        relevance and score for each.
    """
    # Over-fetch (2x) when filtering so enough candidates survive the filter.
    scores, indices = self.index.search(np.array([embedding], dtype=np.
        float32), fetch_k if filter is None else fetch_k * 2)
    if filter is not None:
        filtered_indices = []
        for i in indices[0]:
            # The index pads missing results with -1; skip those slots.
            if i == -1:
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(
                    f'Could not find document for id {_id}, got {doc}')
            # List filter values mean "metadata value in list"; else equality.
            if all(doc.metadata.get(key) in value if isinstance(value, list
                ) else doc.metadata.get(key) == value for key, value in
                filter.items()):
                filtered_indices.append(i)
        indices = np.array([filtered_indices])
    # Reconstruct candidate embeddings from the index for the MMR step.
    embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1
        ]
    mmr_selected = maximal_marginal_relevance(np.array([embedding], dtype=
        np.float32), embeddings, k=k, lambda_mult=lambda_mult)
    selected_indices = [indices[0][i] for i in mmr_selected]
    selected_scores = [scores[0][i] for i in mmr_selected]
    docs_and_scores = []
    for i, score in zip(selected_indices, selected_scores):
        if i == -1:
            continue
        _id = self.index_to_docstore_id[i]
        doc = self.docstore.search(_id)
        if not isinstance(doc, Document):
            raise ValueError(f'Could not find document for id {_id}, got {doc}'
                )
        docs_and_scores.append((doc, score))
    return docs_and_scores
|
Return docs and their similarity scores selected using the maximal marginal
relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents and similarity scores selected by maximal marginal
relevance and score for each.
|
memory_variables
|
return ['chat_history', 'relevant_context']
|
@property
def memory_variables(self) ->List[str]:
    """Keys this memory object will contribute to the chain inputs."""
    return ['chat_history', 'relevant_context']
| null |
comment_on_issue
|
"""
Adds a comment to a gitlab issue
Parameters:
comment_query(str): a string which contains the issue number,
two newlines, and the comment.
for example: "1
Working on it now"
adds the comment "working on it now" to issue 1
Returns:
str: A success or failure message
"""
issue_number = int(comment_query.split('\n\n')[0])
comment = comment_query[len(str(issue_number)) + 2:]
try:
issue = self.gitlab_repo_instance.issues.get(issue_number)
issue.notes.create({'body': comment})
return 'Commented on issue ' + str(issue_number)
except Exception as e:
return 'Unable to make comment due to error:\n' + str(e)
|
def comment_on_issue(self, comment_query: str) ->str:
    """
    Adds a comment to a gitlab issue
    Parameters:
        comment_query(str): a string which contains the issue number,
            two newlines, and the comment.
            for example: "1\n\nWorking on it now"
            adds the comment "working on it now" to issue 1
    Returns:
        str: A success or failure message
    """
    issue_number = int(comment_query.split('\n\n')[0])
    comment = comment_query[len(str(issue_number)) + 2:]
    try:
        issue = self.gitlab_repo_instance.issues.get(issue_number)
        issue.notes.create({'body': comment})
    except Exception as e:
        return 'Unable to make comment due to error:\n' + str(e)
    return 'Commented on issue ' + str(issue_number)
|
Adds a comment to a gitlab issue
Parameters:
comment_query(str): a string which contains the issue number,
two newlines, and the comment.
for example: "1
Working on it now"
adds the comment "working on it now" to issue 1
Returns:
str: A success or failure message
|
__init__
|
"""Initialize the Milvus vector store."""
try:
from pymilvus import Collection, utility
except ImportError:
raise ValueError(
'Could not import pymilvus python package. Please install it with `pip install pymilvus`.'
)
self.default_search_params = {'IVF_FLAT': {'metric_type': 'L2', 'params': {
'nprobe': 10}}, 'IVF_SQ8': {'metric_type': 'L2', 'params': {'nprobe':
10}}, 'IVF_PQ': {'metric_type': 'L2', 'params': {'nprobe': 10}}, 'HNSW':
{'metric_type': 'L2', 'params': {'ef': 10}}, 'RHNSW_FLAT': {
'metric_type': 'L2', 'params': {'ef': 10}}, 'RHNSW_SQ': {'metric_type':
'L2', 'params': {'ef': 10}}, 'RHNSW_PQ': {'metric_type': 'L2', 'params':
{'ef': 10}}, 'IVF_HNSW': {'metric_type': 'L2', 'params': {'nprobe': 10,
'ef': 10}}, 'ANNOY': {'metric_type': 'L2', 'params': {'search_k': 10}},
'AUTOINDEX': {'metric_type': 'L2', 'params': {}}}
self.embedding_func = embedding_function
self.collection_name = collection_name
self.collection_description = collection_description
self.index_params = index_params
self.search_params = search_params
self.consistency_level = consistency_level
self._primary_field = primary_field
self._text_field = text_field
self._vector_field = vector_field
self._metadata_field = metadata_field
self.fields: list[str] = []
if connection_args is None:
connection_args = DEFAULT_MILVUS_CONNECTION
self.alias = self._create_connection_alias(connection_args)
self.col: Optional[Collection] = None
if utility.has_collection(self.collection_name, using=self.alias):
self.col = Collection(self.collection_name, using=self.alias)
if drop_old and isinstance(self.col, Collection):
self.col.drop()
self.col = None
self._init()
|
def __init__(self, embedding_function: Embeddings, collection_name: str=
    'LangChainCollection', collection_description: str='', connection_args:
    Optional[dict[str, Any]]=None, consistency_level: str='Session',
    index_params: Optional[dict]=None, search_params: Optional[dict]=None,
    drop_old: Optional[bool]=False, *, primary_field: str='pk', text_field:
    str='text', vector_field: str='vector', metadata_field: Optional[str]=None
    ):
    """Initialize the Milvus vector store.

    Args:
        embedding_function: Embeddings implementation used to embed texts.
        collection_name: Milvus collection to reuse or create.
        collection_description: Description stored with the collection.
        connection_args: Milvus connection settings; falls back to
            DEFAULT_MILVUS_CONNECTION when None.
        consistency_level: Milvus consistency level for operations.
        index_params: Index creation parameters, or None for defaults.
        search_params: Search parameters, or None for defaults.
        drop_old: When True, drop any existing collection with this name.
        primary_field: Name of the primary-key field.
        text_field: Name of the field holding raw text.
        vector_field: Name of the field holding embeddings.
        metadata_field: Optional single field collecting all metadata.

    Raises:
        ValueError: if pymilvus is not installed.
    """
    try:
        from pymilvus import Collection, utility
    except ImportError:
        raise ValueError(
            'Could not import pymilvus python package. Please install it with `pip install pymilvus`.'
            )
    # Default search params per index type (all use the L2 metric).
    self.default_search_params = {'IVF_FLAT': {'metric_type': 'L2',
        'params': {'nprobe': 10}}, 'IVF_SQ8': {'metric_type': 'L2',
        'params': {'nprobe': 10}}, 'IVF_PQ': {'metric_type': 'L2', 'params':
        {'nprobe': 10}}, 'HNSW': {'metric_type': 'L2', 'params': {'ef': 10}
        }, 'RHNSW_FLAT': {'metric_type': 'L2', 'params': {'ef': 10}},
        'RHNSW_SQ': {'metric_type': 'L2', 'params': {'ef': 10}}, 'RHNSW_PQ':
        {'metric_type': 'L2', 'params': {'ef': 10}}, 'IVF_HNSW': {
        'metric_type': 'L2', 'params': {'nprobe': 10, 'ef': 10}}, 'ANNOY':
        {'metric_type': 'L2', 'params': {'search_k': 10}}, 'AUTOINDEX': {
        'metric_type': 'L2', 'params': {}}}
    self.embedding_func = embedding_function
    self.collection_name = collection_name
    self.collection_description = collection_description
    self.index_params = index_params
    self.search_params = search_params
    self.consistency_level = consistency_level
    # Field-name configuration for the collection schema.
    self._primary_field = primary_field
    self._text_field = text_field
    self._vector_field = vector_field
    self._metadata_field = metadata_field
    self.fields: list[str] = []
    if connection_args is None:
        connection_args = DEFAULT_MILVUS_CONNECTION
    self.alias = self._create_connection_alias(connection_args)
    self.col: Optional[Collection] = None
    # Attach to an existing collection when one is already present.
    if utility.has_collection(self.collection_name, using=self.alias):
        self.col = Collection(self.collection_name, using=self.alias)
    if drop_old and isinstance(self.col, Collection):
        self.col.drop()
        self.col = None
    self._init()
|
Initialize the Milvus vector store.
|
_get_chain
|
"""Construct the chain based on the callback manager and model type."""
from langchain.chains.llm import LLMChain
if isinstance(self.llm, BaseLanguageModel):
return LLMChain(llm=self.llm, callback_manager=self.callback_manager if
self.callback_manager else None, prompt=PromptTemplate(template=
SINGLE_QUESTION_TO_QUERY, input_variables=['tool_input', 'tables',
'schemas', 'examples']))
system_prompt = SystemMessagePromptTemplate(prompt=PromptTemplate(template=
QUESTION_TO_QUERY_BASE, input_variables=['tables', 'schemas', 'examples']))
human_prompt = HumanMessagePromptTemplate(prompt=PromptTemplate(template=
USER_INPUT, input_variables=['tool_input']))
return LLMChain(llm=self.llm, callback_manager=self.callback_manager if
self.callback_manager else None, prompt=ChatPromptTemplate.
from_messages([system_prompt, human_prompt]))
|
def _get_chain(self) ->LLMChain:
    """Construct the chain based on the callback manager and model type."""
    from langchain.chains.llm import LLMChain
    callback_manager = self.callback_manager if self.callback_manager else None
    # Plain language models get a single combined prompt.
    if isinstance(self.llm, BaseLanguageModel):
        prompt = PromptTemplate(template=SINGLE_QUESTION_TO_QUERY,
            input_variables=['tool_input', 'tables', 'schemas', 'examples'])
        return LLMChain(llm=self.llm, callback_manager=callback_manager,
            prompt=prompt)
    # Chat models get a system/human message pair instead.
    system_prompt = SystemMessagePromptTemplate(prompt=PromptTemplate(
        template=QUESTION_TO_QUERY_BASE,
        input_variables=['tables', 'schemas', 'examples']))
    human_prompt = HumanMessagePromptTemplate(prompt=PromptTemplate(
        template=USER_INPUT, input_variables=['tool_input']))
    chat_prompt = ChatPromptTemplate.from_messages([system_prompt,
        human_prompt])
    return LLMChain(llm=self.llm, callback_manager=callback_manager,
        prompt=chat_prompt)
|
Construct the chain based on the callback manager and model type.
|
test_hf_call
|
"""Test valid call to HuggingFace Foundation Model."""
llm = AzureMLOnlineEndpoint(endpoint_api_key=os.getenv(
'HF_ENDPOINT_API_KEY'), endpoint_url=os.getenv('HF_ENDPOINT_URL'),
deployment_name=os.getenv('HF_DEPLOYMENT_NAME'), content_formatter=
HFContentFormatter())
output = llm('Foo')
assert isinstance(output, str)
|
def test_hf_call() ->None:
    """Smoke-test a live call to a HuggingFace Foundation Model endpoint."""
    endpoint = AzureMLOnlineEndpoint(
        endpoint_api_key=os.getenv('HF_ENDPOINT_API_KEY'),
        endpoint_url=os.getenv('HF_ENDPOINT_URL'),
        deployment_name=os.getenv('HF_DEPLOYMENT_NAME'),
        content_formatter=HFContentFormatter())
    result = endpoint('Foo')
    assert isinstance(result, str)
|
Test valid call to HuggingFace Foundation Model.
|
evaluate_string_pairs
|
"""Evaluate the output string pairs.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
reference (Optional[str], optional): The expected output / reference string.
input (Optional[str], optional): The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
dict: A dictionary containing the preference, scores, and/or other information.
"""
self._check_evaluation_args(reference=reference, input=input)
return self._evaluate_string_pairs(prediction=prediction, prediction_b=
prediction_b, reference=reference, input=input, **kwargs)
|
def evaluate_string_pairs(self, *, prediction: str, prediction_b: str,
    reference: Optional[str]=None, input: Optional[str]=None, **kwargs: Any
    ) ->dict:
    """Compare two model output strings.
    Args:
        prediction (str): Output string from the first model.
        prediction_b (str): Output string from the second model.
        reference (Optional[str], optional): Expected / reference output.
        input (Optional[str], optional): The originating input string.
        **kwargs: Extra keyword arguments such as callbacks or reference strings.
    Returns:
        dict: The preference, scores, and/or other evaluation details.
    """
    # Validate that reference/input are supplied when the evaluator needs them.
    self._check_evaluation_args(reference=reference, input=input)
    outcome = self._evaluate_string_pairs(prediction=prediction,
        prediction_b=prediction_b, reference=reference, input=input, **kwargs)
    return outcome
|
Evaluate the output string pairs.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
reference (Optional[str], optional): The expected output / reference string.
input (Optional[str], optional): The input string.
**kwargs: Additional keyword arguments, such as callbacks and optional reference strings.
Returns:
dict: A dictionary containing the preference, scores, and/or other information.
|
test_extract_nested_tags
|
bs_transformer = BeautifulSoupTransformer()
nested_html = (
"<html><div class='some_style'><p><span>First</span> paragraph.</p><p>Second <div>paragraph.</div></p><p><p>Third paragraph.</p></p></div></html>"
)
documents = [Document(page_content=nested_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0
].page_content == 'First paragraph. Second paragraph. Third paragraph.'
|
@pytest.mark.requires('bs4')
def test_extract_nested_tags() ->None:
    """Nested markup collapses to the concatenated visible text."""
    transformer = BeautifulSoupTransformer()
    nested_html = (
        "<html><div class='some_style'><p><span>First</span> paragraph.</p><p>Second <div>paragraph.</div></p><p><p>Third paragraph.</p></p></div></html>"
        )
    transformed = transformer.transform_documents([Document(page_content=
        nested_html)])
    expected = 'First paragraph. Second paragraph. Third paragraph.'
    assert transformed[0].page_content == expected
| null |
get_format_instructions
|
return FORMAT_INSTRUCTIONS
|
def get_format_instructions(self) ->str:
    """Return the module-level FORMAT_INSTRUCTIONS template verbatim."""
    return FORMAT_INSTRUCTIONS
| null |
test_visit_operation
|
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.LT, attribute='abc', value=['1', '2'])])
expected = (
'and(metadata->foo.lt.2,metadata->>bar.eq.baz,and(metadata->>abc.lt.1,metadata->>abc.lt.2))'
)
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
|
def test_visit_operation() ->None:
    """AND over mixed comparisons serializes to nested and() filter syntax."""
    comparisons = [
        Comparison(comparator=Comparator.LT, attribute='foo', value=2),
        Comparison(comparator=Comparator.EQ, attribute='bar', value='baz'),
        Comparison(comparator=Comparator.LT, attribute='abc', value=['1', '2']),
    ]
    op = Operation(operator=Operator.AND, arguments=comparisons)
    expected = (
        'and(metadata->foo.lt.2,metadata->>bar.eq.baz,and(metadata->>abc.lt.1,metadata->>abc.lt.2))'
        )
    assert DEFAULT_TRANSLATOR.visit_operation(op) == expected
| null |
on_llm_end
|
from promptlayer.utils import get_api_key, promptlayer_api_request
run_info = self.runs.get(run_id, {})
if not run_info:
return
run_info['request_end_time'] = datetime.datetime.now().timestamp()
for i in range(len(response.generations)):
generation = response.generations[i][0]
resp = {'text': generation.text, 'llm_output': response.llm_output}
model_params = run_info.get('invocation_params', {})
is_chat_model = run_info.get('messages', None) is not None
model_input = run_info.get('messages', [])[i] if is_chat_model else [
run_info.get('prompts', [])[i]]
model_response = [self._convert_message_to_dict(generation.message)
] if is_chat_model and isinstance(generation, ChatGeneration) else resp
pl_request_id = promptlayer_api_request(run_info.get('name'),
'langchain', model_input, model_params, self.pl_tags,
model_response, run_info.get('request_start_time'), run_info.get(
'request_end_time'), get_api_key(), return_pl_id=bool(self.
pl_id_callback is not None), metadata={'_langchain_run_id': str(
run_id), '_langchain_parent_run_id': str(parent_run_id),
'_langchain_tags': str(run_info.get('tags', []))})
if self.pl_id_callback:
self.pl_id_callback(pl_request_id)
|
def on_llm_end(self, response: LLMResult, *, run_id: UUID, parent_run_id:
    Optional[UUID]=None, **kwargs: Any) ->None:
    """Report each generation of a finished LLM run to PromptLayer."""
    from promptlayer.utils import get_api_key, promptlayer_api_request
    run_info = self.runs.get(run_id, {})
    # Run was never registered (no matching on_llm_start): nothing to report.
    if not run_info:
        return
    run_info['request_end_time'] = datetime.datetime.now().timestamp()
    for i in range(len(response.generations)):
        # Only the first generation per prompt is reported.
        generation = response.generations[i][0]
        resp = {'text': generation.text, 'llm_output': response.llm_output}
        model_params = run_info.get('invocation_params', {})
        # Chat runs recorded 'messages' at start time; completion runs
        # recorded 'prompts'.
        is_chat_model = run_info.get('messages', None) is not None
        model_input = run_info.get('messages', [])[i] if is_chat_model else [
            run_info.get('prompts', [])[i]]
        model_response = [self._convert_message_to_dict(generation.message)
            ] if is_chat_model and isinstance(generation, ChatGeneration
            ) else resp
        pl_request_id = promptlayer_api_request(run_info.get('name'),
            'langchain', model_input, model_params, self.pl_tags,
            model_response, run_info.get('request_start_time'), run_info.
            get('request_end_time'), get_api_key(), return_pl_id=bool(self.
            pl_id_callback is not None), metadata={'_langchain_run_id': str
            (run_id), '_langchain_parent_run_id': str(parent_run_id),
            '_langchain_tags': str(run_info.get('tags', []))})
        # Hand the PromptLayer request id back to the caller if requested.
        if self.pl_id_callback:
            self.pl_id_callback(pl_request_id)
| null |
get_cot_prompt
|
return PromptTemplate(template_format='jinja2', input_variables=[
'problem_description', 'thoughts'], template=dedent(
"""
You are an intelligent agent that is generating one thought at a time in
a tree of thoughts setting.
PROBLEM
{{problem_description}}
{% if thoughts %}
THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
{% endif %}
Let's think step by step.
"""
).strip())
|
def get_cot_prompt() ->PromptTemplate:
    """Return the jinja2 prompt that elicits one tree-of-thoughts step."""
    # NOTE: 'thoughts' may be empty on the first step; the THOUGHTS section
    # renders only when prior thoughts exist.
    return PromptTemplate(template_format='jinja2', input_variables=[
        'problem_description', 'thoughts'], template=dedent(
        """
        You are an intelligent agent that is generating one thought at a time in
        a tree of thoughts setting.
        PROBLEM
        {{problem_description}}
        {% if thoughts %}
        THOUGHTS
        {% for thought in thoughts %}
        {{ thought }}
        {% endfor %}
        {% endif %}
        Let's think step by step.
        """
        ).strip())
| null |
test_lazy_load
|
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
loader.BEAUTIFULSOUP = None
documents = list(loader.lazy_load())
assert len(documents) == 1
assert documents[0].metadata['url'] == 'https://example.com/layer_url'
|
def test_lazy_load(arcgis_mocks, mock_feature_layer, mock_gis):
    """lazy_load yields exactly one document carrying the layer URL."""
    loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
    loader.BEAUTIFULSOUP = None
    docs = [doc for doc in loader.lazy_load()]
    assert len(docs) == 1
    assert docs[0].metadata['url'] == 'https://example.com/layer_url'
| null |
_results_to_docs_and_scores
|
"""Return docs and scores from results."""
docs = [(Document(page_content=result.EmbeddingStore.document, metadata=
result.EmbeddingStore.cmetadata), result.distance if self.
embedding_function is not None else None) for result in results]
return docs
|
def _results_to_docs_and_scores(self, results: Any) ->List[Tuple[Document,
    float]]:
    """Convert raw query rows into (Document, distance) pairs.

    The distance is None when no embedding function is configured.
    """
    pairs = []
    for row in results:
        doc = Document(page_content=row.EmbeddingStore.document, metadata=
            row.EmbeddingStore.cmetadata)
        score = row.distance if self.embedding_function is not None else None
        pairs.append((doc, score))
    return pairs
|
Return docs and scores from results.
|
embed_documents
|
"""Return simple embeddings."""
return [([float(1.0)] * (self.dimension - 1) + [float(i)]) for i in range(
len(embedding_texts))]
|
def embed_documents(self, embedding_texts: List[str]) ->List[List[float]]:
    """Return deterministic fake embeddings.

    Each vector has ``self.dimension`` components: all ones except the last,
    which encodes the text's position in the batch so every document gets a
    distinct embedding.

    Args:
        embedding_texts: Texts to embed (only their count matters).
    Returns:
        One ``self.dimension``-long vector per input text.
    """
    # float(1.0) in the original was a no-op conversion; 1.0 is already float.
    return [[1.0] * (self.dimension - 1) + [float(i)] for i in range(len(
        embedding_texts))]
|
Return simple embeddings.
|
_seq_input_schema
|
from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick
first = steps[0]
if len(steps) == 1:
return first.get_input_schema(config)
elif isinstance(first, RunnableAssign):
next_input_schema = _seq_input_schema(steps[1:], config)
if not next_input_schema.__custom_root_type__:
return create_model('RunnableSequenceInput', **{k: (v.annotation, v
.default) for k, v in next_input_schema.__fields__.items() if k
not in first.mapper.steps}, __config__=_SchemaConfig)
elif isinstance(first, RunnablePick):
return _seq_input_schema(steps[1:], config)
return first.get_input_schema(config)
|
def _seq_input_schema(steps: List[Runnable[Any, Any]], config: Optional[
    RunnableConfig]) ->Type[BaseModel]:
    """Derive the input schema of a runnable sequence from its leading steps."""
    from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick
    first = steps[0]
    if len(steps) == 1:
        return first.get_input_schema(config)
    elif isinstance(first, RunnableAssign):
        # Keys that the assign step itself produces need not be present in
        # the sequence input, so drop them from the downstream schema.
        next_input_schema = _seq_input_schema(steps[1:], config)
        if not next_input_schema.__custom_root_type__:
            return create_model('RunnableSequenceInput', **{k: (v.
                annotation, v.default) for k, v in next_input_schema.
                __fields__.items() if k not in first.mapper.steps},
                __config__=_SchemaConfig)
    elif isinstance(first, RunnablePick):
        # A pick step is transparent for input purposes; recurse past it.
        return _seq_input_schema(steps[1:], config)
    # Fallback: the first step's own input schema.
    return first.get_input_schema(config)
| null |
_default_params
|
params = {'temperature': self.temperature, 'max_output_tokens': self.
max_output_tokens, 'candidate_count': self.n}
if not self.is_codey_model:
params.update({'top_k': self.top_k, 'top_p': self.top_p})
return params
|
@property
def _default_params(self) ->Dict[str, Any]:
params = {'temperature': self.temperature, 'max_output_tokens': self.
max_output_tokens, 'candidate_count': self.n}
if not self.is_codey_model:
params.update({'top_k': self.top_k, 'top_p': self.top_p})
return params
| null |
custom_openapi
|
"""Add servers configuration to the OpenAPI schema"""
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(title='Android Robot API', version='1.0.0',
description=
'This is an Android Robot API with different endpoints for robot operations'
, routes=app.routes)
openapi_schema['servers'] = [{'url': f'http://localhost:{PORT}'}]
app.openapi_schema = openapi_schema
return app.openapi_schema
|
def custom_openapi() ->Dict[str, Any]:
    """Build (once) and return the OpenAPI schema with a servers section."""
    # Serve the cached schema when one was already generated.
    if app.openapi_schema:
        return app.openapi_schema
    schema = get_openapi(title='Android Robot API', version='1.0.0',
        description=
        'This is an Android Robot API with different endpoints for robot operations'
        , routes=app.routes)
    # Point clients at the locally served instance.
    schema['servers'] = [{'url': f'http://localhost:{PORT}'}]
    app.openapi_schema = schema
    return schema
|
Add servers configuration to the OpenAPI schema
|
_get_child_links_recursive
|
"""Recursively get all child links starting with the path of the input URL.
Args:
url: The URL to crawl.
visited: A set of visited URLs.
depth: Current depth of recursion. Stop when depth >= max_depth.
"""
if depth >= self.max_depth:
return
visited.add(url)
try:
response = requests.get(url, timeout=self.timeout, headers=self.headers)
if self.check_response_status and 400 <= response.status_code <= 599:
raise ValueError(f'Received HTTP status {response.status_code}')
except Exception as e:
logger.warning(
f'Unable to load from {url}. Received error {e} of type {e.__class__.__name__}'
)
return
content = self.extractor(response.text)
if content:
yield Document(page_content=content, metadata=self.metadata_extractor(
response.text, url))
sub_links = extract_sub_links(response.text, url, base_url=self.url,
pattern=self.link_regex, prevent_outside=self.prevent_outside,
exclude_prefixes=self.exclude_dirs)
for link in sub_links:
if link not in visited:
yield from self._get_child_links_recursive(link, visited, depth=
depth + 1)
|
def _get_child_links_recursive(self, url: str, visited: Set[str], *, depth:
    int=0) ->Iterator[Document]:
    """Recursively get all child links starting with the path of the input URL.
    Args:
        url: The URL to crawl.
        visited: A set of visited URLs (mutated in place).
        depth: Current depth of recursion. Stop when depth >= max_depth.
    """
    if depth >= self.max_depth:
        return
    # Mark before fetching so concurrent recursion never revisits this URL.
    visited.add(url)
    try:
        response = requests.get(url, timeout=self.timeout, headers=self.headers
            )
        if self.check_response_status and 400 <= response.status_code <= 599:
            raise ValueError(f'Received HTTP status {response.status_code}')
    except Exception as e:
        # Best-effort crawl: log and skip unreachable pages instead of failing.
        logger.warning(
            f'Unable to load from {url}. Received error {e} of type {e.__class__.__name__}'
            )
        return
    content = self.extractor(response.text)
    # Only pages with extractable content produce a Document.
    if content:
        yield Document(page_content=content, metadata=self.
            metadata_extractor(response.text, url))
    sub_links = extract_sub_links(response.text, url, base_url=self.url,
        pattern=self.link_regex, prevent_outside=self.prevent_outside,
        exclude_prefixes=self.exclude_dirs)
    # Recurse into unseen links one level deeper.
    for link in sub_links:
        if link not in visited:
            yield from self._get_child_links_recursive(link, visited, depth
                =depth + 1)
|
Recursively get all child links starting with the path of the input URL.
Args:
url: The URL to crawl.
visited: A set of visited URLs.
depth: Current depth of recursion. Stop when depth >= max_depth.
|
_get_retriever_tags
|
"""Get tags for retriever."""
tags = [self.__class__.__name__]
if self.embeddings:
tags.append(self.embeddings.__class__.__name__)
return tags
|
def _get_retriever_tags(self) ->List[str]:
"""Get tags for retriever."""
tags = [self.__class__.__name__]
if self.embeddings:
tags.append(self.embeddings.__class__.__name__)
return tags
|
Get tags for retriever.
|
run
|
"""Run query through Google Finance with Serpapi"""
serpapi_api_key = cast(SecretStr, self.serp_api_key)
params = {'engine': 'google_finance', 'api_key': serpapi_api_key.
get_secret_value(), 'q': query}
total_results = {}
client = self.serp_search_engine(params)
total_results = client.get_dict()
if not total_results:
return 'Nothing was found from the query: ' + query
markets = total_results.get('markets', {})
res = '\nQuery: ' + query + '\n'
if 'futures_chain' in total_results:
futures_chain = total_results.get('futures_chain', [])[0]
stock = futures_chain['stock']
price = futures_chain['price']
temp = futures_chain['price_movement']
percentage = temp['percentage']
movement = temp['movement']
res += f'stock: {stock}\n' + f'price: {price}\n' + f'percentage: {percentage}\n' + f"""movement: {movement}
"""
else:
res += 'No summary information\n'
for key in markets:
if key == 'us' or key == 'asia' or key == 'europe':
res += key
res += ': price = '
res += str(markets[key][0]['price'])
res += ', movement = '
res += markets[key][0]['price_movement']['movement']
res += '\n'
return res
|
def run(self, query: str) ->str:
    """Run ``query`` through Google Finance via SerpApi and format the result.

    Args:
        query: Free-text finance query (e.g. a ticker or company name).
    Returns:
        A human-readable summary: the matched instrument's price movement
        (when a futures_chain is present) plus one line per us/asia/europe
        market entry.
    """
    serpapi_api_key = cast(SecretStr, self.serp_api_key)
    params = {'engine': 'google_finance', 'api_key': serpapi_api_key.
        get_secret_value(), 'q': query}
    # Removed the dead `total_results = {}` pre-assignment that was
    # immediately overwritten by the API response.
    client = self.serp_search_engine(params)
    total_results = client.get_dict()
    if not total_results:
        return 'Nothing was found from the query: ' + query
    markets = total_results.get('markets', {})
    res = '\nQuery: ' + query + '\n'
    if 'futures_chain' in total_results:
        # Summarize the first (best-matching) futures chain entry.
        futures_chain = total_results.get('futures_chain', [])[0]
        stock = futures_chain['stock']
        price = futures_chain['price']
        movement_info = futures_chain['price_movement']
        percentage = movement_info['percentage']
        movement = movement_info['movement']
        res += (f'stock: {stock}\n' + f'price: {price}\n' +
            f'percentage: {percentage}\n' + f'movement: {movement}\n')
    else:
        res += 'No summary information\n'
    # One line per major region present in the markets payload.
    for key in markets:
        if key in ('us', 'asia', 'europe'):
            res += key
            res += ': price = '
            res += str(markets[key][0]['price'])
            res += ', movement = '
            res += markets[key][0]['price_movement']['movement']
            res += '\n'
    return res
|
Run query through Google Finance with Serpapi
|
_create_function_message
|
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(name=agent_action.tool, content=content)
|
def _create_function_message(agent_action: AgentAction, observation: str
    ) ->FunctionMessage:
    """Wrap a tool invocation's result in a FunctionMessage.
    Args:
        agent_action: the tool invocation request from the agent
        observation: the result of the tool invocation
    Returns:
        FunctionMessage that corresponds to the original tool invocation
    """
    if isinstance(observation, str):
        content = observation
    else:
        # Non-string observations are serialized to JSON when possible,
        # falling back to str() for unserializable objects.
        try:
            content = json.dumps(observation, ensure_ascii=False)
        except Exception:
            content = str(observation)
    return FunctionMessage(name=agent_action.tool, content=content)
|
Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
|
_get_google_search_results_json
|
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
|
def _get_google_search_results_json(**kwargs: Any) ->BaseTool:
    """Build a GoogleSearchResults tool backed by a configured API wrapper."""
    return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
| null |
simplify_code
|
tree = ast.parse(self.code)
simplified_lines = self.source_lines[:]
for node in ast.iter_child_nodes(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
start = node.lineno - 1
simplified_lines[start] = f'# Code for: {simplified_lines[start]}'
assert isinstance(node.end_lineno, int)
for line_num in range(start + 1, node.end_lineno):
simplified_lines[line_num] = None
return '\n'.join(line for line in simplified_lines if line is not None)
|
def simplify_code(self) ->str:
    """Collapse each top-level def/class into a one-line placeholder comment."""
    module = ast.parse(self.code)
    lines = self.source_lines[:]
    for node in ast.iter_child_nodes(module):
        if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef,
            ast.ClassDef)):
            continue
        first = node.lineno - 1
        # Keep the header line as a marker; blank out the body lines.
        lines[first] = f'# Code for: {lines[first]}'
        assert isinstance(node.end_lineno, int)
        for idx in range(first + 1, node.end_lineno):
            lines[idx] = None
    return '\n'.join(line for line in lines if line is not None)
| null |
__init__
|
"""Initialize with Typesense client."""
try:
from typesense import Client
except ImportError:
raise ImportError(
'Could not import typesense python package. Please install it with `pip install typesense`.'
)
if not isinstance(typesense_client, Client):
raise ValueError(
f'typesense_client should be an instance of typesense.Client, got {type(typesense_client)}'
)
self._typesense_client = typesense_client
self._embedding = embedding
self._typesense_collection_name = (typesense_collection_name or
f'langchain-{str(uuid.uuid4())}')
self._text_key = text_key
|
def __init__(self, typesense_client: Client, embedding: Embeddings, *,
    typesense_collection_name: Optional[str]=None, text_key: str='text'):
    """Initialize with Typesense client.

    Args:
        typesense_client: Pre-configured ``typesense.Client`` instance.
        embedding: Embeddings implementation used to vectorize texts.
        typesense_collection_name: Target collection name; a random
            ``langchain-<uuid>`` name is generated when omitted.
        text_key: Document field that stores the raw text.
    Raises:
        ImportError: If the typesense package is not installed.
        ValueError: If ``typesense_client`` is not a ``typesense.Client``.
    """
    try:
        from typesense import Client
    except ImportError:
        raise ImportError(
            'Could not import typesense python package. Please install it with `pip install typesense`.'
            )
    if not isinstance(typesense_client, Client):
        raise ValueError(
            f'typesense_client should be an instance of typesense.Client, got {type(typesense_client)}'
            )
    self._typesense_client = typesense_client
    self._embedding = embedding
    self._typesense_collection_name = (typesense_collection_name or
        f'langchain-{str(uuid.uuid4())}')
    self._text_key = text_key
|
Initialize with Typesense client.
|
_embedding_vector_column_name
|
"""Return the name of the embedding vector column.
None if the index is not a self-managed embedding index.
"""
return self._embedding_vector_column().get('name')
|
def _embedding_vector_column_name(self) ->Optional[str]:
    """Return the name of the embedding vector column.

    Returns:
        The column name, or None if the index is not a self-managed
        embedding index (no vector column in its description).
    """
    return self._embedding_vector_column().get('name')
|
Return the name of the embedding vector column.
None if the index is not a self-managed embedding index.
|
test_different_solution_expr_code_validation
|
"""Test the validator."""
with pytest.raises(ValueError):
PALChain.validate_code(_SAMPLE_CODE_2, _FULL_CODE_VALIDATIONS)
|
def test_different_solution_expr_code_validation() ->None:
    """Validator rejects code whose solution expression does not match."""
    sample, validations = _SAMPLE_CODE_2, _FULL_CODE_VALIDATIONS
    with pytest.raises(ValueError):
        PALChain.validate_code(sample, validations)
|
Test the validator.
|
_llm_type
|
"""Return type of llm."""
return 'oci_model_deployment_tgi_endpoint'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'oci_model_deployment_tgi_endpoint'
|
Return type of llm.
|
test_update
|
"""Test updating records in the database."""
read_keys = manager.list_keys()
assert read_keys == []
keys = ['key1', 'key2', 'key3']
manager.update(keys)
read_keys = manager.list_keys()
assert read_keys == ['key1', 'key2', 'key3']
|
def test_update(manager: SQLRecordManager) ->None:
    """Keys written via update() become visible through list_keys()."""
    assert manager.list_keys() == []
    keys = ['key1', 'key2', 'key3']
    manager.update(keys)
    assert manager.list_keys() == ['key1', 'key2', 'key3']
|
Test updating records in the database.
|
setup_class
|
if not os.getenv('OPENAI_API_KEY'):
raise ValueError('OPENAI_API_KEY environment variable is not set')
|
@classmethod
def setup_class(cls) ->None:
    """Abort the suite early when no OpenAI credential is configured."""
    api_key = os.getenv('OPENAI_API_KEY')
    if not api_key:
        raise ValueError('OPENAI_API_KEY environment variable is not set')
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'output']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    namespace = ['langchain', 'schema', 'output']
    return namespace
|
Get the namespace of the langchain object.
|
test_saving_loading_llm
|
"""Test saving/loading a Google PaLM LLM."""
llm = GooglePalm(max_output_tokens=10)
llm.save(file_path=tmp_path / 'google_palm.yaml')
loaded_llm = load_llm(tmp_path / 'google_palm.yaml')
assert loaded_llm == llm
|
def test_saving_loading_llm(tmp_path: Path) ->None:
    """Round-trip a Google PaLM LLM through save/load."""
    config_path = tmp_path / 'google_palm.yaml'
    original = GooglePalm(max_output_tokens=10)
    original.save(file_path=config_path)
    assert load_llm(config_path) == original
|
Test saving/loading a Google PaLM LLM.
|
test_layer_description_provided_by_user
|
custom_description = 'Custom Layer Description'
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, lyr_desc=
custom_description)
layer_properties = loader._get_layer_properties(lyr_desc=custom_description)
assert layer_properties['layer_description'] == custom_description
|
def test_layer_description_provided_by_user(arcgis_mocks,
    mock_feature_layer, mock_gis):
    """A user-supplied layer description overrides the fetched one."""
    custom_description = 'Custom Layer Description'
    loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis,
        lyr_desc=custom_description)
    props = loader._get_layer_properties(lyr_desc=custom_description)
    assert props['layer_description'] == custom_description
| null |
prep_outputs
|
"""Validate and prepare chain outputs, and save info about this run to memory.
Args:
inputs: Dictionary of chain inputs, including any inputs added by chain
memory.
outputs: Dictionary of initial chain outputs.
return_only_outputs: Whether to only return the chain outputs. If False,
inputs are also added to the final outputs.
Returns:
A dict of the final chain outputs.
"""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
|
def prep_outputs(self, inputs: Dict[str, str], outputs: Dict[str, str],
    return_only_outputs: bool=False) ->Dict[str, str]:
    """Validate chain outputs and record the run in memory.
    Args:
        inputs: Chain inputs, including anything contributed by chain memory.
        outputs: Initial chain outputs.
        return_only_outputs: When True, return just the outputs; otherwise
            the inputs are merged into the returned dict as well.
    Returns:
        A dict of the final chain outputs.
    """
    self._validate_outputs(outputs)
    # Persist this turn before deciding the return shape.
    if self.memory is not None:
        self.memory.save_context(inputs, outputs)
    return outputs if return_only_outputs else {**inputs, **outputs}
|
Validate and prepare chain outputs, and save info about this run to memory.
Args:
inputs: Dictionary of chain inputs, including any inputs added by chain
memory.
outputs: Dictionary of initial chain outputs.
return_only_outputs: Whether to only return the chain outputs. If False,
inputs are also added to the final outputs.
Returns:
A dict of the final chain outputs.
|
embed_documents
|
"""Embed a list of texts.
The method first checks the cache for the embeddings.
If the embeddings are not found, the method uses the underlying embedder
to embed the documents and stores the results in the cache.
Args:
texts: A list of texts to embed.
Returns:
A list of embeddings for the given texts.
"""
vectors: List[Union[List[float], None]] = self.document_embedding_store.mget(
texts)
missing_indices: List[int] = [i for i, vector in enumerate(vectors) if
vector is None]
missing_texts = [texts[i] for i in missing_indices]
if missing_texts:
missing_vectors = self.underlying_embeddings.embed_documents(missing_texts)
self.document_embedding_store.mset(list(zip(missing_texts,
missing_vectors)))
for index, updated_vector in zip(missing_indices, missing_vectors):
vectors[index] = updated_vector
return cast(List[List[float]], vectors)
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed texts, serving cached vectors where available.

    Cache hits come from the document embedding store; misses are embedded
    with the underlying embedder and written back to the cache.
    Args:
        texts: A list of texts to embed.
    Returns:
        A list of embeddings for the given texts.
    """
    cached: List[Union[List[float], None]] = (self.
        document_embedding_store.mget(texts))
    # Map position -> text for every cache miss, preserving input order.
    to_embed = {i: texts[i] for i, vec in enumerate(cached) if vec is None}
    if to_embed:
        fresh = self.underlying_embeddings.embed_documents(list(to_embed.
            values()))
        self.document_embedding_store.mset(list(zip(to_embed.values(),
            fresh)))
        for idx, vec in zip(to_embed.keys(), fresh):
            cached[idx] = vec
    return cast(List[List[float]], cached)
|
Embed a list of texts.
The method first checks the cache for the embeddings.
If the embeddings are not found, the method uses the underlying embedder
to embed the documents and stores the results in the cache.
Args:
texts: A list of texts to embed.
Returns:
A list of embeddings for the given texts.
|
test_language_loader_for_javascript
|
"""Test JavaScript loader with parser enabled."""
file_path = Path(__file__).parent.parent.parent / 'examples'
loader = GenericLoader.from_filesystem(file_path, glob='hello_world.js',
parser=LanguageParser(parser_threshold=5))
docs = loader.load()
assert len(docs) == 3
metadata = docs[0].metadata
assert metadata['source'] == str(file_path / 'hello_world.js')
assert metadata['content_type'] == 'functions_classes'
assert metadata['language'] == 'js'
metadata = docs[1].metadata
assert metadata['source'] == str(file_path / 'hello_world.js')
assert metadata['content_type'] == 'functions_classes'
assert metadata['language'] == 'js'
metadata = docs[2].metadata
assert metadata['source'] == str(file_path / 'hello_world.js')
assert metadata['content_type'] == 'simplified_code'
assert metadata['language'] == 'js'
assert docs[0].page_content == """class HelloWorld {
sayHello() {
console.log("Hello World!");
}
}"""
assert docs[1].page_content == """function main() {
const hello = new HelloWorld();
hello.sayHello();
}"""
assert docs[2].page_content == """// Code for: class HelloWorld {
// Code for: function main() {
main();"""
|
@pytest.mark.skipif(not esprima_installed(), reason='requires esprima package')
def test_language_loader_for_javascript() ->None:
    """Test JavaScript loader with parser enabled."""
    file_path = Path(__file__).parent.parent.parent / 'examples'
    loader = GenericLoader.from_filesystem(file_path, glob='hello_world.js',
        parser=LanguageParser(parser_threshold=5))
    docs = loader.load()
    # Expect: two functions_classes documents plus one simplified_code summary.
    assert len(docs) == 3
    metadata = docs[0].metadata
    assert metadata['source'] == str(file_path / 'hello_world.js')
    assert metadata['content_type'] == 'functions_classes'
    assert metadata['language'] == 'js'
    metadata = docs[1].metadata
    assert metadata['source'] == str(file_path / 'hello_world.js')
    assert metadata['content_type'] == 'functions_classes'
    assert metadata['language'] == 'js'
    metadata = docs[2].metadata
    assert metadata['source'] == str(file_path / 'hello_world.js')
    assert metadata['content_type'] == 'simplified_code'
    assert metadata['language'] == 'js'
    # The parser extracts each top-level declaration verbatim...
    assert docs[0].page_content == """class HelloWorld {
  sayHello() {
    console.log("Hello World!");
  }
}"""
    assert docs[1].page_content == """function main() {
  const hello = new HelloWorld();
  hello.sayHello();
}"""
    # ...and collapses them to placeholder comments in the summary document.
    assert docs[2].page_content == """// Code for: class HelloWorld {
// Code for: function main() {
main();"""
|
Test JavaScript loader with parser enabled.
|
test_invalid_arguments_to_delete
|
with pytest.raises(ValueError) as exception_info:
self.invoke_delete_with_no_args(azure_openai_embeddings, collection)
assert str(exception_info.value) == 'No document ids provided to delete.'
|
def test_invalid_arguments_to_delete(self, azure_openai_embeddings:
    OpenAIEmbeddings, collection: Any) ->None:
    """Deleting with no document ids must raise a ValueError with this exact message."""
    with pytest.raises(ValueError) as exception_info:
        self.invoke_delete_with_no_args(azure_openai_embeddings, collection)
    assert str(exception_info.value) == 'No document ids provided to delete.'
| null |
clear
|
"""Clear session memory from PostgreSQL"""
query = f'DELETE FROM {self.table_name} WHERE session_id = %s;'
self.cursor.execute(query, (self.session_id,))
self.connection.commit()
|
def clear(self) ->None:
    """Clear session memory from PostgreSQL"""
    # NOTE(review): table_name is interpolated into the SQL as an identifier
    # (placeholders cannot bind identifiers), so it must come from trusted
    # configuration, never user input.
    query = f'DELETE FROM {self.table_name} WHERE session_id = %s;'
    # session_id is bound as a query parameter and therefore safely escaped.
    self.cursor.execute(query, (self.session_id,))
    self.connection.commit()
|
Clear session memory from PostgreSQL
|
add_texts
|
"""Add more texts to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
Defaults to None.
embeddings (Optional[List[List[float]]], optional): Optional pre-generated
embeddings. Defaults to None.
keys (List[str]) or ids (List[str]): Identifiers of entries.
Defaults to None.
batch_size (int, optional): Batch size to use for writes. Defaults to 1000.
Returns:
List[str]: List of ids added to the vectorstore
"""
ids = []
keys_or_ids = kwargs.get('keys', kwargs.get('ids'))
if metadatas:
if isinstance(metadatas, list) and len(metadatas) != len(texts):
raise ValueError('Number of metadatas must match number of texts')
if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)):
raise ValueError('Metadatas must be a list of dicts')
embeddings = embeddings or self._embeddings.embed_documents(list(texts))
self._create_index_if_not_exist(dim=len(embeddings[0]))
pipeline = self.client.pipeline(transaction=False)
for i, text in enumerate(texts):
key = keys_or_ids[i] if keys_or_ids else str(uuid.uuid4().hex)
if not key.startswith(self.key_prefix + ':'):
key = self.key_prefix + ':' + key
metadata = metadatas[i] if metadatas else {}
metadata = _prepare_metadata(metadata) if clean_metadata else metadata
pipeline.hset(key, mapping={self._schema.content_key: text, self.
_schema.content_vector_key: _array_to_buffer(embeddings[i], self.
_schema.vector_dtype), **metadata})
ids.append(key)
if i % batch_size == 0:
pipeline.execute()
pipeline.execute()
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, embeddings: Optional[List[List[float]]]=None, batch_size: int=
    1000, clean_metadata: bool=True, **kwargs: Any) ->List[str]:
    """Add more texts to the vectorstore.

    Args:
        texts (Iterable[str]): Texts to add to the vectorstore.
        metadatas (Optional[List[dict]]): Optional list of metadata dicts,
            one per text. Defaults to None.
        embeddings (Optional[List[List[float]]]): Optional pre-generated
            embeddings; computed from ``texts`` when omitted.
        batch_size (int): Number of writes per pipeline flush. Defaults
            to 1000.
        clean_metadata (bool): Whether to sanitize metadata via
            ``_prepare_metadata`` before storing.
        **kwargs: ``keys`` (List[str]) or ``ids`` (List[str]) may supply
            identifiers for the entries; ``keys`` takes precedence.

    Returns:
        List[str]: The Redis keys under which the texts were stored.

    Raises:
        ValueError: if metadatas is not a list of dicts matching ``texts``.
    """
    ids = []
    # 'keys' wins over 'ids' when both are passed.
    keys_or_ids = kwargs.get('keys', kwargs.get('ids'))
    if metadatas:
        # NOTE(review): len(texts) assumes a sized iterable, and only the
        # first metadata element's type is checked (assumes homogeneity).
        if isinstance(metadatas, list) and len(metadatas) != len(texts):
            raise ValueError('Number of metadatas must match number of texts')
        if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)
            ):
            raise ValueError('Metadatas must be a list of dicts')
    embeddings = embeddings or self._embeddings.embed_documents(list(texts))
    # Index dimensionality is derived from the first embedding.
    self._create_index_if_not_exist(dim=len(embeddings[0]))
    # Non-transactional pipeline: writes are batched for throughput only.
    pipeline = self.client.pipeline(transaction=False)
    for i, text in enumerate(texts):
        key = keys_or_ids[i] if keys_or_ids else str(uuid.uuid4().hex)
        # Normalize every key under the store's prefix namespace.
        if not key.startswith(self.key_prefix + ':'):
            key = self.key_prefix + ':' + key
        metadata = metadatas[i] if metadatas else {}
        metadata = _prepare_metadata(metadata) if clean_metadata else metadata
        pipeline.hset(key, mapping={self._schema.content_key: text, self.
            _schema.content_vector_key: _array_to_buffer(embeddings[i],
            self._schema.vector_dtype), **metadata})
        ids.append(key)
        # Flush every batch_size writes (note: also fires at i == 0).
        if i % batch_size == 0:
            pipeline.execute()
    # Final flush for any remaining queued writes.
    pipeline.execute()
    return ids
|
Add more texts to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
Defaults to None.
embeddings (Optional[List[List[float]]], optional): Optional pre-generated
embeddings. Defaults to None.
keys (List[str]) or ids (List[str]): Identifiers of entries.
Defaults to None.
batch_size (int, optional): Batch size to use for writes. Defaults to 1000.
Returns:
List[str]: List of ids added to the vectorstore
|
_create_credentials_from_file
|
"""Creates credentials for GCP.
Args:
json_credentials_path: The path on the file system where the
credentials are stored.
Returns:
An optional of Credentials or None, in which case the default
will be used.
"""
from google.oauth2 import service_account
credentials = None
if json_credentials_path is not None:
credentials = service_account.Credentials.from_service_account_file(
json_credentials_path)
return credentials
|
@classmethod
def _create_credentials_from_file(cls, json_credentials_path: Optional[str]
    ) ->Optional[Credentials]:
    """Creates credentials for GCP.

    Args:
        json_credentials_path: Filesystem path to a service-account JSON
            key file, or None.

    Returns:
        Service-account Credentials built from the file, or None when no
        path was given (callers then fall back to the default credentials).
    """
    from google.oauth2 import service_account
    if json_credentials_path is None:
        return None
    return service_account.Credentials.from_service_account_file(
        json_credentials_path)
|
Creates credentials for GCP.
Args:
json_credentials_path: The path on the file system where the
credentials are stored.
Returns:
An optional of Credentials or None, in which case the default
will be used.
|
_generate
|
if self.streaming:
stream_iter = self._stream(messages=messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
response = res.json()
if 'error' in response:
raise ValueError(f'Error from Hunyuan api response: {response}')
return _create_chat_result(response)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Generate a chat completion from the Hunyuan API.

    Delegates to the streaming implementation when ``self.streaming`` is
    set; otherwise issues one blocking request via ``self._chat``.

    Raises:
        ValueError: if the JSON response payload contains an ``error`` key.
    """
    if self.streaming:
        stream_iter = self._stream(messages=messages, stop=stop,
            run_manager=run_manager, **kwargs)
        return generate_from_stream(stream_iter)
    res = self._chat(messages, **kwargs)
    response = res.json()
    if 'error' in response:
        raise ValueError(f'Error from Hunyuan api response: {response}')
    return _create_chat_result(response)
| null |
_generate
|
"""Generate a chat response."""
prompt = []
for message in messages:
if isinstance(message, AIMessage):
role = 'assistant'
else:
role = 'user'
prompt.append({'role': role, 'content': message.content})
should_stream = stream if stream is not None else self.streaming
if not should_stream:
response = self.invoke(prompt)
if response['code'] != 200:
raise RuntimeError(response)
content = response['data']['choices'][0]['content']
return ChatResult(generations=[ChatGeneration(message=AIMessage(content
=content))])
else:
stream_iter = self._stream(prompt=prompt, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
    Optional[bool]=None, **kwargs: Any) ->ChatResult:
    """Generate a chat response, streaming when requested.

    AI messages are sent with role 'assistant'; everything else as 'user'.

    Raises:
        RuntimeError: if the non-streaming API reply has a non-200 code.
    """
    prompt = [{'role': 'assistant' if isinstance(message, AIMessage) else
        'user', 'content': message.content} for message in messages]
    should_stream = self.streaming if stream is None else stream
    if should_stream:
        stream_iter = self._stream(prompt=prompt, stop=stop, run_manager=
            run_manager, **kwargs)
        return generate_from_stream(stream_iter)
    response = self.invoke(prompt)
    if response['code'] != 200:
        raise RuntimeError(response)
    content = response['data']['choices'][0]['content']
    return ChatResult(generations=[ChatGeneration(message=AIMessage(
        content=content))])
|
Generate a chat response.
|
load
|
"""Load bibtex file documents from the given bibtex file path.
See https://bibtexparser.readthedocs.io/en/master/
Args:
file_path: the path to the bibtex file
Returns:
a list of documents with the document.page_content in text format
"""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Eagerly load all documents from the configured bibtex file.

    See https://bibtexparser.readthedocs.io/en/master/

    Returns:
        A list of documents, one per bibtex entry, with
        ``document.page_content`` in text format.
    """
    # Materialize the lazy iterator; the docstring previously documented a
    # nonexistent `file_path` argument (the path is set on the loader).
    return list(self.lazy_load())
|
Load bibtex file documents from the given bibtex file path.
See https://bibtexparser.readthedocs.io/en/master/
Args:
file_path: the path to the bibtex file
Returns:
a list of documents with the document.page_content in text format
|
_type
|
return 'datetime'
|
@property
def _type(self) ->str:
    """Snake-case key identifying this parser type."""
    return 'datetime'
| null |
test_shell_tool_run
|
placeholder = PlaceholderProcess(output='hello')
shell_tool = ShellTool(process=placeholder)
result = shell_tool._run(commands=test_commands)
assert result.strip() == 'hello'
|
def test_shell_tool_run() ->None:
    """ShellTool returns the process output for a batch of commands."""
    # PlaceholderProcess fakes command execution with canned 'hello' output,
    # so no real shell is spawned in this test.
    placeholder = PlaceholderProcess(output='hello')
    shell_tool = ShellTool(process=placeholder)
    result = shell_tool._run(commands=test_commands)
    assert result.strip() == 'hello'
| null |
test_loading_jinja_from_JSON
|
"""Test that loading jinja2 format prompts from JSON raises ValueError."""
prompt_path = EXAMPLE_DIR / 'jinja_injection_prompt.json'
with pytest.raises(ValueError, match='.*can lead to arbitrary code execution.*'
):
load_prompt(prompt_path)
|
def test_loading_jinja_from_JSON() ->None:
    """Test that loading jinja2 format prompts from JSON raises ValueError."""
    # jinja2 templates can execute arbitrary code, so load_prompt must refuse
    # them instead of rendering an untrusted template file.
    prompt_path = EXAMPLE_DIR / 'jinja_injection_prompt.json'
    with pytest.raises(ValueError, match=
        '.*can lead to arbitrary code execution.*'):
        load_prompt(prompt_path)
|
Test that loading jinja2 format prompts from JSON raises ValueError.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Return the serialization namespace path of the langchain object."""
    return 'langchain.schema.messages'.split('.')
|
Get the namespace of the langchain object.
|
create_tables_if_not_exists
|
with Session(self._bind) as session, session.begin():
Base.metadata.create_all(session.get_bind())
|
def create_tables_if_not_exists(self) ->None:
    """Create all tables registered on ``Base.metadata`` if absent.

    ``create_all`` skips tables that already exist, so this is safe to
    call repeatedly.
    """
    with Session(self._bind) as session, session.begin():
        Base.metadata.create_all(session.get_bind())
| null |
assert_results_exists
|
if len(results) > 0:
for result in results:
assert 'post_title' in result
assert 'post_author' in result
assert 'post_subreddit' in result
assert 'post_text' in result
assert 'post_url' in result
assert 'post_score' in result
assert 'post_category' in result
assert 'post_id' in result
else:
assert results == []
|
def assert_results_exists(results: list) ->None:
    """Assert that each result dict carries every expected post field."""
    if not results:
        assert results == []
        return
    expected_keys = ('post_title', 'post_author', 'post_subreddit',
        'post_text', 'post_url', 'post_score', 'post_category', 'post_id')
    for result in results:
        for key in expected_keys:
            assert key in result
| null |
__init__
|
try:
from upstash_redis import Redis
except ImportError:
raise ImportError(
'Could not import upstash_redis python package. Please install it with `pip install upstash_redis`.'
)
super().__init__(*args, **kwargs)
try:
self.redis_client = Redis(url=url, token=token)
except Exception:
logger.error('Upstash Redis instance could not be initiated.')
self.session_id = session_id
self.key_prefix = key_prefix
self.ttl = ttl
self.recall_ttl = recall_ttl or ttl
|
def __init__(self, session_id: str='default', url: str='', token: str='',
    key_prefix: str='memory_store', ttl: Optional[int]=60 * 60 * 24,
    recall_ttl: Optional[int]=60 * 60 * 24 * 3, *args: Any, **kwargs: Any):
    """Initialize an Upstash-Redis-backed memory store.

    Args:
        session_id: Logical session whose entries this store manages.
        url: Upstash Redis REST URL.
        token: Upstash Redis REST token.
        key_prefix: Prefix for the Redis keys used by this store.
        ttl: Entry time-to-live in seconds (default: 1 day).
        recall_ttl: TTL used on recall, in seconds; falls back to ``ttl``
            when falsy (default: 3 days).

    Raises:
        ImportError: if the ``upstash_redis`` package is not installed.
    """
    try:
        from upstash_redis import Redis
    except ImportError:
        raise ImportError(
            'Could not import upstash_redis python package. Please install it with `pip install upstash_redis`.'
            )
    super().__init__(*args, **kwargs)
    try:
        self.redis_client = Redis(url=url, token=token)
    except Exception:
        # NOTE(review): the failure is logged but swallowed, leaving
        # ``self.redis_client`` unset; later attribute access would raise
        # AttributeError. Consider re-raising here — confirm intent.
        logger.error('Upstash Redis instance could not be initiated.')
    self.session_id = session_id
    self.key_prefix = key_prefix
    self.ttl = ttl
    self.recall_ttl = recall_ttl or ttl
| null |
_completion_with_retry
|
return fireworks.client.Completion.create(**kwargs, prompt=prompt)
|
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(prompt: str) ->Any:
    # Thin closure over the surrounding kwargs; retry_decorator is applied
    # only when use_retry is set, so transient API failures get retried.
    return fireworks.client.Completion.create(**kwargs, prompt=prompt)
| null |
similarity_search_with_score_id_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding (str): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score, id), the most similar to the query vector.
"""
metadata_parameter = self._filter_to_metadata(filter)
hits = list(self.collection.paginated_find(filter=metadata_parameter, sort=
{'$vector': embedding}, options={'limit': k, 'includeSimilarity': True},
projection={'_id': 1, 'content': 1, 'metadata': 1}))
return [(Document(page_content=hit['content'], metadata=hit['metadata']),
hit['$similarity'], hit['_id']) for hit in hits]
|
def similarity_search_with_score_id_by_vector(self, embedding: List[float],
    k: int=4, filter: Optional[Dict[str, str]]=None) ->List[Tuple[Document,
    float, str]]:
    """Return docs most similar to embedding vector.

    Args:
        embedding (List[float]): Embedding to look up documents similar to.
        k (int): Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, str]]): Metadata filter, translated via
            ``_filter_to_metadata``. Defaults to None.
    Returns:
        List of (Document, score, id), the most similar to the query vector.
    """
    metadata_parameter = self._filter_to_metadata(filter)
    # Sorting on '$vector' requests similarity ordering from the store;
    # 'includeSimilarity' adds the '$similarity' score consumed below.
    hits = list(self.collection.paginated_find(filter=metadata_parameter,
        sort={'$vector': embedding}, options={'limit': k,
        'includeSimilarity': True}, projection={'_id': 1, 'content': 1,
        'metadata': 1}))
    return [(Document(page_content=hit['content'], metadata=hit['metadata']
        ), hit['$similarity'], hit['_id']) for hit in hits]
|
Return docs most similar to embedding vector.
Args:
embedding (str): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
Returns:
List of (Document, score, id), the most similar to the query vector.
|
exists
|
query = f"""
SELECT 1
FROM {self.full_table_name}
WHERE key = ?
LIMIT 1
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
return result is not None
|
def exists(self, key: str) ->bool:
    """Return True when *key* is present in the backing table."""
    query = f"""
        SELECT 1
        FROM {self.full_table_name}
        WHERE key = ?
        LIMIT 1
        """
    # fetchone() yields None when no row matched.
    return self.conn.execute(query, (key,)).fetchone() is not None
| null |
_default_params
|
"""Get the default parameters for calling Qianfan API."""
normal_params = {'model': self.model, 'endpoint': self.endpoint, 'stream':
self.streaming, 'request_timeout': self.request_timeout, 'top_p': self.
top_p, 'temperature': self.temperature, 'penalty_score': self.penalty_score
}
return {**normal_params, **self.model_kwargs}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling Qianfan API."""
normal_params = {'model': self.model, 'endpoint': self.endpoint,
'stream': self.streaming, 'request_timeout': self.request_timeout,
'top_p': self.top_p, 'temperature': self.temperature,
'penalty_score': self.penalty_score}
return {**normal_params, **self.model_kwargs}
|
Get the default parameters for calling Qianfan API.
|
test_max_marginal_relevance_search
|
"""Test max marginal relevance search by vector."""
output = deeplake_datastore.max_marginal_relevance_search('foo', k=1, fetch_k=2
)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
embeddings = FakeEmbeddings().embed_documents(['foo', 'bar', 'baz'])
output = deeplake_datastore.max_marginal_relevance_search_by_vector(embeddings
[0], k=1, fetch_k=2)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
deeplake_datastore.delete_dataset()
|
def test_max_marginal_relevance_search(deeplake_datastore: DeepLake) ->None:
    """Test max marginal relevance search by vector."""
    # Query-string variant.
    output = deeplake_datastore.max_marginal_relevance_search('foo', k=1,
        fetch_k=2)
    assert output == [Document(page_content='foo', metadata={'page': '0'})]
    # Vector variant: search with the embedding of 'foo' directly.
    embeddings = FakeEmbeddings().embed_documents(['foo', 'bar', 'baz'])
    output = deeplake_datastore.max_marginal_relevance_search_by_vector(
        embeddings[0], k=1, fetch_k=2)
    assert output == [Document(page_content='foo', metadata={'page': '0'})]
    # Clean up the fixture's on-disk dataset.
    deeplake_datastore.delete_dataset()
|
Test max marginal relevance search by vector.
|
similarity_search
|
"""
Perform a similarity search on the query string.
Args:
query (str): The text to search for.
k (int, optional): The number of results to return. Default is 4.
param (dict, optional): Specifies the search parameters for the index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): Time to wait before a timeout error.
Defaults to None.
kwargs: Keyword arguments for Collection.search().
Returns:
List[Document]: The document results of the search.
"""
if self.col is None:
logger.debug('No existing collection to search.')
return []
res = self.similarity_search_with_score(query=query, k=k, param=param, expr
=expr, timeout=timeout, **kwargs)
return [doc for doc, _ in res]
|
def similarity_search(self, query: str, k: int=4, param: Optional[dict]=
    None, expr: Optional[str]=None, timeout: Optional[int]=None, **kwargs: Any
    ) ->List[Document]:
    """
    Perform a similarity search on the query string.

    Args:
        query (str): The text to search for.
        k (int, optional): The number of results to return. Default is 4.
        param (dict, optional): Index-specific search parameters.
            Defaults to None.
        expr (str, optional): Filtering expression. Defaults to None.
        timeout (int, optional): Time to wait before a timeout error.
            Defaults to None.
        kwargs: Keyword arguments for Collection.search().

    Returns:
        List[Document]: The document results of the search, best first.
    """
    if self.col is None:
        logger.debug('No existing collection to search.')
        return []
    scored_docs = self.similarity_search_with_score(query=query, k=k,
        param=param, expr=expr, timeout=timeout, **kwargs)
    return [document for document, _score in scored_docs]
|
Perform a similarity search on the query string.
Args:
query (str): The text to search for.
k (int, optional): The number of results to return. Default is 4.
param (dict, optional): Specifies the search parameters for the index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): Time to wait before a timeout error.
Defaults to None.
kwargs: Keyword arguments for Collection.search().
Returns:
List[Document]: The document results of the search.
|
setUp
|
self.space = 'test_space'
self.username = 'test_user'
self.password = 'test_password'
self.address = 'test_address'
self.port = 1234
self.session_pool_size = 10
|
def setUp(self) ->None:
    # Shared fixture values for the graph-store connection under test.
    self.space = 'test_space'
    self.username = 'test_user'
    self.password = 'test_password'
    self.address = 'test_address'
    self.port = 1234
    self.session_pool_size = 10
| null |
issue_create
|
try:
import json
except ImportError:
raise ImportError(
'json is not installed. Please install it with `pip install json`')
params = json.loads(query)
return self.jira.issue_create(fields=dict(params))
|
def issue_create(self, query: str) ->str:
    """Create a Jira issue from a JSON string of issue fields.

    Args:
        query: JSON document whose top-level keys are Jira issue fields.

    Returns:
        The response of ``jira.issue_create``.

    Raises:
        json.JSONDecodeError: if ``query`` is not valid JSON.
    """
    # json is part of the standard library: the previous try/except
    # ImportError (with its "pip install json" hint) could never trigger
    # and was misleading.
    import json
    params = json.loads(query)
    return self.jira.issue_create(fields=dict(params))
| null |
similarity_search
|
"""Search the marqo index for the most similar documents.
Args:
query (Union[str, Dict[str, float]]): The query for the search, either
as a string or a weighted query.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
List[Document]: k documents ordered from best to worst match.
"""
results = self.marqo_similarity_search(query=query, k=k)
documents = self._construct_documents_from_results_without_score(results)
return documents
|
def similarity_search(self, query: Union[str, Dict[str, float]], k: int=4,
    **kwargs: Any) ->List[Document]:
    """Search the marqo index for the most similar documents.

    Args:
        query (Union[str, Dict[str, float]]): The query for the search,
            either as a string or a weighted query.
        k (int, optional): The number of documents to return. Defaults to 4.

    Returns:
        List[Document]: k documents ordered from best to worst match.
    """
    raw_results = self.marqo_similarity_search(query=query, k=k)
    return self._construct_documents_from_results_without_score(raw_results)
|
Search the marqo index for the most similar documents.
Args:
query (Union[str, Dict[str, float]]): The query for the search, either
as a string or a weighted query.
k (int, optional): The number of documents to return. Defaults to 4.
Returns:
List[Document]: k documents ordered from best to worst match.
|
__init__
|
self.auth = auth_handler
self.twitter_users = twitter_users
self.number_tweets = number_tweets
|
def __init__(self, auth_handler: Union[OAuthHandler, OAuth2BearerHandler],
    twitter_users: Sequence[str], number_tweets: Optional[int]=100):
    """Store the auth handler and the users/tweet-count to load.

    Args:
        auth_handler: OAuth handler (presumably tweepy's — confirm) used
            to authenticate API requests.
        twitter_users: Screen names whose tweets will be loaded.
        number_tweets: Maximum tweets per user. Defaults to 100.
    """
    self.auth = auth_handler
    self.twitter_users = twitter_users
    self.number_tweets = number_tweets
| null |
test_load_returns_no_result
|
"""Test that returns no docs"""
docs = api_client.load('1605.08386WWW')
assert len(docs) == 0
|
def test_load_returns_no_result(api_client: ArxivAPIWrapper) ->None:
    """Test that returns no docs"""
    # A malformed arXiv id should match nothing and yield an empty list.
    docs = api_client.load('1605.08386WWW')
    assert len(docs) == 0
|
Test that returns no docs
|
detect_labels
|
"""
Args:
str_node: node in string format
node_variable_dict: dictionary of node variables
"""
splitted_node = str_node.split(':')
variable = splitted_node[0]
labels = []
if variable in node_variable_dict:
labels = node_variable_dict[variable]
elif variable == '' and len(splitted_node) > 1:
labels = splitted_node[1:]
return labels
|
def detect_labels(self, str_node: str, node_variable_dict: Dict[str, Any]
    ) ->List[str]:
    """Resolve the labels of a node expression such as 'var:Label1:Label2'.

    Args:
        str_node: node in string format
        node_variable_dict: dictionary of node variables

    Returns:
        The labels recorded for the node's variable, the inline labels
        when the variable part is empty, or an empty list otherwise.
    """
    variable, *inline_labels = str_node.split(':')
    if variable in node_variable_dict:
        return node_variable_dict[variable]
    if not variable and inline_labels:
        return inline_labels
    return []
|
Args:
str_node: node in string format
node_variable_dict: dictionary of node variables
|
_send_to_infino
|
"""Send the key-value to Infino.
Parameters:
key (str): the key to send to Infino.
value (Any): the value to send to Infino.
is_ts (bool): if True, the value is part of a time series, else it
is sent as a log message.
"""
payload = {'date': int(time.time()), key: value, 'labels': {'model_id':
self.model_id, 'model_version': self.model_version}}
if self.verbose:
print(f'Tracking {key} with Infino: {payload}')
if is_ts:
self.client.append_ts(payload)
else:
self.client.append_log(payload)
|
def _send_to_infino(self, key: str, value: Any, is_ts: bool=True) ->None:
"""Send the key-value to Infino.
Parameters:
key (str): the key to send to Infino.
value (Any): the value to send to Infino.
is_ts (bool): if True, the value is part of a time series, else it
is sent as a log message.
"""
payload = {'date': int(time.time()), key: value, 'labels': {'model_id':
self.model_id, 'model_version': self.model_version}}
if self.verbose:
print(f'Tracking {key} with Infino: {payload}')
if is_ts:
self.client.append_ts(payload)
else:
self.client.append_log(payload)
|
Send the key-value to Infino.
Parameters:
key (str): the key to send to Infino.
value (Any): the value to send to Infino.
is_ts (bool): if True, the value is part of a time series, else it
is sent as a log message.
|
get_graph
|
if (deps := self.deps):
graph = Graph()
input_node = graph.add_node(self.get_input_schema(config))
output_node = graph.add_node(self.get_output_schema(config))
for dep in deps:
dep_graph = dep.get_graph()
dep_graph.trim_first_node()
dep_graph.trim_last_node()
if not dep_graph:
graph.add_edge(input_node, output_node)
else:
graph.extend(dep_graph)
dep_first_node = dep_graph.first_node()
if not dep_first_node:
raise ValueError(f'Runnable {dep} has no first node')
dep_last_node = dep_graph.last_node()
if not dep_last_node:
raise ValueError(f'Runnable {dep} has no last node')
graph.add_edge(input_node, dep_first_node)
graph.add_edge(dep_last_node, output_node)
else:
graph = super().get_graph(config)
return graph
|
def get_graph(self, config: (RunnableConfig | None)=None) ->Graph:
    """Build a graph wiring every dependency between this runnable's input
    and output nodes; fall back to the default graph when there are no
    dependencies."""
    if (deps := self.deps):
        graph = Graph()
        input_node = graph.add_node(self.get_input_schema(config))
        output_node = graph.add_node(self.get_output_schema(config))
        for dep in deps:
            dep_graph = dep.get_graph()
            # Drop the dep's own input/output placeholder nodes so its body
            # can be spliced between this runnable's input and output.
            dep_graph.trim_first_node()
            dep_graph.trim_last_node()
            if not dep_graph:
                # Dep collapsed to nothing: connect input to output directly.
                graph.add_edge(input_node, output_node)
            else:
                graph.extend(dep_graph)
                dep_first_node = dep_graph.first_node()
                if not dep_first_node:
                    raise ValueError(f'Runnable {dep} has no first node')
                dep_last_node = dep_graph.last_node()
                if not dep_last_node:
                    raise ValueError(f'Runnable {dep} has no last node')
                graph.add_edge(input_node, dep_first_node)
                graph.add_edge(dep_last_node, output_node)
    else:
        graph = super().get_graph(config)
    return graph
| null |
_get_driver
|
"""Create and return a WebDriver instance based on the specified browser.
Raises:
ValueError: If an invalid browser is specified.
Returns:
Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
"""
if self.browser.lower() == 'chrome':
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service
chrome_options = ChromeOptions()
for arg in self.arguments:
chrome_options.add_argument(arg)
if self.headless:
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
if self.binary_location is not None:
chrome_options.binary_location = self.binary_location
if self.executable_path is None:
return Chrome(options=chrome_options)
return Chrome(options=chrome_options, service=Service(executable_path=
self.executable_path))
elif self.browser.lower() == 'firefox':
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.service import Service
firefox_options = FirefoxOptions()
for arg in self.arguments:
firefox_options.add_argument(arg)
if self.headless:
firefox_options.add_argument('--headless')
if self.binary_location is not None:
firefox_options.binary_location = self.binary_location
if self.executable_path is None:
return Firefox(options=firefox_options)
return Firefox(options=firefox_options, service=Service(executable_path
=self.executable_path))
else:
raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.")
|
def _get_driver(self) ->Union['Chrome', 'Firefox']:
    """Create and return a WebDriver instance based on the specified browser.
    Raises:
        ValueError: If an invalid browser is specified.
    Returns:
        Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
    """
    if self.browser.lower() == 'chrome':
        # Imported lazily so selenium is only required when a driver is built.
        from selenium.webdriver import Chrome
        from selenium.webdriver.chrome.options import Options as ChromeOptions
        from selenium.webdriver.chrome.service import Service
        chrome_options = ChromeOptions()
        for arg in self.arguments:
            chrome_options.add_argument(arg)
        if self.headless:
            chrome_options.add_argument('--headless')
            # NOTE(review): --no-sandbox accompanies headless for Chrome
            # only; presumably for containerized environments — confirm.
            chrome_options.add_argument('--no-sandbox')
        if self.binary_location is not None:
            chrome_options.binary_location = self.binary_location
        if self.executable_path is None:
            # Let Selenium locate the driver itself.
            return Chrome(options=chrome_options)
        return Chrome(options=chrome_options, service=Service(
            executable_path=self.executable_path))
    elif self.browser.lower() == 'firefox':
        from selenium.webdriver import Firefox
        from selenium.webdriver.firefox.options import Options as FirefoxOptions
        from selenium.webdriver.firefox.service import Service
        firefox_options = FirefoxOptions()
        for arg in self.arguments:
            firefox_options.add_argument(arg)
        if self.headless:
            firefox_options.add_argument('--headless')
        if self.binary_location is not None:
            firefox_options.binary_location = self.binary_location
        if self.executable_path is None:
            return Firefox(options=firefox_options)
        return Firefox(options=firefox_options, service=Service(
            executable_path=self.executable_path))
    else:
        raise ValueError(
            "Invalid browser specified. Use 'chrome' or 'firefox'.")
|
Create and return a WebDriver instance based on the specified browser.
Raises:
ValueError: If an invalid browser is specified.
Returns:
Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
|
_prompt_type
|
"""Return the prompt type key."""
return 'few_shot'
|
@property
def _prompt_type(self) ->str:
    """Return the prompt type key."""
    # Identifier for the few-shot prompt template subtype.
    return 'few_shot'
|
Return the prompt type key.
|
similarity_search
|
"""
Run a similarity search with BagelDB.
Args:
query (str): The query text to search for similar documents/texts.
k (int): The number of results to return.
where (Optional[Dict[str, str]]): Metadata filters to narrow down.
Returns:
List[Document]: List of documents objects representing
the documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, where=where)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search(self, query: str, k: int=DEFAULT_K, where: Optional[
    Dict[str, str]]=None, **kwargs: Any) ->List[Document]:
    """
    Run a similarity search with BagelDB.

    Args:
        query (str): The query text to search for similar documents/texts.
        k (int): The number of results to return.
        where (Optional[Dict[str, str]]): Metadata filters to narrow down.

    Returns:
        List[Document]: The documents most similar to the query text,
        with the scores discarded.
    """
    scored = self.similarity_search_with_score(query, k, where=where)
    return [document for document, _score in scored]
|
Run a similarity search with BagelDB.
Args:
query (str): The query text to search for similar documents/texts.
k (int): The number of results to return.
where (Optional[Dict[str, str]]): Metadata filters to narrow down.
Returns:
List[Document]: List of documents objects representing
the documents most similar to the query text.
|
_persist_run
|
"""Persist a run."""
|
@abstractmethod
def _persist_run(self, run: Run) ->None:
    """Persist a run.

    Subclasses implement the actual storage (database, file, API call).
    """
|
Persist a run.
|
test_resolve_criteria_list_enum
|
val = resolve_pairwise_criteria(list(Criteria))
assert isinstance(val, dict)
assert set(val.keys()) == set(c.value for c in list(Criteria))
|
def test_resolve_criteria_list_enum() ->None:
    """Passing the full Criteria enum list yields a dict keyed by each value."""
    val = resolve_pairwise_criteria(list(Criteria))
    assert isinstance(val, dict)
    assert set(val.keys()) == set(c.value for c in list(Criteria))
| null |
_split_by_punctuation
|
"""Splits a string by punctuation and whitespace characters."""
split_by = string.punctuation + '\t\n '
pattern = f'([{split_by}])'
return [segment for segment in re.split(pattern, text) if segment]
|
@staticmethod
def _split_by_punctuation(text: str) ->List[str]:
"""Splits a string by punctuation and whitespace characters."""
split_by = string.punctuation + '\t\n '
pattern = f'([{split_by}])'
return [segment for segment in re.split(pattern, text) if segment]
|
Splits a string by punctuation and whitespace characters.
|
plus_one
|
for i in input:
yield i + 1
|
def plus_one(input: Iterator[int]) ->Iterator[int]:
    """Yield each value from *input* incremented by one."""
    yield from (value + 1 for value in input)
| null |
__init__
|
self.name = name
|
def __init__(self, name):
    """Initialize the instance with the given name.

    Args:
        name: Value stored verbatim as ``self.name``; no validation or
            conversion is performed here (expected type not constrained
            by this code — confirm against callers).
    """
    self.name = name
| null |
__call__
|
next_thought = memory.top()
parent_thought = memory.top_parent()
validity = (ThoughtValidity.VALID_INTERMEDIATE if next_thought is None else
next_thought.validity)
if validity == ThoughtValidity.INVALID:
memory.pop()
next_thought = memory.top()
if next_thought and len(next_thought.children) >= self.c:
memory.pop()
elif validity == ThoughtValidity.VALID_INTERMEDIATE and parent_thought and len(
parent_thought.children) >= self.c:
memory.pop(2)
return tuple(thought.text for thought in memory.current_path())
|
def __call__(self, memory: ToTDFSMemory) ->Tuple[str, ...]:
    """Advance the depth-first search and return the current thought path.

    Mutates ``memory`` in place: invalid or exhausted thoughts are popped
    (backtracking), then the texts along the remaining current path are
    returned as a tuple. ``self.c`` appears to be the per-thought child
    limit (branching factor) — confirm against the class definition.

    Args:
        memory: DFS memory stack of thoughts; modified by this call.

    Returns:
        Tuple of thought texts from root to the current frontier.
    """
    next_thought = memory.top()
    parent_thought = memory.top_parent()
    # An empty stack is treated the same as a valid-intermediate thought.
    validity = (ThoughtValidity.VALID_INTERMEDIATE if next_thought is None else
        next_thought.validity)
    if validity == ThoughtValidity.INVALID:
        # Discard the invalid thought; if its former parent has already
        # produced c children, it is exhausted too — pop it as well.
        memory.pop()
        next_thought = memory.top()
        if next_thought and len(next_thought.children) >= self.c:
            memory.pop()
    elif validity == ThoughtValidity.VALID_INTERMEDIATE and parent_thought and len(
        parent_thought.children) >= self.c:
        # Parent has hit the child limit: backtrack two levels at once.
        memory.pop(2)
    return tuple(thought.text for thought in memory.current_path())
| null |
stream
|
runnable, config = self._prepare(config)
return runnable.stream(input, config, **kwargs)
|
def stream(self, input: Input, config: Optional[RunnableConfig]=None, **
    kwargs: Optional[Any]) ->Iterator[Output]:
    """Resolve the configurable runnable, then delegate streaming to it."""
    prepared, resolved_config = self._prepare(config)
    return prepared.stream(input, resolved_config, **kwargs)
| null |
_convert_to_parts
|
"""Converts a list of LangChain messages into a google parts."""
parts = []
content = [raw_content] if isinstance(raw_content, str) else raw_content
for part in content:
if isinstance(part, str):
parts.append(genai.types.PartDict(text=part))
elif isinstance(part, Mapping):
if _is_openai_parts_format(part):
if part['type'] == 'text':
parts.append({'text': part['text']})
elif part['type'] == 'image_url':
img_url = part['image_url']
if isinstance(img_url, dict):
if 'url' not in img_url:
raise ValueError(
f'Unrecognized message image format: {img_url}')
img_url = img_url['url']
parts.append({'inline_data': _url_to_pil(img_url)})
else:
raise ValueError(
f"Unrecognized message part type: {part['type']}")
else:
logger.warning(
"Unrecognized message part format. Assuming it's a text part.")
parts.append(part)
else:
raise ChatGoogleGenerativeAIError(
'Gemini only supports text and inline_data parts.')
return parts
|
def _convert_to_parts(raw_content: Union[str, Sequence[Union[str, dict]]]
    ) ->List[genai.types.PartType]:
    """Convert LangChain message content into a list of Google genai parts.

    Accepts either a bare string or a sequence of strings / OpenAI-style
    part dicts (``text`` and ``image_url`` types). Raises for content that
    cannot be represented as Gemini text or inline_data parts.
    """
    normalized = [raw_content] if isinstance(raw_content, str) else raw_content
    converted: List[genai.types.PartType] = []
    for item in normalized:
        if isinstance(item, str):
            converted.append(genai.types.PartDict(text=item))
            continue
        if not isinstance(item, Mapping):
            raise ChatGoogleGenerativeAIError(
                'Gemini only supports text and inline_data parts.')
        if not _is_openai_parts_format(item):
            # Unknown mapping shape: pass it through, assuming text.
            logger.warning(
                "Unrecognized message part format. Assuming it's a text part.")
            converted.append(item)
            continue
        part_type = item['type']
        if part_type == 'text':
            converted.append({'text': item['text']})
        elif part_type == 'image_url':
            img_url = item['image_url']
            if isinstance(img_url, dict):
                # OpenAI nests the actual URL under the 'url' key.
                if 'url' not in img_url:
                    raise ValueError(
                        f'Unrecognized message image format: {img_url}')
                img_url = img_url['url']
            converted.append({'inline_data': _url_to_pil(img_url)})
        else:
            raise ValueError(f'Unrecognized message part type: {part_type}')
    return converted
|
Converts LangChain message content into a list of Google genai parts.
|
test_non_faker_values
|
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
'My name is John Smith. Your name is Adam Smith. Her name is Jane Smith.Our names are: John Smith, Adam Smith, Jane Smith.'
)
expected_result = (
'My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>.Our names are: <PERSON>, <PERSON_2>, <PERSON_3>.'
)
anonymizer = PresidioReversibleAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
|
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_non_faker_values() ->None:
    """Test anonymizing multiple items in a sentence without faker values"""
    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
    sample = (
        'My name is John Smith. Your name is Adam Smith. Her name is Jane Smith.Our names are: John Smith, Adam Smith, Jane Smith.'
        )
    expected = (
        'My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>.Our names are: <PERSON>, <PERSON_2>, <PERSON_3>.'
        )
    # With faker operators disabled, entities get placeholder tags instead
    # of fake values, and repeated entities reuse the same tag.
    reversible_anonymizer = PresidioReversibleAnonymizer(
        add_default_faker_operators=False)
    assert reversible_anonymizer.anonymize(sample) == expected
|
Test anonymizing multiple items in a sentence without faker values
|
test_faiss
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content
='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]:
Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
@pytest.mark.requires('faiss')
def test_faiss() ->None:
    """Test end to end construction and search."""
    corpus = ['foo', 'bar', 'baz']
    store = FAISS.from_texts(corpus, FakeEmbeddings())
    id_map = store.index_to_docstore_id
    # Every indexed text should be present in the docstore under its
    # generated id, in insertion order.
    expected = InMemoryDocstore({id_map[position]: Document(page_content=
        text) for position, text in enumerate(corpus)})
    assert store.docstore.__dict__ == expected.__dict__
    result = store.similarity_search('foo', k=1)
    assert result == [Document(page_content='foo')]
|
Test end to end construction and search.
|
similarity_search
|
"""Return docs most similar to query."""
docs_with_scores = self.similarity_search_with_score(query, k, filter)
return [doc for doc, _ in docs_with_scores]
|
def similarity_search(self, query: str, k: int=4, filter: Optional[
    TigrisFilter]=None, **kwargs: Any) ->List[Document]:
    """Return docs most similar to query."""
    scored = self.similarity_search_with_score(query, k, filter)
    # Drop the scores; callers of this method only want the documents.
    return [document for document, _score in scored]
|
Return docs most similar to query.
|
_sanitize_input
|
return re.sub('[^a-zA-Z0-9_]', '', input_str)
|
def _sanitize_input(self, input_str: str) ->str:
return re.sub('[^a-zA-Z0-9_]', '', input_str)
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.