id stringlengths 6 6 | text stringlengths 20 17.2k | title stringclasses 1 value |
|---|---|---|
161128 | """Milvus vector store index.
An index that is built within Milvus.
"""
import logging
from typing import Any, Dict, List, Optional, Union
from llama_index.legacy.schema import BaseNode, TextNode
from llama_index.legacy.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
DEFAULT_DOC_ID_KEY,
DEFAULT_EMBEDDING_KEY,
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = logging.getLogger(__name__)
MILVUS_ID_FIELD = "id"
def _to_milvus_filter(standard_filters: MetadataFilters) -> List[str]:
    """Translate standard metadata filters into Milvus boolean-expression strings.

    String values are double-quoted; all other values are rendered verbatim.
    """
    expressions = []
    for item in standard_filters.legacy_filters():
        if isinstance(item.value, str):
            expressions.append(f'{item.key} == "{item.value}"')
        else:
            expressions.append(f"{item.key} == {item.value}")
    return expressions
class MilvusVectorStore(VectorStore):
    """The Milvus Vector Store.

    In this vector store we store the text, its embedding and
    its metadata in a Milvus collection. This implementation
    allows the use of an already existing collection.
    It also supports creating a new one if the collection doesn't
    exist or if `overwrite` is set to True.

    Args:
        uri (str, optional): The URI to connect to, comes in the form of
            "http://address:port".
        token (str, optional): The token for log in. Empty if not using rbac, if
            using rbac it will most likely be "username:password".
        collection_name (str, optional): The name of the collection where data will be
            stored. Defaults to "llamalection".
        dim (int, optional): The dimension of the embedding vectors for the collection.
            Required if creating a new collection.
        embedding_field (str, optional): The name of the embedding field for the
            collection, defaults to DEFAULT_EMBEDDING_KEY.
        doc_id_field (str, optional): The name of the doc_id field for the collection,
            defaults to DEFAULT_DOC_ID_KEY.
        similarity_metric (str, optional): The similarity metric to use,
            currently supports IP and L2.
        consistency_level (str, optional): Which consistency level to use for a newly
            created collection. Defaults to "Strong".
        overwrite (bool, optional): Whether to overwrite existing collection with same
            name. Defaults to False.
        text_key (str, optional): What key text is stored in in the passed collection.
            Used when bringing your own collection. Defaults to None.
        index_config (dict, optional): The configuration used for building the
            Milvus index. Defaults to None.
        search_config (dict, optional): The configuration used for searching
            the Milvus index. Note that this must be compatible with the index
            type specified by `index_config`. Defaults to None.

    Raises:
        ImportError: Unable to import `pymilvus`.
        MilvusException: Error communicating with Milvus, more can be found in logging
            under Debug.

    Returns:
        MilvusVectorstore: Vectorstore that supports add, delete, and query.
    """

    # This store keeps node text itself (and full nodes) in the collection.
    stores_text: bool = True
    stores_node: bool = True
def __init__(
    self,
    uri: str = "http://localhost:19530",
    token: str = "",
    collection_name: str = "llamalection",
    dim: Optional[int] = None,
    embedding_field: str = DEFAULT_EMBEDDING_KEY,
    doc_id_field: str = DEFAULT_DOC_ID_KEY,
    similarity_metric: str = "IP",
    consistency_level: str = "Strong",
    overwrite: bool = False,
    text_key: Optional[str] = None,
    index_config: Optional[dict] = None,
    search_config: Optional[dict] = None,
    **kwargs: Any,
) -> None:
    """Init params.

    Connects to the Milvus instance at ``uri``, optionally drops an existing
    collection (``overwrite=True``), creates the collection if missing
    (requires ``dim``), and builds the index if required.

    Raises:
        ImportError: Unable to import `pymilvus`.
        ValueError: Unsupported ``similarity_metric``, or ``dim`` missing
            when a new collection must be created.
    """
    import_err_msg = (
        "`pymilvus` package not found, please run `pip install pymilvus`"
    )
    try:
        import pymilvus  # noqa
    except ImportError:
        raise ImportError(import_err_msg)
    from pymilvus import Collection, MilvusClient

    self.collection_name = collection_name
    self.dim = dim
    self.embedding_field = embedding_field
    self.doc_id_field = doc_id_field
    self.consistency_level = consistency_level
    self.overwrite = overwrite
    self.text_key = text_key
    # Copy configs so later caller-side mutation cannot affect this store.
    self.index_config: Dict[str, Any] = index_config.copy() if index_config else {}
    # Note: The search configuration is set at construction to avoid having
    # to change the API for usage of the vector store (i.e. to pass the
    # search config along with the rest of the query).
    self.search_config: Dict[str, Any] = (
        search_config.copy() if search_config else {}
    )

    # Select the similarity metric.
    # BUG FIX: the original used `in ("ip")`, which is a substring test
    # against the string "ip" (parens without a comma do not make a tuple),
    # so "i" and "p" also matched and unsupported metrics silently left
    # `self.similarity_metric` unset (surfacing later as AttributeError).
    metric = similarity_metric.lower()
    if metric in ("ip",):
        self.similarity_metric = "IP"
    elif metric in ("l2", "euclidean"):
        self.similarity_metric = "L2"
    else:
        raise ValueError(
            f"Unsupported similarity metric: {similarity_metric}. "
            "Currently supported metrics: IP, L2."
        )

    # Connect to Milvus instance
    self.milvusclient = MilvusClient(
        uri=uri,
        token=token,
        **kwargs,  # pass additional arguments such as server_pem_path
    )

    # Delete previous collection if overwriting
    if self.overwrite and self.collection_name in self.client.list_collections():
        self.milvusclient.drop_collection(self.collection_name)

    # Create the collection if it does not exist
    if self.collection_name not in self.client.list_collections():
        if self.dim is None:
            raise ValueError("Dim argument required for collection creation.")
        self.milvusclient.create_collection(
            collection_name=self.collection_name,
            dimension=self.dim,
            primary_field_name=MILVUS_ID_FIELD,
            vector_field_name=self.embedding_field,
            id_type="string",
            metric_type=self.similarity_metric,
            max_length=65_535,
            consistency_level=self.consistency_level,
        )

    # ORM Collection handle reuses the client's existing connection alias.
    self.collection = Collection(
        self.collection_name, using=self.milvusclient._using
    )
    self._create_index_if_required()
    logger.debug(f"Successfully created a new collection: {self.collection_name}")
@property
def client(self) -> Any:
    """Get client.

    Returns:
        Any: The underlying ``MilvusClient`` created in ``__init__``.
    """
    return self.milvusclient
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
    """Add the embeddings and their nodes into Milvus.

    Args:
        nodes (List[BaseNode]): List of nodes with embeddings
            to insert.

    Raises:
        MilvusException: Failed to insert data.

    Returns:
        List[str]: List of ids inserted.
    """
    # Build one flat entity dict per node: serialized node metadata plus
    # the primary-key and vector fields expected by the collection schema.
    entries = []
    for node in nodes:
        entity = node_to_metadata_dict(node)
        entity[MILVUS_ID_FIELD] = node.node_id
        entity[self.embedding_field] = node.embedding
        entries.append(entity)
    node_ids = [node.node_id for node in nodes]

    # Write the batch, flush it to storage, and ensure an index exists.
    self.collection.insert(entries)
    self.collection.flush()
    self._create_index_if_required()
    logger.debug(
        f"Successfully inserted embeddings into: {self.collection_name} "
        f"Num Inserted: {len(entries)}"
    )
    return node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
    """
    Delete nodes using with ref_doc_id.

    Args:
        ref_doc_id (str): The doc_id of the document to delete.

    Raises:
        MilvusException: Failed to delete the doc.
    """
    # Normalize to a list so multi-doc deletion can be added later.
    if isinstance(ref_doc_id, list):
        raw_ids: List[str] = ref_doc_id  # type: ignore
    else:
        raw_ids = [ref_doc_id]

    # Quote each doc id, then query for the matching primary keys.
    quoted_ids = ['"' + entry + '"' for entry in raw_ids]
    matches = self.milvusclient.query(
        collection_name=self.collection_name,
        filter=f"{self.doc_id_field} in [{','.join(quoted_ids)}]",
    )
    primary_keys = [match["id"] for match in matches]
    self.milvusclient.delete(collection_name=self.collection_name, pks=primary_keys)
    # The original logged the quoted list; preserve that output exactly.
    logger.debug(f"Successfully deleted embedding with doc_id: {quoted_ids}")
def __init__(
    self,
    search_or_index_client: Any,
    id_field_key: str,
    chunk_field_key: str,
    embedding_field_key: str,
    metadata_string_field_key: str,
    doc_id_field_key: str,
    filterable_metadata_field_keys: Optional[
        Union[
            List[str],
            Dict[str, str],
            Dict[str, Tuple[str, MetadataIndexFieldType]],
        ]
    ] = None,
    index_name: Optional[str] = None,
    index_mapping: Optional[
        Callable[[Dict[str, str], Dict[str, Any]], Dict[str, str]]
    ] = None,
    index_management: IndexManagement = IndexManagement.NO_VALIDATION,
    embedding_dimensionality: int = 1536,
    **kwargs: Any,
) -> None:
    # ruff: noqa: E501
    """
    Embeddings and documents are stored in an Azure AI Search index,
    a merge or upload approach is used when adding embeddings.

    When adding multiple embeddings the index is updated by this vector store
    in batches of 10 documents, very large nodes may result in failure due to
    the batch byte size being exceeded.

    Args:
        search_or_index_client (azure.search.documents.SearchClient or
            azure.search.documents.indexes.SearchIndexClient):
            Client for the index to be populated / queried.
        id_field_key (str): Index field storing the id
        chunk_field_key (str): Index field storing the node text
        embedding_field_key (str): Index field storing the embedding vector
        metadata_string_field_key (str):
            Index field storing node metadata as a json string.
            Schema is arbitrary, to filter on metadata values they must be stored
            as separate fields in the index, use filterable_metadata_field_keys
            to specify the metadata values that should be stored in these filterable fields
        doc_id_field_key (str): Index field storing doc_id
        index_mapping:
            Optional function with definition
            (enriched_doc: Dict[str, str], metadata: Dict[str, Any]): Dict[str,str]
            used to map document fields to the AI search index fields
            (return value of function).
            If none is specified a default mapping is provided which uses
            the field keys. The keys in the enriched_doc are
            ["id", "chunk", "embedding", "metadata"]
            The default mapping is:
                - "id" to id_field_key
                - "chunk" to chunk_field_key
                - "embedding" to embedding_field_key
                - "metadata" to metadata_field_key
        *kwargs (Any): Additional keyword arguments.

    Raises:
        ImportError: Unable to import `azure-search-documents`
        ValueError: If `search_or_index_client` is not provided
        ValueError: If `index_name` is not provided and `search_or_index_client`
            is of type azure.search.documents.SearchIndexClient
        ValueError: If `index_name` is provided and `search_or_index_client`
            is of type azure.search.documents.SearchClient
        ValueError: If `index_management` is `IndexManagement.CREATE_IF_NOT_EXISTS`
            and `search_or_index_client` is of type azure.search.documents.SearchClient
    """
    import_err_msg = (
        "`azure-search-documents` package not found, please run "
        "`pip install azure-search-documents==11.4.0`"
    )
    try:
        import azure.search.documents  # noqa
        from azure.search.documents import SearchClient
        from azure.search.documents.indexes import SearchIndexClient
    except ImportError:
        raise ImportError(import_err_msg)

    # Typed placeholders; exactly one of these may legitimately remain a
    # cast None depending on which client type the caller supplied.
    self._index_client: SearchIndexClient = cast(SearchIndexClient, None)
    self._search_client: SearchClient = cast(SearchClient, None)
    self.embedding_dimensionality = embedding_dimensionality

    # Validate search_or_index_client
    if search_or_index_client is not None:
        if isinstance(search_or_index_client, SearchIndexClient):
            # If SearchIndexClient is supplied so must index_name
            self._index_client = cast(SearchIndexClient, search_or_index_client)
            if not index_name:
                raise ValueError(
                    "index_name must be supplied if search_or_index_client is of "
                    "type azure.search.documents.SearchIndexClient"
                )
            # Derive the per-index SearchClient from the index-level client.
            self._search_client = self._index_client.get_search_client(
                index_name=index_name
            )
        elif isinstance(search_or_index_client, SearchClient):
            self._search_client = cast(SearchClient, search_or_index_client)
            # Validate index_name: a SearchClient is already bound to one index.
            if index_name:
                raise ValueError(
                    "index_name cannot be supplied if search_or_index_client "
                    "is of type azure.search.documents.SearchClient"
                )
        # Reached when search_or_index_client is neither client type.
        if not self._index_client and not self._search_client:
            raise ValueError(
                "search_or_index_client must be of type "
                "azure.search.documents.SearchClient or "
                "azure.search.documents.SearchIndexClient"
            )
    else:
        raise ValueError("search_or_index_client not specified")

    # Index creation requires the index-level client.
    if (
        index_management == IndexManagement.CREATE_IF_NOT_EXISTS
        and not self._index_client
    ):
        raise ValueError(
            "index_management has value of IndexManagement.CREATE_IF_NOT_EXISTS "
            "but search_or_index_client is not of type "
            "azure.search.documents.SearchIndexClient"
        )

    self._index_management = index_management

    # Default field mapping: enriched-doc key -> index field name.
    field_mapping = {
        "id": id_field_key,
        "chunk": chunk_field_key,
        "embedding": embedding_field_key,
        "metadata": metadata_string_field_key,
        "doc_id": doc_id_field_key,
    }

    self._field_mapping = field_mapping

    self._index_mapping = (
        self._default_index_mapping if index_mapping is None else index_mapping
    )

    # self._filterable_metadata_field_keys = filterable_metadata_field_keys
    self._metadata_to_index_field_map = self._normalise_metadata_to_index_fields(
        filterable_metadata_field_keys
    )

    # NOTE(review): if CREATE_IF_NOT_EXISTS is requested but index_name is
    # falsy, creation is silently skipped — confirm this is intended.
    if self._index_management == IndexManagement.CREATE_IF_NOT_EXISTS:
        if index_name:
            self._create_index_if_not_exists(index_name)

    # NOTE(review): index_name may be None here (SearchClient path) —
    # confirm _validate_index handles a None name.
    if self._index_management == IndexManagement.VALIDATE_INDEX:
        self._validate_index(index_name)
@property
def client(self) -> Any:
    """Get client.

    Returns:
        Any: The underlying Azure ``SearchClient`` used for document
            upload and query operations.
    """
    return self._search_client
def _default_index_mapping(
    self, enriched_doc: Dict[str, str], metadata: Dict[str, Any]
) -> Dict[str, str]:
    """Map an enriched document onto the configured index field names.

    Every key in the default field mapping is copied; filterable metadata
    fields are then added on top.
    """
    index_doc: Dict[str, str] = {
        index_field: enriched_doc[doc_field]
        for doc_field, index_field in self._field_mapping.items()
    }
    for metadata_name, (index_field_name, _) in (
        self._metadata_to_index_field_map.items()
    ):
        value = metadata.get(metadata_name)
        # Falsy values (None, "", 0, False) are not copied into the index.
        if value:
            index_doc[index_field_name] = value
    return index_doc
def add(
    self,
    nodes: List[BaseNode],
    **add_kwargs: Any,
) -> List[str]:
    """Add nodes to index associated with the configured search client.

    Documents are uploaded with a merge-or-upload strategy in batches of
    10; very large nodes may exceed the service's request byte limit.

    Args:
        nodes: List[BaseNode]: nodes with embeddings

    Returns:
        List[str]: node ids of the added nodes.

    Raises:
        ValueError: If the search client is not initialized.
    """
    if not self._search_client:
        raise ValueError("Search client not initialized")

    batch_size = 10  # merge_or_upload request batching threshold
    documents = []
    ids = []
    for node in nodes:
        logger.debug(f"Processing embedding: {node.node_id}")
        ids.append(node.node_id)
        documents.append(self._create_index_document(node))
        if len(documents) >= batch_size:
            logger.info(
                f"Uploading batch of size {len(documents)}, "
                f"current progress {len(ids)} of {len(nodes)}"
            )
            self._search_client.merge_or_upload_documents(documents)
            documents = []

    # Upload any remaining partial batch. (The trailing `documents = []`
    # reset in the original was dead code and has been removed.)
    if documents:
        logger.info(
            f"Uploading remaining batch of size {len(documents)}, "
            f"current progress {len(ids)} of {len(nodes)}"
        )
        self._search_client.merge_or_upload_documents(documents)
    return ids
def _create_index_document(self, node: BaseNode) -> Dict[str, Any]:
    """Create AI Search index document from embedding result.

    Builds the enriched document (id, chunk, embedding, doc_id, metadata
    JSON string) and passes it through the configured index mapping.
    """
    node_metadata = node_to_metadata_dict(
        node,
        remove_text=True,
        flat_metadata=self.flat_metadata,
    )
    enriched_doc: Dict[str, Any] = {
        "id": node.node_id,
        "chunk": node.get_content(metadata_mode=MetadataMode.NONE) or "",
        "embedding": node.get_embedding(),
        "doc_id": node.ref_doc_id,
        "metadata": json.dumps(node_metadata),
    }
    return self._index_mapping(enriched_doc, node_metadata)
161181 | """DeepLake vector store index.
An index that is built within DeepLake.
"""
import logging
from typing import Any, List, Optional, cast
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.schema import BaseNode, MetadataMode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
try:
from deeplake.core.vectorstore.deeplake_vectorstore import VectorStore
DEEPLAKE_INSTALLED = True
except ImportError:
DEEPLAKE_INSTALLED = False
logger = logging.getLogger(__name__)
class DeepLakeVectorStore(BasePydanticVectorStore):
    """The DeepLake Vector Store.

    In this vector store we store the text, its embedding and
    a few pieces of its metadata in a deeplake dataset. This implementation
    allows the use of an already existing deeplake dataset if it is one that
    was created by this vector store. It also supports creating a new one if
    the dataset doesn't exist or if `overwrite` is set to True.
    """

    # Node text is stored in the dataset alongside the embedding.
    stores_text: bool = True
    flat_metadata: bool = True

    # Pydantic fields populated via super().__init__ in __init__ below.
    ingestion_batch_size: int
    num_workers: int
    token: Optional[str]
    read_only: Optional[bool]
    dataset_path: str

    # Private attributes (not part of the pydantic model schema).
    _embedding_dimension: int = PrivateAttr()
    _ttl_seconds: Optional[int] = PrivateAttr()
    _deeplake_db: Any = PrivateAttr()
    _deeplake_db_collection: Any = PrivateAttr()
    _vectorstore: "VectorStore" = PrivateAttr()
    _id_tensor_name: str = PrivateAttr()

    def __init__(
        self,
        dataset_path: str = "llama_index",
        token: Optional[str] = None,
        read_only: Optional[bool] = False,
        ingestion_batch_size: int = 1024,
        ingestion_num_workers: int = 4,
        overwrite: bool = False,
        exec_option: Optional[str] = None,
        verbose: bool = True,
        **kwargs: Any,
    ) -> None:
        """
        Args:
            dataset_path (str): Path to the deeplake dataset, where data will be
                stored. Defaults to "llama_index".
            overwrite (bool, optional): Whether to overwrite existing dataset with same
                name. Defaults to False.
            token (str, optional): the deeplake token that allows you to access the
                dataset with proper access. Defaults to None.
            read_only (bool, optional): Whether to open the dataset with read only mode.
            ingestion_batch_size (int): used for controlling batched data
                ingestion to deeplake dataset. Defaults to 1024.
            ingestion_num_workers (int): number of workers to use during data ingestion.
                Defaults to 4.
            exec_option (str): Default method for search execution.
                It could be either ``"python"``, ``"compute_engine"`` or
                ``"tensor_db"``. Defaults to ``"python"``.
                - ``python`` - Pure-python implementation that runs on the client and
                    can be used for data stored anywhere. WARNING: using this option
                    with big datasets is discouraged because it can lead to memory
                    issues.
                - ``compute_engine`` - Performant C++ implementation of the Deep Lake
                    Compute Engine that runs on the client and can be used for any data
                    stored in or connected to Deep Lake. It cannot be used with
                    in-memory or local datasets.
                - ``tensor_db`` - Performant and fully-hosted Managed Tensor Database
                    that is responsible for storage and query execution. Only available
                    for data stored in the Deep Lake Managed Database. Store datasets in
                    this database by specifying runtime = {"tensor_db": True} during
                    dataset creation.
            verbose (bool): Specify if verbose output is enabled. Default is True.
            **kwargs (Any): Additional keyword arguments.

        Raises:
            ImportError: Unable to import `deeplake`.
        """
        # Initialize the pydantic fields first; note ingestion_num_workers
        # is stored under the `num_workers` field name.
        super().__init__(
            dataset_path=dataset_path,
            token=token,
            read_only=read_only,
            ingestion_batch_size=ingestion_batch_size,
            num_workers=ingestion_num_workers,
        )

        if not DEEPLAKE_INSTALLED:
            raise ImportError(
                "Could not import deeplake python package. "
                "Please install it with `pip install deeplake`."
            )

        self._vectorstore = VectorStore(
            path=dataset_path,
            ingestion_batch_size=ingestion_batch_size,
            num_workers=ingestion_num_workers,
            token=token,
            read_only=read_only,
            exec_option=exec_option,
            overwrite=overwrite,
            verbose=verbose,
            **kwargs,
        )
        # Datasets may expose the id tensor as either "ids" or "id";
        # detect which one this dataset uses.
        self._id_tensor_name = "ids" if "ids" in self._vectorstore.tensors() else "id"

    @property
    def client(self) -> Any:
        """Get client.

        Returns:
            Any: DeepLake vectorstore dataset.
        """
        return self._vectorstore.dataset

    def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
        """Add the embeddings and their nodes into DeepLake.

        Args:
            nodes (List[BaseNode]): List of nodes with embeddings
                to insert.

        Returns:
            List[str]: List of ids inserted.
        """
        # Build column-wise tensors: one parallel list per dataset tensor.
        embedding = []
        metadata = []
        id_ = []
        text = []

        for node in nodes:
            embedding.append(node.get_embedding())
            metadata.append(
                node_to_metadata_dict(
                    node, remove_text=False, flat_metadata=self.flat_metadata
                )
            )
            id_.append(node.node_id)
            text.append(node.get_content(metadata_mode=MetadataMode.NONE))

        # Key the id column by whichever tensor name the dataset uses.
        kwargs = {
            "embedding": embedding,
            "metadata": metadata,
            self._id_tensor_name: id_,
            "text": text,
        }

        return self._vectorstore.add(
            return_ids=True,
            **kwargs,
        )

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using with ref_doc_id.

        Args:
            ref_doc_id (str): The doc_id of the document to delete.
        """
        # Deletes all rows whose stored node metadata has this doc_id.
        self._vectorstore.delete(filter={"metadata": {"doc_id": ref_doc_id}})

    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """Query index for top k most similar nodes.

        Args:
            query (VectorStoreQuery): VectorStoreQuery class input, it has
                the following attributes:
                1. query_embedding (List[float]): query embedding
                2. similarity_top_k (int): top k most similar nodes
            deep_memory (bool): Whether to use deep memory for query execution.

        Returns:
            VectorStoreQueryResult
        """
        query_embedding = cast(List[float], query.query_embedding)
        exec_option = kwargs.get("exec_option")
        deep_memory = kwargs.get("deep_memory")
        # NOTE(review): query.filters is forwarded to deeplake unchanged —
        # confirm deeplake's `filter` accepts llama-index MetadataFilters.
        data = self._vectorstore.search(
            embedding=query_embedding,
            exec_option=exec_option,
            k=query.similarity_top_k,
            filter=query.filters,
            deep_memory=deep_memory,
        )

        similarities = data["score"]
        ids = data[self._id_tensor_name]
        metadatas = data["metadata"]
        nodes = []
        # Nodes are reconstructed entirely from the stored metadata dicts
        # (text was stored inside metadata via remove_text=False on add).
        for metadata in metadatas:
            nodes.append(metadata_dict_to_node(metadata))

        return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
161218 | from typing import Any, List, Literal, Optional
import fsspec
from llama_index.legacy.vector_stores.docarray.base import DocArrayVectorStore
class DocArrayInMemoryVectorStore(DocArrayVectorStore):
    """Class representing a DocArray In-Memory vector store.

    A DocArray-backed document index that keeps all documents in process
    memory, with optional persistence to a single index file.
    """

    def __init__(
        self,
        index_path: Optional[str] = None,
        metric: Literal[
            "cosine_sim", "euclidian_dist", "sgeuclidean_dist"
        ] = "cosine_sim",
    ):
        """Initializes the DocArrayInMemoryVectorStore.

        Args:
            index_path (Optional[str]): The path to the index file.
            metric (Literal["cosine_sim", "euclidian_dist", "sgeuclidean_dist"]):
                The distance metric to use. Default is "cosine_sim".
        """
        import_err_msg = """
        `docarray` package not found. Install the package via pip:
        `pip install docarray`
        """
        try:
            import docarray  # noqa
        except ImportError:
            raise ImportError(import_err_msg)

        self._ref_docs = None  # type: ignore[assignment]
        self._index_file_path = index_path
        self._index, self._schema = self._init_index(metric=metric)

    def _init_index(self, **kwargs: Any):  # type: ignore[no-untyped-def]
        """Build the in-memory exact nearest neighbour index and its schema.

        Args:
            **kwargs: Variable length argument list.

        Returns:
            tuple: The in-memory exact nearest neighbour index and its schema.
        """
        from docarray.index import InMemoryExactNNIndex

        schema = self._get_schema(**kwargs)
        index_cls = InMemoryExactNNIndex[schema]  # type: ignore[valid-type]
        index = index_cls(
            index_file_path=self._index_file_path  # type: ignore[arg-type]
        )
        return index, schema

    def _find_docs_to_be_removed(self, doc_id: str) -> List[str]:
        """Collect ids of stored documents whose metadata doc_id matches.

        Args:
            doc_id (str): Reference document ID that should be removed.

        Returns:
            List[str]: List of document IDs to be removed.
        """
        matches = self._index.filter({"metadata__doc_id": {"$eq": doc_id}})
        return [match.id for match in matches]

    def persist(
        self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
    ) -> None:
        """Persists the in-memory vector store to a file.

        Args:
            persist_path (str): The path to persist the index; falls back to
                the constructor's index_path when empty.
            fs (fsspec.AbstractFileSystem, optional): Filesystem to persist to.
                (doesn't apply)
        """
        self._index.persist(persist_path or self._index_file_path)
161244 | """LlamaIndex data structures."""
# indices
from llama_index.legacy.indices.composability.graph import ComposableGraph
from llama_index.legacy.indices.document_summary import (
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
)
from llama_index.legacy.indices.document_summary.base import DocumentSummaryIndex
from llama_index.legacy.indices.empty.base import EmptyIndex, GPTEmptyIndex
from llama_index.legacy.indices.keyword_table.base import (
GPTKeywordTableIndex,
KeywordTableIndex,
)
from llama_index.legacy.indices.keyword_table.rake_base import (
GPTRAKEKeywordTableIndex,
RAKEKeywordTableIndex,
)
from llama_index.legacy.indices.keyword_table.simple_base import (
GPTSimpleKeywordTableIndex,
SimpleKeywordTableIndex,
)
from llama_index.legacy.indices.knowledge_graph import (
GPTKnowledgeGraphIndex,
KnowledgeGraphIndex,
)
from llama_index.legacy.indices.list import GPTListIndex, ListIndex, SummaryIndex
from llama_index.legacy.indices.list.base import GPTListIndex, ListIndex, SummaryIndex
from llama_index.legacy.indices.loading import (
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
from llama_index.legacy.indices.managed.colbert_index import ColbertIndex
from llama_index.legacy.indices.managed.vectara import VectaraIndex
from llama_index.legacy.indices.managed.zilliz import ZillizCloudPipelineIndex
from llama_index.legacy.indices.multi_modal import MultiModalVectorStoreIndex
from llama_index.legacy.indices.struct_store.pandas import GPTPandasIndex, PandasIndex
from llama_index.legacy.indices.struct_store.sql import (
GPTSQLStructStoreIndex,
SQLStructStoreIndex,
)
from llama_index.legacy.indices.tree.base import GPTTreeIndex, TreeIndex
from llama_index.legacy.indices.vector_store import (
GPTVectorStoreIndex,
VectorStoreIndex,
)
# Public API of this package. The GPT-prefixed names below the "legacy"
# marker are aliases kept for backward compatibility.
__all__ = [
    "load_graph_from_storage",
    "load_index_from_storage",
    "load_indices_from_storage",
    "KeywordTableIndex",
    "SimpleKeywordTableIndex",
    "RAKEKeywordTableIndex",
    "SummaryIndex",
    "TreeIndex",
    "VectaraIndex",
    "ColbertIndex",
    "ZillizCloudPipelineIndex",
    "DocumentSummaryIndex",
    "KnowledgeGraphIndex",
    "PandasIndex",
    "VectorStoreIndex",
    "SQLStructStoreIndex",
    "MultiModalVectorStoreIndex",
    "EmptyIndex",
    "ComposableGraph",
    # legacy
    "GPTKnowledgeGraphIndex",
    "GPTKeywordTableIndex",
    "GPTSimpleKeywordTableIndex",
    "GPTRAKEKeywordTableIndex",
    "GPTDocumentSummaryIndex",
    "GPTListIndex",
    "GPTTreeIndex",
    "GPTPandasIndex",
    "ListIndex",
    "GPTVectorStoreIndex",
    "GPTSQLStructStoreIndex",
    "GPTEmptyIndex",
]
class PromptHelper(BaseComponent):
    """Prompt helper.

    General prompt helper that can help deal with LLM context window token limitations.

    At its core, it calculates available context size by starting with the context
    window size of an LLM and reserve token space for the prompt template, and the
    output.

    It provides utility for "repacking" text chunks (retrieved from index) to maximally
    make use of the available context window (and thereby reducing the number of LLM
    calls needed), or truncating them so that they fit in a single LLM call.

    Args:
        context_window (int): Context window for the LLM.
        num_output (int): Number of outputs for the LLM.
        chunk_overlap_ratio (float): Chunk overlap as a ratio of chunk size
        chunk_size_limit (Optional[int]): Maximum chunk size to use.
        tokenizer (Optional[Callable[[str], List]]): Tokenizer to use.
        separator (str): Separator for text splitter
    """

    context_window: int = Field(
        default=DEFAULT_CONTEXT_WINDOW,
        description="The maximum context size that will get sent to the LLM.",
    )
    num_output: int = Field(
        default=DEFAULT_NUM_OUTPUTS,
        description="The amount of token-space to leave in input for generation.",
    )
    chunk_overlap_ratio: float = Field(
        default=DEFAULT_CHUNK_OVERLAP_RATIO,
        description="The percentage token amount that each chunk should overlap.",
    )
    chunk_size_limit: Optional[int] = Field(description="The maximum size of a chunk.")
    separator: str = Field(
        default=" ", description="The separator when chunking tokens."
    )

    # Token counter kept as a private attribute (not a pydantic field).
    _token_counter: TokenCounter = PrivateAttr()

    def __init__(
        self,
        context_window: int = DEFAULT_CONTEXT_WINDOW,
        num_output: int = DEFAULT_NUM_OUTPUTS,
        chunk_overlap_ratio: float = DEFAULT_CHUNK_OVERLAP_RATIO,
        chunk_size_limit: Optional[int] = None,
        tokenizer: Optional[Callable[[str], List]] = None,
        separator: str = " ",
    ) -> None:
        """Init params.

        Raises:
            ValueError: If ``chunk_overlap_ratio`` is outside [0.0, 1.0].
        """
        if chunk_overlap_ratio > 1.0 or chunk_overlap_ratio < 0.0:
            raise ValueError("chunk_overlap_ratio must be a float between 0. and 1.")

        # TODO: make configurable
        self._token_counter = TokenCounter(tokenizer=tokenizer)

        super().__init__(
            context_window=context_window,
            num_output=num_output,
            chunk_overlap_ratio=chunk_overlap_ratio,
            chunk_size_limit=chunk_size_limit,
            separator=separator,
        )

    @classmethod
    def from_llm_metadata(
        cls,
        llm_metadata: LLMMetadata,
        chunk_overlap_ratio: float = DEFAULT_CHUNK_OVERLAP_RATIO,
        chunk_size_limit: Optional[int] = None,
        tokenizer: Optional[Callable[[str], List]] = None,
        separator: str = " ",
    ) -> "PromptHelper":
        """Create from llm predictor.

        This will autofill values like context_window and num_output.
        """
        context_window = llm_metadata.context_window

        # A num_output of -1 is a sentinel; fall back to the default.
        if llm_metadata.num_output == -1:
            num_output = DEFAULT_NUM_OUTPUTS
        else:
            num_output = llm_metadata.num_output

        return cls(
            context_window=context_window,
            num_output=num_output,
            chunk_overlap_ratio=chunk_overlap_ratio,
            chunk_size_limit=chunk_size_limit,
            tokenizer=tokenizer,
            separator=separator,
        )

    @classmethod
    def class_name(cls) -> str:
        """Serialization name for this component."""
        return "PromptHelper"

    def _get_available_context_size(self, num_prompt_tokens: int) -> int:
        """Get available context size.

        This is calculated as:
            available context window = total context window
                - input (partially filled prompt)
                - output (room reserved for response)

        Raises:
            ValueError: If the result would be negative, i.e. the prompt plus
                the reserved output space already exceeds the context window.
        """
        context_size_tokens = self.context_window - num_prompt_tokens - self.num_output
        if context_size_tokens < 0:
            raise ValueError(
                f"Calculated available context size {context_size_tokens} was"
                " not non-negative."
            )
        return context_size_tokens

    def _get_available_chunk_size(
        self,
        prompt: BasePromptTemplate,
        num_chunks: int = 1,
        padding: int = 5,
        llm: Optional[LLM] = None,
    ) -> int:
        """Get available chunk size.

        This is calculated as:
            available chunk size = available context window  // number_chunks
                - padding

        Notes:
        - By default, we use padding of 5 (to save space for formatting needs).
        - Available chunk size is further clamped to chunk_size_limit if specified.
        """
        # Resolve a selector template to the concrete template for this LLM.
        if isinstance(prompt, SelectorPromptTemplate):
            prompt = prompt.select(llm=llm)

        if isinstance(prompt, ChatPromptTemplate):
            messages: List[ChatMessage] = prompt.message_templates

            # account for partial formatting
            partial_messages = []
            for message in messages:
                # Work on a copy so the template messages are not mutated.
                partial_message = deepcopy(message)

                # get string variables (if any)
                template_vars = [
                    var
                    for _, var, _, _ in Formatter().parse(str(message))
                    if var is not None
                ]

                # figure out which variables are partially formatted
                # if a variable is not formatted, it will be replaced with
                # the template variable itself
                used_vars = {
                    template_var: f"{{{template_var}}}"
                    for template_var in template_vars
                }
                for var_name, val in prompt.kwargs.items():
                    if var_name in template_vars:
                        used_vars[var_name] = val

                # format partial message
                if partial_message.content is not None:
                    partial_message.content = partial_message.content.format(
                        **used_vars
                    )

                # add to list of partial messages
                partial_messages.append(partial_message)

            num_prompt_tokens = self._token_counter.estimate_tokens_in_messages(
                partial_messages
            )
        else:
            # Plain (non-chat) template: count tokens of the empty-formatted text.
            prompt_str = get_empty_prompt_txt(prompt)
            num_prompt_tokens = self._token_counter.get_string_tokens(prompt_str)

        available_context_size = self._get_available_context_size(num_prompt_tokens)
        # Split the remaining budget evenly across chunks, minus padding.
        result = available_context_size // num_chunks - padding
        if self.chunk_size_limit is not None:
            result = min(result, self.chunk_size_limit)
        return result

    def get_text_splitter_given_prompt(
        self,
        prompt: BasePromptTemplate,
        num_chunks: int = 1,
        padding: int = DEFAULT_PADDING,
        llm: Optional[LLM] = None,
    ) -> TokenTextSplitter:
        """Get text splitter configured to maximally pack available context window,
        taking into account of given prompt, and desired number of chunks.

        Raises:
            ValueError: If the computed chunk size is not positive.
        """
        chunk_size = self._get_available_chunk_size(
            prompt, num_chunks, padding=padding, llm=llm
        )
        if chunk_size <= 0:
            raise ValueError(f"Chunk size {chunk_size} is not positive.")
        chunk_overlap = int(self.chunk_overlap_ratio * chunk_size)
        return TokenTextSplitter(
            separator=self.separator,
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            tokenizer=self._token_counter.tokenizer,
        )

    def truncate(
        self,
        prompt: BasePromptTemplate,
        text_chunks: Sequence[str],
        padding: int = DEFAULT_PADDING,
        llm: Optional[LLM] = None,
    ) -> List[str]:
        """Truncate text chunks to fit available context window."""
        # Each chunk gets an equal share of the context, so size the splitter
        # for len(text_chunks) chunks and keep only each chunk's first piece.
        text_splitter = self.get_text_splitter_given_prompt(
            prompt,
            num_chunks=len(text_chunks),
            padding=padding,
            llm=llm,
        )
        return [truncate_text(chunk, text_splitter) for chunk in text_chunks]

    def repack(
        self,
        prompt: BasePromptTemplate,
        text_chunks: Sequence[str],
        padding: int = DEFAULT_PADDING,
        llm: Optional[LLM] = None,
    ) -> List[str]:
        """Repack text chunks to fit available context window.

        This will combine text chunks into consolidated chunks
        that more fully "pack" the prompt template given the context_window.
        """
        text_splitter = self.get_text_splitter_given_prompt(
            prompt, padding=padding, llm=llm
        )
        # Join non-empty chunks, then re-split at the maximum packed size.
        combined_str = "\n\n".join([c.strip() for c in text_chunks if c.strip()])
        return text_splitter.split_text(combined_str)
161259 | ## 🌲 Tree Index
Currently the tree index refers to the `TreeIndex` class. It organizes external data into a tree structure that can be queried.
### Index Construction
The `TreeIndex` first takes in a set of text documents as input. It then builds up a tree-index in a bottom-up fashion; each parent node is able to summarize the children nodes using a general **summarization prompt**; each intermediate node contains text summarizing the components below. Once the index is built, it can be saved to disk as a JSON and loaded for future use.
### Query
There are two query modes: `default` and `retrieve`.
**Default (GPTTreeIndexLeafQuery)**
Using a **query prompt template**, the TreeIndex will be able to recursively perform tree traversal in a top-down fashion in order to answer a question. For example, in the very beginning GPT-3 is tasked with selecting between _n_ top-level nodes which best answers a provided query, by outputting a number as a multiple-choice problem. The TreeIndex then uses the number to select the corresponding node, and the process repeats recursively among the children nodes until a leaf node is reached.
**Retrieve (GPTTreeIndexRetQuery)**
Simply use the root nodes as context to synthesize an answer to the query. This is especially effective if the tree is preseeded with a `query_str`.
### Usage
```python
from llama_index.legacy import TreeIndex, SimpleDirectoryReader
# build index
documents = SimpleDirectoryReader("data").load_data()
index = TreeIndex.from_documents(documents)
# query
query_engine = index.as_query_engine()
response = query_engine.query("<question text>")
```
### FAQ
**Why build a tree? Why not just incrementally go through each chunk?**
Algorithmically speaking, $O(\log N)$ is better than $O(N)$.
More broadly, building a tree helps us to test GPT's capabilities in modeling information in a hierarchy. Human memory is often thought to organize information hierarchically as well, so this design lets us test how well GPT can use a hierarchy of its own making to answer questions.
Practically speaking, it is much cheaper to do so and I want to limit my monthly spending (see below for costs).
**How much does this cost to run?**
We currently use the Davinci model for good results. Unfortunately Davinci is quite expensive. The cost of building the tree is roughly
$cN\log(N)\frac{p}{1000}$, where $p=4096$ is the prompt limit and $c$ is the cost per 1000 tokens ($0.02 as mentioned on the [pricing page](https://openai.com/api/pricing/)). The cost of querying the tree is roughly
$c\log(N)\frac{p}{1000}$.
For the NYC example, this equates to roughly \$0.40 per query.
161327 | def delete_nodes(
self,
node_ids: List[str],
delete_from_docstore: bool = False,
**delete_kwargs: Any,
) -> None:
"""Delete a list of nodes from the index.
Args:
node_ids (List[str]): A list of node_ids from the nodes to delete
"""
raise NotImplementedError(
"Vector indices currently only support delete_ref_doc, which "
"deletes nodes using the ref_doc_id of ingested documents."
)
    def delete_ref_doc(
        self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any
    ) -> None:
        """Delete a document and its nodes by using ref_doc_id.

        Args:
            ref_doc_id (str): ref_doc_id of the ingested document to delete.
            delete_from_docstore (bool): whether to also remove the document
                from the docstore; only applies when nodes are tracked locally.
            **delete_kwargs (Any): extra kwargs forwarded to the vector store's
                delete call.
        """
        # Always remove the document's entries from the vector store itself.
        self._vector_store.delete(ref_doc_id, **delete_kwargs)

        # delete from index_struct only if needed: node ids are tracked locally
        # only when the vector store does not store text, or when node storage
        # is explicitly overridden.
        if not self._vector_store.stores_text or self._store_nodes_override:
            ref_doc_info = self._docstore.get_ref_doc_info(ref_doc_id)
            if ref_doc_info is not None:
                for node_id in ref_doc_info.node_ids:
                    self._index_struct.delete(node_id)
                    # NOTE(review): a node_id is passed where delete() above
                    # received a ref_doc_id — confirm this is intended for the
                    # configured vector store.
                    self._vector_store.delete(node_id)

        # delete from docstore only if needed (same local-tracking condition,
        # and only when the caller opted in via delete_from_docstore)
        if (
            not self._vector_store.stores_text or self._store_nodes_override
        ) and delete_from_docstore:
            self._docstore.delete_ref_doc(ref_doc_id, raise_error=False)

        # persist the updated index struct
        self._storage_context.index_store.add_index_struct(self._index_struct)
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
if not self._vector_store.stores_text or self._store_nodes_override:
node_doc_ids = list(self.index_struct.nodes_dict.values())
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
else:
raise NotImplementedError(
"Vector store integrations that store text in the vector store are "
"not supported by ref_doc_info yet."
)
# Legacy alias: earlier releases exposed this class as ``GPTVectorStoreIndex``.
GPTVectorStoreIndex = VectorStoreIndex
161328 | """Base vector store index query."""
from typing import Any, Dict, List, Optional
from llama_index.legacy.callbacks.base import CallbackManager
from llama_index.legacy.constants import DEFAULT_SIMILARITY_TOP_K
from llama_index.legacy.core.base_retriever import BaseRetriever
from llama_index.legacy.data_structs.data_structs import IndexDict
from llama_index.legacy.indices.utils import log_vector_store_query_result
from llama_index.legacy.indices.vector_store.base import VectorStoreIndex
from llama_index.legacy.schema import NodeWithScore, ObjectType, QueryBundle
from llama_index.legacy.vector_stores.types import (
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
class VectorIndexRetriever(BaseRetriever):
    """Vector index retriever.

    Args:
        index (VectorStoreIndex): vector store index.
        similarity_top_k (int): number of top k results to return.
        vector_store_query_mode (str): vector store query mode
            See reference for VectorStoreQueryMode for full list of supported modes.
        filters (Optional[MetadataFilters]): metadata filters, defaults to None
        alpha (float): weight for sparse/dense retrieval, only used for
            hybrid query mode.
        doc_ids (Optional[List[str]]): list of documents to constrain search.
        vector_store_kwargs (dict): Additional vector store specific kwargs to pass
            through to the vector store at query time.
    """

    def __init__(
        self,
        index: VectorStoreIndex,
        similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
        vector_store_query_mode: VectorStoreQueryMode = VectorStoreQueryMode.DEFAULT,
        filters: Optional[MetadataFilters] = None,
        alpha: Optional[float] = None,
        node_ids: Optional[List[str]] = None,
        doc_ids: Optional[List[str]] = None,
        sparse_top_k: Optional[int] = None,
        callback_manager: Optional[CallbackManager] = None,
        object_map: Optional[dict] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> None:
        """Initialize params."""
        self._index = index
        # cache frequently-used components off the index
        self._vector_store = self._index.vector_store
        self._service_context = self._index.service_context
        self._docstore = self._index.docstore
        self._similarity_top_k = similarity_top_k
        # coerce through the enum so plain strings are accepted too
        self._vector_store_query_mode = VectorStoreQueryMode(vector_store_query_mode)
        self._alpha = alpha
        self._node_ids = node_ids
        self._doc_ids = doc_ids
        self._filters = filters
        self._sparse_top_k = sparse_top_k
        # extra kwargs forwarded verbatim to the vector store's query call
        self._kwargs: Dict[str, Any] = kwargs.get("vector_store_kwargs", {})
        super().__init__(
            callback_manager=callback_manager, object_map=object_map, verbose=verbose
        )

    @property
    def similarity_top_k(self) -> int:
        """Return similarity top k."""
        return self._similarity_top_k

    @similarity_top_k.setter
    def similarity_top_k(self, similarity_top_k: int) -> None:
        """Set similarity top k."""
        self._similarity_top_k = similarity_top_k

    def _retrieve(
        self,
        query_bundle: QueryBundle,
    ) -> List[NodeWithScore]:
        # Lazily compute the query embedding only when the vector store needs
        # one and the caller did not already supply it.
        if self._vector_store.is_embedding_query:
            if query_bundle.embedding is None and len(query_bundle.embedding_strs) > 0:
                query_bundle.embedding = (
                    self._service_context.embed_model.get_agg_embedding_from_queries(
                        query_bundle.embedding_strs
                    )
                )
        return self._get_nodes_with_embeddings(query_bundle)

    async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        # Async counterpart of ``_retrieve``.
        if self._vector_store.is_embedding_query:
            if query_bundle.embedding is None and len(query_bundle.embedding_strs) > 0:
                embed_model = self._service_context.embed_model
                query_bundle.embedding = (
                    await embed_model.aget_agg_embedding_from_queries(
                        query_bundle.embedding_strs
                    )
                )
        return await self._aget_nodes_with_embeddings(query_bundle)

    def _build_vector_store_query(
        self, query_bundle_with_embeddings: QueryBundle
    ) -> VectorStoreQuery:
        """Translate the query bundle plus retriever config into a VectorStoreQuery."""
        return VectorStoreQuery(
            query_embedding=query_bundle_with_embeddings.embedding,
            similarity_top_k=self._similarity_top_k,
            node_ids=self._node_ids,
            doc_ids=self._doc_ids,
            query_str=query_bundle_with_embeddings.query_str,
            mode=self._vector_store_query_mode,
            alpha=self._alpha,
            filters=self._filters,
            sparse_top_k=self._sparse_top_k,
        )

    def _build_node_list_from_query_result(
        self, query_result: VectorStoreQueryResult
    ) -> List[NodeWithScore]:
        """Convert a raw VectorStoreQueryResult into a list of scored nodes.

        Recovers node content from the docstore when the vector store does not
        return it (or returns non-text nodes), then pairs each node with its
        similarity score when scores are available.
        """
        if query_result.nodes is None:
            # NOTE: vector store does not keep text and returns node indices.
            # Need to recover all nodes from docstore
            if query_result.ids is None:
                raise ValueError(
                    "Vector store query result should return at "
                    "least one of nodes or ids."
                )
            assert isinstance(self._index.index_struct, IndexDict)
            node_ids = [
                self._index.index_struct.nodes_dict[idx] for idx in query_result.ids
            ]
            nodes = self._docstore.get_nodes(node_ids)
            query_result.nodes = nodes
        else:
            # NOTE: vector store keeps text, returns nodes.
            # Only need to recover image or index nodes from docstore
            for i in range(len(query_result.nodes)):
                source_node = query_result.nodes[i].source_node
                if (not self._vector_store.stores_text) or (
                    source_node is not None and source_node.node_type != ObjectType.TEXT
                ):
                    node_id = query_result.nodes[i].node_id
                    if self._docstore.document_exists(node_id):
                        query_result.nodes[i] = self._docstore.get_node(
                            node_id
                        )  # type: ignore[index]

        log_vector_store_query_result(query_result)

        # Pair each node with its score; similarities may be absent for some
        # vector stores, in which case the score stays None.
        node_with_scores: List[NodeWithScore] = []
        for ind, node in enumerate(query_result.nodes):
            score: Optional[float] = None
            if query_result.similarities is not None:
                score = query_result.similarities[ind]
            node_with_scores.append(NodeWithScore(node=node, score=score))

        return node_with_scores

    def _get_nodes_with_embeddings(
        self, query_bundle_with_embeddings: QueryBundle
    ) -> List[NodeWithScore]:
        """Run the vector store query and convert results to scored nodes."""
        query = self._build_vector_store_query(query_bundle_with_embeddings)
        query_result = self._vector_store.query(query, **self._kwargs)
        return self._build_node_list_from_query_result(query_result)

    async def _aget_nodes_with_embeddings(
        self, query_bundle_with_embeddings: QueryBundle
    ) -> List[NodeWithScore]:
        """Async counterpart of ``_get_nodes_with_embeddings``."""
        query = self._build_vector_store_query(query_bundle_with_embeddings)
        query_result = await self._vector_store.aquery(query, **self._kwargs)
        return self._build_node_list_from_query_result(query_result)
161386 | """Retriever tool."""
from typing import TYPE_CHECKING, Any, Optional
from llama_index.legacy.core.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.legacy.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.legacy.schema import MetadataMode
from llama_index.legacy.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
# Fallback tool name/description used by ``RetrieverTool.from_defaults`` when
# the caller does not supply their own.
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
    """Retriever tool.

    A tool making use of a retriever.

    Args:
        retriever (BaseRetriever): A retriever.
        metadata (ToolMetadata): The associated metadata of the query engine.
    """

    def __init__(
        self,
        retriever: BaseRetriever,
        metadata: ToolMetadata,
    ) -> None:
        self._retriever = retriever
        self._metadata = metadata

    @classmethod
    def from_defaults(
        cls,
        retriever: BaseRetriever,
        name: Optional[str] = None,
        description: Optional[str] = None,
    ) -> "RetrieverTool":
        """Construct a RetrieverTool, falling back to default name/description."""
        name = name or DEFAULT_NAME
        description = description or DEFAULT_DESCRIPTION
        metadata = ToolMetadata(name=name, description=description)
        return cls(retriever=retriever, metadata=metadata)

    @property
    def retriever(self) -> BaseRetriever:
        return self._retriever

    @property
    def metadata(self) -> ToolMetadata:
        return self._metadata

    @staticmethod
    def _build_query_str(args: Any, kwargs: Any) -> str:
        """Flatten positional and keyword inputs into a single query string.

        Raises:
            ValueError: if neither args nor kwargs produce any input text.
        """
        query_str = ""
        if args is not None:
            query_str += ", ".join([str(arg) for arg in args]) + "\n"
        if kwargs is not None:
            query_str += (
                ", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
            )
        if query_str == "":
            raise ValueError("Cannot call query engine without inputs")
        return query_str

    @staticmethod
    def _format_docs(docs: Any) -> str:
        """Render retrieved nodes as LLM-facing text, one block per node."""
        content = ""
        for doc in docs:
            # copy so the template overrides don't mutate the stored node
            node_copy = doc.node.copy()
            node_copy.text_template = "{metadata_str}\n{content}"
            node_copy.metadata_template = "{key} = {value}"
            content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
        return content

    def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
        """Retrieve synchronously and wrap the results in a ToolOutput."""
        query_str = self._build_query_str(args, kwargs)
        docs = self._retriever.retrieve(query_str)
        return ToolOutput(
            content=self._format_docs(docs),
            tool_name=self.metadata.name,
            # BUG FIX: previously recorded the builtin ``input`` function here;
            # record the actual query string instead.
            raw_input={"input": query_str},
            raw_output=docs,
        )

    async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
        """Async variant of ``call``."""
        query_str = self._build_query_str(args, kwargs)
        docs = await self._retriever.aretrieve(query_str)
        return ToolOutput(
            content=self._format_docs(docs),
            tool_name=self.metadata.name,
            # BUG FIX: see ``call`` — log the query string, not the builtin.
            raw_input={"input": query_str},
            raw_output=docs,
        )

    def as_langchain_tool(self) -> "LlamaIndexTool":
        raise NotImplementedError("`as_langchain_tool` not implemented here.")
161421 | """OpenAI embeddings file."""
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
import httpx
from openai import AsyncOpenAI, OpenAI
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks.base import CallbackManager
from llama_index.legacy.embeddings.base import BaseEmbedding
from llama_index.legacy.llms.openai_utils import (
create_retry_decorator,
resolve_openai_credentials,
)
# Retry wrapper applied to every embedding call below: up to 6 attempts with
# randomized exponential backoff (waits between 1s and 20s), giving up after
# 60 seconds in total.
embedding_retry_decorator = create_retry_decorator(
    max_retries=6,
    random_exponential=True,
    stop_after_delay_seconds=60,
    min_seconds=1,
    max_seconds=20,
)
class OpenAIEmbeddingMode(str, Enum):
    """OpenAI embedding mode.

    Selects which family of embedding models to resolve to: similarity-optimized
    vs. text-search-optimized (see the mode/model dicts below).
    """

    SIMILARITY_MODE = "similarity"
    TEXT_SEARCH_MODE = "text_search"
class OpenAIEmbeddingModelType(str, Enum):
    """OpenAI embedding model type.

    Short aliases (``davinci`` .. ``ada``) refer to first-generation model
    families and are resolved to concrete model names via the mode/model
    dicts below; the ``text-embedding-*`` values are used as-is.
    """

    # first-generation family aliases
    DAVINCI = "davinci"
    CURIE = "curie"
    BABBAGE = "babbage"
    ADA = "ada"
    # second/third-generation models (mode-independent)
    TEXT_EMBED_ADA_002 = "text-embedding-ada-002"
    TEXT_EMBED_3_LARGE = "text-embedding-3-large"
    TEXT_EMBED_3_SMALL = "text-embedding-3-small"
class OpenAIEmbeddingModeModel(str, Enum):
    """OpenAI embedding mode model.

    Concrete model identifiers sent to the API. First-generation families have
    separate similarity / search-query / search-doc variants; the newer
    ``text-embedding-*`` models have a single variant.
    """

    # davinci
    TEXT_SIMILARITY_DAVINCI = "text-similarity-davinci-001"
    TEXT_SEARCH_DAVINCI_QUERY = "text-search-davinci-query-001"
    TEXT_SEARCH_DAVINCI_DOC = "text-search-davinci-doc-001"

    # curie
    TEXT_SIMILARITY_CURIE = "text-similarity-curie-001"
    TEXT_SEARCH_CURIE_QUERY = "text-search-curie-query-001"
    TEXT_SEARCH_CURIE_DOC = "text-search-curie-doc-001"

    # babbage
    TEXT_SIMILARITY_BABBAGE = "text-similarity-babbage-001"
    TEXT_SEARCH_BABBAGE_QUERY = "text-search-babbage-query-001"
    TEXT_SEARCH_BABBAGE_DOC = "text-search-babbage-doc-001"

    # ada
    TEXT_SIMILARITY_ADA = "text-similarity-ada-001"
    TEXT_SEARCH_ADA_QUERY = "text-search-ada-query-001"
    TEXT_SEARCH_ADA_DOC = "text-search-ada-doc-001"

    # text-embedding-ada-002
    TEXT_EMBED_ADA_002 = "text-embedding-ada-002"

    # text-embedding-3-large
    TEXT_EMBED_3_LARGE = "text-embedding-3-large"

    # text-embedding-3-small
    TEXT_EMBED_3_SMALL = "text-embedding-3-small"
# convenient shorthand for the three enums above
OAEM = OpenAIEmbeddingMode
OAEMT = OpenAIEmbeddingModelType
OAEMM = OpenAIEmbeddingModeModel

# presumably the max input size (in tokens) for the first-generation embedding
# models; its usage is not visible in this chunk — TODO confirm
EMBED_MAX_TOKEN_LIMIT = 2048
# Maps (embedding mode, model type string) -> concrete model used to embed
# *queries*. For the first-generation text-search families this selects the
# "-query-" variant; newer text-embedding-* models are mode-independent.
_QUERY_MODE_MODEL_DICT = {
    (OAEM.SIMILARITY_MODE, "davinci"): OAEMM.TEXT_SIMILARITY_DAVINCI,
    (OAEM.SIMILARITY_MODE, "curie"): OAEMM.TEXT_SIMILARITY_CURIE,
    (OAEM.SIMILARITY_MODE, "babbage"): OAEMM.TEXT_SIMILARITY_BABBAGE,
    (OAEM.SIMILARITY_MODE, "ada"): OAEMM.TEXT_SIMILARITY_ADA,
    (OAEM.SIMILARITY_MODE, "text-embedding-ada-002"): OAEMM.TEXT_EMBED_ADA_002,
    (OAEM.SIMILARITY_MODE, "text-embedding-3-small"): OAEMM.TEXT_EMBED_3_SMALL,
    (OAEM.SIMILARITY_MODE, "text-embedding-3-large"): OAEMM.TEXT_EMBED_3_LARGE,
    (OAEM.TEXT_SEARCH_MODE, "davinci"): OAEMM.TEXT_SEARCH_DAVINCI_QUERY,
    (OAEM.TEXT_SEARCH_MODE, "curie"): OAEMM.TEXT_SEARCH_CURIE_QUERY,
    (OAEM.TEXT_SEARCH_MODE, "babbage"): OAEMM.TEXT_SEARCH_BABBAGE_QUERY,
    (OAEM.TEXT_SEARCH_MODE, "ada"): OAEMM.TEXT_SEARCH_ADA_QUERY,
    (OAEM.TEXT_SEARCH_MODE, "text-embedding-ada-002"): OAEMM.TEXT_EMBED_ADA_002,
    (OAEM.TEXT_SEARCH_MODE, "text-embedding-3-large"): OAEMM.TEXT_EMBED_3_LARGE,
    (OAEM.TEXT_SEARCH_MODE, "text-embedding-3-small"): OAEMM.TEXT_EMBED_3_SMALL,
}
# Maps (embedding mode, model type string) -> concrete model used to embed
# *documents/text*. For the first-generation text-search families this selects
# the "-doc-" variant; newer text-embedding-* models are mode-independent.
_TEXT_MODE_MODEL_DICT = {
    (OAEM.SIMILARITY_MODE, "davinci"): OAEMM.TEXT_SIMILARITY_DAVINCI,
    (OAEM.SIMILARITY_MODE, "curie"): OAEMM.TEXT_SIMILARITY_CURIE,
    (OAEM.SIMILARITY_MODE, "babbage"): OAEMM.TEXT_SIMILARITY_BABBAGE,
    (OAEM.SIMILARITY_MODE, "ada"): OAEMM.TEXT_SIMILARITY_ADA,
    (OAEM.SIMILARITY_MODE, "text-embedding-ada-002"): OAEMM.TEXT_EMBED_ADA_002,
    (OAEM.SIMILARITY_MODE, "text-embedding-3-small"): OAEMM.TEXT_EMBED_3_SMALL,
    (OAEM.SIMILARITY_MODE, "text-embedding-3-large"): OAEMM.TEXT_EMBED_3_LARGE,
    (OAEM.TEXT_SEARCH_MODE, "davinci"): OAEMM.TEXT_SEARCH_DAVINCI_DOC,
    (OAEM.TEXT_SEARCH_MODE, "curie"): OAEMM.TEXT_SEARCH_CURIE_DOC,
    (OAEM.TEXT_SEARCH_MODE, "babbage"): OAEMM.TEXT_SEARCH_BABBAGE_DOC,
    (OAEM.TEXT_SEARCH_MODE, "ada"): OAEMM.TEXT_SEARCH_ADA_DOC,
    (OAEM.TEXT_SEARCH_MODE, "text-embedding-ada-002"): OAEMM.TEXT_EMBED_ADA_002,
    (OAEM.TEXT_SEARCH_MODE, "text-embedding-3-large"): OAEMM.TEXT_EMBED_3_LARGE,
    (OAEM.TEXT_SEARCH_MODE, "text-embedding-3-small"): OAEMM.TEXT_EMBED_3_SMALL,
}
@embedding_retry_decorator
def get_embedding(client: OpenAI, text: str, engine: str, **kwargs: Any) -> List[float]:
    """Get embedding.

    NOTE: Copied from OpenAI's embedding utils:
    https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py

    Copied here to avoid importing unnecessary dependencies
    like matplotlib, plotly, scipy, sklearn.
    """
    # newlines are replaced per OpenAI's original embedding utils
    sanitized = text.replace("\n", " ")
    response = client.embeddings.create(input=[sanitized], model=engine, **kwargs)
    return response.data[0].embedding
@embedding_retry_decorator
async def aget_embedding(
    aclient: AsyncOpenAI, text: str, engine: str, **kwargs: Any
) -> List[float]:
    """Asynchronously get embedding.

    NOTE: Copied from OpenAI's embedding utils:
    https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py

    Copied here to avoid importing unnecessary dependencies
    like matplotlib, plotly, scipy, sklearn.
    """
    # newlines are replaced per OpenAI's original embedding utils
    sanitized = text.replace("\n", " ")
    response = await aclient.embeddings.create(
        input=[sanitized], model=engine, **kwargs
    )
    return response.data[0].embedding
@embedding_retry_decorator
def get_embeddings(
    client: OpenAI, list_of_text: List[str], engine: str, **kwargs: Any
) -> List[List[float]]:
    """Get embeddings for a batch of texts.

    NOTE: Copied from OpenAI's embedding utils:
    https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py

    Copied here to avoid importing unnecessary dependencies
    like matplotlib, plotly, scipy, sklearn.

    Raises:
        ValueError: if more than 2048 texts are passed in a single batch.
    """
    # Use an explicit exception instead of ``assert`` so the batch-size check
    # still runs under ``python -O`` (asserts are stripped by optimization).
    if len(list_of_text) > 2048:
        raise ValueError("The batch size should not be larger than 2048.")

    # newlines are replaced per OpenAI's original embedding utils
    list_of_text = [text.replace("\n", " ") for text in list_of_text]

    data = client.embeddings.create(input=list_of_text, model=engine, **kwargs).data
    return [d.embedding for d in data]
161431 | """Langchain Embedding Wrapper Module."""
from typing import TYPE_CHECKING, List, Optional
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
if TYPE_CHECKING:
from llama_index.legacy.bridge.langchain import Embeddings as LCEmbeddings
class LangchainEmbedding(BaseEmbedding):
    """External embeddings (taken from Langchain).

    Args:
        langchain_embedding (langchain.embeddings.Embeddings): Langchain
            embeddings class.
    """

    # the wrapped Langchain embeddings object
    _langchain_embedding: "LCEmbeddings" = PrivateAttr()
    # ensures the sync-fallback warning is printed at most once
    _async_not_implemented_warned: bool = PrivateAttr(default=False)

    def __init__(
        self,
        langchain_embeddings: "LCEmbeddings",
        model_name: Optional[str] = None,
        embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
        callback_manager: Optional[CallbackManager] = None,
    ):
        # Attempt to infer a useful model name when none is given explicitly:
        # prefer the wrapped object's ``model_name`` attribute, then ``model``,
        # falling back to the wrapped class's name. (Removes the previous no-op
        # ``model_name = model_name`` branch.)
        if model_name is None:
            if hasattr(langchain_embeddings, "model_name"):
                model_name = langchain_embeddings.model_name
            elif hasattr(langchain_embeddings, "model"):
                model_name = langchain_embeddings.model
            else:
                model_name = type(langchain_embeddings).__name__

        # private attrs must be set before pydantic's __init__ runs
        self._langchain_embedding = langchain_embeddings
        super().__init__(
            embed_batch_size=embed_batch_size,
            callback_manager=callback_manager,
            model_name=model_name,
        )

    @classmethod
    def class_name(cls) -> str:
        return "LangchainEmbedding"

    def _async_not_implemented_warn_once(self) -> None:
        """Warn (once) that async embedding falls back to the sync path."""
        if not self._async_not_implemented_warned:
            print("Async embedding not available, falling back to sync method.")
            self._async_not_implemented_warned = True

    def _get_query_embedding(self, query: str) -> List[float]:
        """Get query embedding."""
        return self._langchain_embedding.embed_query(query)

    async def _aget_query_embedding(self, query: str) -> List[float]:
        """Get query embedding asynchronously, falling back to sync."""
        try:
            return await self._langchain_embedding.aembed_query(query)
        except NotImplementedError:
            # Warn the user that sync is being used
            self._async_not_implemented_warn_once()
            return self._get_query_embedding(query)

    async def _aget_text_embedding(self, text: str) -> List[float]:
        """Get text embedding asynchronously, falling back to sync."""
        try:
            embeds = await self._langchain_embedding.aembed_documents([text])
            return embeds[0]
        except NotImplementedError:
            # Warn the user that sync is being used
            self._async_not_implemented_warn_once()
            return self._get_text_embedding(text)

    def _get_text_embedding(self, text: str) -> List[float]:
        """Get text embedding."""
        return self._langchain_embedding.embed_documents([text])[0]

    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Get text embeddings."""
        return self._langchain_embedding.embed_documents(texts)
161462 | """Token splitter."""
import logging
from typing import Callable, List, Optional
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks.base import CallbackManager
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.constants import DEFAULT_CHUNK_OVERLAP, DEFAULT_CHUNK_SIZE
from llama_index.legacy.node_parser.interface import MetadataAwareTextSplitter
from llama_index.legacy.node_parser.node_utils import default_id_func
from llama_index.legacy.node_parser.text.utils import split_by_char, split_by_sep
from llama_index.legacy.schema import Document
from llama_index.legacy.utils import get_tokenizer
_logger = logging.getLogger(__name__)
# NOTE: this is the number of tokens we reserve for metadata formatting
DEFAULT_METADATA_FORMAT_LEN = 2
class TokenTextSplitter(MetadataAwareTextSplitter):
    """Implementation of splitting text that looks at word tokens."""

    chunk_size: int = Field(
        default=DEFAULT_CHUNK_SIZE,
        description="The token chunk size for each chunk.",
        gt=0,
    )
    chunk_overlap: int = Field(
        default=DEFAULT_CHUNK_OVERLAP,
        description="The token overlap of each chunk when splitting.",
        # BUG FIX: pydantic's constraint keyword is ``ge`` — the previous
        # ``gte=0`` was silently ignored, so negative overlaps were accepted.
        ge=0,
    )
    separator: str = Field(
        default=" ", description="Default separator for splitting into words"
    )
    backup_separators: List = Field(
        default_factory=list, description="Additional separators for splitting."
    )

    _tokenizer: Callable = PrivateAttr()
    _split_fns: List[Callable] = PrivateAttr()

    def __init__(
        self,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
        chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
        tokenizer: Optional[Callable] = None,
        callback_manager: Optional[CallbackManager] = None,
        separator: str = " ",
        # NOTE: mutable default kept for interface compatibility; it is safe
        # here because the list is only read, never mutated.
        backup_separators: Optional[List[str]] = ["\n"],
        include_metadata: bool = True,
        include_prev_next_rel: bool = True,
        id_func: Optional[Callable[[int, Document], str]] = None,
    ):
        """Initialize with parameters.

        Raises:
            ValueError: if ``chunk_overlap`` exceeds ``chunk_size``.
        """
        if chunk_overlap > chunk_size:
            raise ValueError(
                f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
                f"({chunk_size}), should be smaller."
            )
        callback_manager = callback_manager or CallbackManager([])
        id_func = id_func or default_id_func
        self._tokenizer = tokenizer or get_tokenizer()

        # split order: configured separator first, then backup separators,
        # with a per-character split as the last resort
        all_seps = [separator] + (backup_separators or [])
        self._split_fns = [split_by_sep(sep) for sep in all_seps] + [split_by_char()]

        super().__init__(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separator=separator,
            backup_separators=backup_separators,
            callback_manager=callback_manager,
            include_metadata=include_metadata,
            include_prev_next_rel=include_prev_next_rel,
            id_func=id_func,
        )

    @classmethod
    def from_defaults(
        cls,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
        chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
        separator: str = " ",
        backup_separators: Optional[List[str]] = ["\n"],
        callback_manager: Optional[CallbackManager] = None,
        include_metadata: bool = True,
        include_prev_next_rel: bool = True,
        id_func: Optional[Callable[[int, Document], str]] = None,
    ) -> "TokenTextSplitter":
        """Initialize with default parameters."""
        callback_manager = callback_manager or CallbackManager([])
        return cls(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separator=separator,
            backup_separators=backup_separators,
            callback_manager=callback_manager,
            include_metadata=include_metadata,
            include_prev_next_rel=include_prev_next_rel,
            id_func=id_func,
        )

    @classmethod
    def class_name(cls) -> str:
        return "TokenTextSplitter"

    def split_text_metadata_aware(self, text: str, metadata_str: str) -> List[str]:
        """Split text into chunks, reserving space required for metadata str.

        Raises:
            ValueError: if the metadata alone exceeds the chunk size.
        """
        metadata_len = len(self._tokenizer(metadata_str)) + DEFAULT_METADATA_FORMAT_LEN
        effective_chunk_size = self.chunk_size - metadata_len
        if effective_chunk_size <= 0:
            raise ValueError(
                f"Metadata length ({metadata_len}) is longer than chunk size "
                f"({self.chunk_size}). Consider increasing the chunk size or "
                "decreasing the size of your metadata to avoid this."
            )
        elif effective_chunk_size < 50:
            print(
                f"Metadata length ({metadata_len}) is close to chunk size "
                f"({self.chunk_size}). Resulting chunks are less than 50 tokens. "
                "Consider increasing the chunk size or decreasing the size of "
                "your metadata to avoid this.",
                flush=True,
            )

        return self._split_text(text, chunk_size=effective_chunk_size)

    def split_text(self, text: str) -> List[str]:
        """Split text into chunks."""
        return self._split_text(text, chunk_size=self.chunk_size)

    def _split_text(self, text: str, chunk_size: int) -> List[str]:
        """Split text into chunks up to chunk_size."""
        if text == "":
            return [text]

        # wrap the whole operation in a chunking callback event
        with self.callback_manager.event(
            CBEventType.CHUNKING, payload={EventPayload.CHUNKS: [text]}
        ) as event:
            splits = self._split(text, chunk_size)
            chunks = self._merge(splits, chunk_size)

            event.on_end(
                payload={EventPayload.CHUNKS: chunks},
            )

        return chunks

    def _split(self, text: str, chunk_size: int) -> List[str]:
        """Break text into splits that are smaller than chunk size.

        The order of splitting is:
        1. split by separator
        2. split by backup separators (if any)
        3. split by characters

        NOTE: the splits contain the separators.
        """
        if len(self._tokenizer(text)) <= chunk_size:
            return [text]

        # use the first split function that actually produces multiple pieces
        for split_fn in self._split_fns:
            splits = split_fn(text)
            if len(splits) > 1:
                break

        new_splits = []
        for split in splits:
            split_len = len(self._tokenizer(split))
            if split_len <= chunk_size:
                new_splits.append(split)
            else:
                # recursively split
                new_splits.extend(self._split(split, chunk_size=chunk_size))
        return new_splits

    def _merge(self, splits: List[str], chunk_size: int) -> List[str]:
        """Merge splits into chunks.

        The high-level idea is to keep adding splits to a chunk until we
        exceed the chunk size, then we start a new chunk with overlap.

        When we start a new chunk, we pop off the first element of the previous
        chunk until the total length is less than the chunk size.
        """
        chunks: List[str] = []

        cur_chunk: List[str] = []
        cur_len = 0
        for split in splits:
            split_len = len(self._tokenizer(split))
            if split_len > chunk_size:
                # BUG FIX: ``logger.warning`` was previously called with two
                # positional f-strings; the second was interpreted as a
                # %-format argument and broke log formatting. Use a single
                # lazily-formatted message instead.
                _logger.warning(
                    "Got a split of size %d, larger than chunk size %d.",
                    split_len,
                    chunk_size,
                )

            # if we exceed the chunk size after adding the new split, then
            # we need to end the current chunk and start a new one
            if cur_len + split_len > chunk_size:
                # end the previous chunk
                chunk = "".join(cur_chunk).strip()
                if chunk:
                    chunks.append(chunk)

                # start a new chunk with overlap
                # keep popping off the first element of the previous chunk until:
                # 1. the current chunk length is less than chunk overlap
                # 2. the total length is less than chunk size
                # BUG FIX: guard on ``cur_chunk`` so an oversized split cannot
                # cause ``pop(0)`` on an empty list (IndexError).
                while cur_chunk and (
                    cur_len > self.chunk_overlap or cur_len + split_len > chunk_size
                ):
                    # pop off the first element
                    first_chunk = cur_chunk.pop(0)
                    cur_len -= len(self._tokenizer(first_chunk))

            cur_chunk.append(split)
            cur_len += split_len

        # handle the last chunk
        chunk = "".join(cur_chunk).strip()
        if chunk:
            chunks.append(chunk)

        return chunks
161524 | class WandbCallbackHandler(BaseCallbackHandler):
"""Callback handler that logs events to wandb.
NOTE: this is a beta feature. The usage within our codebase, and the interface
may change.
Use the `WandbCallbackHandler` to log trace events to wandb. This handler is
useful for debugging and visualizing the trace events. It captures the payload of
the events and logs them to wandb. The handler also tracks the start and end of
events. This is particularly useful for debugging your LLM calls.
The `WandbCallbackHandler` can also be used to log the indices and graphs to wandb
using the `persist_index` method. This will save the indexes as artifacts in wandb.
The `load_storage_context` method can be used to load the indexes from wandb
artifacts. This method will return a `StorageContext` object that can be used to
build the index, using `load_index_from_storage`, `load_indices_from_storage` or
`load_graph_from_storage` functions.
Args:
event_starts_to_ignore (Optional[List[CBEventType]]): list of event types to
ignore when tracking event starts.
event_ends_to_ignore (Optional[List[CBEventType]]): list of event types to
ignore when tracking event ends.
"""
def __init__(
self,
run_args: Optional[WandbRunArgs] = None,
tokenizer: Optional[Callable[[str], List]] = None,
event_starts_to_ignore: Optional[List[CBEventType]] = None,
event_ends_to_ignore: Optional[List[CBEventType]] = None,
) -> None:
try:
import wandb
from wandb.sdk.data_types import trace_tree
self._wandb = wandb
self._trace_tree = trace_tree
except ImportError:
raise ImportError(
"WandbCallbackHandler requires wandb. "
"Please install it with `pip install wandb`."
)
from llama_index.legacy.indices import (
ComposableGraph,
GPTEmptyIndex,
GPTKeywordTableIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTSQLStructStoreIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
SummaryIndex,
)
self._IndexType = (
ComposableGraph,
GPTKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTRAKEKeywordTableIndex,
SummaryIndex,
GPTEmptyIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
GPTSQLStructStoreIndex,
)
self._run_args = run_args
# Check if a W&B run is already initialized; if not, initialize one
self._ensure_run(should_print_url=(self._wandb.run is None))
self._event_pairs_by_id: Dict[str, List[CBEvent]] = defaultdict(list)
self._cur_trace_id: Optional[str] = None
self._trace_map: Dict[str, List[str]] = defaultdict(list)
self.tokenizer = tokenizer or get_tokenizer()
self._token_counter = TokenCounter(tokenizer=self.tokenizer)
event_starts_to_ignore = (
event_starts_to_ignore if event_starts_to_ignore else []
)
event_ends_to_ignore = event_ends_to_ignore if event_ends_to_ignore else []
super().__init__(
event_starts_to_ignore=event_starts_to_ignore,
event_ends_to_ignore=event_ends_to_ignore,
)
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
parent_id: str = "",
**kwargs: Any,
) -> str:
"""Store event start data by event type.
Args:
event_type (CBEventType): event type to store.
payload (Optional[Dict[str, Any]]): payload to store.
event_id (str): event id to store.
parent_id (str): parent event id.
"""
event = CBEvent(event_type, payload=payload, id_=event_id)
self._event_pairs_by_id[event.id_].append(event)
return event.id_
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
**kwargs: Any,
) -> None:
"""Store event end data by event type.
Args:
event_type (CBEventType): event type to store.
payload (Optional[Dict[str, Any]]): payload to store.
event_id (str): event id to store.
"""
event = CBEvent(event_type, payload=payload, id_=event_id)
self._event_pairs_by_id[event.id_].append(event)
self._trace_map = defaultdict(list)
def start_trace(self, trace_id: Optional[str] = None) -> None:
"""Launch a trace."""
self._trace_map = defaultdict(list)
self._cur_trace_id = trace_id
self._start_time = datetime.now()
    def end_trace(
        self,
        trace_id: Optional[str] = None,
        trace_map: Optional[Dict[str, List[str]]] = None,
    ) -> None:
        """Finish a trace: record the end time and log the tree to W&B."""
        # Ensure W&B run is initialized
        self._ensure_run()
        self._trace_map = trace_map or defaultdict(list)
        self._end_time = datetime.now()
        # Log the trace map to wandb
        # We can control what trace ids we want to log here.
        self.log_trace_tree()
        # TODO (ayulockin): Log the LLM token counts to wandb when weave is ready
def log_trace_tree(self) -> None:
"""Log the trace tree to wandb."""
try:
child_nodes = self._trace_map["root"]
root_span = self._convert_event_pair_to_wb_span(
self._event_pairs_by_id[child_nodes[0]],
trace_id=self._cur_trace_id if len(child_nodes) > 1 else None,
)
if len(child_nodes) == 1:
child_nodes = self._trace_map[child_nodes[0]]
root_span = self._build_trace_tree(child_nodes, root_span)
else:
root_span = self._build_trace_tree(child_nodes, root_span)
if root_span:
root_trace = self._trace_tree.WBTraceTree(root_span)
if self._wandb.run:
self._wandb.run.log({"trace": root_trace})
self._wandb.termlog("Logged trace tree to W&B.")
except Exception as e:
print(f"Failed to log trace tree to W&B: {e}")
# ignore errors to not break user code
def persist_index(
self, index: "IndexType", index_name: str, persist_dir: Union[str, None] = None
) -> None:
"""Upload an index to wandb as an artifact. You can learn more about W&B
artifacts here: https://docs.wandb.ai/guides/artifacts.
For the `ComposableGraph` index, the root id is stored as artifact metadata.
Args:
index (IndexType): index to upload.
index_name (str): name of the index. This will be used as the artifact name.
persist_dir (Union[str, None]): directory to persist the index. If None, a
temporary directory will be created and used.
"""
if persist_dir is None:
persist_dir = f"{self._wandb.run.dir}/storage" # type: ignore
_default_persist_dir = True
if not os.path.exists(persist_dir):
os.makedirs(persist_dir)
if isinstance(index, self._IndexType):
try:
index.storage_context.persist(persist_dir) # type: ignore
metadata = None
# For the `ComposableGraph` index, store the root id as metadata
if isinstance(index, self._IndexType[0]):
root_id = index.root_id
metadata = {"root_id": root_id}
self._upload_index_as_wb_artifact(persist_dir, index_name, metadata)
except Exception as e:
# Silently ignore errors to not break user code
self._print_upload_index_fail_message(e)
# clear the default storage dir
if _default_persist_dir:
shutil.rmtree(persist_dir, ignore_errors=True) | |
161545 | try:
import pydantic.v1 as pydantic
from pydantic.v1 import (
BaseConfig,
BaseModel,
Field,
PrivateAttr,
StrictFloat,
StrictInt,
StrictStr,
create_model,
root_validator,
validator,
)
from pydantic.v1.error_wrappers import ValidationError
from pydantic.v1.fields import FieldInfo
from pydantic.v1.generics import GenericModel
except ImportError:
import pydantic # type: ignore
from pydantic import (
BaseConfig,
BaseModel,
Field,
PrivateAttr,
StrictFloat,
StrictInt,
StrictStr,
create_model,
root_validator,
validator,
)
from pydantic.error_wrappers import ValidationError
from pydantic.fields import FieldInfo
from pydantic.generics import GenericModel
__all__ = [
"pydantic",
"BaseModel",
"Field",
"PrivateAttr",
"root_validator",
"validator",
"create_model",
"StrictFloat",
"StrictInt",
"StrictStr",
"FieldInfo",
"ValidationError",
"GenericModel",
"BaseConfig",
] | |
161546 | import langchain
from langchain.agents import AgentExecutor, AgentType, initialize_agent
# agents and tools
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.base_language import BaseLanguageModel
# callback
from langchain.callbacks.base import BaseCallbackHandler, BaseCallbackManager
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.chat_models.base import BaseChatModel
from langchain.docstore.document import Document
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
# chat and memory
from langchain.memory.chat_memory import BaseChatMemory
from langchain.output_parsers import ResponseSchema
# prompts
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
AIMessagePromptTemplate,
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
# schema
from langchain.schema import (
AIMessage,
BaseMemory,
BaseMessage,
BaseOutputParser,
ChatGeneration,
ChatMessage,
FunctionMessage,
HumanMessage,
LLMResult,
SystemMessage,
)
# embeddings
from langchain.schema.embeddings import Embeddings
from langchain.schema.prompt_template import BasePromptTemplate
# input & output
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.tools import BaseTool, StructuredTool, Tool
from langchain_community.chat_models import ChatAnyscale, ChatOpenAI
from langchain_community.embeddings import (
HuggingFaceBgeEmbeddings,
HuggingFaceEmbeddings,
)
# LLMs
from langchain_community.llms import AI21, BaseLLM, Cohere, FakeListLLM, OpenAI
__all__ = [
"langchain",
"BaseLLM",
"FakeListLLM",
"OpenAI",
"AI21",
"Cohere",
"BaseChatModel",
"ChatAnyscale",
"ChatOpenAI",
"BaseLanguageModel",
"Embeddings",
"HuggingFaceEmbeddings",
"HuggingFaceBgeEmbeddings",
"PromptTemplate",
"BasePromptTemplate",
"ConditionalPromptSelector",
"is_chat_model",
"AIMessagePromptTemplate",
"ChatPromptTemplate",
"HumanMessagePromptTemplate",
"BaseMessagePromptTemplate",
"SystemMessagePromptTemplate",
"BaseChatMemory",
"ConversationBufferMemory",
"ChatMessageHistory",
"BaseToolkit",
"AgentType",
"AgentExecutor",
"initialize_agent",
"StructuredTool",
"Tool",
"BaseTool",
"ResponseSchema",
"BaseCallbackHandler",
"BaseCallbackManager",
"AIMessage",
"FunctionMessage",
"BaseMessage",
"ChatMessage",
"HumanMessage",
"SystemMessage",
"BaseMemory",
"BaseOutputParser",
"LLMResult",
"ChatGeneration",
"Document",
"RecursiveCharacterTextSplitter",
"TextSplitter",
] | |
161679 | """Init file for langchain helpers."""
# Fail fast with an actionable message when the optional langchain
# dependency is missing; everything in this package requires it.
try:
    import langchain  # noqa
except ImportError:
    raise ImportError(
        "langchain not installed. "
        "Please install langchain with `pip install llama_index[langchain]`."
    )
161681 | from queue import Queue
from threading import Event
from typing import Any, Generator, List, Optional
from uuid import UUID
from llama_index.legacy.bridge.langchain import BaseCallbackHandler, LLMResult
class StreamingGeneratorCallbackHandler(BaseCallbackHandler):
    """Streaming callback handler.

    Collects tokens emitted by a langchain LLM onto a thread-safe queue and
    exposes them to the caller as a generator via `get_response_gen`.
    """

    def __init__(self) -> None:
        self._token_queue: Queue = Queue()
        self._done = Event()

    def __deepcopy__(self, memo: Any) -> "StreamingGeneratorCallbackHandler":
        # NOTE: hack to bypass deepcopy in langchain
        return self

    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
        """Run on new LLM token. Only available when streaming is enabled."""
        self._token_queue.put_nowait(token)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Mark the stream as finished."""
        self._done.set()

    def on_llm_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> None:
        """Mark the stream as finished on error so consumers are not left hanging."""
        self._done.set()

    def get_response_gen(self) -> Generator:
        """Yield tokens as they arrive until the stream is done and drained.

        BUG FIX: the previous implementation busy-waited (spun at 100% CPU)
        whenever the queue was empty but the stream was still running. We now
        block briefly on the done-event, which wakes early when the stream ends.
        """
        while True:
            if not self._token_queue.empty():
                token = self._token_queue.get_nowait()
                yield token
            elif self._done.is_set():
                break
            else:
                # Queue empty and stream still running: sleep briefly instead
                # of spinning; wakes immediately if `_done` is set meanwhile.
                self._done.wait(0.01)
161750 | """SQL wrapper around SQLDatabase in langchain."""
from typing import Any, Dict, Iterable, List, Optional, Tuple
from sqlalchemy import MetaData, create_engine, insert, inspect, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import OperationalError, ProgrammingError
class SQLDatabase:
    """SQL Database.
    This class provides a wrapper around the SQLAlchemy engine to interact with a SQL
    database.
    It provides methods to execute SQL commands, insert data into tables, and retrieve
    information about the database schema.
    It also supports optional features such as including or excluding specific tables,
    sampling rows for table info,
    including indexes in table info, and supporting views.
    Based on langchain SQLDatabase.
    https://github.com/langchain-ai/langchain/blob/e355606b1100097665207ca259de6dc548d44c78/libs/langchain/langchain/utilities/sql_database.py#L39
    Args:
        engine (Engine): The SQLAlchemy engine instance to use for database operations.
        schema (Optional[str]): The name of the schema to use, if any.
        metadata (Optional[MetaData]): The metadata instance to use, if any.
        ignore_tables (Optional[List[str]]): List of table names to ignore. If set,
            include_tables must be None.
        include_tables (Optional[List[str]]): List of table names to include. If set,
            ignore_tables must be None.
        sample_rows_in_table_info (int): The number of sample rows to include in table
            info.
        indexes_in_table_info (bool): Whether to include indexes in table info.
        custom_table_info (Optional[dict]): Custom table info to use.
        view_support (bool): Whether to support views.
        max_string_length (int): The maximum string length to use.
    """

    def __init__(
        self,
        engine: Engine,
        schema: Optional[str] = None,
        metadata: Optional[MetaData] = None,
        ignore_tables: Optional[List[str]] = None,
        include_tables: Optional[List[str]] = None,
        sample_rows_in_table_info: int = 3,
        indexes_in_table_info: bool = False,
        custom_table_info: Optional[dict] = None,
        view_support: bool = False,
        max_string_length: int = 300,
    ):
        """Wrap an existing SQLAlchemy engine and reflect the usable tables."""
        self._engine = engine
        self._schema = schema
        # include_tables and ignore_tables are mutually exclusive by contract.
        if include_tables and ignore_tables:
            raise ValueError("Cannot specify both include_tables and ignore_tables")
        self._inspector = inspect(self._engine)
        # including view support by adding the views as well as tables to the all
        # tables list if view_support is True
        self._all_tables = set(
            self._inspector.get_table_names(schema=schema)
            + (self._inspector.get_view_names(schema=schema) if view_support else [])
        )
        self._include_tables = set(include_tables) if include_tables else set()
        if self._include_tables:
            # Fail early if the caller names tables that do not exist.
            missing_tables = self._include_tables - self._all_tables
            if missing_tables:
                raise ValueError(
                    f"include_tables {missing_tables} not found in database"
                )
        self._ignore_tables = set(ignore_tables) if ignore_tables else set()
        if self._ignore_tables:
            missing_tables = self._ignore_tables - self._all_tables
            if missing_tables:
                raise ValueError(
                    f"ignore_tables {missing_tables} not found in database"
                )
        # Fall back to all tables when the include/ignore filters leave nothing.
        usable_tables = self.get_usable_table_names()
        self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
        if not isinstance(sample_rows_in_table_info, int):
            raise TypeError("sample_rows_in_table_info must be an integer")
        self._sample_rows_in_table_info = sample_rows_in_table_info
        self._indexes_in_table_info = indexes_in_table_info
        self._custom_table_info = custom_table_info
        if self._custom_table_info:
            if not isinstance(self._custom_table_info, dict):
                raise TypeError(
                    "table_info must be a dictionary with table names as keys and the "
                    "desired table info as values"
                )
            # only keep the tables that are also present in the database
            intersection = set(self._custom_table_info).intersection(self._all_tables)
            self._custom_table_info = {
                table: info
                for table, info in self._custom_table_info.items()
                if table in intersection
            }
        self._max_string_length = max_string_length
        self._metadata = metadata or MetaData()
        # including view support if view_support = true
        self._metadata.reflect(
            views=view_support,
            bind=self._engine,
            only=list(self._usable_tables),
            schema=self._schema,
        )

    @property
    def engine(self) -> Engine:
        """Return SQL Alchemy engine."""
        return self._engine

    @property
    def metadata_obj(self) -> MetaData:
        """Return SQL Alchemy metadata."""
        return self._metadata

    @classmethod
    def from_uri(
        cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
    ) -> "SQLDatabase":
        """Construct a SQLAlchemy engine from URI."""
        _engine_args = engine_args or {}
        return cls(create_engine(database_uri, **_engine_args), **kwargs)

    @property
    def dialect(self) -> str:
        """Return string representation of dialect to use."""
        return self._engine.dialect.name

    def get_usable_table_names(self) -> Iterable[str]:
        """Get names of tables available."""
        if self._include_tables:
            return sorted(self._include_tables)
        return sorted(self._all_tables - self._ignore_tables)

    def get_table_columns(self, table_name: str) -> List[Any]:
        """Get table columns."""
        return self._inspector.get_columns(table_name)

    def get_single_table_info(self, table_name: str) -> str:
        """Get table info for a single table."""
        # same logic as table_info, but with specific table names
        template = (
            "Table '{table_name}' has columns: {columns}, "
            "and foreign keys: {foreign_keys}."
        )
        columns = []
        for column in self._inspector.get_columns(table_name, schema=self._schema):
            # Include the column comment (if present) to give the LLM more context.
            if column.get("comment"):
                columns.append(
                    f"{column['name']} ({column['type']!s}): "
                    f"'{column.get('comment')}'"
                )
            else:
                columns.append(f"{column['name']} ({column['type']!s})")
        column_str = ", ".join(columns)
        foreign_keys = []
        for foreign_key in self._inspector.get_foreign_keys(
            table_name, schema=self._schema
        ):
            foreign_keys.append(
                f"{foreign_key['constrained_columns']} -> "
                f"{foreign_key['referred_table']}.{foreign_key['referred_columns']}"
            )
        foreign_key_str = ", ".join(foreign_keys)
        return template.format(
            table_name=table_name, columns=column_str, foreign_keys=foreign_key_str
        )

    def insert_into_table(self, table_name: str, data: dict) -> None:
        """Insert data into a table."""
        table = self._metadata.tables[table_name]
        stmt = insert(table).values(**data)
        with self._engine.begin() as connection:
            connection.execute(stmt)

    def truncate_word(self, content: Any, *, length: int, suffix: str = "...") -> str:
        """
        Truncate a string to a certain number of words, based on the max string
        length.
        """
        # Non-string content (and non-positive lengths) pass through unchanged,
        # despite the declared `str` return type.
        if not isinstance(content, str) or length <= 0:
            return content
        if len(content) <= length:
            return content
        # Cut at the last word boundary that fits, leaving room for the suffix.
        return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix

    def run_sql(self, command: str) -> Tuple[str, Dict]:
        """Execute a SQL statement and return a string representing the results.
        If the statement returns rows, a string of the results is returned.
        If the statement returns no rows, an empty string is returned.
        """
        with self._engine.begin() as connection:
            try:
                if self._schema:
                    # NOTE(review): naive text rewrite — prefixes *every* "FROM "
                    # with the schema name, which can mangle subqueries, joins,
                    # or string literals containing "FROM ". A proper fix would
                    # set the search path or qualify via a SQL parser.
                    command = command.replace("FROM ", f"FROM {self._schema}.")
                cursor = connection.execute(text(command))
            except (ProgrammingError, OperationalError) as exc:
                raise NotImplementedError(
                    f"Statement {command!r} is invalid SQL."
                ) from exc
            if cursor.returns_rows:
                result = cursor.fetchall()
                # truncate the results to the max string length
                # we can't use str(result) directly because it automatically truncates long strings
                truncated_results = []
                for row in result:
                    # truncate each column, then convert the row to a tuple
                    truncated_row = tuple(
                        self.truncate_word(column, length=self._max_string_length)
                        for column in row
                    )
                    truncated_results.append(truncated_row)
                return str(truncated_results), {
                    "result": truncated_results,
                    "col_keys": list(cursor.keys()),
                }
            return "", {}
161756 | """Pydantic output parser."""
import json
from typing import Any, List, Optional, Type
from llama_index.legacy.output_parsers.base import ChainableOutputParser
from llama_index.legacy.output_parsers.utils import extract_json_str
from llama_index.legacy.types import Model
PYDANTIC_FORMAT_TMPL = """
Here's a JSON schema to follow:
{schema}
Output a valid JSON object but do not repeat the schema.
"""
class PydanticOutputParser(ChainableOutputParser):
    """Pydantic Output Parser.

    Args:
        output_cls (BaseModel): Pydantic output class.
        excluded_schema_keys_from_format (Optional[List]): top-level schema keys
            to omit from the rendered format instructions.
        pydantic_format_tmpl (str): template used to wrap the JSON schema.
    """

    def __init__(
        self,
        output_cls: Type[Model],
        excluded_schema_keys_from_format: Optional[List] = None,
        pydantic_format_tmpl: str = PYDANTIC_FORMAT_TMPL,
    ) -> None:
        """Init params."""
        self._output_cls = output_cls
        self._excluded_schema_keys_from_format = excluded_schema_keys_from_format or []
        self._pydantic_format_tmpl = pydantic_format_tmpl

    @property
    def output_cls(self) -> Type[Model]:
        """The pydantic class that parsed output is validated into."""
        return self._output_cls

    @property
    def format_string(self) -> str:
        """Format string (JSON-escaped for use in `str.format` templates)."""
        return self.get_format_string(escape_json=True)

    def get_format_string(self, escape_json: bool = True) -> str:
        """Build the format instructions containing the output JSON schema.

        Args:
            escape_json (bool): if True, double every brace so the result can
                be embedded safely in a `str.format`-style prompt template.
        """
        schema_dict = self._output_cls.schema()
        for key in self._excluded_schema_keys_from_format:
            # ROBUSTNESS FIX: use pop() with a default so an excluded key that
            # is absent from the schema is ignored instead of raising KeyError.
            schema_dict.pop(key, None)
        schema_str = json.dumps(schema_dict)
        output_str = self._pydantic_format_tmpl.format(schema=schema_str)
        if escape_json:
            return output_str.replace("{", "{{").replace("}", "}}")
        else:
            return output_str

    def parse(self, text: str) -> Any:
        """Parse, validate, and correct errors programmatically."""
        json_str = extract_json_str(text)
        return self._output_cls.parse_raw(json_str)

    def format(self, query: str) -> str:
        """Format a query with structured output formatting instructions."""
        return query + "\n\n" + self.get_format_string(escape_json=True)
class ChatPromptTemplate(BasePromptTemplate):
    """A prompt template composed of a sequence of chat message templates."""

    message_templates: List[ChatMessage]

    def __init__(
        self,
        message_templates: List[ChatMessage],
        prompt_type: str = PromptType.CUSTOM,
        output_parser: Optional[BaseOutputParser] = None,
        metadata: Optional[Dict[str, Any]] = None,
        template_var_mappings: Optional[Dict[str, Any]] = None,
        function_mappings: Optional[Dict[str, Callable]] = None,
        **kwargs: Any,
    ):
        if metadata is None:
            metadata = {}
        metadata["prompt_type"] = prompt_type
        # Collect the template variables from every message's content.
        template_vars = []
        for message_template in message_templates:
            template_vars.extend(get_template_vars(message_template.content or ""))
        super().__init__(
            message_templates=message_templates,
            kwargs=kwargs,
            metadata=metadata,
            output_parser=output_parser,
            template_vars=template_vars,
            template_var_mappings=template_var_mappings,
            function_mappings=function_mappings,
        )

    def partial_format(self, **kwargs: Any) -> "ChatPromptTemplate":
        """Return a copy of this template with the given kwargs pre-bound."""
        prompt = deepcopy(self)
        prompt.kwargs.update(kwargs)
        return prompt

    def format(
        self,
        llm: Optional[BaseLLM] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        **kwargs: Any,
    ) -> str:
        """Format the prompt into a single string."""
        del llm  # unused
        messages = self.format_messages(**kwargs)
        if messages_to_prompt is not None:
            return messages_to_prompt(messages)
        return default_messages_to_prompt(messages)

    def format_messages(
        self, llm: Optional[BaseLLM] = None, **kwargs: Any
    ) -> List[ChatMessage]:
        """Format the prompt into a list of chat messages."""
        # BUG FIX: this docstring previously appeared *after* the `del llm`
        # statement, making it a no-op string literal instead of a docstring.
        del llm  # unused
        all_kwargs = {
            **self.kwargs,
            **kwargs,
        }
        mapped_all_kwargs = self._map_all_vars(all_kwargs)
        messages: List[ChatMessage] = []
        for message_template in self.message_templates:
            template_vars = get_template_vars(message_template.content or "")
            # Only pass kwargs that this message's template actually references.
            relevant_kwargs = {
                k: v for k, v in mapped_all_kwargs.items() if k in template_vars
            }
            content_template = message_template.content or ""
            # if there's mappings specified, make sure those are used
            content = content_template.format(**relevant_kwargs)
            message: ChatMessage = message_template.copy()
            message.content = content
            messages.append(message)
        if self.output_parser is not None:
            messages = self.output_parser.format_messages(messages)
        return messages

    def get_template(self, llm: Optional[BaseLLM] = None) -> str:
        """Render all message templates as one prompt string."""
        return default_messages_to_prompt(self.message_templates)

    def _as_query_component(
        self, llm: Optional[BaseLLM] = None, **kwargs: Any
    ) -> QueryComponent:
        """As query component."""
        return PromptComponent(prompt=self, format_messages=True, llm=llm)
class SelectorPromptTemplate(BasePromptTemplate):
    """A template that picks between alternatives based on the LLM in use.

    Each conditional is a (predicate, template) pair; the first predicate
    that matches the given LLM wins, otherwise `default_template` is used.
    """

    default_template: BasePromptTemplate
    conditionals: Optional[
        List[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]]
    ] = None

    def __init__(
        self,
        default_template: BasePromptTemplate,
        conditionals: Optional[
            List[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]]
        ] = None,
    ):
        # Mirror the default template's attributes so this selector presents
        # the same metadata/vars/parser as the template it wraps.
        metadata = default_template.metadata
        kwargs = default_template.kwargs
        template_vars = default_template.template_vars
        output_parser = default_template.output_parser
        super().__init__(
            default_template=default_template,
            conditionals=conditionals,
            metadata=metadata,
            kwargs=kwargs,
            template_vars=template_vars,
            output_parser=output_parser,
        )

    def select(self, llm: Optional[BaseLLM] = None) -> BasePromptTemplate:
        """Return the template matching `llm` (or the default).

        NOTE: propagates this selector's `output_parser` onto the chosen
        template (mutating it) so the parser stays in sync.
        """
        # ensure output parser is up to date
        self.default_template.output_parser = self.output_parser

        if llm is None:
            return self.default_template

        if self.conditionals is not None:
            for condition, prompt in self.conditionals:
                if condition(llm):
                    # ensure output parser is up to date
                    prompt.output_parser = self.output_parser
                    return prompt
        return self.default_template

    def partial_format(self, **kwargs: Any) -> "SelectorPromptTemplate":
        """Return a new selector with kwargs pre-bound on every branch."""
        default_template = self.default_template.partial_format(**kwargs)
        if self.conditionals is None:
            conditionals = None
        else:
            conditionals = [
                (condition, prompt.partial_format(**kwargs))
                for condition, prompt in self.conditionals
            ]
        return SelectorPromptTemplate(
            default_template=default_template, conditionals=conditionals
        )

    def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str:
        """Format the prompt into a string."""
        prompt = self.select(llm=llm)
        return prompt.format(**kwargs)

    def format_messages(
        self, llm: Optional[BaseLLM] = None, **kwargs: Any
    ) -> List[ChatMessage]:
        """Format the prompt into a list of chat messages."""
        prompt = self.select(llm=llm)
        return prompt.format_messages(**kwargs)

    def get_template(self, llm: Optional[BaseLLM] = None) -> str:
        """Return the raw template string for the selected branch."""
        prompt = self.select(llm=llm)
        return prompt.get_template(llm=llm)
161886 | import asyncio
import logging
import queue
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
from threading import Event
from typing import AsyncGenerator, Generator, List, Optional, Union
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
)
from llama_index.legacy.core.response.schema import Response, StreamingResponse
from llama_index.legacy.memory import BaseMemory
from llama_index.legacy.schema import NodeWithScore
from llama_index.legacy.tools import ToolOutput
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
def is_function(message: ChatMessage) -> bool:
    """Utility for ChatMessage responses from OpenAI models."""
    extra_kwargs = message.additional_kwargs
    return "tool_calls" in extra_kwargs
class ChatResponseMode(str, Enum):
    """Flag toggling waiting/streaming in `Agent._chat`."""

    WAIT = "wait"  # block until the full response is available
    STREAM = "stream"  # yield the response incrementally
@dataclass
class AgentChatResponse:
    """Agent chat response."""

    response: str = ""
    sources: List[ToolOutput] = field(default_factory=list)
    source_nodes: List[NodeWithScore] = field(default_factory=list)

    def __post_init__(self) -> None:
        # Backfill source_nodes from tool outputs that wrap query responses,
        # but only when the caller didn't supply source_nodes explicitly.
        if self.sources and not self.source_nodes:
            for tool_output in self.sources:
                if isinstance(tool_output.raw_output, (Response, StreamingResponse)):
                    self.source_nodes.extend(tool_output.raw_output.source_nodes)

    def __str__(self) -> str:
        return self.response
@dataclass
class StreamingAgentChatResponse:
    """Streaming chat response to user and writing to chat history.

    A producer (the agent) feeds token deltas into `_queue`/`_aqueue` while a
    consumer iterates `response_gen`/`async_response_gen`. Events coordinate
    completion and OpenAI function-call detection between the two sides.
    """

    response: str = ""
    sources: List[ToolOutput] = field(default_factory=list)
    chat_stream: Optional[ChatResponseGen] = None
    achat_stream: Optional[ChatResponseAsyncGen] = None
    source_nodes: List[NodeWithScore] = field(default_factory=list)
    _unformatted_response: str = ""
    _queue: queue.Queue = field(default_factory=queue.Queue)
    _aqueue: asyncio.Queue = field(default_factory=asyncio.Queue)
    # flag when chat message is a function call
    _is_function: Optional[bool] = None
    # flag when processing done
    # NOTE(review): no annotation, so this is a plain class attribute rather
    # than a dataclass field.
    _is_done = False
    # signal when a new item is added to the queue
    _new_item_event: asyncio.Event = field(default_factory=asyncio.Event)
    # NOTE: async code uses two events rather than one since it yields
    # control when waiting for queue item
    # signal when the OpenAI functions stop executing
    _is_function_false_event: asyncio.Event = field(default_factory=asyncio.Event)
    # signal when an OpenAI function is being executed
    _is_function_not_none_thread_event: Event = field(default_factory=Event)

    def __post_init__(self) -> None:
        # Backfill source_nodes from tool outputs that wrap query responses.
        if self.sources and not self.source_nodes:
            for tool_output in self.sources:
                if isinstance(tool_output.raw_output, (Response, StreamingResponse)):
                    self.source_nodes.extend(tool_output.raw_output.source_nodes)

    def __str__(self) -> str:
        # If the stream finished but nobody consumed the generator, drain the
        # remaining deltas here so the final text is complete.
        if self._is_done and not self._queue.empty() and not self._is_function:
            while self._queue.queue:
                delta = self._queue.queue.popleft()
                self._unformatted_response += delta
            self.response = self._unformatted_response.strip()
        return self.response

    def put_in_queue(self, delta: Optional[str]) -> None:
        """Enqueue a delta for the sync consumer and wake any waiter."""
        self._queue.put_nowait(delta)
        self._is_function_not_none_thread_event.set()

    def aput_in_queue(self, delta: Optional[str]) -> None:
        """Enqueue a delta for the async consumer and wake any waiter."""
        self._aqueue.put_nowait(delta)
        self._new_item_event.set()

    def write_response_to_history(
        self, memory: BaseMemory, raise_error: bool = False
    ) -> None:
        """Consume `chat_stream`, forwarding deltas and persisting the final
        message to `memory`. Sets `_is_done` even on failure so readers exit.
        """
        if self.chat_stream is None:
            raise ValueError(
                "chat_stream is None. Cannot write to history without chat_stream."
            )

        # try/except to prevent hanging on error
        try:
            final_text = ""
            for chat in self.chat_stream:
                self._is_function = is_function(chat.message)
                self.put_in_queue(chat.delta)
                final_text += chat.delta or ""
            if self._is_function is not None:  # if loop has gone through iteration
                # NOTE: this is to handle the special case where we consume some of the
                # chat stream, but not all of it (e.g. in react agent)
                chat.message.content = final_text.strip()  # final message
                memory.put(chat.message)
        except Exception as e:
            if not raise_error:
                logger.warning(
                    f"Encountered exception writing response to history: {e}"
                )
            else:
                raise

        self._is_done = True

        # This act as is_done events for any consumers waiting
        self._is_function_not_none_thread_event.set()

    async def awrite_response_to_history(
        self,
        memory: BaseMemory,
    ) -> None:
        """Async variant of `write_response_to_history` over `achat_stream`."""
        if self.achat_stream is None:
            raise ValueError(
                "achat_stream is None. Cannot asynchronously write to "
                "history without achat_stream."
            )

        # try/except to prevent hanging on error
        try:
            final_text = ""
            async for chat in self.achat_stream:
                self._is_function = is_function(chat.message)
                self.aput_in_queue(chat.delta)
                final_text += chat.delta or ""
                if self._is_function is False:
                    self._is_function_false_event.set()
            if self._is_function is not None:  # if loop has gone through iteration
                # NOTE: this is to handle the special case where we consume some of the
                # chat stream, but not all of it (e.g. in react agent)
                chat.message.content = final_text.strip()  # final message
                memory.put(chat.message)
        except Exception as e:
            logger.warning(f"Encountered exception writing response to history: {e}")
        self._is_done = True

        # These act as is_done events for any consumers waiting
        self._is_function_false_event.set()
        self._new_item_event.set()

    @property
    def response_gen(self) -> Generator[str, None, None]:
        """Yield deltas until the producer is done and the queue is drained."""
        while not self._is_done or not self._queue.empty():
            try:
                delta = self._queue.get(block=False)
                self._unformatted_response += delta
                yield delta
            except queue.Empty:
                # Queue is empty, but we're not done yet
                continue
        self.response = self._unformatted_response.strip()

    async def async_response_gen(self) -> AsyncGenerator[str, None]:
        """Async variant of `response_gen`, waiting on `_new_item_event`."""
        while not self._is_done or not self._aqueue.empty():
            if not self._aqueue.empty():
                delta = self._aqueue.get_nowait()
                self._unformatted_response += delta
                yield delta
            else:
                await self._new_item_event.wait()  # Wait until a new item is added
                self._new_item_event.clear()  # Clear the event for the next wait
        self.response = self._unformatted_response.strip()

    def print_response_stream(self) -> None:
        """Print deltas to stdout as they arrive (blocking)."""
        for token in self.response_gen:
            print(token, end="", flush=True)

    async def aprint_response_stream(self) -> None:
        """Async variant of `print_response_stream`."""
        async for token in self.async_response_gen():
            print(token, end="", flush=True)
AGENT_CHAT_RESPONSE_TYPE = Union[AgentChatResponse, StreamingAgentChatResponse] | |
161904 | """Qdrant reader."""
from typing import Dict, List, Optional, cast
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class QdrantReader(BaseReader):
    """Qdrant reader.
    Retrieve documents from existing Qdrant collections.
    Args:
        location:
            If `:memory:` - use in-memory Qdrant instance.
            If `str` - use it as a `url` parameter.
            If `None` - use default values for `host` and `port`.
        url:
            either host or str of
            "Optional[scheme], host, Optional[port], Optional[prefix]".
            Default: `None`
        port: Port of the REST API interface. Default: 6333
        grpc_port: Port of the gRPC interface. Default: 6334
        prefer_grpc: If `true` - use gPRC interface whenever possible in custom methods.
        https: If `true` - use HTTPS(SSL) protocol. Default: `false`
        api_key: API key for authentication in Qdrant Cloud. Default: `None`
        prefix:
            If not `None` - add `prefix` to the REST URL path.
            Example: `service/v1` will result in
            `http://localhost:6333/service/v1/{qdrant-endpoint}` for REST API.
            Default: `None`
        timeout:
            Timeout for REST and gRPC API requests.
            Default: 5.0 seconds for REST and unlimited for gRPC
        host: Host name of Qdrant service. If url and host are None, set to 'localhost'.
            Default: `None`
    """

    def __init__(
        self,
        location: Optional[str] = None,
        url: Optional[str] = None,
        port: Optional[int] = 6333,
        grpc_port: int = 6334,
        prefer_grpc: bool = False,
        https: Optional[bool] = None,
        api_key: Optional[str] = None,
        prefix: Optional[str] = None,
        timeout: Optional[float] = None,
        host: Optional[str] = None,
        path: Optional[str] = None,
    ):
        """Initialize with parameters."""
        import_err_msg = (
            "`qdrant-client` package not found, please run `pip install qdrant-client`"
        )
        try:
            import qdrant_client
        except ImportError:
            raise ImportError(import_err_msg)

        # All connection parameters are forwarded verbatim to the client.
        self._client = qdrant_client.QdrantClient(
            location=location,
            url=url,
            port=port,
            grpc_port=grpc_port,
            prefer_grpc=prefer_grpc,
            https=https,
            api_key=api_key,
            prefix=prefix,
            timeout=timeout,
            host=host,
            path=path,
        )

    def load_data(
        self,
        collection_name: str,
        query_vector: List[float],
        should_search_mapping: Optional[Dict[str, str]] = None,
        must_search_mapping: Optional[Dict[str, str]] = None,
        must_not_search_mapping: Optional[Dict[str, str]] = None,
        rang_search_mapping: Optional[Dict[str, Dict[str, float]]] = None,
        limit: int = 10,
    ) -> List[Document]:
        """Load data from Qdrant.
        Args:
            collection_name (str): Name of the Qdrant collection.
            query_vector (List[float]): Query vector.
            should_search_mapping (Optional[Dict[str, str]]): Mapping from field name
                to query string.
            must_search_mapping (Optional[Dict[str, str]]): Mapping from field name
                to query string.
            must_not_search_mapping (Optional[Dict[str, str]]): Mapping from field
                name to query string.
            rang_search_mapping (Optional[Dict[str, Dict[str, float]]]): Mapping from
                field name to range query.
            limit (int): Number of results to return.
        Example:
            reader = QdrantReader()
            reader.load_data(
                collection_name="test_collection",
                query_vector=[0.1, 0.2, 0.3],
                should_search_mapping={"text_field": "text"},
                must_search_mapping={"text_field": "text"},
                must_not_search_mapping={"text_field": "text"},
                # gte, lte, gt, lt supported
                rang_search_mapping={"text_field": {"gte": 0.1, "lte": 0.2}},
                limit=10
            )
        Returns:
            List[Document]: A list of documents.
        """
        from qdrant_client.http.models import (
            FieldCondition,
            Filter,
            MatchText,
            MatchValue,
            Range,
        )
        from qdrant_client.http.models.models import Payload

        should_search_mapping = should_search_mapping or {}
        must_search_mapping = must_search_mapping or {}
        must_not_search_mapping = must_not_search_mapping or {}
        rang_search_mapping = rang_search_mapping or {}

        # NOTE(review): the `if <mapping>` guards below are redundant — if the
        # dict is empty there are no items to iterate anyway.
        should_search_conditions = [
            FieldCondition(key=key, match=MatchText(text=value))
            for key, value in should_search_mapping.items()
            if should_search_mapping
        ]
        must_search_conditions = [
            FieldCondition(key=key, match=MatchValue(value=value))
            for key, value in must_search_mapping.items()
            if must_search_mapping
        ]
        must_not_search_conditions = [
            FieldCondition(key=key, match=MatchValue(value=value))
            for key, value in must_not_search_mapping.items()
            if must_not_search_mapping
        ]
        rang_search_conditions = [
            FieldCondition(
                key=key,
                range=Range(
                    gte=value.get("gte"),
                    lte=value.get("lte"),
                    gt=value.get("gt"),
                    lt=value.get("lt"),
                ),
            )
            for key, value in rang_search_mapping.items()
            if rang_search_mapping
        ]
        # NOTE(review): range conditions are added to the *should* clause, so
        # they boost rather than strictly filter — confirm this is intended.
        should_search_conditions.extend(rang_search_conditions)
        response = self._client.search(
            collection_name=collection_name,
            query_vector=query_vector,
            query_filter=Filter(
                must=must_search_conditions,
                must_not=must_not_search_conditions,
                should=should_search_conditions,
            ),
            with_vectors=True,
            with_payload=True,
            limit=limit,
        )
        documents = []
        for point in response:
            payload = cast(Payload, point.payload)
            try:
                vector = cast(List[float], point.vector)
            except ValueError as e:
                # NOTE(review): typing.cast never raises at runtime, so this
                # except branch is effectively dead code.
                raise ValueError("Could not cast vector to List[float].") from e
            document = Document(
                id_=payload.get("doc_id"),
                text=payload.get("text"),
                metadata=payload.get("metadata"),
                embedding=vector,
            )
            documents.append(document)
        return documents
161915 | """DeepLake reader."""
from typing import List, Optional, Union
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
distance_metric_map = {
"l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2),
"l1": lambda a, b: np.linalg.norm(a - b, axis=1, ord=1),
"max": lambda a, b: np.linalg.norm(a - b, axis=1, ord=np.inf),
"cos": lambda a, b: np.dot(a, b.T)
/ (np.linalg.norm(a) * np.linalg.norm(b, axis=1)),
"dot": lambda a, b: np.dot(a, b.T),
}
def vector_search(
query_vector: Union[List, np.ndarray],
data_vectors: np.ndarray,
distance_metric: str = "l2",
limit: Optional[int] = 4,
) -> List:
"""Naive search for nearest neighbors
args:
query_vector: Union[List, np.ndarray]
data_vectors: np.ndarray
limit (int): number of nearest neighbors
distance_metric: distance function 'L2' for Euclidean, 'L1' for Nuclear, 'Max'
l-infinity distance, 'cos' for cosine similarity, 'dot' for dot product
returns:
nearest_indices: List, indices of nearest neighbors.
"""
# Calculate the distance between the query_vector and all data_vectors
if isinstance(query_vector, list):
query_vector = np.array(query_vector)
query_vector = query_vector.reshape(1, -1)
distances = distance_metric_map[distance_metric](query_vector, data_vectors)
nearest_indices = np.argsort(distances)
nearest_indices = (
nearest_indices[::-1][:limit]
if distance_metric in ["cos"]
else nearest_indices[:limit]
)
return nearest_indices.tolist()
class DeepLakeReader(BaseReader):
    """DeepLake reader.

    Retrieve documents from existing DeepLake datasets via brute-force
    vector search (see ``vector_search`` above).

    Args:
        token (Optional[str]): DeepLake API token, forwarded to
            ``deeplake.load``.
    """

    def __init__(
        self,
        token: Optional[str] = None,
    ):
        """Initializing the deepLake reader."""
        import_err_msg = (
            "`deeplake` package not found, please run `pip install deeplake`"
        )
        # Fail fast at construction time if deeplake is not installed.
        try:
            import deeplake  # noqa
        except ImportError:
            raise ImportError(import_err_msg)
        self.token = token

    def load_data(
        self,
        query_vector: List[float],
        dataset_path: str,
        limit: int = 4,
        distance_metric: str = "l2",
    ) -> List[Document]:
        """Load data from DeepLake.

        Args:
            query_vector (List[float]): Query vector.
            dataset_path (str): Path of the DeepLake dataset to load.
            limit (int): Number of results to return.
            distance_metric (str): One of "l2", "l1", "max", "cos", "dot".

        Returns:
            List[Document]: A list of documents.
        """
        import deeplake
        from deeplake.util.exceptions import TensorDoesNotExistError

        dataset = deeplake.load(dataset_path, token=self.token)
        try:
            # fetch_chunks=True pulls the full embedding tensor into memory.
            embeddings = dataset.embedding.numpy(fetch_chunks=True)
        except Exception:
            # NOTE(review): this masks the original failure and re-raises
            # everything as a missing-"embedding"-tensor error -- confirm
            # that is the intent.
            raise TensorDoesNotExistError("embedding")
        indices = vector_search(
            query_vector, embeddings, distance_metric=distance_metric, limit=limit
        )
        documents = []
        for idx in indices:
            # Each row exposes `text` and `ids` tensors; both are unwrapped
            # from their single-element arrays.
            document = Document(
                text=str(dataset[idx].text.numpy().tolist()[0]),
                id_=dataset[idx].ids.numpy().tolist()[0],
            )
            documents.append(document)
        return documents
161921 | """Chroma Reader."""
from typing import Any, List, Optional, Union
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class ChromaReader(BaseReader):
    """Chroma reader.

    Retrieve documents from existing persisted Chroma collections.

    Args:
        collection_name: Name of the persisted collection.
        persist_directory: Directory where the collection is persisted.
    """

    def __init__(
        self,
        collection_name: str,
        persist_directory: Optional[str] = None,
        chroma_api_impl: str = "rest",
        chroma_db_impl: Optional[str] = None,
        host: str = "localhost",
        port: int = 8000,
    ) -> None:
        """Open the named collection, locally or over HTTP."""
        try:
            import chromadb
        except ImportError:
            raise ImportError(
                "`chromadb` package not found, please run `pip install chromadb`"
            )
        if collection_name is None:
            raise ValueError("Please provide a collection name.")
        # NOTE(review): `chroma_api_impl` and `chroma_db_impl` are accepted
        # but never used -- presumably kept for backward compatibility.
        if persist_directory is not None:
            # Local on-disk collection.
            self._client = chromadb.PersistentClient(
                path=persist_directory if persist_directory else "./chroma",
            )
        elif (host is not None) or (port is not None):
            # Remote collection served over HTTP.
            self._client = chromadb.HttpClient(host=host, port=port)
        self._collection = self._client.get_collection(collection_name)

    def create_documents(self, results: Any) -> List[Document]:
        """Create documents from the results.

        Args:
            results: Results from the query.

        Returns:
            List of documents.
        """
        # Chroma returns parallel lists, one entry per query; index [0]
        # selects the results of the single query issued.
        rows = zip(
            results["ids"][0],
            results["documents"][0],
            results["embeddings"][0],
            results["metadatas"][0],
        )
        return [
            Document(id_=node_id, text=text, embedding=embedding, metadata=metadata)
            for node_id, text, embedding, metadata in rows
        ]

    def load_data(
        self,
        query_embedding: Optional[List[float]] = None,
        limit: int = 10,
        where: Optional[dict] = None,
        where_document: Optional[dict] = None,
        query: Optional[Union[str, List[str]]] = None,
    ) -> Any:
        """Load data from the collection.

        Args:
            query_embedding: Embedding to search by (used in preference to
                ``query`` when both are given).
            limit: Number of results to return.
            where: Filter results by metadata. {"metadata_field": "is_equal_to_this"}
            where_document: Filter results by document. {"$contains":"search_string"}
            query: Raw text query (or list of queries) to search by.

        Returns:
            List of documents.
        """
        where = where or {}
        where_document = where_document or {}
        include = ["metadatas", "documents", "distances", "embeddings"]
        if query_embedding is not None:
            # NOTE(review): recent chromadb collections expose `query`, not
            # `search` -- confirm this call against the installed version.
            results = self._collection.search(
                query_embedding=query_embedding,
                n_results=limit,
                where=where,
                where_document=where_document,
                include=include,
            )
        elif query is not None:
            query_texts = query if isinstance(query, list) else [query]
            results = self._collection.query(
                query_texts=query_texts,
                n_results=limit,
                where=where,
                where_document=where_document,
                include=include,
            )
        else:
            raise ValueError("Please provide either query embedding or query.")
        return self.create_documents(results)
161935 | """Tabular parser.
Contains parsers for tabular data files.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class CSVReader(BaseReader):
    """CSV parser.

    Args:
        concat_rows (bool): whether to concatenate all rows into one document.
            If set to False, a Document will be created for each row.
            True by default.
    """

    def __init__(self, *args: Any, concat_rows: bool = True, **kwargs: Any) -> None:
        """Init params."""
        super().__init__(*args, **kwargs)
        self._concat_rows = concat_rows

    def load_data(
        self, file: Path, extra_info: Optional[Dict] = None
    ) -> List[Document]:
        """Parse a CSV file into documents.

        Each row is joined into a comma-separated string; rows are either
        concatenated into a single Document or returned one Document per
        row, depending on ``concat_rows``.

        Returns:
            List[Document]: one document, or one per row.
        """
        # `csv` is part of the standard library; the original guarded this
        # import with an unreachable try/except ImportError.
        import csv

        with open(file) as fp:
            text_list = [", ".join(row) for row in csv.reader(fp)]
        if self._concat_rows:
            return [Document(text="\n".join(text_list), metadata=extra_info)]
        return [Document(text=text, metadata=extra_info) for text in text_list]
class PandasCSVReader(BaseReader):
    r"""Pandas-based CSV parser.

    Parses CSVs using the separator detection from Pandas `read_csv` function.
    If special parameters are required, use the `pandas_config` dict.

    Args:
        concat_rows (bool): whether to concatenate all rows into one document.
            If set to False, a Document will be created for each row.
            True by default.

        col_joiner (str): Separator to use for joining cols per row.
            Set to ", " by default.

        row_joiner (str): Separator to use for joining each row.
            Only used when `concat_rows=True`.
            Set to "\n" by default.

        pandas_config (dict): Options for the `pandas.read_csv` function call.
            Refer to https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html
            for more information.
            Defaults to None (empty config), meaning pandas will try to figure
            out the separators, table head, etc. on its own.
    """

    def __init__(
        self,
        *args: Any,
        concat_rows: bool = True,
        col_joiner: str = ", ",
        row_joiner: str = "\n",
        pandas_config: Optional[dict] = None,
        **kwargs: Any
    ) -> None:
        """Init params.

        ``pandas_config`` previously defaulted to a shared mutable ``{}``
        (the classic mutable-default pitfall); it now defaults to ``None``
        and is normalized to a fresh dict per instance.
        """
        super().__init__(*args, **kwargs)
        self._concat_rows = concat_rows
        self._col_joiner = col_joiner
        self._row_joiner = row_joiner
        self._pandas_config = pandas_config or {}

    def load_data(
        self, file: Path, extra_info: Optional[Dict] = None
    ) -> List[Document]:
        """Parse file into one Document, or one Document per row."""
        df = pd.read_csv(file, **self._pandas_config)
        # Render each row as a joined string of its (stringified) cells.
        text_list = df.apply(
            lambda row: (self._col_joiner).join(row.astype(str).tolist()), axis=1
        ).tolist()
        if self._concat_rows:
            return [
                Document(
                    text=(self._row_joiner).join(text_list), metadata=extra_info or {}
                )
            ]
        else:
            return [
                Document(text=text, metadata=extra_info or {}) for text in text_list
            ]
161960 | """Weaviate reader."""
from typing import Any, List, Optional
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class WeaviateReader(BaseReader):
    """Weaviate reader.

    Retrieves documents from Weaviate through vector lookup. Allows option
    to concatenate retrieved documents into one Document, or to return
    separate Document objects per document.

    Args:
        host (str): host.
        auth_client_secret (Optional[weaviate.auth.AuthCredentials]):
            auth_client_secret.
    """

    def __init__(
        self,
        host: str,
        auth_client_secret: Optional[Any] = None,
    ) -> None:
        """Initialize with parameters."""
        try:
            import weaviate  # noqa
            from weaviate import Client
            from weaviate.auth import AuthCredentials  # noqa
        except ImportError:
            raise ImportError(
                "`weaviate` package not found, please run `pip install weaviate-client`"
            )
        self.client: Client = Client(host, auth_client_secret=auth_client_secret)

    def load_data(
        self,
        class_name: Optional[str] = None,
        properties: Optional[List[str]] = None,
        graphql_query: Optional[str] = None,
        separate_documents: Optional[bool] = True,
    ) -> List[Document]:
        """Load data from Weaviate.

        If ``graphql_query`` is not given, a Get query is built from
        ``class_name`` and ``properties``.

        Args:
            class_name (Optional[str]): class_name to retrieve documents from.
            properties (Optional[List[str]]): properties to retrieve from documents.
            graphql_query (Optional[str]): Raw GraphQL Query.
                We assume that the query is a Get query.
            separate_documents (Optional[bool]): Whether to return separate
                documents. Defaults to True.

        Returns:
            List[Document]: A list of documents.
        """
        if class_name is not None and properties is not None:
            props_txt = "\n".join(properties)
            graphql_query = f"""
            {{
                Get {{
                    {class_name} {{
                        {props_txt}
                    }}
                }}
            }}
            """
        elif graphql_query is None:
            raise ValueError(
                "Either `class_name` and `properties` must be specified, "
                "or `graphql_query` must be specified."
            )

        response = self.client.query.raw(graphql_query)
        if "errors" in response:
            raise ValueError("Invalid query, got errors: {}".format(response["errors"]))

        data_response = response["data"]
        if "Get" not in data_response:
            raise ValueError("Invalid query response, must be a Get query.")

        if class_name is None:
            # infer class_name if only graphql_query was provided
            class_name = next(iter(data_response["Get"].keys()))
        entries = data_response["Get"][class_name]

        documents = []
        for entry in entries:
            embedding: Optional[List[float]] = None
            # Render each property as "<name>: <value>", one per line; the
            # `_additional` payload only contributes the embedding vector and
            # is never rendered as text.
            parts: List[str] = []
            for key, value in entry.items():
                if key == "_additional":
                    if "vector" in value:
                        embedding = value["vector"]
                    continue
                parts.append(f"{key}: {value}")
            documents.append(Document(text="\n".join(parts), embedding=embedding))

        if not separate_documents:
            # join all documents into one
            merged = "\n\n".join(doc.get_content() for doc in documents)
            documents = [Document(text=merged)]
        return documents
161983 | import logging
from typing import Any, Callable, Dict, List, Optional, Sequence, Type
from openai.resources import Completions
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from llama_index.legacy.bridge.pydantic import BaseModel
from llama_index.legacy.core.llms.types import ChatMessage
# Error shown when no provider API key is configured (litellm routes to many
# providers; OpenAI is given as the canonical example).
MISSING_API_KEY_ERROR_MESSAGE = """No API key found for LLM.
E.g. to use openai Please set the OPENAI_API_KEY environment variable or \
openai.api_key prior to initialization.
API keys can be found or created at \
https://platform.openai.com/account/api-keys
"""
INVALID_API_KEY_ERROR_MESSAGE = """Invalid LLM API key."""

# `Message` is litellm's chat-message type; fall back to `Any` so this module
# stays importable when litellm is not installed.
try:
    from litellm.utils import Message
except ModuleNotFoundError:
    Message = Any
logger = logging.getLogger(__name__)

# Alias for the OpenAI completions client class, used in type hints below.
CompletionClientType = Type[Completions]
def _create_retry_decorator(max_retries: int) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator for transient litellm failures.

    Waits 2^x seconds between attempts, clamped to the [4, 10] second range,
    and re-raises the last error once ``max_retries`` attempts are exhausted.
    """
    import litellm

    retryable = (
        litellm.exceptions.Timeout,
        litellm.exceptions.APIError,
        litellm.exceptions.APIConnectionError,
        litellm.exceptions.RateLimitError,
        litellm.exceptions.ServiceUnavailableError,
    )
    retry_condition = retry_if_exception_type(retryable[0])
    for exc_type in retryable[1:]:
        retry_condition = retry_condition | retry_if_exception_type(exc_type)
    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        retry=retry_condition,
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def completion_with_retry(is_chat_model: bool, max_retries: int, **kwargs: Any) -> Any:
    """Use tenacity to retry the completion call."""
    from litellm import completion

    @_create_retry_decorator(max_retries=max_retries)
    def _do_completion(**call_kwargs: Any) -> Any:
        return completion(**call_kwargs)

    return _do_completion(**kwargs)
async def acompletion_with_retry(
    is_chat_model: bool, max_retries: int, **kwargs: Any
) -> Any:
    """Use tenacity to retry the async completion call."""
    from litellm import acompletion

    @_create_retry_decorator(max_retries=max_retries)
    async def _do_acompletion(**call_kwargs: Any) -> Any:
        # Use OpenAI's async api https://github.com/openai/openai-python#async-api
        return await acompletion(**call_kwargs)

    return await _do_acompletion(**kwargs)
def openai_modelname_to_contextsize(modelname: str) -> int:
    """Calculate the maximum number of tokens possible to generate for a model.

    Args:
        modelname: The modelname we want to know the context size for.

    Returns:
        The maximum context size

    Example:
        .. code-block:: python

            max_tokens = openai.modelname_to_contextsize("text-davinci-003")

    Modified from:
        https://github.com/hwchase17/langchain/blob/master/langchain/llms/openai.py
    """
    # NOTE: the docstring is now the first statement so Python actually
    # attaches it; the original placed `import litellm` first, turning the
    # docstring into a no-op string literal.
    import litellm

    # Strip fine-tune decorations so the lookup uses the base model name.
    if modelname.startswith("ft:"):
        modelname = modelname.split(":")[1]
    elif ":ft-" in modelname:  # legacy fine-tuning
        modelname = modelname.split(":")[0]
    try:
        context_size = int(litellm.get_max_tokens(modelname))
    except Exception:
        # Unknown model or litellm lookup failure: fall back to a
        # conservative default rather than raising.
        context_size = 2048  # by default assume models have at least 2048 tokens
    # The original `if context_size is None: raise ValueError(...)` branch was
    # unreachable (int() never returns None and the except clause always
    # assigns 2048), so it has been removed.
    return context_size
def is_chat_model(model: str) -> bool:
    """Return True if ``model`` is one of the models known to litellm."""
    import litellm

    known_models = litellm.model_list
    return model in known_models
def is_function_calling_model(model: str) -> bool:
    """Return True for chat models new enough to support function calling."""
    # The 0314 / 0301 snapshots predate the function-calling API.
    is_legacy_snapshot = ("0314" in model) or ("0301" in model)
    return is_chat_model(model) and not is_legacy_snapshot
def get_completion_endpoint(is_chat_model: bool) -> CompletionClientType:
    """Return the litellm completion entry point.

    litellm exposes a single ``completion`` function for both chat and
    non-chat models, so ``is_chat_model`` does not affect the result.
    """
    from litellm import completion

    return completion
def to_openai_message_dict(message: ChatMessage) -> dict:
    """Convert generic message to OpenAI message dict."""
    # NOTE: openai messages have additional arguments:
    # - function messages have `name`
    # - assistant messages have optional `function_call`
    # These ride along in `additional_kwargs` and are merged in last, so they
    # can also override role/content, matching the original dict.update().
    return {
        "role": message.role,
        "content": message.content,
        **message.additional_kwargs,
    }
def to_openai_message_dicts(messages: Sequence[ChatMessage]) -> List[dict]:
    """Convert generic messages to OpenAI message dicts."""
    return list(map(to_openai_message_dict, messages))
def from_openai_message_dict(message_dict: dict) -> ChatMessage:
    """Convert openai message dict to generic message."""
    # Everything except role/content is preserved as additional kwargs.
    # NOTE: Azure OpenAI returns function calling messages without a content
    # key, hence the defaulted pop for "content".
    extras = dict(message_dict)
    role = extras.pop("role")
    content = extras.pop("content", None)
    return ChatMessage(role=role, content=content, additional_kwargs=extras)
def from_litellm_message(message: Message) -> ChatMessage:
    """Convert litellm.utils.Message instance to generic message."""
    # NOTE: Azure OpenAI returns function calling messages without a content
    # key, so content defaults to None.
    return ChatMessage(
        role=message.get("role"), content=message.get("content", None)
    )
def from_openai_message_dicts(message_dicts: Sequence[dict]) -> List[ChatMessage]:
    """Convert openai message dicts to generic messages."""
    return list(map(from_openai_message_dict, message_dicts))
def to_openai_function(pydantic_class: Type[BaseModel]) -> Dict[str, Any]:
    """Convert pydantic class to OpenAI function.

    Returns a dict with the function ``name`` (schema title), its
    ``description`` and the full JSON schema as ``parameters``.
    """
    # Build the JSON schema once (the original called .schema() twice).
    schema = pydantic_class.schema()
    return {
        "name": schema["title"],
        # A model without a docstring has no "description" entry; fall back
        # to an empty string instead of raising KeyError.
        "description": schema.get("description", ""),
        "parameters": schema,
    }
def validate_litellm_api_key(
    api_key: Optional[str] = None, api_type: Optional[str] = None
) -> None:
    """Raise ``ValueError`` when no API key is configured for litellm.

    The ``api_key`` / ``api_type`` arguments are currently ignored: the
    check is delegated entirely to ``litellm.validate_environment()``.
    """
    import litellm

    # NOTE(review): litellm.validate_environment() returns a status dict
    # (not an API key), so this rebinding makes the `is None` check below
    # effectively never fire -- confirm intent; checking the dict's
    # "missing_keys" entry is presumably what was meant.
    api_key = litellm.validate_environment()
    if api_key is None:
        raise ValueError(MISSING_API_KEY_ERROR_MESSAGE)
162008 | def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
url = f"{self.api_base}/chat/completions"
payload = {
"model": self.model,
"prompt": prompt,
"stream": True,
**self._get_all_kwargs(**kwargs),
}
def gen() -> CompletionResponseGen:
with requests.Session() as session:
with session.post(
url, json=payload, headers=self.headers, stream=True
) as response:
response.raise_for_status()
text = ""
for line in response.iter_lines(
decode_unicode=True
): # decode lines to Unicode
if line.startswith("data:"):
data = json.loads(line[5:])
delta = data["choices"][0]["text"]
text += delta
yield CompletionResponse(delta=delta, text=text, raw=data)
return gen()
    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        """Public streaming-completion entry point.

        Raises:
            ValueError: if the configured model is a chat model (use the
                chat streaming API instead).
        """
        if self._is_chat_model():
            raise ValueError("The complete method is not supported for chat models.")
        stream_complete_fn = self._stream_complete
        return stream_complete_fn(prompt, **kwargs)
async def _astream_complete(
self, prompt: str, **kwargs: Any
) -> CompletionResponseAsyncGen:
import aiohttp
url = f"{self.api_base}/chat/completions"
payload = {
"model": self.model,
"prompt": prompt,
"stream": True,
**self._get_all_kwargs(**kwargs),
}
async def gen() -> CompletionResponseAsyncGen:
async with aiohttp.ClientSession() as session:
async with session.post(
url, json=payload, headers=self.headers
) as response:
response.raise_for_status()
text = ""
async for line in response.content:
line_text = line.decode("utf-8").strip()
if line_text.startswith("data:"):
data = json.loads(line_text[5:])
delta = data["choices"][0]["text"]
text += delta
yield CompletionResponse(delta=delta, text=text, raw=data)
return gen()
    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        """Async public streaming-completion entry point.

        Raises:
            ValueError: if the configured model is a chat model (use the
                chat streaming API instead).
        """
        if self._is_chat_model():
            raise ValueError("The complete method is not supported for chat models.")
        return await self._astream_complete(prompt, **kwargs)
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
url = f"{self.api_base}/chat/completions"
payload = {
"model": self.model,
"messages": [
message.dict(exclude={"additional_kwargs"}) for message in messages
],
"stream": True,
**self._get_all_kwargs(**kwargs),
}
def gen() -> ChatResponseGen:
content = ""
with requests.Session() as session:
with session.post(
url, json=payload, headers=self.headers, stream=True
) as response:
response.raise_for_status()
for line in response.iter_lines(
decode_unicode=True
): # decode lines to Unicode
if line.startswith("data:"):
data = json.loads(line[5:])
delta = data["choices"][0]["delta"]["content"]
content += delta
message = ChatMessage(
role="assistant", content=content, raw=data
)
yield ChatResponse(message=message, delta=delta, raw=data)
return gen()
    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        """Public streaming-chat entry point; delegates to ``_stream_chat``."""
        return self._stream_chat(messages, **kwargs)
async def _astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
import aiohttp
url = f"{self.api_base}/chat/completions"
payload = {
"model": self.model,
"messages": [
message.dict(exclude={"additional_kwargs"}) for message in messages
],
"stream": True,
**self._get_all_kwargs(**kwargs),
}
async def gen() -> ChatResponseAsyncGen:
async with aiohttp.ClientSession() as session:
async with session.post(
url, json=payload, headers=self.headers
) as response:
response.raise_for_status()
content = ""
async for line in response.content:
line_text = line.decode("utf-8").strip()
if line_text.startswith("data:"):
data = json.loads(line_text[5:])
delta = data["choices"][0]["delta"]["content"]
content += delta
message = ChatMessage(
role="assistant", content=content, raw=data
)
yield ChatResponse(message=message, delta=delta, raw=data)
return gen()
    @llm_chat_callback()
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        """Async public streaming-chat entry point; delegates to ``_astream_chat``."""
        return await self._astream_chat(messages, **kwargs)
162038 | import logging
import os
import time
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
import openai
from deprecated import deprecated
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionMessageToolCall
from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
from openai.types.chat.chat_completion_message import ChatCompletionMessage
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
stop_after_delay,
wait_exponential,
wait_random_exponential,
)
from tenacity.stop import stop_base
from llama_index.legacy.bridge.pydantic import BaseModel
from llama_index.legacy.core.llms.types import ChatMessage
from llama_index.legacy.llms.generic_utils import get_from_param_or_env
DEFAULT_OPENAI_API_TYPE = "open_ai"
DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
DEFAULT_OPENAI_API_VERSION = ""

# Maximum context-window sizes (in tokens), keyed by model name.
GPT4_MODELS: Dict[str, int] = {
    # stable model names:
    # resolves to gpt-4-0314 before 2023-06-27,
    # resolves to gpt-4-0613 after
    "gpt-4": 8192,
    "gpt-4-32k": 32768,
    # turbo models (Turbo, JSON mode)
    "gpt-4-1106-preview": 128000,
    "gpt-4-0125-preview": 128000,
    "gpt-4-turbo-preview": 128000,
    # multimodal model
    "gpt-4-vision-preview": 128000,
    # 0613 models (function calling):
    # https://openai.com/blog/function-calling-and-other-api-updates
    "gpt-4-0613": 8192,
    "gpt-4-32k-0613": 32768,
    # 0314 models
    "gpt-4-0314": 8192,
    "gpt-4-32k-0314": 32768,
}

# Azure deployments use "gpt-35" (no dot) naming.
AZURE_TURBO_MODELS: Dict[str, int] = {
    "gpt-35-turbo-16k": 16384,
    "gpt-35-turbo": 4096,
    # 1106 model (JSON mode)
    "gpt-35-turbo-1106": 16384,
    # 0613 models (function calling):
    "gpt-35-turbo-0613": 4096,
    "gpt-35-turbo-16k-0613": 16384,
}

TURBO_MODELS: Dict[str, int] = {
    # stable model names:
    # resolves to gpt-3.5-turbo-0301 before 2023-06-27,
    # resolves to gpt-3.5-turbo-0613 until 2023-12-11,
    # resolves to gpt-3.5-turbo-1106 after
    "gpt-3.5-turbo": 4096,
    # resolves to gpt-3.5-turbo-16k-0613 until 2023-12-11
    # resolves to gpt-3.5-turbo-1106 after
    "gpt-3.5-turbo-16k": 16384,
    # 0125 (2024) model (JSON mode)
    "gpt-3.5-turbo-0125": 16385,
    # 1106 model (JSON mode)
    "gpt-3.5-turbo-1106": 16384,
    # 0613 models (function calling):
    # https://openai.com/blog/function-calling-and-other-api-updates
    "gpt-3.5-turbo-0613": 4096,
    "gpt-3.5-turbo-16k-0613": 16384,
    # 0301 models
    "gpt-3.5-turbo-0301": 4096,
}

GPT3_5_MODELS: Dict[str, int] = {
    "text-davinci-003": 4097,
    "text-davinci-002": 4097,
    # instruct models
    "gpt-3.5-turbo-instruct": 4096,
}

GPT3_MODELS: Dict[str, int] = {
    "text-ada-001": 2049,
    # NOTE(review): 2040 looks like a typo for 2049 -- confirm upstream.
    "text-babbage-001": 2040,
    "text-curie-001": 2049,
    "ada": 2049,
    "babbage": 2049,
    "curie": 2049,
    "davinci": 2049,
}

# Union of every model this module can size.
ALL_AVAILABLE_MODELS = {
    **GPT4_MODELS,
    **TURBO_MODELS,
    **GPT3_5_MODELS,
    **GPT3_MODELS,
    **AZURE_TURBO_MODELS,
}

# Models served through the chat-completions API.
CHAT_MODELS = {
    **GPT4_MODELS,
    **TURBO_MODELS,
    **AZURE_TURBO_MODELS,
}

# Codex models retired by OpenAI; looking these up raises a ValueError below.
DISCONTINUED_MODELS = {
    "code-davinci-002": 8001,
    "code-davinci-001": 8001,
    "code-cushman-002": 2048,
    "code-cushman-001": 2048,
}

MISSING_API_KEY_ERROR_MESSAGE = """No API key found for OpenAI.
Please set either the OPENAI_API_KEY environment variable or \
openai.api_key prior to initialization.
API keys can be found or created at \
https://platform.openai.com/account/api-keys
"""

logger = logging.getLogger(__name__)

# Tool calls arrive as complete objects in responses and as deltas in streams.
OpenAIToolCall = Union[ChatCompletionMessageToolCall, ChoiceDeltaToolCall]
def create_retry_decorator(
    max_retries: int,
    random_exponential: bool = False,
    stop_after_delay_seconds: Optional[float] = None,
    min_seconds: float = 4,
    max_seconds: float = 10,
) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator for transient OpenAI API errors.

    Args:
        max_retries: maximum number of attempts.
        random_exponential: use randomized exponential backoff when True.
        stop_after_delay_seconds: optionally also stop once this many
            seconds have elapsed.
        min_seconds / max_seconds: backoff bounds.
    """
    if random_exponential:
        wait_strategy = wait_random_exponential(min=min_seconds, max=max_seconds)
    else:
        wait_strategy = wait_exponential(
            multiplier=1, min=min_seconds, max=max_seconds
        )

    stop_strategy: stop_base = stop_after_attempt(max_retries)
    if stop_after_delay_seconds is not None:
        stop_strategy = stop_strategy | stop_after_delay(stop_after_delay_seconds)

    retryable_errors = (
        openai.APITimeoutError,
        openai.APIError,
        openai.APIConnectionError,
        openai.RateLimitError,
        openai.APIStatusError,
    )
    return retry(
        reraise=True,
        stop=stop_strategy,
        wait=wait_strategy,
        retry=retry_if_exception_type(retryable_errors),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def openai_modelname_to_contextsize(modelname: str) -> int:
    """Calculate the maximum number of tokens possible to generate for a model.

    Args:
        modelname: The modelname we want to know the context size for.

    Returns:
        The maximum context size

    Example:
        .. code-block:: python

            max_tokens = openai.modelname_to_contextsize("text-davinci-003")

    Modified from:
        https://github.com/hwchase17/langchain/blob/master/langchain/llms/openai.py
    """
    # Strip fine-tune decorations so the lookup uses the base model name.
    if modelname.startswith("ft:"):
        modelname = modelname.split(":")[1]
    elif ":ft-" in modelname:  # legacy fine-tuning
        modelname = modelname.split(":")[0]

    if modelname in DISCONTINUED_MODELS:
        raise ValueError(
            f"OpenAI model {modelname} has been discontinued. "
            "Please choose another model."
        )

    context_size = ALL_AVAILABLE_MODELS.get(modelname)
    if context_size is None:
        raise ValueError(
            f"Unknown model {modelname!r}. Please provide a valid OpenAI model name in:"
            f" {', '.join(ALL_AVAILABLE_MODELS.keys())}"
        )
    return context_size
def is_chat_model(model: str) -> bool:
    """Return True if ``model`` is served through the chat-completions API."""
    return model in CHAT_MODELS
def is_function_calling_model(model: str) -> bool:
    """Return True for chat models new enough to support function calling."""
    # The 0314 / 0301 snapshots predate the function-calling API.
    is_legacy_snapshot = ("0314" in model) or ("0301" in model)
    return is_chat_model(model) and not is_legacy_snapshot
162168 | import os
from typing import Dict, List
import pytest
from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores import ChromaVectorStore
from llama_index.legacy.vector_stores.types import VectorStoreQuery
##
# Start chromadb locally
# cd tests
# docker-compose up
#
# Run tests
# cd tests/vector_stores
# pytest test_chromadb.py
# Connection parameters for the dockerized Chroma instance (see header note).
PARAMS: Dict[str, str] = {
    "host": os.environ.get("CHROMADB_HOST", "localhost"),
    "port": os.environ.get("CHROMADB_PORT", "8000"),
}
COLLECTION_NAME = "llama_collection"

# Probe the server once at import time; tests below are skipped when the
# package is missing or the server is unreachable.
try:
    import chromadb

    # connection check
    conn__ = chromadb.HttpClient(**PARAMS)  # type: ignore
    conn__.get_or_create_collection(COLLECTION_NAME)
    chromadb_not_available = False
except (ImportError, Exception):
    # NOTE: `Exception` already subsumes `ImportError`; the tuple is
    # redundant but kept as-is (doc-only change).
    chromadb_not_available = True
@pytest.mark.skipif(chromadb_not_available, reason="chromadb is not available")
def test_instance_creation_from_collection() -> None:
    """Wrapping an existing collection yields a ChromaVectorStore."""
    client = chromadb.HttpClient(**PARAMS)
    chroma_collection = client.get_collection(COLLECTION_NAME)
    assert isinstance(
        ChromaVectorStore.from_collection(chroma_collection), ChromaVectorStore
    )
@pytest.mark.skipif(chromadb_not_available, reason="chromadb is not available")
def test_instance_creation_from_http_params() -> None:
    """from_params with host/port builds a ChromaVectorStore over HTTP."""
    params = {
        "host": PARAMS["host"],
        "port": PARAMS["port"],
        "collection_name": COLLECTION_NAME,
        "collection_kwargs": {},
    }
    assert isinstance(ChromaVectorStore.from_params(**params), ChromaVectorStore)
@pytest.mark.skipif(chromadb_not_available, reason="chromadb is not available")
def test_instance_creation_from_persist_dir() -> None:
    """from_params with a persist_dir builds a local ChromaVectorStore."""
    result = ChromaVectorStore.from_params(
        persist_dir="./data", collection_name=COLLECTION_NAME, collection_kwargs={}
    )
    assert isinstance(result, ChromaVectorStore)
@pytest.fixture()
def vector_store() -> ChromaVectorStore:
    """A fresh ChromaVectorStore wrapping the shared test collection."""
    client = chromadb.HttpClient(**PARAMS)
    return ChromaVectorStore(
        chroma_collection=client.get_collection(COLLECTION_NAME)
    )
@pytest.fixture(scope="session")
def node_embeddings() -> List[TextNode]:
    """Six nodes with 3-d embeddings for add/query round-trip tests.

    The last three nodes previously passed the misspelled keyword
    ``metadate=`` instead of ``metadata=``, silently dropping their
    author metadata; fixed here.
    """
    return [
        TextNode(
            text="lorem ipsum",
            id_="c330d77f-90bd-4c51-9ed2-57d8d693b3b0",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
            metadata={
                "author": "Stephen King",
                "theme": "Friendship",
            },
            embedding=[1.0, 0.0, 0.0],
        ),
        TextNode(
            text="lorem ipsum",
            id_="c3d1e1dd-8fb4-4b8f-b7ea-7fa96038d39d",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-1")},
            metadata={
                "director": "Francis Ford Coppola",
                "theme": "Mafia",
            },
            embedding=[0.0, 1.0, 0.0],
        ),
        TextNode(
            text="lorem ipsum",
            id_="c3ew11cd-8fb4-4b8f-b7ea-7fa96038d39d",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-2")},
            metadata={
                "director": "Christopher Nolan",
            },
            embedding=[0.0, 0.0, 1.0],
        ),
        TextNode(
            text="I was taught that the way of progress was neither swift nor easy.",
            id_="0b31ae71-b797-4e88-8495-031371a7752e",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="text-3")},
            metadata={
                "author": "Marie Curie",
            },
            embedding=[0.0, 0.0, 0.9],
        ),
        TextNode(
            text=(
                "The important thing is not to stop questioning."
                + " Curiosity has its own reason for existing."
            ),
            id_="bd2e080b-159a-4030-acc3-d98afd2ba49b",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="text-4")},
            metadata={
                "author": "Albert Einstein",
            },
            embedding=[0.0, 0.0, 0.5],
        ),
        TextNode(
            text=(
                "I am no bird; and no net ensnares me;"
                + " I am a free human being with an independent will."
            ),
            id_="f658de3b-8cef-4d1c-8bed-9a263c907251",
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="text-5")},
            metadata={
                "author": "Charlotte Bronte",
            },
            embedding=[0.0, 0.0, 0.3],
        ),
    ]
@pytest.mark.skipif(chromadb_not_available, reason="chromadb is not available")
@pytest.mark.asyncio()
@pytest.mark.parametrize("use_async", [True, False])
async def test_add_to_chromadb_and_query(
    vector_store: ChromaVectorStore,
    node_embeddings: List[TextNode],
    use_async: bool,
) -> None:
    """Add the fixture nodes (sync or async) and check top-1 retrieval."""
    query = VectorStoreQuery(query_embedding=[1.0, 0.0, 0.0], similarity_top_k=1)
    if use_async:
        await vector_store.async_add(node_embeddings)
        res = await vector_store.aquery(query)
    else:
        vector_store.add(node_embeddings)
        res = vector_store.query(query)
    assert res.nodes
    assert res.nodes[0].get_content() == "lorem ipsum"
162178 | """Test MongoDB Atlas Vector Search functionality."""
from __future__ import annotations
import os
from time import sleep
from typing import List
import pytest
try:
    from pymongo import MongoClient

    INDEX_NAME = "llamaindex-test-index"
    NAMESPACE = "llamaindex_test_db.llamaindex_test_collection"
    CONNECTION_STRING = os.environ.get("MONGODB_ATLAS_URI")
    DB_NAME, COLLECTION_NAME = NAMESPACE.split(".")
    test_client = MongoClient(CONNECTION_STRING)  # type: ignore
    collection = test_client[DB_NAME][COLLECTION_NAME]

    pymongo_available = True
except Exception:
    # Covers a missing pymongo install (ImportError) as well as any failure
    # while constructing the client. The previous `(ImportError, Exception)`
    # tuple was redundant: Exception already subsumes ImportError.
    pymongo_available = False
from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores.mongodb import MongoDBAtlasVectorSearch
from llama_index.legacy.vector_stores.types import VectorStoreQuery
@pytest.fixture(scope="session")
def node_embeddings() -> list[TextNode]:
    """Three 'lorem ipsum' nodes with mutually orthogonal embeddings."""
    specs = [
        (
            "c330d77f-90bd-4c51-9ed2-57d8d693b3b0",
            "test-0",
            {"author": "Stephen King", "theme": "Friendship"},
            [1.0, 0.0, 0.0],
        ),
        (
            "c3d1e1dd-8fb4-4b8f-b7ea-7fa96038d39d",
            "test-1",
            {"director": "Francis Ford Coppola", "theme": "Mafia"},
            [0.0, 1.0, 0.0],
        ),
        (
            "c3ew11cd-8fb4-4b8f-b7ea-7fa96038d39d",
            "test-2",
            {"director": "Christopher Nolan"},
            [0.0, 0.0, 1.0],
        ),
    ]
    return [
        TextNode(
            text="lorem ipsum",
            id_=node_id,
            relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id=src_id)},
            metadata=meta,
            embedding=vec,
        )
        for node_id, src_id, meta, vec in specs
    ]
@pytest.mark.skipif(not pymongo_available, reason="pymongo is not available")
@pytest.mark.skip(reason="Need to manually provide a valid Atlas URI")
class TestMongoDBAtlasVectorSearch:
    """Integration tests against a live MongoDB Atlas vector-search index.

    Skipped by default: running them requires setting MONGODB_ATLAS_URI to
    a valid Atlas connection string (see module-level setup above).
    """

    @classmethod
    def setup_class(cls) -> None:
        # ensure the test collection is empty before the class runs
        assert collection.count_documents({}) == 0  # type: ignore[index]

    @classmethod
    def teardown_class(cls) -> None:
        # delete all the documents in the collection
        collection.delete_many({})  # type: ignore[index]

    @pytest.fixture(autouse=True)
    def setup(self) -> None:
        # delete all the documents in the collection before each test
        collection.delete_many({})  # type: ignore[index]

    def test_add_and_delete(self) -> None:
        """Adding one node persists one document; delete by source doc id removes it."""
        vector_store = MongoDBAtlasVectorSearch(
            mongodb_client=test_client,  # type: ignore
            db_name=DB_NAME,
            collection_name=COLLECTION_NAME,
            index_name=INDEX_NAME,
        )
        sleep(1)  # waits for mongot to update Lucene's index
        vector_store.add(
            [
                TextNode(
                    text="test node text",
                    id_="test node id",
                    relationships={
                        NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc id")
                    },
                    embedding=[0.5, 0.5],
                )
            ]
        )
        assert collection.count_documents({}) == 1
        # delete is keyed by the SOURCE relationship's node_id, not id_
        vector_store.delete("test doc id")
        assert collection.count_documents({}) == 0

    def test_query(self, node_embeddings: List[TextNode]) -> None:
        """A top-1 similarity query returns the node whose embedding matches."""
        vector_store = MongoDBAtlasVectorSearch(
            mongodb_client=test_client,  # type: ignore
            db_name=DB_NAME,
            collection_name=COLLECTION_NAME,
            index_name=INDEX_NAME,
        )
        vector_store.add(node_embeddings)  # type: ignore
        sleep(1)  # wait for mongot to update the index
        res = vector_store.query(
            VectorStoreQuery(query_embedding=[1.0, 0.0, 0.0], similarity_top_k=1)
        )
        assert res.nodes
        assert res.nodes[0].get_content() == "lorem ipsum"
162180 | from typing import List
import numpy as np
import pandas as pd
from llama_index.legacy.vector_stores.lancedb import _to_llama_similarities
# Minimal LanceDB-style result frame shared by the tests below. Each test
# copies it with dict(data_stub) and optionally adds a 'score' or
# '_distance' column before calling _to_llama_similarities.
data_stub = {
    "id": [1, 2, 3],
    "doc_id": ["doc1", "doc2", "doc3"],
    "vector": [np.array([0.1, 0.2]), np.array([0.3, 0.4]), np.array([0.5, 0.6])],
    "text": ["text1", "text2", "text3"],
    "file_name": ["file1.txt", "file2.txt", "file3.txt"],
    "_node_content": ["content1", "content2", "content3"],
    "document_id": ["doc_id1", "doc_id2", "doc_id3"],
    "ref_doc_id": ["ref1", "ref2", "ref3"],
}
def test_to_llama_similarities_from_df_w_score() -> None:
    """A descending 'score' column maps to similarities [1, 0.5, 0.25]."""
    data = dict(data_stub)
    scores: List[float] = [9, 9 - np.log(2), 9 - np.log(4)]
    # lance provides 'score' in reverse natural sort test should as well
    assert np.array_equal(sorted(scores, reverse=True), scores)  # gut check setup
    data["score"] = scores
    llama_sim_array = _to_llama_similarities(pd.DataFrame(data))
    assert np.allclose(llama_sim_array, [1, 0.5, 0.25])
def test_to_llama_similarities_from_df_w_distance() -> None:
    """An ascending '_distance' column maps to similarities [0.75, 0.5, 0.25]."""
    data = dict(data_stub)
    distances: List[float] = [np.log(4 / 3), np.log(2), np.log(4)]
    # lance provides '_distance' by natural sort test should as well
    assert np.array_equal(sorted(distances), distances)  # gut check setup
    data["_distance"] = distances
    llama_sim_array = _to_llama_similarities(pd.DataFrame(data))
    assert np.allclose(llama_sim_array, [0.75, 0.5, 0.25])
def test_to_llama_similarity_from_df_ordinal() -> None:
    """With no score or distance column, similarities follow row order."""
    frame = pd.DataFrame(dict(data_stub))
    assert np.allclose(_to_llama_similarities(frame), [1, 0.5, 0])
162269 | """Test deeplake indexes."""
from typing import List
import pytest
from llama_index.legacy.indices.vector_store.base import VectorStoreIndex
from llama_index.legacy.schema import Document, TextNode
from llama_index.legacy.service_context import ServiceContext
from llama_index.legacy.storage.storage_context import StorageContext
from llama_index.legacy.vector_stores import DeepLakeVectorStore
try:
import deeplake
except ImportError:
deeplake = None # type: ignore
EMBEDDING_DIM = 100
NUMBER_OF_DATA = 10
@pytest.fixture()
def documents() -> List[Document]:
    """Get documents."""
    texts = [
        "Hello world!",
        "This is the first test. answer is A",
        "This is the second test. answer is B",
        "This is the third test. answer is C",
    ]
    return [Document(text=t) for t in texts]
@pytest.mark.skipif(deeplake is None, reason="deeplake not installed")
def test_build_deeplake(
    documents: List[Document],
    mock_service_context: ServiceContext,
) -> None:
    """Test build VectorStoreIndex with DeepLakeVectorStore.

    Fix: the docstring previously appeared *after* ``import deeplake``, making
    it a no-op string expression instead of the function docstring.
    """
    import deeplake

    dataset_path = "./llama_index_test"
    vector_store = DeepLakeVectorStore(
        dataset_path=dataset_path,
        overwrite=True,
        verbose=False,
    )
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex.from_documents(
        documents=documents,
        storage_context=storage_context,
        service_context=mock_service_context,
    )
    retriever = index.as_retriever(similarity_top_k=1)
    nodes = retriever.retrieve("What is the answer to the third test?")
    assert len(nodes) == 1
    assert nodes[0].node.get_content() == "This is the third test. answer is C"
    node = nodes[0].node
    node_with_embedding = node.copy()
    node_with_embedding.embedding = [1.0 for i in range(EMBEDDING_DIM)]
    # NOTE(review): the same node object is appended NUMBER_OF_DATA times;
    # the counts asserted below (4 docs + 10 added = 14, then 3 after
    # deleting by ref_doc_id) depend on store-side behavior — confirm.
    new_nodes = [node_with_embedding for i in range(NUMBER_OF_DATA)]
    vector_store.add(new_nodes)
    assert len(vector_store._vectorstore) == 14
    ref_doc_id = str(node.ref_doc_id)
    vector_store.delete(ref_doc_id)
    assert len(vector_store._vectorstore) == 3
    deeplake.delete(dataset_path)
@pytest.mark.skipif(deeplake is None, reason="deeplake not installed")
def test_node_with_metadata(
    mock_service_context: ServiceContext,
) -> None:
    """Metadata attached to an input node survives indexing and retrieval."""
    import deeplake

    dataset_path = "./llama_index_test"
    vector_store = DeepLakeVectorStore(
        dataset_path=dataset_path,
        overwrite=True,
        verbose=False,
    )
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    input_nodes = [TextNode(text="test node text", metadata={"key": "value"})]
    index = VectorStoreIndex(
        input_nodes,
        storage_context=storage_context,
        service_context=mock_service_context,
    )
    retriever = index.as_retriever(similarity_top_k=1)
    nodes = retriever.retrieve("What is?")
    assert len(nodes) == 1
    assert nodes[0].node.get_content() == "test node text"
    # the metadata dict must come back unchanged
    assert nodes[0].node.metadata == {"key": "value"}
    # clean up the on-disk dataset created above
    deeplake.delete(dataset_path)
@pytest.mark.skipif(deeplake is None, reason="deeplake not installed")
def test_backwards_compatibility() -> None:
    """Nodes can be added to a dataset created with the legacy tensor layout."""
    import deeplake
    from deeplake.core.vectorstore import utils

    # create data
    texts, embeddings, ids, metadatas, images = utils.create_data(
        number_of_data=NUMBER_OF_DATA, embedding_dim=EMBEDDING_DIM
    )
    # BUG FIX: dict.update() returns None, so the previous list comprehension
    # `[metadata.update(...) for metadata in metadatas]` rebound `metadatas`
    # to a list of None values. Mutate each dict in place instead.
    for metadata in metadatas:
        metadata.update({"doc_id": "2"})
    node = TextNode(
        text="test node text",
        metadata={"key": "value", "doc_id": "1"},
        id_="1",
        embedding=[1.0 for i in range(EMBEDDING_DIM)],
    )
    # same node object repeated; use the module constant for consistency
    nodes = [node for i in range(NUMBER_OF_DATA)]
    dataset_path = "local_ds1"
    # build a dataset with the pre-vectorstore tensor names
    ds = deeplake.empty(dataset_path)
    ds.create_tensor("ids", htype="text")
    ds.create_tensor("embedding", htype="embedding")
    ds.create_tensor("text", htype="text")
    ds.create_tensor("metadata", htype="json")
    ds.extend(
        {
            "ids": ids,
            "text": texts,
            "metadata": metadatas,
            "embedding": embeddings,
        }
    )
    vectorstore = DeepLakeVectorStore(
        dataset_path=dataset_path,
        overwrite=False,
        verbose=False,
    )
    vectorstore.add(nodes)
    # 10 legacy rows + 10 added nodes
    assert len(vectorstore._vectorstore) == 20
    deeplake.delete(dataset_path)
162326 | from llama_index.legacy.node_parser.file.json import JSONNodeParser
from llama_index.legacy.schema import Document
def test_split_empty_text() -> None:
    """An empty document yields no nodes."""
    parser = JSONNodeParser()
    nodes = parser.get_nodes_from_documents([Document(text="")])
    assert nodes == []
def test_split_valid_json() -> None:
    """Each object in a JSON array becomes its own node."""
    parser = JSONNodeParser()
    doc = Document(
        text='[{"name": "John", "age": 30}, {"name": "Alice", "age": 25}]'
    )
    nodes = parser.get_nodes_from_documents([doc])
    assert [n.text for n in nodes] == ["name John\nage 30", "name Alice\nage 25"]
def test_split_valid_json_defaults() -> None:
    """A single-element JSON array produces exactly one node."""
    parser = JSONNodeParser()
    nodes = parser.get_nodes_from_documents(
        [Document(text='[{"name": "John", "age": 30}]')]
    )
    assert [n.text for n in nodes] == ["name John\nage 30"]
def test_split_valid_dict_json() -> None:
    """A top-level JSON object becomes a single node."""
    parser = JSONNodeParser()
    nodes = parser.get_nodes_from_documents(
        [Document(text='{"name": "John", "age": 30}')]
    )
    assert [n.text for n in nodes] == ["name John\nage 30"]
def test_split_invalid_json() -> None:
    """Malformed JSON (trailing comma) produces no nodes."""
    parser = JSONNodeParser()
    nodes = parser.get_nodes_from_documents(
        [Document(text='{"name": "John", "age": 30,}')]
    )
    assert nodes == []
162328 | def test_complex_md() -> None:
test_data = Document(
text="""
# Using LLMs
## Concept
Picking the proper Large Language Model (LLM) is one of the first steps you need to consider when building any LLM application over your data.
LLMs are a core component of LlamaIndex. They can be used as standalone modules or plugged into other core LlamaIndex modules (indices, retrievers, query engines). They are always used during the response synthesis step (e.g. after retrieval). Depending on the type of index being used, LLMs may also be used during index construction, insertion, and query traversal.
LlamaIndex provides a unified interface for defining LLM modules, whether it's from OpenAI, Hugging Face, or LangChain, so that you
don't have to write the boilerplate code of defining the LLM interface yourself. This interface consists of the following (more details below):
- Support for **text completion** and **chat** endpoints (details below)
- Support for **streaming** and **non-streaming** endpoints
- Support for **synchronous** and **asynchronous** endpoints
## Usage Pattern
The following code snippet shows how you can get started using LLMs.
```python
from llama_index.legacy.llms import OpenAI
# non-streaming
resp = OpenAI().complete("Paul Graham is ")
print(resp)
```
```{toctree}
---
maxdepth: 1
---
llms/usage_standalone.md
llms/usage_custom.md
```
## A Note on Tokenization
By default, LlamaIndex uses a global tokenizer for all token counting. This defaults to `cl100k` from tiktoken, which is the tokenizer to match the default LLM `gpt-3.5-turbo`.
If you change the LLM, you may need to update this tokenizer to ensure accurate token counts, chunking, and prompting.
The single requirement for a tokenizer is that it is a callable function, that takes a string, and returns a list.
You can set a global tokenizer like so:
```python
from llama_index.legacy import set_global_tokenizer
# tiktoken
import tiktoken
set_global_tokenizer(tiktoken.encoding_for_model("gpt-3.5-turbo").encode)
# huggingface
from transformers import AutoTokenizer
set_global_tokenizer(
AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta").encode
)
```
## LLM Compatibility Tracking
While LLMs are powerful, not every LLM is easy to set up. Furthermore, even with proper setup, some LLMs have trouble performing tasks that require strict instruction following.
LlamaIndex offers integrations with nearly every LLM, but it can be often unclear if the LLM will work well out of the box, or if further customization is needed.
The tables below attempt to validate the **initial** experience with various LlamaIndex features for various LLMs. These notebooks serve as a best attempt to gauge performance, as well as how much effort and tweaking is needed to get things to function properly.
Generally, paid APIs such as OpenAI or Anthropic are viewed as more reliable. However, local open-source models have been gaining popularity due to their customizability and approach to transparency.
**Contributing:** Anyone is welcome to contribute new LLMs to the documentation. Simply copy an existing notebook, setup and test your LLM, and open a PR with your results.
If you have ways to improve the setup for existing notebooks, contributions to change this are welcome!
**Legend**
- ✅ = should work fine
- ⚠️ = sometimes unreliable, may need prompt engineering to improve
- 🛑 = usually unreliable, would need prompt engineering/fine-tuning to improve
### Paid LLM APIs
| Model Name | Basic Query Engines | Router Query Engine | Sub Question Query Engine | Text2SQL | Pydantic Programs | Data Agents | <div style="width:290px">Notes</div> |
| ------------------------------------------------------------------------------------------------------------------------ | ------------------- | ------------------- | ------------------------- | -------- | ----------------- | ----------- | --------------------------------------- |
| [gpt-3.5-turbo](https://colab.research.google.com/drive/1oVqUAkn0GCBG5OCs3oMUPlNQDdpDTH_c?usp=sharing) (openai) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [gpt-3.5-turbo-instruct](https://colab.research.google.com/drive/1DrVdx-VZ3dXwkwUVZQpacJRgX7sOa4ow?usp=sharing) (openai) | ✅ | ✅ | ✅ | ✅ | ✅ | ⚠️ | Tool usage in data-agents seems flakey. |
| [gpt-4](https://colab.research.google.com/drive/1RsBoT96esj1uDID-QE8xLrOboyHKp65L?usp=sharing) (openai) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [claude-2](https://colab.research.google.com/drive/1os4BuDS3KcI8FCcUM_2cJma7oI2PGN7N?usp=sharing) (anthropic) | ✅ | ✅ | ✅ | ✅ | ✅ | ⚠️ | Prone to hallucinating tool inputs. |
| [claude-instant-1.2](https://colab.research.google.com/drive/1wt3Rt2OWBbqyeRYdiLfmB0_OIUOGit_D?usp=sharing) (anthropic) | ✅ | ✅ | ✅ | ✅ | ✅ | ⚠️ | Prone to hallucinating tool inputs. |
### Open Source LLMs
Since open source LLMs require large amounts of resources, the quantization is reported. Quantization is just a method for reducing the size of an LLM by shrinking the accuracy of calculations within the model. Research has shown that up to 4Bit quantization can be achieved for large LLMs without impacting performance too severely.
| Model Name | Basic Query Engines | Router Query Engine | SubQuestion Query Engine | Text2SQL | Pydantic Programs | Data Agents | <div style="width:290px">Notes</div> |
| ------------------------------------------------------------------------------------------------------------------------------------ | ------------------- | ------------------- | ------------------------ | -------- | ----------------- | ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [llama2-chat-7b 4bit](https://colab.research.google.com/drive/14N-hmJ87wZsFqHktrw40OU6sVcsiSzlQ?usp=sharing) (huggingface) | ✅ | 🛑 | 🛑 | 🛑 | 🛑 | ⚠️ | Llama2 seems to be quite chatty, which makes parsing structured outputs difficult. Fine-tuning and prompt engineering likely required for better performance on structured outputs. |
| [llama2-13b-chat](https://colab.research.google.com/drive/1S3eCZ8goKjFktF9hIakzcHqDE72g0Ggb?usp=sharing) (replicate) | ✅ | ✅ | 🛑 | ✅ | 🛑 | 🛑 | Our ReAct prompt expects structured outputs, which llama-13b struggles at |
| [llama2-70b-chat](https://colab.research.google.com/drive/1BeOuVI8StygKFTLSpZ0vGCouxar2V5UW?usp=sharing) (replicate) | ✅ | ✅ | ✅ | ✅ | 🛑 | ⚠️ | There are still some issues with parsing structured outputs, especially with pydantic programs. |
| [Mistral-7B-instruct-v0.1 4bit](https://colab.research.google.com/drive/1ZAdrabTJmZ_etDp10rjij_zME2Q3umAQ?usp=sharing) (huggingface) | ✅ | 🛑 | 🛑 | ⚠️ | ⚠️ | ⚠️ | Mistral seems slightly more reliable for structured outputs compared to Llama2. Likely with some prompt engineering, it may do | |
162459 | """Test pydantic output parser."""
import pytest
from llama_index.legacy.bridge.pydantic import BaseModel
from llama_index.legacy.output_parsers.pydantic import PydanticOutputParser
class AttrDict(BaseModel):
    """Nested payload model used by the parser tests below."""

    test_attr: str  # arbitrary string field
    foo: int  # arbitrary integer field
class TestModel(BaseModel):
    """Top-level model the output parser must reconstruct from LLM text."""

    # Prevent pytest from collecting this class as a test case.
    __test__ = False
    title: str
    attr_dict: AttrDict
def test_pydantic() -> None:
    """Test pydantic output parser."""
    output = """\

    Here is the valid JSON:
    {
        "title": "TestModel",
        "attr_dict": {
            "test_attr": "test_attr",
            "foo": 2
        }
    }
    """
    parser = PydanticOutputParser(output_cls=TestModel)
    model = parser.parse(output)
    assert isinstance(model, TestModel)
    assert model.title == "TestModel"
    assert isinstance(model.attr_dict, AttrDict)
    assert (model.attr_dict.test_attr, model.attr_dict.foo) == ("test_attr", 2)

    # TODO: figure out testing conditions
    with pytest.raises(ValueError):
        parser.parse("hello world")
def test_pydantic_format() -> None:
    """Test pydantic format."""
    parser = PydanticOutputParser(output_cls=AttrDict)
    assert "hello world" in parser.format("hello world")
162622 | ]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"response: In the summer of 1995, Steve Jobs was involved in the process of returning to Apple after his departure from the company a decade earlier.\n"
]
}
],
"source": [
"# from llama_index.llms.openai import OpenAI\n",
"\n",
"orig_question = \"what did he do in the summer of 1995?\"\n",
"llm = OpenAI(model=\"gpt-4\")\n",
"clarifying_questions = []\n",
"\n",
"try:\n",
" response = agent.chat(orig_question)\n",
"except HumanInputRequiredException as e:\n",
" response = input(e.message)\n",
" clarifying_questions.append((e.message, response))\n",
" should_end = False\n",
" while not should_end:\n",
" clarifying_texts = \"\\n\".join(\n",
" [\n",
" f\"\"\"\n",
" Q: {question}\n",
" A: {answer}\n",
" \"\"\"\n",
" for question, answer in clarifying_questions\n",
" ]\n",
" )\n",
" query_text = f\"\"\"\n",
"Given a query and a set of clarifying questions, please rewrite the query to be more clear.\n",
"Example:\n",
"Q: What trajectory is the monthly earning from the three months: April, May and June?\n",
"Clarifying Questions:\n",
" Q: What year are you referring to?\n",
" A: In 2022\n",
" Q: What company are you referring to?\n",
" A: Uber\n",
"Rewrite: What was the trajectory of Uber's monthly earnings for the months of April, May, and June in 2022?\n",
"\n",
"Q:{orig_question}\n",
"Clarifying Questions: {clarifying_texts}\n",
"Rewrite: \"\"\"\n",
" rewrite_response = llm.complete(query_text)\n",
" orig_question = rewrite_response\n",
" try:\n",
" output = agent.chat(rewrite_response.text)\n",
" should_end = True\n",
" print(f\"response: {output.response}\")\n",
" except HumanInputRequiredException as er:\n",
" response = input(er.message)\n",
" clarifying_questions.append((er.message, response))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 4
} | |
163404 | than four function calls in ReAct, which often lead to divergent behavior, show less than 10% accuracy in ReAct.
On the other hand, when these examples are processed with LLMCompiler, they achieve around 50% accuracy by
circumventing repetitive calls. It is worth noting that there are instances with three function calls in ReAct, where
an extra search can lead to improved accuracy by retrying with an alternate entity name when the initial search fails,
yielding a better accuracy than LLMCompiler. While this shows a potential adaptability advantage of ReAct, such
instances represent less than 3% of cases.
**A.2** **Failure Case Analysis of LLMCompiler**
This section delves into a qualitative analysis of LLMCompiler’s failure cases on the ParallelQA benchmark, which
can be broadly attributed to failures in the Planner, Executor, or the final output process. Failures in the final output
process refer to cases when LLMs are unable to use the observations collected from tool execution (which are incorporated into the context) to deliver the correct answer to the user. Among the 10.6% (36 examples) of LLMCompiler’s
total failures reported in Tab. 1, we have noted that the Planner, Executor, and final output process contributed to 8%,
64%, and 28% of the failures, respectively. The Planner’s 8% failure rate is exclusive to LLMCompiler. For instance,
the Planner would incorrectly map inputs and outputs by assigning a wrong identifier as an input to a subsequent task,
thereby forming an incorrect DAG. However, with adequate tool definitions and in-context examples, Planner errors
are significantly reduced (only 3 instances in total throughout our experiment), underscoring the LLM’s capability to
decompose problems into complex multi-task dependencies.
The remaining 92% of the total failures are attributed to the Executor and the final output process. The Executor
accounts for most of these failures (64%), with common issues like the math tool choosing wrong attributes or
mishandling unit conversions. For the final output process (28% of failures), errors include incorrect conclusions from
the gathered observations, such as failing to pick the smallest attribute from the collected data. It’s worth noting that
these problems are not exclusive to LLMCompiler but also occur in ReAct. Nevertheless, LLMCompiler tends to
have slightly fewer failures in these areas than ReAct as it provides only relevant contexts to each tool, aiding in more
accurate information extraction. We believe that optimizing the structure of the agent scratchpad, rather than simply
appending observations, could further reduce failures in the final output process.
**A.3** **LLMCompiler Details**
**A.3.1** **Streamed Planner**
The Planner may incur a non-trivial overhead for user queries that involve a lot of tasks as it blocks the Task Fetching
Unit and the Executor, which must wait for the Planner output before initiating their processes. However, analogous to
instruction pipelining in modern computer systems, this can be mitigated by enabling the Planner to asynchronously
stream the dependency graph, thereby allowing each task to be immediately processed by the Executor as soon as its
dependencies are all resolved. In Table A.1, we present a latency comparison of LLMCompiler with and without
the streaming mechanism across different benchmarks. The results demonstrate consistent latency improvements with
streaming. Particularly, in the ParallelQA benchmark, the streaming feature leads to a latency gain of up to 1.3×. This
is attributed to the math tool’s longer execution time for ParallelQA, which can effectively hide the Planner’s latency
in generating subsequent tasks, unlike the shorter execution times of the search tool used in HotpotQA and Movie
Recommendation.
17
-----
**A.3.2** **User-Supplied Information**
LLMCompiler requires the following two inputs from the user:
1. Tool Definitions: Users need to specify the tools that LLMs can use, including their descriptions and argument
specifications. Optionally, users can also provide in-context examples demonstrating the usage of these tools. This
is essentially the same requirement as other frameworks like ReAct and OpenAI function calling.
2. In-context Examples for the Planner: Optionally, users can provide LLMCompiler with examples of how the
Planner should behave. For instance, in the case of Figure 2, users may provide examples illustrating expected
inter-task dependencies for certain queries. Such examples can aid the Planner LLM in generating the appropriate
dependency graph in the correct format for incoming inputs. In Appendix A.6, we include the examples that we
used in our evaluations.
**A.4** **Experiment Details**
Our experiments evaluate two different common scenarios: (1) using API-based closed-source models; and (2) using
open-source models with an in-house serving framework. We use OpenAI’s GPT models as closed-source models,
in particular, gpt-3.5-turbo (1106 release) for HotpotQA and Movie Recommendation, gpt-4-turbo (1106 release) for
ParallelQA, and gpt-4 (0613 release) for Game of 24. Experiments on HotpotQA, Movie Recommendation, and
ParallelQA are all conducted in November 2023 after the 1106 release. The Game of 24 experiments are conducted
over a two-month period from September to October 2023. For an open-source model, we use LLaMA-2 [48], which
was hosted on 2 A100-80GB GPUs using the vLLM [25] framework. All the runs have been carried out with zero
temperature, except for thought proposer and state evaluator for the Game of 24 evaluation, where the
temperature is set to 0.7. Since OpenAI has randomness in outputs even with temperature 0, we have conducted 3 runs
and reported the average accuracy. Across ReAct, OpenAI parallel function calling, and LLMCompiler, we perform
3, 1, and 5-shot learning for HotpotQA, Movie Recommendation, and ParallelQA, respectively; the same examples
across different methods were used to ensure a fair comparison. For the Game of 24, we use 2 in-context examples for
the Planner. We use the same instruction prompts across different methods for a fair comparison, except for ReAct[†]
in Sec. 4.1 with additional ReAct-specific prompts. For WebShop experiment, we use gpt-4-0613 with 8k context
window and gpt-3.5-turbo model with 16k context window.
**A.5** **Analysis**
**A.5.1** **Parallel Speedup Modeling**
While LLMCompiler shows noticeable latency gain in various workloads, it is not achieving the N× latency speedup
for N-way parallel workloads. This is mostly due to the overhead associated with LLMCompiler’s Planner and final
answering process that cannot be parallelized. In our Movie Recommendation experiment, LLMCompiler’s Planner
and the answering process have an overhead of 1.88 and 1.62 seconds on average, respectively, whose combined
overhead already comprises more than half of LLMCompiler’s overall latency in Tab 1. Another source of overhead
is the straggler effect among the parallel tasks when they need to join together. We observe the average latency of the
slowest search to be 1.13 seconds which is nearly 2× the average latency of all tasks, which is 0.61 seconds. Below,
we provide an analytical latency modeling of ReAct, LLMCompiler, and LLMCompiler with streaming, and we
provide an analysis of achievable latency speedup.
In this section, our focus is on embarrassingly parallelizable workload (pattern Figure 3(a)), as this allows for a
clearer understanding of the impact of each component on potential latency gains. For the precise latency analysis, we
consider three key components: the Planner, the Task Fetching Unit, and the Executor, in Figure 2. Assume that the
Planner generates N different tasks to be done. We define Pi as the Planner’s output corresponding to the i-th atomic
task. Each Pi is a blueprint for a specific atomic task, which we refer to as Ei. The execution of Ei involves a specific
function call using the appropriate tool. The latency function of each unit in the system is defined to quantify the time
taken for specific operations. For the Planner, the latency is denoted as TP(Pi), representing the time taken by the
Planner to generate the plan Pi. Similarly, for the Executor, the latency, TE(Ei), corresponds to the time required to
complete the task Ei. We ignore the latency of the Task Fetching Unit as it is negligible in this section. Our focus here
is on comparing the latency models of ReAct [58], and LLMCompiler.
18
-----
To begin our analysis of ReAct’s latency, we express its total latency as:
$$T^{R} = \sum_{i=1}^{N} \left[ T_P^{R}(P_i) + T_E(E_i) \right]. \qquad (1)$$
163445 | # Sentence Window Retriever
This LlamaPack provides an example of our sentence window retriever.
This specific template shows the e2e process of building this. It loads
a document, chunks it up, adds surrounding context as metadata to each chunk,
and during retrieval inserts the context back into each chunk for response synthesis.
Check out the [notebook here](https://github.com/run-llama/llama-hub/blob/main/llama_hub/llama_packs/sentence_window_retriever/sentence_window.ipynb).
## CLI Usage
You can download llamapacks directly using `llamaindex-cli`, which comes installed with the `llama-index` python package:
```bash
llamaindex-cli download-llamapack SentenceWindowRetrieverPack --download-dir ./sentence_window_retriever_pack
```
You can then inspect the files at `./sentence_window_retriever_pack` and use them as a template for your own project.
## Code Usage
You can download the pack to the `./sentence_window_retriever_pack` directory:
```python
from llama_index.core.llama_pack import download_llama_pack
# download and install dependencies
SentenceWindowRetrieverPack = download_llama_pack(
"SentenceWindowRetrieverPack", "./sentence_window_retriever_pack"
)
```
From here, you can use the pack, or inspect and modify the pack in `./sentence_window_retriever_pack`.
Then, you can set up the pack like so:
```python
# create the pack
# get documents from any data loader
sentence_window_retriever_pack = SentenceWindowRetrieverPack(
documents,
)
```
The `run()` function is a light wrapper around `query_engine.query()`.
```python
response = sentence_window_retriever_pack.run(
    "Tell me about a music celebrity."
)
```
You can also use modules individually.
```python
# get the sentence vector index
index = sentence_window_retriever_pack.sentence_index
# get the node parser
node_parser = sentence_window_retriever_pack.node_parser
# get the metadata replacement postprocessor
postprocessor = sentence_window_retriever_pack.postprocessor
# get the query engine
query_engine = sentence_window_retriever_pack.query_engine
``` | |
163449 | """Sentence window retriever."""
from typing import Any, Dict, List, Optional
from llama_index.core import Settings, VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import (
SentenceWindowNodeParser,
)
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
from llama_index.core.schema import Document
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai import OpenAI
class SentenceWindowRetrieverPack(BaseLlamaPack):
    """Sentence Window Retriever pack.

    Build input nodes from a list of documents by attaching each sentence's
    surrounding window as metadata, build a vector index over the per-sentence
    nodes, then after retrieval replace each node's text with its window
    before response synthesis.
    """

    def __init__(
        self,
        docs: Optional[List[Document]] = None,
        **kwargs: Any,
    ) -> None:
        """Init params.

        Args:
            docs: Documents to parse and index. Effectively required; the
                ``None`` default is kept only for signature compatibility.
            **kwargs: Unused; accepted for interface compatibility.

        Raises:
            ValueError: If ``docs`` is not provided.
        """
        # Fail fast with a clear message instead of an opaque crash inside
        # the node parser when no documents were passed.
        if docs is None:
            raise ValueError(
                "SentenceWindowRetrieverPack requires a list of documents."
            )
        # create the sentence window node parser w/ default settings:
        # each node stores its surrounding sentences under "window" and its
        # own sentence under "original_text".
        self.node_parser = SentenceWindowNodeParser.from_defaults(
            window_size=3,
            window_metadata_key="window",
            original_text_metadata_key="original_text",
        )
        self.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
        self.embed_model = HuggingFaceEmbedding(
            model_name="sentence-transformers/all-mpnet-base-v2", max_length=512
        )
        # Register models globally so index construction and querying use them.
        Settings.llm = self.llm
        Settings.embed_model = self.embed_model
        # extract per-sentence nodes and build the vector index over them
        nodes = self.node_parser.get_nodes_from_documents(docs)
        self.sentence_index = VectorStoreIndex(nodes)
        # At query time, swap each retrieved node's text for its window.
        self.postprocessor = MetadataReplacementPostProcessor(
            target_metadata_key="window"
        )
        self.query_engine = self.sentence_index.as_query_engine(
            similarity_top_k=2,
            # the target key defaults to `window` to match the node_parser's default
            node_postprocessors=[self.postprocessor],
        )

    def get_modules(self) -> Dict[str, Any]:
        """Get modules: the pack's internal components keyed by name."""
        return {
            "sentence_index": self.sentence_index,
            "node_parser": self.node_parser,
            "postprocessor": self.postprocessor,
            "llm": self.llm,
            "embed_model": self.embed_model,
            "query_engine": self.query_engine,
        }

    def run(self, *args: Any, **kwargs: Any) -> Any:
        """Run the pipeline; a light wrapper around ``query_engine.query()``."""
        return self.query_engine.query(*args, **kwargs)
163771 | 3-08-25
### New Features
- Added support for `MonsterLLM` using MonsterAPI (#7343)
- Support comments fields in NebulaGraphStore and int type VID (#7402)
- Added configurable endpoint for DynamoDB (#6777)
- Add structured answer filtering for Refine response synthesizer (#7317)
### Bug Fixes / Nits
- Use `utf-8` for json file reader (#7390)
- Fix entity extractor initialization (#7407)
## [0.8.9] - 2023-08-24
### New Features
- Added support for FalkorDB/RedisGraph graph store (#7346)
- Added directed sub-graph RAG (#7378)
- Added support for `BM25Retriever` (#7342)
### Bug Fixes / Nits
- Added `max_tokens` to `Xinference` LLM (#7372)
- Support cache dir creation in multithreaded apps (#7365)
- Ensure temperature is a float for openai (#7382)
- Remove duplicate subjects in knowledge graph retriever (#7378)
- Added support for both pydantic v1 and v2 to allow other apps to move forward (#7394)
### Breaking/Deprecated API Changes
- Refactor prompt template (#7319)
- Use `BasePromptTemplate` for generic typing
- Use `PromptTemplate`, `ChatPromptTemplate`, `SelectorPromptTemplate` as core implementations
- Use `LangchainPromptTemplate` for compatibility with Langchain prompt templates
- Fully replace specific prompt classes (e.g. `SummaryPrompt`) with generic `BasePromptTemplate` for typing in codebase.
- Keep `Prompt` as an alias for `PromptTemplate` for backwards compatibility.
- BREAKING CHANGE: remove support for `Prompt.from_langchain_prompt`, please use `template=LangchainPromptTemplate(lc_template)` instead.
## [0.8.8] - 2023-08-23
### New Features
- `OpenAIFineTuningHandler` for collecting LLM inputs/outputs for OpenAI fine tuning (#7367)
### Bug Fixes / Nits
- Add support for `claude-instant-1.2` (#7369)
## [0.8.7] - 2023-08-22
### New Features
- Support fine-tuned OpenAI models (#7364)
- Added support for Cassandra vector store (#6784)
- Support pydantic fields in tool functions (#7348)
### Bug Fixes / Nits
- Fix infinite looping with forced function call in `OpenAIAgent` (#7363)
## [0.8.6] - 2023-08-22
### New Features
- auto vs. recursive retriever notebook (#7353)
- Reader and Vector Store for BagelDB with example notebooks (#7311)
### Bug Fixes / Nits
- Use service context for intermediate index in retry source query engine (#7341)
- temp fix for prompt helper + chat models (#7350)
- Properly skip unit-tests when packages not installed (#7351)
## [0.8.5.post2] - 2023-08-20
### New Features
- Added FireStore docstore/index store support (#7305)
- add recursive agent notebook (#7330)
### Bug Fixes / Nits
- Fix Azure pydantic error (#7329)
- fix callback trace ids (make them a context var) (#7331)
## [0.8.5.post1] - 2023-08-18
### New Features
- Awadb Vector Store (#7291)
### Bug Fixes / Nits
- Fix bug in OpenAI llm temperature type
## [0.8.5] - 2023-08-18
### New Features
- Expose a system prompt/query wrapper prompt in the service context for open-source LLMs (#6647)
- Changed default MyScale index format to `MSTG` (#7288)
- Added tracing to chat engines/agents (#7304)
- move LLM and embeddings to pydantic (#7289)
### Bug Fixes / Nits
- Fix sentence splitter bug (#7303)
- Fix sentence splitter infinite loop (#7295)
## [0.8.4] - 2023-08-17
### Bug Fixes / Nits
- Improve SQL Query parsing (#7283)
- Fix loading embed_model from global service context (#7284)
- Limit langchain version until we migrate to pydantic v2 (#7297)
## [0.8.3] - 2023-08-16
### New Features
- Added Knowledge Graph RAG Retriever (#7204)
### Bug Fixes / Nits
- accept `api_key` kwarg in OpenAI LLM class constructor (#7263)
- Fix to create separate queue instances for separate instances of `StreamingAgentChatResponse` (#7264)
## [0.8.2.post1] - 2023-08-14
### New Features
- Added support for Rockset as a vector store (#7111)
### Bug Fixes
- Fixed bug in service context definition that could disable LLM (#7261)
## [0.8.2] - 2023-08-14
### New Features
- Enable the LLM or embedding model to be disabled by setting to `None` in the service context (#7255)
- Resolve nearly any huggingface embedding model using the `embed_model="local:<model_name>"` syntax (#7255)
- Async tool-calling support (#7239)
### Bug Fixes / Nits
- Updated supabase kwargs for add and query (#7103)
- Small tweak to default prompts to allow for more general purpose queries (#7254)
- Make callback manager optional for `CustomLLM` + docs update (#7257)
## [0.8.1] - 2023-08-13
### New Features
- feat: add node_postprocessors to ContextChatEngine (#7232)
- add ensemble query engine tutorial (#7247)
### Smaller Features
- Allow EMPTY keys for Fastchat/local OpenAI API endpoints (#7224)
## [0.8.0] - 2023-08-11
### New Features
- Added "LLAMA_INDEX_CACHE_DIR" to control cached files (#7233)
- Default to pydantic selectors when possible (#7154, #7223)
- Remove the need for langchain wrappers on `embed_model` in the service context (#7157)
- Metadata extractors take an `LLM` object now, in addition to `LLMPredictor` (#7202)
- Added local mode + fallback to llama.cpp + llama2 (#7200)
- Added local fallback for embeddings to `BAAI/bge-small-en` (#7200)
- Added `SentenceWindowNodeParser` + `MetadataReplacementPostProcessor` (#7211)
### Breaking Changes
- Change default LLM to gpt-3.5-turbo from text-davinci-003 (#7223)
- Change prompts for compact/refine/tree_summarize to work better with gpt-3.5-turbo (#7150, #7179, #7223)
- Increase default LLM temperature to 0.1 (#7180)
## [0.7.24.post1] - 2023-08-11
### Other Changes
- Reverted #7223 changes to defaults (#7235)
## [0.7.24] - 2023-08-10
### New Features
- Default to pydantic selectors when possible (#7154, #7223)
- Remove the need for langchain wrappers on `embed_model` in the service context (#7157)
- Metadata extractors take an `LLM` object now, in addition to `LLMPredictor` (#7202)
- Added local mode + fallback to llama.cpp + llama2 (#7200)
- Added local fallback for embeddings to `BAAI/bge-small-en` (#7200)
- Added `SentenceWindowNodeParser` + `MetadataReplacementPostProcessor` (#7211)
### Breaking Changes
- Change default LLM to gpt-3.5-turbo from text-davinci-003 (#7223)
- Change prompts for compact/refine/tree_summarize to work better with gpt-3.5-turbo (#7150, #7179, #7223)
- Increase default LLM temperature to 0.1 (#7180)
### Other Changes
- docs: Improvements to Mendable Search (#7220)
- Refactor openai agent (#7077)
### Bug Fixes / Nits
- Use `1 - cosine_distance` for pgvector/postgres vector db (#7217)
- fix metadata formatting and extraction (#7216)
- fix(readers): Fix non-ASCII JSON Reader bug (#7086)
- Chore: change PgVectorStore variable name from `sim` to `distance` for clarity (#7226)
## [0.7.23] - 2023-08-10
### Bug Fixes / Nits
- Fixed metadata formatting with custom templates and inheritance (#7216)
## [0.7.23] - 2023-08-1 | |
163777 | <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
# Welcome to LlamaIndex 🦙 !
LlamaIndex is a framework for building context-augmented generative AI applications with [LLMs](https://en.wikipedia.org/wiki/Large_language_model) including [agents](./understanding/agent/basic_agent/) and [workflows](./understanding/workflows/).
<div class="grid cards" markdown>
- <span style="font-size: 200%">[Introduction](#introduction)</span>
What is context augmentation? What are agents and workflows? How does LlamaIndex help build them?
- <span style="font-size: 200%">[Use cases](#use-cases)</span>
What kind of apps can you build with LlamaIndex? Who should use it?
- <span style="font-size: 200%">[Getting started](#getting-started)</span>
Get started in Python or TypeScript in just 5 lines of code!
- <span style="font-size: 200%">[LlamaCloud](#llamacloud)</span>
Managed services for LlamaIndex including [LlamaParse](https://docs.cloud.llamaindex.ai/llamaparse/getting_started), the world's best document parser.
- <span style="font-size: 200%">[Community](#community)</span>
Get help and meet collaborators on Discord, Twitter, LinkedIn, and learn how to contribute to the project.
- <span style="font-size: 200%">[Related projects](#related-projects)</span>
Check out our library of connectors, readers, and other integrations at [LlamaHub](https://llamahub.ai) as well as demos and starter apps like [create-llama](https://www.npmjs.com/package/create-llama).
</div>
## Introduction
### What is context augmentation?
LLMs offer a natural language interface between humans and data. LLMs come pre-trained on huge amounts of publicly available data, but they are not trained on **your** data. Your data may be private or specific to the problem you're trying to solve. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks.
Context augmentation makes your data available to the LLM to solve the problem at hand. LlamaIndex provides the tools to build any context-augmentation use case, from prototype to production. Our tools allow you to ingest, parse, index and process your data and quickly implement complex query workflows combining data access with LLM prompting.
The most popular example of context-augmentation is [Retrieval-Augmented Generation or RAG](./getting_started/concepts.md), which combines context with LLMs at inference time.
### What are agents?
[Agents](./understanding/agent/basic_agent/) are LLM-powered knowledge assistants that use tools to perform tasks like research, data extraction, and more. Agents range from simple question-answering to being able to sense, decide and take actions in order to complete tasks.
LlamaIndex provides a framework for building agents including the ability to use RAG pipelines as one of many tools to complete a task.
### What are workflows?
[Workflows](./understanding/workflows/) are multi-step processes that combine one or more agents, data connectors, and other tools to complete a task. They are event-driven software that allows you to combine RAG data sources and multiple agents to create a complex application that can perform a wide variety of tasks with reflection, error-correction, and other hallmarks of advanced LLM applications. You can then [deploy these agentic workflows](./module_guides/workflow/deployment.md) as production microservices.
### LlamaIndex is the framework for Context-Augmented LLM Applications
LlamaIndex imposes no restriction on how you use LLMs. You can use LLMs as auto-complete, chatbots, agents, and more. It just makes using them easier. We provide tools like:
- **Data connectors** ingest your existing data from their native source and format. These could be APIs, PDFs, SQL, and (much) more.
- **Data indexes** structure your data in intermediate representations that are easy and performant for LLMs to consume.
- **Engines** provide natural language access to your data. For example:
- Query engines are powerful interfaces for question-answering (e.g. a RAG flow).
- Chat engines are conversational interfaces for multi-message, "back and forth" interactions with your data.
- **Agents** are LLM-powered knowledge workers augmented by tools, from simple helper functions to API integrations and more.
- **Observability/Evaluation** integrations that enable you to rigorously experiment, evaluate, and monitor your app in a virtuous cycle.
- **Workflows** allow you to combine all of the above into an event-driven system that is more flexible than other, graph-based approaches.
## Use cases
Some popular use cases for LlamaIndex and context augmentation in general include:
- [Question-Answering](./use_cases/q_and_a/index.md) (Retrieval-Augmented Generation aka RAG)
- [Chatbots](./use_cases/chatbots.md)
- [Document Understanding and Data Extraction](./use_cases/extraction.md)
- [Autonomous Agents](./use_cases/agents.md) that can perform research and take actions
- [Multi-modal applications](./use_cases/multimodal.md) that combine text, images, and other data types
- [Fine-tuning](./use_cases/fine_tuning.md) models on data to improve performance
Check out our [use cases](./use_cases/index.md) documentation for more examples and links to tutorials.
### 👨👩👧👦 Who is LlamaIndex for?
LlamaIndex provides tools for beginners, advanced users, and everyone in between.
Our high-level API allows beginner users to use LlamaIndex to ingest and query their data in 5 lines of code.
For more complex applications, our lower-level APIs allow advanced users to customize and extend any module -- data connectors, indices, retrievers, query engines, and reranking modules -- to fit their needs.
## Getting Started
LlamaIndex is available in Python (these docs) and [TypeScript](https://ts.llamaindex.ai/). If you're not sure where to start, we recommend reading [how to read these docs](./getting_started/reading.md) which will point you to the right place based on your experience level.
### 30 second quickstart
Set an environment variable called `OPENAI_API_KEY` with an [OpenAI API key](https://platform.openai.com/api-keys). Install the Python library:
```bash
pip install llama-index
```
Put some documents in a folder called `data`, then ask questions about them with our famous 5-line starter:
```python
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("Some question about the data should go here")
print(response)
```
If any part of this trips you up, don't worry! Check out our more comprehensive starter tutorials using [remote APIs like OpenAI](./getting_started/starter_example.md) or [any model that runs on your laptop](./getting_started/starter_example_local.md).
## LlamaCloud
If you're an enterprise developer, check out [**LlamaCloud**](https://llamaindex.ai/enterprise). It is an end-to-end managed service for data parsing, ingestion, indexing, and retrieval, allowing you to get production-quality data for your production LLM application. It's available both hosted on our servers or as a self-hosted solution.
### LlamaParse
LlamaParse is our state-of-the-art document parsing solution. It's available as part of LlamaCloud and also available as a self-serve API. You can [sign up](https://cloud.llamaindex.ai/) and parse up to 1000 pages/day for free, or enter a credit card for unlimited parsing. [Learn more](https://llamaindex.ai/enterprise).
## Community
Need he | |
163793 | # Deprecated Terms
As LlamaIndex continues to evolve, many class names and APIs have been adjusted, improved, and deprecated.
The following is a list of previously popular terms that have been deprecated, with links to their replacements.
## GPTSimpleVectorIndex
This has been renamed to `VectorStoreIndex`, as well as unifying all vector indexes to a single unified interface. You can integrate with various vector databases by modifying the underlying `vector_store`.
Please see the following links for more details on usage.
- [Index Usage Pattern](../module_guides/evaluating/usage_pattern.md)
- [Vector Store Guide](../module_guides/indexing/vector_store_guide.ipynb)
- [Vector Store Integrations](../community/integrations/vector_stores.md)
## GPTVectorStoreIndex
This has been renamed to `VectorStoreIndex`, but it is only a cosmetic change. Please see the following links for more details on usage.
- [Index Usage Pattern](../module_guides/evaluating/usage_pattern.md)
- [Vector Store Guide](../module_guides/indexing/vector_store_guide.ipynb)
- [Vector Store Integrations](../community/integrations/vector_stores.md)
## LLMPredictor
The `LLMPredictor` object is no longer intended to be used by users. Instead, you can setup an LLM directly and pass it into the `Settings` or the interface using the LLM. The `LLM` class itself has similar attributes and methods as the `LLMPredictor`.
- [LLMs in LlamaIndex](../module_guides/models/llms.md)
- [Setting LLMs in the Settings](../module_guides/supporting_modules/settings.md)
## PromptHelper and max_input_size
The `max_input_size` parameter for the prompt helper has since been replaced with `context_window`.
The `PromptHelper` in general has been deprecated in favour of specifying parameters directly in the `service_context` and `node_parser`.
See the following links for more details.
- [Configuring settings in the Settings](../module_guides/supporting_modules/settings.md)
- [Parsing Documents into Nodes](../module_guides/loading/node_parsers/index.md) | |
163796 | # Using LLMs
!!! tip
For a list of our supported LLMs and a comparison of their functionality, check out our [LLM module guide](../../module_guides/models/llms.md).
One of the first steps when building an LLM-based application is deciding which LLM to use; you can also use more than one if you wish.
LLMs are used at multiple different stages of your workflow:
- During **Indexing** you may use an LLM to determine the relevance of data (whether to index it at all) or you may use an LLM to summarize the raw data and index the summaries instead.
- During **Querying** LLMs can be used in two ways:
- During **Retrieval** (fetching data from your index) LLMs can be given an array of options (such as multiple different indices) and make decisions about where best to find the information you're looking for. An agentic LLM can also use _tools_ at this stage to query different data sources.
- During **Response Synthesis** (turning the retrieved data into an answer) an LLM can combine answers to multiple sub-queries into a single coherent answer, or it can transform data, such as from unstructured text to JSON or another programmatic output format.
LlamaIndex provides a single interface to a large number of different LLMs, allowing you to pass in any LLM you choose to any stage of the flow. It can be as simple as this:
```python
from llama_index.llms.openai import OpenAI
response = OpenAI().complete("Paul Graham is ")
print(response)
```
Usually, you will instantiate an LLM and pass it to `Settings`, which you then pass to other stages of the flow, as in this example:
```python
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
Settings.llm = OpenAI(temperature=0.2, model="gpt-4")
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(
documents,
)
```
In this case, you've instantiated OpenAI and customized it to use the `gpt-4` model instead of the default `gpt-3.5-turbo`, and also modified the `temperature`. The `VectorStoreIndex` will now use gpt-4 to answer questions when querying.
!!! tip
The `Settings` is a bundle of configuration data that you pass into different parts of LlamaIndex. You can [learn more about Settings](../../module_guides/supporting_modules/settings.md) and how to customize it.
## Available LLMs
We support integrations with OpenAI, Hugging Face, PaLM, and more. Check out our [module guide to LLMs](../../module_guides/models/llms.md) for a full list, including how to run a local model.
!!! tip
A general note on privacy and LLMs can be found on the [privacy page](./privacy.md).
### Using a local LLM
LlamaIndex doesn't just support hosted LLM APIs; you can also [run a local model such as Llama2 locally](https://replicate.com/blog/run-llama-locally).
For example, if you have [Ollama](https://github.com/ollama/ollama) installed and running:
```python
from llama_index.llms.ollama import Ollama
from llama_index.core import Settings
Settings.llm = Ollama(model="llama2", request_timeout=60.0)
```
See the [custom LLM's How-To](../../module_guides/models/llms/usage_custom.md) for more details.
## Prompts
By default LlamaIndex comes with a great set of built-in, battle-tested prompts that handle the tricky work of getting a specific LLM to correctly handle and format data. This is one of the biggest benefits of using LlamaIndex. If you want to, you can [customize the prompts](../../module_guides/models/prompts/index.md). | |
163801 | # Cost Analysis
## Concept
Each call to an LLM will cost some amount of money - for instance, OpenAI's gpt-3.5-turbo costs $0.002 / 1k tokens. The cost of building an index and querying depends on
- the type of LLM used
- the type of data structure used
- parameters used during building
- parameters used during querying
The cost of building and querying each index is a TODO in the reference documentation. In the meantime, we provide the following information:
1. A high-level overview of the cost structure of the indices.
2. A token predictor that you can use directly within LlamaIndex!
### Overview of Cost Structure
#### Indices with no LLM calls
The following indices don't require LLM calls at all during building (0 cost):
- `SummaryIndex`
- `SimpleKeywordTableIndex` - uses a regex keyword extractor to extract keywords from each document
- `RAKEKeywordTableIndex` - uses a RAKE keyword extractor to extract keywords from each document
#### Indices with LLM calls
The following indices do require LLM calls during build time:
- `TreeIndex` - use LLM to hierarchically summarize the text to build the tree
- `KeywordTableIndex` - use LLM to extract keywords from each document
### Query Time
There will always be >= 1 LLM call during query time, in order to synthesize the final answer.
Some indices contain cost tradeoffs between index building and querying. `SummaryIndex`, for instance,
is free to build, but running a query over a summary index (without filtering or embedding lookups), will
call the LLM {math}`N` times.
Here are some notes regarding each of the indices:
- `SummaryIndex`: by default requires {math}`N` LLM calls, where N is the number of nodes.
- `TreeIndex`: by default requires {math}`\log (N)` LLM calls, where N is the number of leaf nodes.
- Setting `child_branch_factor=2` will be more expensive than the default `child_branch_factor=1` (polynomial vs logarithmic), because we traverse 2 children instead of just 1 for each parent node.
- `KeywordTableIndex`: by default requires an LLM call to extract query keywords.
- Can do `index.as_retriever(retriever_mode="simple")` or `index.as_retriever(retriever_mode="rake")` to also use regex/RAKE keyword extractors on your query text.
- `VectorStoreIndex`: by default, requires one LLM call per query. If you increase the `similarity_top_k` or `chunk_size`, or change the `response_mode`, then this number will increase.
## Usage Pattern
LlamaIndex offers token **predictors** to predict token usage of LLM and embedding calls.
This allows you to estimate your costs during 1) index construction, and 2) index querying, before
any respective LLM calls are made.
Tokens are counted using the `TokenCountingHandler` callback. See the [example notebook](../../../examples/callbacks/TokenCountingHandler.ipynb) for details on the setup.
### Using MockLLM
To predict token usage of LLM calls, import and instantiate the MockLLM as shown below. The `max_tokens` parameter is used as a "worst case" prediction, where each LLM response will contain exactly that number of tokens. If `max_tokens` is not specified, then it will simply predict back the prompt.
```python
from llama_index.core.llms import MockLLM
from llama_index.core import Settings
# use a mock llm globally
Settings.llm = MockLLM(max_tokens=256)
```
You can then use this predictor during both index construction and querying.
### Using MockEmbedding
You may also predict the token usage of embedding calls with `MockEmbedding`.
```python
from llama_index.core import MockEmbedding
from llama_index.core import Settings
# use a mock embedding globally
Settings.embed_model = MockEmbedding(embed_dim=1536)
```
## Usage Pattern
Read about the [full usage pattern](./usage_pattern.md) for more details! | |
163802 | # Basic workflow
## Getting started
Workflows are built into LlamaIndex core, so to use them all you need is
```
pip install llama-index-core
```
During development you will probably find it helpful to visualize your workflow; you can use our built-in visualizer for this by installing it:
```
pip install llama-index-utils-workflow
```
## Dependencies
The minimal dependencies for a workflow are:
```python
from llama_index.core.workflow import (
StartEvent,
StopEvent,
Workflow,
step,
)
```
## Single-step workflow
A workflow is usually implemented as a class that inherits from `Workflow`. The class can define an arbitrary number of steps, each of which is a method decorated with `@step`. Here is the simplest possible workflow:
```python
class MyWorkflow(Workflow):
@step
async def my_step(self, ev: StartEvent) -> StopEvent:
# do something here
return StopEvent(result="Hello, world!")
w = MyWorkflow(timeout=10, verbose=False)
result = await w.run()
print(result)
```
This will simply print "Hello, World!" to the console.
In this code we:
* Define a class `MyWorkflow` that inherits from `Workflow`
* Use the @step decorator to define a single step `my_step`
* The step takes a single argument, `ev`, which is an instance of `StartEvent`
* The step returns a `StopEvent` with a result of "Hello, world!"
* We create an instance of `MyWorkflow` with a timeout of 10 seconds and verbosity off
* We run the workflow and print the result
## Type annotations for steps
The type annotations (e.g. `ev: StartEvent`) and `-> StopEvent` are essential to the way Workflows work. The expected types determine what event types will trigger a step. Tools like the visualizer (see below) also rely on these annotations to determine what types are generated and therefore where control flow goes next.
Type annotations are validated at compile time, so you will get an error message if for instance you emit an event that is never consumed by another step.
## Start and Stop events
`StartEvent` and `StopEvent` are special events that are used to start and stop a workflow. Any step that accepts a `StartEvent` will be triggered by the `run` command. Emitting a `StopEvent` will end the execution of the workflow and return a final result, even if other steps remain un-executed.
## Running a workflow in regular python
Workflows are async by default, so you use `await` to get the result of the `run` command. This will work fine in a notebook environment; in a vanilla python script you will need to import `asyncio` and wrap your code in an async function, like this:
```python
async def main():
w = MyWorkflow(timeout=10, verbose=False)
result = await w.run()
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(main())
```
In the remaining examples in this tutorial we will assume an async environment for simplicity.
## Visualizing a workflow
A great feature of workflows is the built-in visualizer, which we already installed. Let's visualize the simple workflow we just created:
```python
from llama_index.utils.workflow import draw_all_possible_flows
draw_all_possible_flows(MyWorkflow, filename="basic_workflow.html")
```
This will create a file called `basic_workflow.html` in the current directory. Open it in your browser to see an interactive, visual representation of the workflow. It will look something like this:

Of course, a flow with a single step is not very useful! Let's define a multi-step workflow.
## Custom Events
Multiple steps are created by defining custom events that can be emitted by steps and trigger other steps. Let's define a simple 3-step workflow.
We bring in our imports as before, plus a new import for `Event`:
```python
from llama_index.core.workflow import (
StartEvent,
StopEvent,
Workflow,
step,
Event,
)
from llama_index.utils.workflow import draw_all_possible_flows
```
Now we define two custom events, `FirstEvent` and `SecondEvent`. These classes can have any names and properties, but must inherit from `Event`:
```python
class FirstEvent(Event):
first_output: str
class SecondEvent(Event):
second_output: str
```
## Defining the workflow
Now we define the workflow itself. We do this by defining the input and output types on each step.
* `step_one` takes a `StartEvent` and returns a `FirstEvent`
* `step_two` takes a `FirstEvent` and returns a `SecondEvent`
* `step_three` takes a `SecondEvent` and returns a `StopEvent`
```python
class MyWorkflow(Workflow):
@step
async def step_one(self, ev: StartEvent) -> FirstEvent:
print(ev.first_input)
return FirstEvent(first_output="First step complete.")
@step
async def step_two(self, ev: FirstEvent) -> SecondEvent:
print(ev.first_output)
return SecondEvent(second_output="Second step complete.")
@step
async def step_three(self, ev: SecondEvent) -> StopEvent:
print(ev.second_output)
return StopEvent(result="Workflow complete.")
w = MyWorkflow(timeout=10, verbose=False)
result = await w.run(first_input="Start the workflow.")
print(result)
```
The full output will be
```
Start the workflow.
First step complete.
Second step complete.
Workflow complete.
```
And we can use our visualizer to see all possible flows through this workflow:
```python
from llama_index.utils.workflow import draw_all_possible_flows
draw_all_possible_flows(MyWorkflow, filename="multi_step_workflow.html")
```

Of course there is still not much point to a workflow if you just run through it from beginning to end! Let's do some [branching and looping](branches_and_loops.md). | |
163818 | # Storing
Once you have data [loaded](../loading/loading.md) and [indexed](../indexing/indexing.md), you will probably want to store it to avoid the time and cost of re-indexing it. By default, your indexed data is stored only in memory.
## Persisting to disk
The simplest way to store your indexed data is to use the built-in `.persist()` method of every Index, which writes all the data to disk at the location specified. This works for any type of index.
```python
index.storage_context.persist(persist_dir="<persist_dir>")
```
Here is an example of a Composable Graph:
```python
graph.root_index.storage_context.persist(persist_dir="<persist_dir>")
```
You can then avoid re-loading and re-indexing your data by loading the persisted index like this:
```python
from llama_index.core import StorageContext, load_index_from_storage
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="<persist_dir>")
# load index
index = load_index_from_storage(storage_context)
```
!!! tip
Important: if you had initialized your index with a custom `transformations`, `embed_model`, etc., you will need to pass in the same options during `load_index_from_storage`, or have it set as the [global settings](../../module_guides/supporting_modules/settings.md).
## Using Vector Stores
As discussed in [indexing](../indexing/indexing.md), one of the most common types of Index is the VectorStoreIndex. The API calls to create the [embeddings](../indexing/indexing.md#what-is-an-embedding) in a VectorStoreIndex can be expensive in terms of time and money, so you will want to store them to avoid having to constantly re-index things.
LlamaIndex supports a [huge number of vector stores](../../module_guides/storing/vector_stores.md) which vary in architecture, complexity and cost. In this example we'll be using Chroma, an open-source vector store.
First you will need to install chroma:
```
pip install chromadb
```
To use Chroma to store the embeddings from a VectorStoreIndex, you need to:
- initialize the Chroma client
- create a Collection to store your data in Chroma
- assign Chroma as the `vector_store` in a `StorageContext`
- initialize your VectorStoreIndex using that StorageContext
Here's what that looks like, with a sneak peek at actually querying the data:
```python
import chromadb
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
# load some documents
documents = SimpleDirectoryReader("./data").load_data()
# initialize client, setting path to save data
db = chromadb.PersistentClient(path="./chroma_db")
# create collection
chroma_collection = db.get_or_create_collection("quickstart")
# assign chroma as the vector_store to the context
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# create your index
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
# create a query engine and query
query_engine = index.as_query_engine()
response = query_engine.query("What is the meaning of life?")
print(response)
```
If you've already created and stored your embeddings, you'll want to load them directly without loading your documents or creating a new VectorStoreIndex:
```python
import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
# initialize client
db = chromadb.PersistentClient(path="./chroma_db")
# get collection
chroma_collection = db.get_or_create_collection("quickstart")
# assign chroma as the vector_store to the context
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# load your index from stored vectors
index = VectorStoreIndex.from_vector_store(
vector_store, storage_context=storage_context
)
# create a query engine
query_engine = index.as_query_engine()
response = query_engine.query("What is llama2?")
print(response)
```
!!! tip
We have a [more thorough example of using Chroma](../../examples/vector_stores/ChromaIndexDemo.ipynb) if you want to go deeper on this store.
### You're ready to query!
Now you have loaded data, indexed it, and stored that index, you're ready to [query your data](../querying/querying.md).
## Inserting Documents or Nodes
If you've already created an index, you can add new documents to your index using the `insert` method.
```python
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex([])
for doc in documents:
index.insert(doc)
```
See the [document management how-to](../../module_guides/indexing/document_management.md) for more details on managing documents and an example notebook. | |
163823 | # Indexing
With your data loaded, you now have a list of Document objects (or a list of Nodes). It's time to build an `Index` over these objects so you can start querying them.
## What is an Index?
In LlamaIndex terms, an `Index` is a data structure composed of `Document` objects, designed to enable querying by an LLM. Your Index is designed to be complementary to your querying strategy.
LlamaIndex offers several different index types. We'll cover the two most common here.
## Vector Store Index
A `VectorStoreIndex` is by far the most frequent type of Index you'll encounter. The Vector Store Index takes your Documents and splits them up into Nodes. It then creates `vector embeddings` of the text of every node, ready to be queried by an LLM.
### What is an embedding?
`Vector embeddings` are central to how LLM applications function.
A `vector embedding`, often just called an embedding, is a **numerical representation of the semantics, or meaning of your text**. Two pieces of text with similar meanings will have mathematically similar embeddings, even if the actual text is quite different.
This mathematical relationship enables **semantic search**, where a user provides query terms and LlamaIndex can locate text that is related to the **meaning of the query terms** rather than simple keyword matching. This is a big part of how Retrieval-Augmented Generation works, and how LLMs function in general.
There are [many types of embeddings](../../module_guides/models/embeddings.md), and they vary in efficiency, effectiveness and computational cost. By default LlamaIndex uses `text-embedding-ada-002`, which is the default embedding used by OpenAI. If you are using different LLMs you will often want to use different embeddings.
### Vector Store Index embeds your documents
Vector Store Index turns all of your text into embeddings using an API from your LLM; this is what is meant when we say it "embeds your text". If you have a lot of text, generating embeddings can take a long time since it involves many round-trip API calls.
When you want to search your embeddings, your query is itself turned into a vector embedding, and then a mathematical operation is carried out by VectorStoreIndex to rank all the embeddings by how semantically similar they are to your query.
### Top K Retrieval
Once the ranking is complete, VectorStoreIndex returns the most-similar embeddings as their corresponding chunks of text. The number of embeddings it returns is known as `k`, so the parameter controlling how many embeddings to return is known as `top_k`. This whole type of search is often referred to as "top-k semantic retrieval" for this reason.
Top-k retrieval is the simplest form of querying a vector index; you will learn about more complex and subtler strategies when you read the [querying](../querying/querying.md) section.
### Using Vector Store Index
To use the Vector Store Index, pass it the list of Documents you created during the loading stage:
```python
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)
```
!!! tip
`from_documents` also takes an optional argument `show_progress`. Set it to `True` to display a progress bar during index construction.
You can also choose to build an index over a list of Node objects directly:
```python
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex(nodes)
```
With your text indexed, it is now technically ready for [querying](../querying/querying.md)! However, embedding all your text can be time-consuming and, if you are using a hosted LLM, it can also be expensive. To save time and money you will want to [store your embeddings](../storing/storing.md) first.
## Summary Index
A Summary Index is a simpler form of Index best suited to queries where, as the name suggests, you are trying to generate a summary of the text in your Documents. It simply stores all of the Documents and returns all of them to your query engine.
## Further Reading
If your data is a set of interconnected concepts (in computer science terms, a "graph") then you may be interested in our [knowledge graph index](../../examples/index_structs/knowledge_graph/KnowledgeGraphDemo.ipynb). | |
163826 | # Agents
Putting together an agent in LlamaIndex can be done by defining a set of tools and providing them to our ReActAgent implementation. We're using it here with OpenAI, but it can be used with any sufficiently capable LLM:
```python
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI
from llama_index.core.agent import ReActAgent
# define sample Tool
def multiply(a: int, b: int) -> int:
"""Multiply two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
# initialize llm
llm = OpenAI(model="gpt-3.5-turbo-0613")
# initialize ReAct agent
agent = ReActAgent.from_tools([multiply_tool], llm=llm, verbose=True)
```
These tools can be Python functions as shown above, or they can be LlamaIndex query engines:
```python
from llama_index.core.tools import QueryEngineTool
query_engine_tools = [
QueryEngineTool(
query_engine=sql_agent,
metadata=ToolMetadata(
name="sql_agent", description="Agent that can execute SQL queries."
),
),
]
agent = ReActAgent.from_tools(query_engine_tools, llm=llm, verbose=True)
```
You can learn more in our [Agent Module Guide](../../module_guides/deploying/agents/index.md).
## Native OpenAIAgent
We have an `OpenAIAgent` implementation built on the [OpenAI API for function calling](https://openai.com/blog/function-calling-and-other-api-updates) that allows you to rapidly build agents:
- [OpenAIAgent](../../examples/agent/openai_agent.ipynb)
- [OpenAIAgent with Query Engine Tools](../../examples/agent/openai_agent_with_query_engine.ipynb)
- [OpenAIAgent Query Planning](../../examples/agent/openai_agent_query_plan.ipynb)
- [OpenAI Assistant](../../examples/agent/openai_assistant_agent.ipynb)
- [OpenAI Assistant Cookbook](../../examples/agent/openai_assistant_query_cookbook.ipynb)
- [Forced Function Calling](../../examples/agent/openai_forced_function_call.ipynb)
- [Parallel Function Calling](../../examples/agent/openai_agent_parallel_function_calling.ipynb)
- [Context Retrieval](../../examples/agent/openai_agent_context_retrieval.ipynb)
## Agentic Components within LlamaIndex
LlamaIndex provides core modules capable of automated reasoning for different use cases over your data which makes them essentially Agents. Some of these core modules are shown below along with example tutorials.
**SubQuestionQueryEngine for Multi Document Analysis**
- [Sub Question Query Engine (Intro)](../../examples/query_engine/sub_question_query_engine.ipynb)
- [10Q Analysis (Uber)](../../examples/usecases/10q_sub_question.ipynb)
- [10K Analysis (Uber and Lyft)](../../examples/usecases/10k_sub_question.ipynb)
**Query Transformations**
- [How-To](../../optimizing/advanced_retrieval/query_transformations.md)
- [Multi-Step Query Decomposition](../../examples/query_transformations/HyDEQueryTransformDemo.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/docs/examples/query_transformations/HyDEQueryTransformDemo.ipynb))
**Routing**
- [Usage](../../module_guides/querying/router/index.md)
- [Router Query Engine Guide](../../examples/query_engine/RouterQueryEngine.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/docs/examples/query_engine/RouterQueryEngine.ipynb))
**LLM Reranking**
- [Second Stage Processing How-To](../../module_guides/querying/node_postprocessors/index.md)
- [LLM Reranking Guide (Great Gatsby)](../../examples/node_postprocessor/LLMReranker-Gatsby.ipynb)
**Chat Engines**
- [Chat Engines How-To](../../module_guides/deploying/chat_engines/index.md)
## Using LlamaIndex as a Tool within an Agent Framework
LlamaIndex can be used as a Tool within an agent framework - including LangChain, ChatGPT. These integrations are described below.
### LangChain
We have deep integrations with LangChain.
LlamaIndex query engines can be easily packaged as Tools to be used within a LangChain agent, and LlamaIndex can also be used as a memory module / retriever. Check out our guides/tutorials below!
**Resources**
- [Building a Chatbot Tutorial](chatbots/building_a_chatbot.md)
- [OnDemandLoaderTool Tutorial](../../examples/tools/OnDemandLoaderTool.ipynb)
### ChatGPT
LlamaIndex can be used as a ChatGPT retrieval plugin (we have a TODO to develop a more general plugin as well).
**Resources**
- [LlamaIndex ChatGPT Retrieval Plugin](https://github.com/openai/chatgpt-retrieval-plugin#llamaindex) | |
163828 | acted Terms
Now that we can extract terms, we need to put them somewhere so that we can query for them later. A `VectorStoreIndex` should be a perfect choice for now! But in addition, our app should also keep track of which terms are inserted into the index so that we can inspect them later. Using `st.session_state`, we can store the current list of terms in a session dict, unique to each user!
First things first though, let's add a feature to initialize a global vector index and another function to insert the extracted terms.
```python
from llama_index.core import Settings, VectorStoreIndex
...
if "all_terms" not in st.session_state:
st.session_state["all_terms"] = DEFAULT_TERMS
...
def insert_terms(terms_to_definition):
for term, definition in terms_to_definition.items():
doc = Document(text=f"Term: {term}\nDefinition: {definition}")
st.session_state["llama_index"].insert(doc)
@st.cache_resource
def initialize_index(llm_name, model_temperature, api_key):
"""Create the VectorStoreIndex object."""
Settings.llm = get_llm(llm_name, model_temperature, api_key)
index = VectorStoreIndex([])
    return index
...
with upload_tab:
st.subheader("Extract and Query Definitions")
if st.button("Initialize Index and Reset Terms"):
st.session_state["llama_index"] = initialize_index(
llm_name, model_temperature, api_key
)
st.session_state["all_terms"] = {}
if "llama_index" in st.session_state:
st.markdown(
"Either upload an image/screenshot of a document, or enter the text manually."
)
document_text = st.text_area("Or enter raw text")
if st.button("Extract Terms and Definitions") and (
uploaded_file or document_text
):
st.session_state["terms"] = {}
terms_docs = {}
with st.spinner("Extracting..."):
terms_docs.update(
extract_terms(
[Document(text=document_text)],
term_extract_str,
llm_name,
model_temperature,
api_key,
)
)
st.session_state["terms"].update(terms_docs)
if "terms" in st.session_state and st.session_state["terms"]:
st.markdown("Extracted terms")
st.json(st.session_state["terms"])
if st.button("Insert terms?"):
with st.spinner("Inserting terms"):
insert_terms(st.session_state["terms"])
st.session_state["all_terms"].update(st.session_state["terms"])
st.session_state["terms"] = {}
st.experimental_rerun()
```
Now you are really starting to leverage the power of streamlit! Let's start with the code under the upload tab. We added a button to initialize the vector index, and we store it in the global streamlit state dictionary, as well as resetting the currently extracted terms. Then, after extracting terms from the input text, we store the extracted terms in the global state again and give the user a chance to review them before inserting. If the insert button is pressed, then we call our insert terms function, update our global tracking of inserted terms, and remove the most recently extracted terms from the session state.
## Querying for Extracted Terms/Definitions
With the terms and definitions extracted and saved, how can we use them? And how will the user even remember what's previously been saved?? We can simply add some more tabs to the app to handle these features.
```python
...
setup_tab, terms_tab, upload_tab, query_tab = st.tabs(
["Setup", "All Terms", "Upload/Extract Terms", "Query Terms"]
)
...
with terms_tab:
st.subheader("Current Extracted Terms and Definitions")
st.json(st.session_state["all_terms"])
...
with query_tab:
st.subheader("Query for Terms/Definitions!")
st.markdown(
(
            "The LLM will attempt to answer your query, and augment its answers using the terms/definitions you've inserted. "
            "If a term is not in the index, it will answer using its internal knowledge."
)
)
if st.button("Initialize Index and Reset Terms", key="init_index_2"):
st.session_state["llama_index"] = initialize_index(
llm_name, model_temperature, api_key
)
st.session_state["all_terms"] = {}
if "llama_index" in st.session_state:
query_text = st.text_input("Ask about a term or definition:")
if query_text:
query_text = (
query_text
+ "\nIf you can't find the answer, answer the query with the best of your knowledge."
)
with st.spinner("Generating answer..."):
response = (
st.session_state["llama_index"]
.as_query_engine(
similarity_top_k=5,
response_mode="compact",
text_qa_template=TEXT_QA_TEMPLATE,
refine_template=DEFAULT_REFINE_PROMPT,
)
.query(query_text)
)
st.markdown(str(response))
```
While this is mostly basic, some important things to note:
- Our initialize button has the same text as our other button. Streamlit will complain about this, so we provide a unique key instead.
- Some additional text has been added to the query! This is to try and compensate for times when the index does not have the answer.
- In our index query, we've specified two options:
- `similarity_top_k=5` means the index will fetch the top 5 closest matching terms/definitions to the query.
- `response_mode="compact"` means as much text as possible from the 5 matching terms/definitions will be used in each LLM call. Without this, the index would make at least 5 calls to the LLM, which can slow things down for the user.
## Dry Run Test
Well, actually I hope you've been testing as we went. But now, let's try one complete test.
1. Refresh the app
2. Enter your LLM settings
3. Head over to the query tab
4. Ask the following: `What is a bunnyhug?`
5. The app should give some nonsense response. If you didn't know, a bunnyhug is another word for a hoodie, used by people from the Canadian Prairies!
6. Let's add this definition to the app. Open the upload tab and enter the following text: `A bunnyhug is a common term used to describe a hoodie. This term is used by people from the Canadian Prairies.`
7. Click the extract button. After a few moments, the app should display the correctly extracted term/definition. Click the insert term button to save it!
8. If we open the terms tab, the term and definition we just extracted should be displayed
9. Go back to the query tab and try asking what a bunnyhug is. Now, the answer should be correct!
## Improvement | |
163829 | #1 - Create a Starting Index
With our base app working, it might feel like a lot of work to build up a useful index. What if we gave the user some kind of starting point to show off the app's query capabilities? We can do just that! First, let's make a small change to our app so that we save the index to disk after every upload:
```python
def insert_terms(terms_to_definition):
for term, definition in terms_to_definition.items():
doc = Document(text=f"Term: {term}\nDefinition: {definition}")
st.session_state["llama_index"].insert(doc)
# TEMPORARY - save to disk
st.session_state["llama_index"].storage_context.persist()
```
Now, we need some document to extract from! The repository for this project used the wikipedia page on New York City, and you can find the text [here](https://github.com/jerryjliu/llama_index/blob/main/examples/test_wiki/data/nyc_text.txt).
If you paste the text into the upload tab and run it (it may take some time), we can insert the extracted terms. Make sure to also copy the text for the extracted terms into a notepad or similar before inserting into the index! We will need them in a second.
After inserting, remove the line of code we used to save the index to disk. With a starting index now saved, we can modify our `initialize_index` function to look like this:
```python
@st.cache_resource
def initialize_index(llm_name, model_temperature, api_key):
"""Load the Index object."""
Settings.llm = get_llm(llm_name, model_temperature, api_key)
index = load_index_from_storage(storage_context)
return index
```
Did you remember to save that giant list of extracted terms in a notepad? Now when our app initializes, we want to pass in the default terms that are in the index to our global terms state:
```python
...
if "all_terms" not in st.session_state:
st.session_state["all_terms"] = DEFAULT_TERMS
...
```
Repeat the above anywhere where we were previously resetting the `all_terms` values.
## Improvement #2 - (Refining) Better Prompts
If you play around with the app a bit now, you might notice that it stopped following our prompt! Remember, we added to our `query_str` variable that if the term/definition could not be found, answer to the best of its knowledge. But now if you try asking about random terms (like bunnyhug!), it may or may not follow those instructions.
This is due to the concept of "refining" answers in Llama Index. Since we are querying across the top 5 matching results, sometimes all the results do not fit in a single prompt! OpenAI models typically have a max input size of 4097 tokens. So, Llama Index accounts for this by breaking up the matching results into chunks that will fit into the prompt. After Llama Index gets an initial answer from the first API call, it sends the next chunk to the API, along with the previous answer, and asks the model to refine that answer.
So, the refine process seems to be messing with our results! Rather than appending extra instructions to the `query_str`, remove that, and Llama Index will let us provide our own custom prompts! Let's create those now, using the [default prompts](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/default_prompts.py) and [chat specific prompts](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/chat_prompts.py) as a guide. Using a new file `constants.py`, let's create some new query templates:
```python
from llama_index.core import (
PromptTemplate,
SelectorPromptTemplate,
ChatPromptTemplate,
)
from llama_index.core.prompts.utils import is_chat_model
from llama_index.core.llms import ChatMessage, MessageRole
# Text QA templates
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given the context information answer the following question "
"(if you don't know the answer, use the best of your knowledge): {query_str}\n"
)
TEXT_QA_TEMPLATE = PromptTemplate(DEFAULT_TEXT_QA_PROMPT_TMPL)
# Refine templates
DEFAULT_REFINE_PROMPT_TMPL = (
"The original question is as follows: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context and using the best of your knowledge, improve the existing answer. "
"If you can't improve the existing answer, just repeat it again."
)
DEFAULT_REFINE_PROMPT = PromptTemplate(DEFAULT_REFINE_PROMPT_TMPL)
CHAT_REFINE_PROMPT_TMPL_MSGS = [
ChatMessage(content="{query_str}", role=MessageRole.USER),
ChatMessage(content="{existing_answer}", role=MessageRole.ASSISTANT),
ChatMessage(
content="We have the opportunity to refine the above answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context and using the best of your knowledge, improve the existing answer. "
"If you can't improve the existing answer, just repeat it again.",
role=MessageRole.USER,
),
]
CHAT_REFINE_PROMPT = ChatPromptTemplate(CHAT_REFINE_PROMPT_TMPL_MSGS)
# refine prompt selector
REFINE_TEMPLATE = SelectorPromptTemplate(
default_template=DEFAULT_REFINE_PROMPT,
conditionals=[(is_chat_model, CHAT_REFINE_PROMPT)],
)
```
That seems like a lot of code, but it's not too bad! If you looked at the default prompts, you might have noticed that there are default prompts, and prompts specific to chat models. Continuing that trend, we do the same for our custom prompts. Then, using a prompt selector, we can combine both prompts into a single object. If the LLM being used is a chat model (ChatGPT, GPT-4), then the chat prompts are used. Otherwise, use the normal prompt templates.
Another thing to note is that we only defined one QA template. In a chat model, this will be converted to a single "human" message.
So, now we can import these prompts into our app and use them during the query.
```python
from constants import REFINE_TEMPLATE, TEXT_QA_TEMPLATE
...
if "llama_index" in st.session_state:
query_text = st.text_input("Ask about a term or definition:")
if query_text:
query_text = query_text # Notice we removed the old instructions
with st.spinner("Generating answer..."):
response = (
st.session_state["llama_index"]
.as_query_engine(
similarity_top_k=5,
response_mode="compact",
text_qa_template=TEXT_QA_TEMPLATE,
                refine_template=REFINE_TEMPLATE,
)
.query(query_text)
)
st.markdown(str(response))
...
```
If you experiment a bit more with queries, hopefully you notice that the responses follow our instructions a little better now!
## Improvement | |
163830 | #3 - Image Support
Llama index also supports images! Using Llama Index, we can upload images of documents (papers, letters, etc.), and Llama Index handles extracting the text. We can leverage this to also allow users to upload images of their documents and extract terms and definitions from them.
If you get an import error about PIL, install it using `pip install Pillow` first.
```python
from PIL import Image
from llama_index.readers.file import ImageReader
@st.cache_resource
def get_file_extractor():
image_parser = ImageReader(keep_image=True, parse_text=True)
file_extractor = {
".jpg": image_parser,
".png": image_parser,
".jpeg": image_parser,
}
return file_extractor
file_extractor = get_file_extractor()
...
with upload_tab:
st.subheader("Extract and Query Definitions")
if st.button("Initialize Index and Reset Terms", key="init_index_1"):
st.session_state["llama_index"] = initialize_index(
llm_name, model_temperature, api_key
)
st.session_state["all_terms"] = DEFAULT_TERMS
if "llama_index" in st.session_state:
st.markdown(
"Either upload an image/screenshot of a document, or enter the text manually."
)
uploaded_file = st.file_uploader(
"Upload an image/screenshot of a document:",
type=["png", "jpg", "jpeg"],
)
document_text = st.text_area("Or enter raw text")
if st.button("Extract Terms and Definitions") and (
uploaded_file or document_text
):
st.session_state["terms"] = {}
terms_docs = {}
with st.spinner("Extracting (images may be slow)..."):
if document_text:
terms_docs.update(
extract_terms(
[Document(text=document_text)],
term_extract_str,
llm_name,
model_temperature,
api_key,
)
)
if uploaded_file:
Image.open(uploaded_file).convert("RGB").save("temp.png")
img_reader = SimpleDirectoryReader(
input_files=["temp.png"], file_extractor=file_extractor
)
img_docs = img_reader.load_data()
os.remove("temp.png")
terms_docs.update(
extract_terms(
img_docs,
term_extract_str,
llm_name,
model_temperature,
api_key,
)
)
st.session_state["terms"].update(terms_docs)
if "terms" in st.session_state and st.session_state["terms"]:
st.markdown("Extracted terms")
st.json(st.session_state["terms"])
if st.button("Insert terms?"):
with st.spinner("Inserting terms"):
insert_terms(st.session_state["terms"])
st.session_state["all_terms"].update(st.session_state["terms"])
st.session_state["terms"] = {}
st.experimental_rerun()
```
Here, we added the option to upload a file using Streamlit. Then the image is opened and saved to disk (this seems hacky but it keeps things simple). Then we pass the image path to the reader, extract the documents/text, and remove our temp image file.
Now that we have the documents, we can call `extract_terms()` the same as before.
## Conclusion/TLDR
In this tutorial, we covered a ton of information, while solving some common issues and problems along the way:
- Using different indexes for different use cases (List vs. Vector index)
- Storing global state values with Streamlit's `session_state` concept
- Customizing internal prompts with Llama Index
- Reading text from images with Llama Index
The final version of this tutorial can be found [here](https://github.com/abdulasiraj/A-Guide-to-Extracting-Terms-and-Definitions) and a live hosted demo is available on [Huggingface Spaces](https://huggingface.co/spaces/Nobody4591/Llama_Index_Term_Extractor). | |
163837 | # How to Build a Chatbot
LlamaIndex serves as a bridge between your data and Large Language Models (LLMs), providing a toolkit that enables you to establish a query interface around your data for a variety of tasks, such as question-answering and summarization.
In this tutorial, we'll walk you through building a context-augmented chatbot using a [Data Agent](https://gpt-index.readthedocs.io/en/stable/core_modules/agent_modules/agents/root.html). This agent, powered by LLMs, is capable of intelligently executing tasks over your data. The end result is a chatbot agent equipped with a robust set of data interface tools provided by LlamaIndex to answer queries about your data.
**Note**: This tutorial builds upon initial work on creating a query interface over SEC 10-K filings - [check it out here](https://medium.com/@jerryjliu98/how-unstructured-and-llamaindex-can-help-bring-the-power-of-llms-to-your-own-data-3657d063e30d).
### Context
In this guide, we’ll build a "10-K Chatbot" that uses raw UBER 10-K HTML filings from Dropbox. Users can interact with the chatbot to ask questions related to the 10-K filings.
### Preparation
```python
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import nest_asyncio
nest_asyncio.apply()
```
### Ingest Data
Let's first download the raw 10-k files, from 2019-2022.
```
# NOTE: the code examples assume you're operating within a Jupyter notebook.
# download files
!mkdir data
!wget "https://www.dropbox.com/s/948jr9cfs7fgj99/UBER.zip?dl=1" -O data/UBER.zip
!unzip data/UBER.zip -d data
```
To parse the HTML files into formatted text, we use the [Unstructured](https://github.com/Unstructured-IO/unstructured) library. Thanks to [LlamaHub](https://llamahub.ai/), we can directly integrate with Unstructured, allowing conversion of any text into a Document format that LlamaIndex can ingest.
First we install the necessary packages:
```
!pip install llama-hub unstructured
```
Then we can use the `UnstructuredReader` to parse the HTML files into a list of `Document` objects.
```python
from llama_index.readers.file import UnstructuredReader
from pathlib import Path
years = [2022, 2021, 2020, 2019]
loader = UnstructuredReader()
doc_set = {}
all_docs = []
for year in years:
year_docs = loader.load_data(
file=Path(f"./data/UBER/UBER_{year}.html"), split_documents=False
)
# insert year metadata into each year
for d in year_docs:
d.metadata = {"year": year}
doc_set[year] = year_docs
all_docs.extend(year_docs)
```
### Setting up Vector Indices for each year
We first setup a vector index for each year. Each vector index allows us
to ask questions about the 10-K filing of a given year.
We build each index and save it to disk.
```python
# initialize simple vector indices
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.core import Settings
Settings.chunk_size = 512
index_set = {}
for year in years:
storage_context = StorageContext.from_defaults()
cur_index = VectorStoreIndex.from_documents(
doc_set[year],
storage_context=storage_context,
)
index_set[year] = cur_index
storage_context.persist(persist_dir=f"./storage/{year}")
```
To load an index from disk, do the following
```python
# Load indices from disk
from llama_index.core import load_index_from_storage
index_set = {}
for year in years:
storage_context = StorageContext.from_defaults(
persist_dir=f"./storage/{year}"
)
cur_index = load_index_from_storage(
storage_context,
)
index_set[year] = cur_index
```
### Setting up a Sub Question Query Engine to Synthesize Answers Across 10-K Filings
Since we have access to documents of 4 years, we may not only want to ask questions regarding the 10-K document of a given year, but also ask questions that require analysis over all 10-K filings.
To address this, we can use a [Sub Question Query Engine](https://gpt-index.readthedocs.io/en/stable/examples/query_engine/sub_question_query_engine.html). It decomposes a query into subqueries, each answered by an individual vector index, and synthesizes the results to answer the overall query.
LlamaIndex provides some wrappers around indices (and query engines) so that they can be used by query engines and agents. First we define a `QueryEngineTool` for each vector index.
Each tool has a name and a description; these are what the LLM agent sees to decide which tool to choose.
```python
from llama_index.core.tools import QueryEngineTool, ToolMetadata
individual_query_engine_tools = [
QueryEngineTool(
query_engine=index_set[year].as_query_engine(),
metadata=ToolMetadata(
name=f"vector_index_{year}",
description=f"useful for when you want to answer queries about the {year} SEC 10-K for Uber",
),
)
for year in years
]
```
Now we can create the Sub Question Query Engine, which will allow us to synthesize answers across the 10-K filings. We pass in the `individual_query_engine_tools` we defined above, as well as an `llm` that will be used to run the subqueries.
```python
from llama_index.llms.openai import OpenAI
from llama_index.core.query_engine import SubQuestionQueryEngine
query_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=individual_query_engine_tools,
llm=OpenAI(model="gpt-3.5-turbo"),
)
```
### Setting up the Chatbot Agent
We use a LlamaIndex Data Agent to setup the outer chatbot agent, which has access to a set of Tools. Specifically, we will use an OpenAIAgent, that takes advantage of OpenAI API function calling. We want to use the separate Tools we defined previously for each index (corresponding to a given year), as well as a tool for the sub question query engine we defined above.
First we define a `QueryEngineTool` for the sub question query engine:
```python
query_engine_tool = QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name="sub_question_query_engine",
description="useful for when you want to answer queries that require analyzing multiple SEC 10-K documents for Uber",
),
)
```
Then, we combine the Tools we defined above into a single list of tools for the agent:
```python
tools = individual_query_engine_tools + [query_engine_tool]
```
Finally, we call `OpenAIAgent.from_tools` to create the agent, passing in the list of tools we defined above.
```python
from llama_index.agent.openai import OpenAIAgent
agent = OpenAIAgent.from_tools(tools, verbose=True)
```
## | |
164007 | ::: llama_index.embeddings.langchain
options:
members:
- LangchainEmbedding | |
164044 | ::: llama_index.core.memory.chat_memory_buffer | |
164759 | "show list of text ['file_path: data/paul_graham/paul_graham_essay.txt People who see the responses to essays I write sometimes tell me how sorry they feel for me, but I\\'m not exaggerating when I reply that it has always been like this, since the very beginning. It comes with the territory. An essay must tell readers things they don\\'t already know, and some people dislike being told such things. [11] People put plenty of stuff on the internet in the 90s of course, but putting something online is not the same as publishing it online. Publishing online means you treat the online version as the (or at least a) primary version. [12] There is a general lesson here that our experience with Y Combinator also teaches: Customs continue to constrain you long after the restrictions that caused them have disappeared. Customary VC practice had once, like the customs about publishing essays, been based on real constraints. Startups had once been much more expensive to start, and proportionally rare. Now they could be cheap and common, but the VCs\\' customs still reflected the old world, just as customs about writing essays still reflected the constraints of the print era. Which in turn implies that people who are independent-minded (i.e. less influenced by custom) will have an advantage in fields affected by rapid change (where customs are more likely to be obsolete). Here\\'s an interesting point, though: you can\\'t always predict which fields will be affected by rapid change. Obviously software and venture capital will be, but who would have predicted that essay writing would be? [13] Y Combinator was not the original name. At first we were called Cambridge Seed. But we didn\\'t want a regional name, in case someone copied us in Silicon Valley, so we renamed ourselves after one of the coolest tricks in the lambda calculus, the Y combinator. I picked orange as our color partly because it\\'s the warmest, and partly because no VC used it. 
In 2005 all the VCs used staid colors like maroon, navy blue, and forest green, because they were trying to appeal to LPs, not founders. The YC logo itself is an inside joke: the Viaweb logo had been a white V on a red circle, so I made the YC logo a white Y on an orange square. [14] YC did become a fund for a couple years starting in 2009, because it was getting so big I could no longer afford to fund it personally. But after Heroku got bought we had enough money to go back to being self-funded. [15] I\\'ve never liked the term \"deal flow,\" because it implies that the number of new startups at any given time is fixed. This is not only false, but it\\'s the purpose of YC to falsify it, by causing startups to be founded that would not otherwise have existed. [16] She reports that they were all different shapes and sizes, because there was a run on air conditioners and she had to get whatever she could, but that they were all heavier than she could carry now. [17] Another problem with HN was a bizarre edge case that occurs when you both write essays and run a forum. When you run a forum, you\\'re assumed to see if not every conversation, at least every conversation involving you. And when you write essays, people post highly imaginative misinterpretations of them on forums. Individually these two phenomena are tedious but bearable, but the combination is disastrous. You actually have to respond to the misinterpretations, because the assumption that you\\'re present in the conversation means that not responding to any sufficiently upvoted misinterpretation reads as a tacit admission that it\\'s correct. But that in turn encourages more; anyone who wants to pick a fight with you senses that now is their chance. [18] The worst thing about leaving YC was not working with Jessica anymore. 
We\\'d been working on YC almost the whole time we\\'d known each other, and we\\'d neither tried nor wanted to separate it from our personal lives, so leaving was like pulling up a deeply rooted tree. [19] One way to get more precise about the concept of invented vs discovered is to talk about space aliens. Any sufficiently advanced alien civilization would certainly know about the Pythagorean theorem, for example. I believe, though with less certainty, that they would also know about the Lisp in McCarthy\\'s 1960 paper. But if so there\\'s no reason to suppose that this is the limit of the language that might be known to them. Presumably aliens need numbers and errors and I/O too. So it seems likely there exists at least one path out of McCarthy\\'s Lisp along which discoveredness is preserved.', \"file_path: data/paul_graham/paul_graham_essay.txt [18] The worst thing about leaving YC was not working with Jessica anymore. We'd been working on YC almost the whole time we'd known each other, and we'd neither tried nor wanted to separate it from our personal lives, so leaving was like pulling up a deeply rooted tree. [19] One way to get more precise about the concept of invented vs discovered is to talk about space aliens. Any sufficiently advanced alien civilization would certainly know about the Pythagorean theorem, for example. I believe, though with less certainty, that they would also know about the Lisp in McCarthy's 1960 paper. But if so there's no reason to suppose that this is the limit of the language that might be known to them. Presumably aliens need numbers and errors and I/O too. So it seems likely there exists at least one path out of McCarthy's Lisp along which discoveredness is preserved. Thanks to Trevor Blackwell, John Collison, Patrick Collison, Daniel Gackle, Ralph Hazell, Jessica Livingston, Robert Morris, and Harj Taggar for reading drafts of this.\"] nomic-ai/nomic-embed-text-v1.5 {}\n"
]
},
{
"data": {
"text/markdown": [
"<b>The author, growing up, primarily worked on writing and programming. He started by writing short stories, which he admits were not very good, and tried programming on an IBM 1401 in ninth grade. However, he found it puzzling as he couldn't figure out what to do with it due to the lack of input data. His first significant experience with programming came with the advent of microcomputers, which he could use right at his desk and receive immediate responses. He built his own microcomputer and later convinced his father to buy a TRS-80. He wrote simple games, a program to predict rocket flights, and a word processor. Despite his interest in programming, he initially planned to study philosophy in college, but eventually switched to AI.</b>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from llama_index.llms.fireworks import Fireworks\n",
"from llama_index.embeddings.fireworks import FireworksEmbedding\n",
"\n",
"llm = Fireworks(\n",
" temperature=0, model=\"accounts/fireworks/models/mixtral-8x7b-instruct\"\n",
")\n",
"\n",
"# create client and a new collection\n",
"chroma_client = chromadb.EphemeralClient()\n",
"chroma_collection = chroma_client.create_collection(\"quickstart\")\n",
"\n",
"# define embedding function\n",
"embed_model = FireworksEmbedding(\n",
" model_name=\"nomic-ai/nomic-embed-text-v1.5\",\n",
")\n",
"\n",
"# load documents\n",
"documents = SimpleDirectoryReader(\"./data/paul_graham/\").load_data()\n",
"\n",
"# set up ChromaVectorStore and load in data\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"index = VectorStoreIndex.from_documents(\n",
" documents, storage_context=storage_context, embed_model=embed_model\n",
")\n",
"\n",
"# Query Data\n",
"query_engine = index.as_query_engine(llm=llm)\n",
"response = query_engine.query(\"What did the author do growing up?\")\n",
"display(Markdown(f\"<b>{response}</b>\"))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "349de571",
"metadata": {},
"source": [
"## Basic Example (including saving to disk) and resizable embeddings\n",
"\n",
"Extending the previous example, if you want to save to disk, simply initialize the Chroma client and pass the directory where you want the data to be saved to. \n",
"\n",
"`Caution`: Chroma makes a best-effort to automatically save data to disk, however multiple in-memory clients can stomp each other's work. As a best practice, only have one client per path running at any given time.\n",
"\n", | |
164792 | " fused_sim = alpha * (sparse_sim + dense_sim)\n",
" fused_similarities.append((fused_sim, all_nodes_dict[node_id]))\n",
"\n",
" fused_similarities.sort(key=lambda x: x[0], reverse=True)\n",
" fused_similarities = fused_similarities[:top_k]\n",
"\n",
" # create final response object\n",
" return VectorStoreQueryResult(\n",
" nodes=[x[1] for x in fused_similarities],\n",
" similarities=[x[0] for x in fused_similarities],\n",
" ids=[x[1].node_id for x in fused_similarities],\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vector_store = QdrantVectorStore(\n",
" \"llama2_paper\",\n",
" client=client,\n",
" enable_hybrid=True,\n",
" hybrid_fusion_fn=relative_score_fusion,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You may have noticed the alpha parameter in the above function. This can be set directly in the `as_query_engine()` call, which will set it in the vector index retriever."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"index.as_query_engine(alpha=0.5, similarity_top_k=2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Customizing Hybrid Qdrant Collections\n",
"\n",
"Instead of letting llama-index do it, you can also configure your Qdrant hybrid collections ahead of time.\n",
"\n",
"**NOTE:** The names of vector configs must be `text-dense` and `text-sparse` if creating a hybrid index."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from qdrant_client import models\n",
"\n",
"client.recreate_collection(\n",
" collection_name=\"llama2_paper\",\n",
" vectors_config={\n",
" \"text-dense\": models.VectorParams(\n",
" size=1536, # openai vector size\n",
" distance=models.Distance.COSINE,\n",
" )\n",
" },\n",
" sparse_vectors_config={\n",
" \"text-sparse\": models.SparseVectorParams(\n",
" index=models.SparseIndexParams()\n",
" )\n",
" },\n",
")\n",
"\n",
"# enable hybrid since we created a sparse collection\n",
"vector_store = QdrantVectorStore(\n",
" collection_name=\"llama2_paper\", client=client, enable_hybrid=True\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llama-index-4a-wkI5X-py3.11",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 2
} | |
164907 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# DuckDB\n",
"\n",
">[DuckDB](https://duckdb.org/docs/api/python/overview) is a fast in-process analytical database. DuckDB is under an MIT license.\n",
"\n",
"In this notebook we are going to show how to use DuckDB as a Vector store to be used in LlamaIndex.\n",
"\n",
"Install DuckDB with:\n",
"\n",
"```sh\n",
"pip install duckdb\n",
"```\n",
"\n",
"Make sure to use the latest DuckDB version (>= 0.10.0).\n",
"\n",
"You can run DuckDB in different modes depending on persistence:\n",
"- `in-memory` is the default mode, where the database is created in memory; you can force this to be used by setting `database_name = \":memory:\"` when initializing the vector store.\n",
"- `persistence` is set by using a name for a database and setting a persistence directory `database_name = \"my_vector_store.duckdb\"` where the database is persisted in the default `persist_dir` or to the one you set it to.\n",
"\n",
"With the vector store created, you can:\n",
"- `.add` \n",
"- `.get` \n",
"- `.update`\n",
"- `.upsert`\n",
"- `.delete`\n",
"- `.peek`\n",
"- `.query` to run a search. \n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Basic example\n",
"\n",
"In this basic example, we take the Paul Graham essay, split it into chunks, embed it using an open-source embedding model, load it into `DuckDBVectorStore`, and then query it.\n",
"\n",
"For the embedding model we will use OpenAI. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Creating a DuckDB Index"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install duckdb\n",
"!pip install llama-index-vector-stores-duckdb"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
"from llama_index.vector_stores.duckdb import DuckDBVectorStore\n",
"from llama_index.core import StorageContext\n",
"\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Setup OpenAI API\n",
"import os\n",
"import openai\n",
"\n",
"openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Download and prepare the sample dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--2024-02-16 19:38:34-- https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.110.133, 185.199.111.133, 185.199.108.133, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.110.133|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 75042 (73K) [text/plain]\n",
"Saving to: ‘data/paul_graham/paul_graham_essay.txt’\n",
"\n",
"data/paul_graham/pa 100%[===================>] 73.28K --.-KB/s in 0.06s \n",
"\n",
"2024-02-16 19:38:34 (1.24 MB/s) - ‘data/paul_graham/paul_graham_essay.txt’ saved [75042/75042]\n",
"\n"
]
}
],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"documents = SimpleDirectoryReader(\"data/paul_graham/\").load_data()\n",
"\n",
"vector_store = DuckDBVectorStore()\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"\n",
"index = VectorStoreIndex.from_documents(\n",
" documents, storage_context=storage_context\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"<b>The author mentions that before college, they worked on two main things outside of school: writing and programming. They wrote short stories and also tried writing programs on an IBM 1401 computer. They later got a microcomputer and started programming more extensively.</b>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"query_engine = index.as_query_engine()\n",
"response = query_engine.query(\"What did the author do growing up?\")\n",
"display(Markdown(f\"<b>{response}</b>\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Persisting to disk example\n",
"\n",
"Extending the previous example, if you want to save to disk, simply initialize the DuckDBVectorStore by specifying a database name and persist directory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Save to disk\n",
"documents = SimpleDirectoryReader(\"data/paul_graham/\").load_data()\n",
"\n",
"vector_store = DuckDBVectorStore(\"pg.duckdb\", persist_dir=\"./persist/\")\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"\n",
"index = VectorStoreIndex.from_documents(\n",
" documents, storage_context=storage_context\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load from disk\n",
"vector_store = DuckDBVectorStore.from_local(\"./persist/pg.duckdb\")\n",
"index = VectorStoreIndex.from_vector_store(vector_store)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"<b>The author mentions that before college, they worked on two main things outside of school: writing and programming. They wrote short stories and also tried writing programs on an IBM 1401 computer. They later got a microcomputer and started programming more extensively.</b>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Query Data\n",
"query_engine = index.as_query_engine()\n", | |
164935 | {
"cells": [
{
"cell_type": "markdown",
"id": "714eb664",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/vector_stores/PineconeIndexDemo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"id": "307804a3-c02b-4a57-ac0d-172c30ddc851",
"metadata": {},
"source": [
"# Pinecone Vector Store"
]
},
{
"cell_type": "markdown",
"id": "36be66bf",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9ddff1e4",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-vector-stores-pinecone"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6807106d",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index>=0.9.31 pinecone-client>=3.0.0"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d48af8e1",
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import sys\n",
"import os\n",
"\n",
"logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n",
"logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))"
]
},
{
"cell_type": "markdown",
"id": "f7010b1d-d1bb-4f08-9309-a328bb4ea396",
"metadata": {},
"source": [
"#### Creating a Pinecone Index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0ce3143d-198c-4dd2-8e5a-c5cdf94f017a",
"metadata": {},
"outputs": [],
"source": [
"from pinecone import Pinecone, ServerlessSpec"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4ad14111-0bbb-4c62-906d-6d6253e0cdee",
"metadata": {},
"outputs": [],
"source": [
"os.environ[\n",
" \"PINECONE_API_KEY\"\n",
"] = \"<Your Pinecone API key, from app.pinecone.io>\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
"\n",
"api_key = os.environ[\"PINECONE_API_KEY\"]\n",
"\n",
"pc = Pinecone(api_key=api_key)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "233a080f",
"metadata": {},
"outputs": [],
"source": [
"# delete if needed\n",
"# pc.delete_index(\"quickstart\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c2c90087-bdd9-4ca4-b06b-2af883559f88",
"metadata": {},
"outputs": [],
"source": [
"# dimensions are for text-embedding-ada-002\n",
"\n",
"pc.create_index(\n",
" name=\"quickstart\",\n",
" dimension=1536,\n",
" metric=\"euclidean\",\n",
" spec=ServerlessSpec(cloud=\"aws\", region=\"us-west-2\"),\n",
")\n",
"\n",
"# If you need to create a PodBased Pinecone index, you could alternatively do this:\n",
"#\n",
"# from pinecone import Pinecone, PodSpec\n",
"#\n",
"# pc = Pinecone(api_key='xxx')\n",
"#\n",
"# pc.create_index(\n",
"# \t name='my-index',\n",
"# \t dimension=1536,\n",
"# \t metric='cosine',\n",
"# \t spec=PodSpec(\n",
"# \t\t environment='us-east1-gcp',\n",
"# \t\t pod_type='p1.x1',\n",
"# \t\t pods=1\n",
"# \t )\n",
"# )\n",
"#"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "667f3cb3-ce18-48d5-b9aa-bfc1a1f0f0f6",
"metadata": {},
"outputs": [],
"source": [
"pinecone_index = pc.Index(\"quickstart\")"
]
},
{
"cell_type": "markdown",
"id": "8ee4473a-094f-4d0a-a825-e1213db07240",
"metadata": {},
"source": [
"#### Load documents, build the PineconeVectorStore and VectorStoreIndex"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a2bcc07",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
"from llama_index.vector_stores.pinecone import PineconeVectorStore\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "markdown",
"id": "7d782f76",
"metadata": {},
"source": [
"Download Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5104674e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Will not apply HSTS. The HSTS database must be a regular and non-world-writable file.\n",
"ERROR: could not open HSTS store at '/home/loganm/.wget-hsts'. HSTS will be disabled.\n",
"--2024-01-16 11:56:25-- https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.111.133, 185.199.110.133, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 75042 (73K) [text/plain]\n",
"Saving to: ‘data/paul_graham/paul_graham_essay.txt’\n",
"\n",
"data/paul_graham/pa 100%[===================>] 73.28K --.-KB/s in 0.04s \n",
"\n",
"2024-01-16 11:56:25 (1.79 MB/s) - ‘data/paul_graham/paul_graham_essay.txt’ saved [75042/75042]\n",
"\n"
]
}
],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "68cbd239-880e-41a3-98d8-dbb3fab55431",
"metadata": {},
"outputs": [],
"source": [
"# load documents\n",
"documents = SimpleDirectoryReader(\"./data/paul_graham\").load_data()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba1558b3",
"metadata": {}, | |
164954 | {
"cells": [
{
"cell_type": "markdown",
"id": "f5860e4e-3775-41b9-8329-46a6d9568056",
"metadata": {},
"source": [
"## Couchbase Vector Store\n",
"[Couchbase](https://couchbase.com/) is an award-winning distributed NoSQL cloud database that delivers unmatched versatility, performance, scalability, and financial value for all of your cloud, mobile, AI, and edge computing applications. Couchbase embraces AI with coding assistance for developers and vector search for their applications.\n",
"\n",
"Vector Search is a part of the [Full Text Search Service](https://docs.couchbase.com/server/current/learn/services-and-indexes/services/search-service.html) (Search Service) in Couchbase.\n",
"\n",
"This tutorial explains how to use Vector Search in Couchbase. You can work with both [Couchbase Capella](https://www.couchbase.com/products/capella/) and your self-managed Couchbase Server."
]
},
{
"cell_type": "markdown",
"id": "0fb82a99-965c-4a04-80d0-2baa91f5dcf0",
"metadata": {},
"source": [
"### Installation\n",
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6e0e1c57-f30c-4dd2-b1c0-91f80df7012a",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-vector-stores-couchbase"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "629d75ac-c7c2-444f-9a3d-adbdc6533160",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "markdown",
"id": "3321020f-849a-4848-9747-b154ecd0a15e",
"metadata": {},
"source": [
"### Creating Couchbase Connection\n",
"We create a connection to the Couchbase cluster initially and then pass the cluster object to the Vector Store.\n",
"\n",
"Here, we are connecting using the username and password. You can also connect using any other supported way to your cluster.\n",
"\n",
"For more information on connecting to the Couchbase cluster, please check the [Python SDK documentation](https://docs.couchbase.com/python-sdk/current/hello-world/start-using-sdk.html#connect).\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "97805d01-3bd5-4933-82e1-6876c5101e13",
"metadata": {},
"outputs": [],
"source": [
"COUCHBASE_CONNECTION_STRING = (\n",
" \"couchbase://localhost\" # or \"couchbases://localhost\" if using TLS\n",
")\n",
"DB_USERNAME = \"Administrator\"\n",
"DB_PASSWORD = \"P@ssword1!\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "22874017-6010-4a2f-899e-08904a3108ec",
"metadata": {},
"outputs": [],
"source": [
"from datetime import timedelta\n",
"\n",
"from couchbase.auth import PasswordAuthenticator\n",
"from couchbase.cluster import Cluster\n",
"from couchbase.options import ClusterOptions\n",
"\n",
"auth = PasswordAuthenticator(DB_USERNAME, DB_PASSWORD)\n",
"options = ClusterOptions(auth)\n",
"cluster = Cluster(COUCHBASE_CONNECTION_STRING, options)\n",
"\n",
"# Wait until the cluster is ready for use.\n",
"cluster.wait_until_ready(timedelta(seconds=5))"
]
},
{
"cell_type": "markdown",
"id": "5368c1c4-3f31-40e2-afee-5b6567445ff0",
"metadata": {},
"source": [
"### Creating the Search Index\n",
"Currently, the Search index needs to be created from the Couchbase Capella or Server UI or using the REST interface.\n",
"\n",
"Let us define a Search index with the name `vector-index` on the `testing` bucket\n",
"\n",
"For this example, let us use the Import Index feature on the Search Service on the UI.\n",
"\n",
"We are defining an index on the testing bucket’s `_default` scope on the `_default` collection with the vector field set to `embedding` with 1536 dimensions and the text field set to text. We are also indexing and storing all the fields under metadata in the document as a dynamic mapping to account for varying document structures. The similarity metric is set to `dot_product`.\n"
]
},
{
"cell_type": "markdown",
"id": "2b944c44-a891-455a-b6d9-9c5c44fdd397",
"metadata": {},
"source": [
"#### How to Import an Index to the Full Text Search service?\n",
"\n",
"- [Couchbase Server](https://docs.couchbase.com/server/current/search/import-search-index.html)\n",
" - Click on Search -> Add Index -> Import\n",
" - Copy the following Index definition in the Import screen\n",
" - Click on Create Index to create the index.\n",
"\n",
"\n",
"- [Couchbase Capella](https://docs.couchbase.com/cloud/search/import-search-index.html)\n",
" - Copy the index definition to a new file `index.json`\n",
" - Import the file in Capella using the instructions in the documentation.\n",
" - Click on Create Index to create the index."
]
},
{
"cell_type": "markdown",
"id": "6984c13c-149e-461d-9859-be50bce17bab",
"metadata": {},
"source": [
"#### Index Definition\n",
"```\n",
"{\n",
" \"name\": \"vector-index\",\n",
" \"type\": \"fulltext-index\",\n",
" \"params\": {\n",
" \"doc_config\": {\n",
" \"docid_prefix_delim\": \"\",\n",
" \"docid_regexp\": \"\",\n",
" \"mode\": \"type_field\",\n",
" \"type_field\": \"type\"\n",
" },\n",
" \"mapping\": {\n",
" \"default_analyzer\": \"standard\",\n",
" \"default_datetime_parser\": \"dateTimeOptional\",\n",
" \"default_field\": \"_all\",\n",
" \"default_mapping\": {\n",
" \"dynamic\": true,\n",
" \"enabled\": true,\n",
" \"properties\": {\n",
" \"metadata\": {\n",
" \"dynamic\": true,\n",
" \"enabled\": true\n",
" },\n",
" \"embedding\": {\n",
" \"enabled\": true,\n",
" \"dynamic\": false,\n",
" \"fields\": [\n",
" {\n",
" \"dims\": 1536,\n",
" \"index\": true,\n",
" \"name\": \"embedding\",\n",
" \"similarity\": \"dot_product\",\n",
" \"type\": \"vector\",\n",
" \"vector_index_optimized_for\": \"recall\"\n",
" }\n",
" ]\n",
" },\n",
" \"text\": {\n",
" \"enabled\": true,\n",
" \"dynamic\": false,\n",
" \"fields\": [\n",
" {\n",
" \"index\": true,\n",
" \"name\": \"text\",\n",
" \"store\": true,\n",
" \"type\": \"text\"\n",
" }\n",
" ]\n",
" }\n",
" }\n",
" },\n",
" \"default_type\": \"_default\",\n",
" \"docvalues_dynamic\": false,\n",
" \"index_dynamic\": true,\n",
" \"store_dynamic\": true,\n",
" \"type_field\": \"_type\"\n",
" },\n",
" \"store\": {\n", | |
165063 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Hybrid Search with Qdrant BM42\n",
"\n",
"Qdrant recently released a new lightweight approach to sparse embeddings, [BM42](https://qdrant.tech/articles/bm42/).\n",
"\n",
"In this notebook, we walk through how to use BM42 with llama-index, for efficient hybrid search."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"First, we need a few packages\n",
"- `llama-index`\n",
"- `llama-index-vector-stores-qdrant`\n",
"- `fastembed` or `fastembed-gpu`\n",
"\n",
"`llama-index` will automatically run fastembed models on GPU if the provided libraries are installed. Check out their [full installation guide](https://qdrant.github.io/fastembed/examples/FastEmbed_GPU/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index llama-index-vector-stores-qdrant fastembed"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## (Optional) Test the fastembed package\n",
"\n",
"To confirm the installation worked (and also to confirm GPU usage, if used), we can run the following code.\n",
"\n",
"This will first download (and cache) the model locally, and then embed it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "238632fe2708433c9cbc8fffd4ccbc05",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Fetching 6 files: 0%| | 0/6 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[613153351, 74040069] [0.3703993395381275, 0.3338314745830077]\n"
]
}
],
"source": [
"from fastembed import SparseTextEmbedding\n",
"\n",
"model = SparseTextEmbedding(\n",
" model_name=\"Qdrant/bm42-all-minilm-l6-v2-attentions\",\n",
" # if using fastembed-gpu with cuda+onnx installed\n",
" # providers=[\"CudaExecutionProvider\"],\n",
")\n",
"\n",
"embeddings = model.embed([\"hello world\", \"goodbye world\"])\n",
"\n",
"indices, values = zip(\n",
" *[\n",
" (embedding.indices.tolist(), embedding.values.tolist())\n",
" for embedding in embeddings\n",
" ]\n",
")\n",
"\n",
"print(indices[0], values[0])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Construct our Hybrid Index\n",
"\n",
"In llama-index, we can construct a hybrid index in just a few lines of code.\n",
"\n",
"If you've tried hybrid in the past with splade, you will notice that this is much faster!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Loading Data\n",
"\n",
"Here, we use `llama-parse` to read in the Llama2 paper! Using the JSON result mode, we can get detailed data about each page, including layout and images. For now, we will use the page numbers and text.\n",
"\n",
"You can get a free api key for `llama-parse` by visiting [https://cloud.llamaindex.ai](https://cloud.llamaindex.ai)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!mkdir -p 'data/'\n",
"!wget --user-agent \"Mozilla\" \"https://arxiv.org/pdf/2307.09288.pdf\" -O \"data/llama2.pdf\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Started parsing the file under job_id cac11eca-4058-4a89-a94a-5603dea3d851\n"
]
}
],
"source": [
"from llama_parse import LlamaParse\n",
"from llama_index.core import Document\n",
"\n",
"parser = LlamaParse(result_type=\"text\", api_key=\"llx-...\")\n",
"\n",
"# get per-page results, along with detailed layout info and metadata\n",
"json_data = parser.get_json_result(\"data/llama2.pdf\")\n",
"\n",
"documents = []\n",
"for document_json in json_data:\n",
" for page in document_json[\"pages\"]:\n",
" documents.append(\n",
" Document(text=page[\"text\"], metadata={\"page_number\": page[\"page\"]})\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Construct the Index with Qdrant\n",
"\n",
"With our nodes, we can construct our index with Qdrant and BM42!\n",
"\n",
"In this case, Qdrant is being hosted in a docker container.\n",
"\n",
"You can pull the latest:\n",
"\n",
"```\n",
"docker pull qdrant/qdrant\n",
"```\n",
"\n",
"And then to launch:\n",
"\n",
"```\n",
"docker run -p 6333:6333 -p 6334:6334 \\\n",
" -v $(pwd)/qdrant_storage:/qdrant/storage:z \\\n",
" qdrant/qdrant\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Both client and aclient are provided. If using `:memory:` mode, the data between clients is not synced.\n"
]
}
],
"source": [
"import qdrant_client\n",
"from llama_index.vector_stores.qdrant import QdrantVectorStore\n",
"\n",
"client = qdrant_client.QdrantClient(\"http://localhost:6333\")\n",
"aclient = qdrant_client.AsyncQdrantClient(\"http://localhost:6333\")\n",
"\n",
"# delete collection if it exists\n",
"if client.collection_exists(\"llama2_bm42\"):\n",
" client.delete_collection(\"llama2_bm42\")\n",
"\n",
"vector_store = QdrantVectorStore(\n",
" collection_name=\"llama2_bm42\",\n",
" client=client,\n",
" aclient=aclient,\n",
" fastembed_sparse_model=\"Qdrant/bm42-all-minilm-l6-v2-attentions\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex, StorageContext\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"index = VectorStoreIndex.from_documents(\n",
" documents,\n",
" # our dense embedding model\n", | |
165064 | " embed_model=OpenAIEmbedding(\n",
" model_name=\"text-embedding-3-small\", api_key=\"sk-proj-...\"\n",
" ),\n",
" storage_context=storage_context,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As we can see, both the dense and sparse embeddings were generated super quickly!\n",
"\n",
"Even though the sparse model is running locally on CPU, its very small and fast."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test out the Index\n",
"\n",
"Using the powers of sparse embeddings, we can query for some very specific facts, and get the correct data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.llms.openai import OpenAI\n",
"\n",
"chat_engine = index.as_chat_engine(\n",
" chat_mode=\"condense_plus_context\",\n",
" llm=OpenAI(model=\"gpt-4o\", api_key=\"sk-proj-...\"),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The training hardware for Llama 2 included Meta’s Research Super Cluster (RSC) and internal production clusters. Both clusters utilized NVIDIA A100 GPUs. There were two key differences between these clusters:\n",
"\n",
"1. **Interconnect Type**:\n",
" - RSC used NVIDIA Quantum InfiniBand.\n",
" - The internal production cluster used a RoCE (RDMA over Converged Ethernet) solution based on commodity Ethernet switches.\n",
"\n",
"2. **Per-GPU Power Consumption Cap**:\n",
" - RSC had a power consumption cap of 400W per GPU.\n",
" - The internal production cluster had a power consumption cap of 350W per GPU.\n",
"\n",
"This setup allowed for a comparison of the suitability of these different types of interconnects for large-scale training.\n"
]
}
],
"source": [
"response = chat_engine.chat(\"What training hardware was used for Llama2?\")\n",
"print(str(response))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The main idea of Llama 2 is to provide an updated and improved version of the original Llama model, designed to be more efficient, scalable, and safe for various applications, including research and commercial use. Here are the key aspects of Llama 2:\n",
"\n",
"1. **Enhanced Pretraining**: Llama 2 is trained on a new mix of publicly available data, with a 40% increase in the size of the pretraining corpus compared to Llama 1. This aims to improve the model's performance and knowledge base.\n",
"\n",
"2. **Improved Architecture**: The model incorporates several architectural enhancements, such as increased context length and grouped-query attention (GQA), to improve inference scalability and overall performance.\n",
"\n",
"3. **Safety and Responsiveness**: Llama 2-Chat, a fine-tuned version of Llama 2, is optimized for dialogue use cases. It undergoes supervised fine-tuning and iterative refinement using Reinforcement Learning with Human Feedback (RLHF) to ensure safer and more helpful interactions.\n",
"\n",
"4. **Open Release**: Meta is releasing Llama 2 models with 7B, 13B, and 70B parameters to the general public for research and commercial use, promoting transparency and collaboration in the AI community.\n",
"\n",
"5. **Responsible Use**: The release includes guidelines and code examples to facilitate the safe deployment of Llama 2 and Llama 2-Chat, emphasizing the importance of safety testing and tuning tailored to specific applications.\n",
"\n",
"Overall, Llama 2 aims to be a more robust, scalable, and safer large language model that can be widely used and further developed by the AI community.\n"
]
}
],
"source": [
"response = chat_engine.chat(\"What is the main idea of Llama2?\")\n",
"print(str(response))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Llama 2 was evaluated and compared against several other models, both open-source and closed-source, across a variety of benchmarks. Here are the key comparisons:\n",
"\n",
"### Open-Source Models:\n",
"1. **Llama 1**: Llama 2 models were compared to their predecessors, Llama 1 models. For example, Llama 2 70B showed improvements of approximately 5 points on MMLU and 8 points on BBH compared to Llama 1 65B.\n",
"2. **MPT Models**: Llama 2 7B and 30B models outperformed MPT models of corresponding sizes in all categories except code benchmarks.\n",
"3. **Falcon Models**: Llama 2 7B and 34B models outperformed Falcon 7B and 40B models across all benchmark categories.\n",
"\n",
"### Closed-Source Models:\n",
"1. **GPT-3.5**: Llama 2 70B was compared to GPT-3.5, showing close performance on MMLU and GSM8K but a significant gap on coding benchmarks.\n",
"2. **PaLM (540B)**: Llama 2 70B performed on par or better than PaLM (540B) on almost all benchmarks.\n",
"3. **GPT-4 and PaLM-2-L**: There remains a large performance gap between Llama 2 70B and these more advanced models.\n",
"\n",
"### Benchmarks:\n",
"Llama 2 was evaluated on a variety of benchmarks, including:\n",
"1. **MMLU (Massive Multitask Language Understanding)**: Evaluated in a 5-shot setting.\n",
"2. **BBH (Big Bench Hard)**: Evaluated in a 3-shot setting.\n",
"3. **AGI Eval**: Evaluated in 3-5 shot settings, focusing on English tasks.\n",
"4. **GSM8K**: For math problem-solving.\n",
"5. **Human-Eval and MBPP**: For code generation.\n",
"6. **NaturalQuestions and TriviaQA**: For world knowledge.\n",
"7. **SQUAD and QUAC**: For reading comprehension.\n",
"8. **BoolQ, PIQA, SIQA, Hella-Swag, ARC-e, ARC-c, NQ, TQA**: Various other benchmarks for different aspects of language understanding and reasoning.\n",
"\n",
"These evaluations demonstrate that Llama 2 models generally outperform their predecessors and other open-source models, while also being competitive with some of the leading closed-source models.\n"
]
}
],
"source": [
"response = chat_engine.chat(\"What was Llama2 evaluated and compared against?\")\n",
"print(str(response))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Loading from existing store\n",
"\n",
"With your vector index created, we can easily connect back to it!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import qdrant_client\n",
"from llama_index.core import VectorStoreIndex\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.vector_stores.qdrant import QdrantVectorStore\n",
"\n",
"client = qdrant_client.QdrantClient(\"http://localhost:6333\")\n",
"aclient = qdrant_client.AsyncQdrantClient(\"http://localhost:6333\")\n",
"\n",
"# delete collection if it exists\n",
"if client.collection_exists(\"llama2_bm42\"):\n",
" client.delete_collection(\"llama2_bm42\")\n",
"\n",
"vector_store = QdrantVectorStore(\n", | |
165184 | "chroma_client = chromadb.EphemeralClient()\n",
"chroma_collection = chroma_client.create_collection(\"quickstart\")\n",
"\n",
"# define embedding function\n",
"embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-base-en-v1.5\")\n",
"\n",
"# load documents\n",
"documents = SimpleDirectoryReader(\"./data/paul_graham/\").load_data()\n",
"\n",
"# set up ChromaVectorStore and load in data\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"index = VectorStoreIndex.from_documents(\n",
" documents, storage_context=storage_context, embed_model=embed_model\n",
")\n",
"\n",
"# Query Data\n",
"query_engine = index.as_query_engine()\n",
"response = query_engine.query(\"What did the author do growing up?\")\n",
"display(Markdown(f\"<b>{response}</b>\"))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "349de571",
"metadata": {},
"source": [
"## Basic Example (including saving to disk)\n",
"\n",
"Extending the previous example, if you want to save to disk, simply initialize the Chroma client and pass the directory where you want the data to be saved to. \n",
"\n",
"`Caution`: Chroma makes a best-effort to automatically save data to disk, however multiple in-memory clients can stomp each other's work. As a best practice, only have one client per path running at any given time."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9c3a56a5",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"<b>The author worked on writing and programming growing up. They wrote short stories and tried writing programs on an IBM 1401 computer. Later, they got a microcomputer and started programming games and a word processor.</b>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# save to disk\n",
"\n",
"db = chromadb.PersistentClient(path=\"./chroma_db\")\n",
"chroma_collection = db.get_or_create_collection(\"quickstart\")\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"\n",
"index = VectorStoreIndex.from_documents(\n",
" documents, storage_context=storage_context, embed_model=embed_model\n",
")\n",
"\n",
"# load from disk\n",
"db2 = chromadb.PersistentClient(path=\"./chroma_db\")\n",
"chroma_collection = db2.get_or_create_collection(\"quickstart\")\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
"index = VectorStoreIndex.from_vector_store(\n",
" vector_store,\n",
" embed_model=embed_model,\n",
")\n",
"\n",
"# Query Data from the persisted index\n",
"query_engine = index.as_query_engine()\n",
"response = query_engine.query(\"What did the author do growing up?\")\n",
"display(Markdown(f\"<b>{response}</b>\"))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "d596e475",
"metadata": {},
"source": [
"## Basic Example (using the Docker Container)\n",
"\n",
"You can also run the Chroma Server in a Docker container separately, create a Client to connect to it, and then pass that to LlamaIndex. \n",
"\n",
"Here is how to clone, build, and run the Docker Image:\n",
"```\n",
"git clone git@github.com:chroma-core/chroma.git\n",
"docker-compose up -d --build\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d6c9bd64",
"metadata": {},
"outputs": [],
"source": [
"# create the chroma client and add our data\n",
"import chromadb\n",
"\n",
"remote_db = chromadb.HttpClient()\n",
"chroma_collection = remote_db.get_or_create_collection(\"quickstart\")\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"\n",
"index = VectorStoreIndex.from_documents(\n",
" documents, storage_context=storage_context, embed_model=embed_model\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "88e10c26",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"<b>\n",
"Growing up, the author wrote short stories, programmed on an IBM 1401, and wrote programs on a TRS-80 microcomputer. He also took painting classes at Harvard and worked as a de facto studio assistant for a painter. He also tried to start a company to put art galleries online, and wrote software to build online stores.</b>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Query Data from the Chroma Docker index\n",
"query_engine = index.as_query_engine()\n",
"response = query_engine.query(\"What did the author do growing up?\")\n",
"display(Markdown(f\"<b>{response}</b>\"))"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "0a0e79f7",
"metadata": {},
"source": [
"## Update and Delete\n",
"\n",
"While building toward a real application, you want to go beyond adding data, and also update and delete data. \n",
"\n",
"Chroma has users provide `ids` to simplify the bookkeeping here. `ids` can be the name of the file, or a combined hash like `filename_paragraphNumber`, etc.\n",
"\n",
"Here is a basic example showing how to do various operations:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d9411826",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [ | |
165194 | {
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "1496f9de",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/vector_stores/MilvusIndexDemo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "0b692c73",
"metadata": {},
"source": [
"# Milvus Vector Store\n",
"\n",
"This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using LlamaIndex and Milvus.\n",
"\n",
"The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using a vector similarity search engine like Milvus, and then uses a generative model to generate new text based on the retrieved documents.\n",
"\n",
"[Milvus](https://milvus.io/) is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.\n",
"\n",
"In this notebook we are going to show a quick demo of using the MilvusVectorStore."
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f81e2c81",
"metadata": {},
"source": [
"## Before you begin\n",
"\n",
"### Install dependencies"
]
},
{
"cell_type": "markdown",
"id": "0d0e46d8",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3e0c18ca",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-vector-stores-milvus"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b80700a",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index"
]
},
{
"cell_type": "markdown",
"id": "eab0d1a3",
"metadata": {},
"source": [
"This notebook will use [Milvus Lite](https://milvus.io/docs/milvus_lite.md) requiring a higher version of pymilvus:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7661b098",
"metadata": {},
"outputs": [],
"source": [
"%pip install pymilvus>=2.4.2"
]
},
{
"cell_type": "markdown",
"id": "70cc8c56",
"metadata": {},
"source": [
"> If you are using Google Colab, to enable dependencies just installed, you may need to **restart the runtime** (click on the \"Runtime\" menu at the top of the screen, and select \"Restart session\" from the dropdown menu)."
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f9b97a89",
"metadata": {},
"source": [
"### Setup OpenAI\n",
"\n",
"Let's first begin by adding the OpenAI API key. This will allow us to access ChatGPT."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0c9f4d21-145a-401e-95ff-ccb259e8ef84",
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"\n",
"openai.api_key = \"sk-***********\""
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "a3d4e638",
"metadata": {},
"source": [
"### Prepare data\n",
"\n",
"You can download sample data with the following commands:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2a2e24d1",
"metadata": {},
"outputs": [],
"source": [
"! mkdir -p 'data/'\n",
"! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham_essay.txt'\n",
"! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/uber_2021.pdf' -O 'data/uber_2021.pdf'"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "59ff935d",
"metadata": {},
"source": [
"## Getting Started\n",
"\n",
"### Generate our data\n",
"As a first example, let's generate a document from the file `paul_graham_essay.txt`. It is a single essay from Paul Graham titled `What I Worked On`. To generate the documents we will use the SimpleDirectoryReader."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "68cbd239-880e-41a3-98d8-dbb3fab55431",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document ID: 95f25e4d-f270-4650-87ce-006d69d82033\n"
]
}
],
"source": [
"from llama_index.core import SimpleDirectoryReader\n",
"\n",
"# load documents\n",
"documents = SimpleDirectoryReader(\n",
" input_files=[\"./data/paul_graham_essay.txt\"]\n",
").load_data()\n",
"\n",
"print(\"Document ID:\", documents[0].doc_id)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "dd270925",
"metadata": {},
"source": [
"### Create an index across the data\n",
"\n",
"Now that we have a document, we can create an index and insert the document.\n",
"\n",
"> Please note that **Milvus Lite** requires `pymilvus>=2.4.2`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba1558b3",
"metadata": {},
"outputs": [],
"source": [
"# Create an index over the documents\n",
"from llama_index.core import VectorStoreIndex, StorageContext\n",
"from llama_index.vector_stores.milvus import MilvusVectorStore\n",
"\n",
"\n",
"vector_store = MilvusVectorStore(\n",
" uri=\"./milvus_demo.db\", dim=1536, overwrite=True\n",
")\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"index = VectorStoreIndex.from_documents(\n",
" documents, storage_context=storage_context\n",
")"
]
},
{
"cell_type": "markdown",
"id": "a75a5773",
"metadata": {},
"source": [
"> For the parameters of `MilvusVectorStore`:\n",
"> - Setting the `uri` as a local file, e.g.`./milvus.db`, is the most convenient method, as it automatically utilizes [Milvus Lite](https://milvus.io/docs/milvus_lite.md) to store all data in this file.\n",
"> - If you have large scale of data, you can set up a more performant Milvus server on [docker or kubernetes](https://milvus.io/docs/quickstart.md). In this setup, please use the server uri, e.g.`http://localhost:19530`, as your `uri`.\n", | |
165197 | {
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/vector_stores/AzureAISearchIndexDemo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure AI Search"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Basic Example\n",
"\n",
"In this notebook, we take a Paul Graham essay, split it into chunks, embed it using an Azure OpenAI embedding model, load it into an Azure AI Search index, and then query it."
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index\n",
"!pip install wget\n",
"%pip install llama-index-vector-stores-azureaisearch\n",
"%pip install azure-search-documents==11.5.1\n",
"%pip install llama-index-embeddings-azure-openai\n",
"%pip install llama-index-llms-azure-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import sys\n",
"from azure.core.credentials import AzureKeyCredential\n",
"from azure.search.documents import SearchClient\n",
"from azure.search.documents.indexes import SearchIndexClient\n",
"from IPython.display import Markdown, display\n",
"from llama_index.core import (\n",
" SimpleDirectoryReader,\n",
" StorageContext,\n",
" VectorStoreIndex,\n",
")\n",
"from llama_index.core.settings import Settings\n",
"from llama_index.llms.azure_openai import AzureOpenAI\n",
"from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n",
"from llama_index.vector_stores.azureaisearch import AzureAISearchVectorStore\n",
"from llama_index.vector_stores.azureaisearch import (\n",
" IndexManagement,\n",
" MetadataIndexFieldType,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup Azure OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aoai_api_key = \"YOUR_AZURE_OPENAI_API_KEY\"\n",
"aoai_endpoint = \"YOUR_AZURE_OPENAI_ENDPOINT\"\n",
"aoai_api_version = \"2024-02-01\"\n",
"\n",
"llm = AzureOpenAI(\n",
" model=\"YOUR_AZURE_OPENAI_COMPLETION_MODEL_NAME\",\n",
" deployment_name=\"YOUR_AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME\",\n",
" api_key=aoai_api_key,\n",
" azure_endpoint=aoai_endpoint,\n",
" api_version=aoai_api_version,\n",
")\n",
"\n",
"# You need to deploy your own embedding model as well as your own chat completion model\n",
"embed_model = AzureOpenAIEmbedding(\n",
" model=\"YOUR_AZURE_OPENAI_EMBEDDING_MODEL_NAME\",\n",
" deployment_name=\"YOUR_AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME\",\n",
" api_key=aoai_api_key,\n",
" azure_endpoint=aoai_endpoint,\n",
" api_version=aoai_api_version,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup Azure AI Search"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"search_service_api_key = \"YOUR-AZURE-SEARCH-SERVICE-ADMIN-KEY\"\n",
"search_service_endpoint = \"YOUR-AZURE-SEARCH-SERVICE-ENDPOINT\"\n",
"search_service_api_version = \"2024-07-01\"\n",
"credential = AzureKeyCredential(search_service_api_key)\n",
"\n",
"\n",
"# Index name to use\n",
"index_name = \"llamaindex-vector-demo\"\n",
"\n",
"# Use index client to demonstrate creating an index\n",
"index_client = SearchIndexClient(\n",
" endpoint=search_service_endpoint,\n",
" credential=credential,\n",
")\n",
"\n",
"# Use search client to demonstrate using an existing index\n",
"search_client = SearchClient(\n",
" endpoint=search_service_endpoint,\n",
" index_name=index_name,\n",
" credential=credential,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create Index (if it does not exist)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Demonstrates creating a vector index named \"llamaindex-vector-demo\" if one doesn't exist. The index has the following fields:\n",
"| Field Name | OData Type | \n",
"|------------|---------------------------| \n",
"| id | `Edm.String` | \n",
"| chunk | `Edm.String` | \n",
"| embedding | `Collection(Edm.Single)` | \n",
"| metadata | `Edm.String` | \n",
"| doc_id | `Edm.String` | \n",
"| author | `Edm.String` | \n",
"| theme | `Edm.String` | \n",
"| director | `Edm.String` | "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metadata_fields = {\n",
" \"author\": \"author\",\n",
" \"theme\": (\"topic\", MetadataIndexFieldType.STRING),\n",
" \"director\": \"director\",\n",
"}\n",
"\n",
"vector_store = AzureAISearchVectorStore(\n",
" search_or_index_client=index_client,\n",
" filterable_metadata_field_keys=metadata_fields,\n",
" index_name=index_name,\n",
" index_management=IndexManagement.CREATE_IF_NOT_EXISTS,\n",
" id_field_key=\"id\",\n",
" chunk_field_key=\"chunk\",\n",
" embedding_field_key=\"embedding\",\n",
" embedding_dimensionality=1536,\n",
" metadata_string_field_key=\"metadata\",\n",
" doc_id_field_key=\"doc_id\",\n",
" language_analyzer=\"en.lucene\",\n",
" vector_algorithm_type=\"exhaustiveKnn\",\n",
" # compression_type=\"binary\" # Option to use \"scalar\" or \"binary\". NOTE: compression is only supported for HNSW\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Loading documents\n",
"Load the documents stored in the `data/paul_graham/` using the SimpleDirectoryReader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load documents\n",
"documents = SimpleDirectoryReader(\"./data/paul_graham/\").load_data()\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"\n",
"Settings.llm = llm\n", | |
165270 | {
"cells": [
{
"cell_type": "markdown",
"id": "0d6aba5a-abde-4e41-8a54-ef7e0de9e8a3",
"metadata": {},
"source": [
"# Query Pipeline over Pandas DataFrames\n",
"\n",
"This is a simple example that builds a query pipeline that can perform structured operations over a Pandas DataFrame to satisfy a user query, using LLMs to infer the set of operations.\n",
"\n",
"This can be treated as the \"from-scratch\" version of our `PandasQueryEngine`.\n",
"\n",
"WARNING: This tool provides the LLM access to the `eval` function.\n",
"Arbitrary code execution is possible on the machine running this tool.\n",
"This tool is not recommended to be used in a production setting, and would\n",
"require heavy sandboxing or virtual machines."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "beea049c",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-openai llama-index-experimental"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e16e7f46-6a3f-4390-b149-51d49a255d54",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.query_pipeline import (\n",
" QueryPipeline as QP,\n",
" Link,\n",
" InputComponent,\n",
")\n",
"from llama_index.experimental.query_engine.pandas import (\n",
" PandasInstructionParser,\n",
")\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.core import PromptTemplate"
]
},
{
"cell_type": "markdown",
"id": "844f1db7-8eaa-4fb2-91f4-a363db66bd5b",
"metadata": {},
"source": [
"## Download Data\n",
"\n",
"Here we load the Titanic CSV dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "31c3d6f4-ec01-4992-8dbf-696b6feeda5f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--2024-01-13 18:39:07-- https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/docs/examples/data/csv/titanic_train.csv\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 2606:50c0:8003::154, 2606:50c0:8001::154, 2606:50c0:8002::154, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|2606:50c0:8003::154|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 57726 (56K) [text/plain]\n",
"Saving to: ‘titanic_train.csv’\n",
"\n",
"titanic_train.csv 100%[===================>] 56.37K --.-KB/s in 0.007s \n",
"\n",
"2024-01-13 18:39:07 (7.93 MB/s) - ‘titanic_train.csv’ saved [57726/57726]\n",
"\n"
]
}
],
"source": [
"!wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b2f94d45-607c-4f04-a05c-b66ecedfe233",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"df = pd.read_csv(\"./titanic_train.csv\")"
]
},
{
"cell_type": "markdown",
"id": "b42ccfe1-fd13-4356-b5b1-c9eabac5f391",
"metadata": {},
"source": [
"## Define Modules\n",
"\n",
"Here we define the set of modules: \n",
"1. Pandas prompt to infer pandas instructions from user query\n",
"2. Pandas output parser to execute pandas instructions on dataframe, get back dataframe\n",
"3. Response synthesis prompt to synthesize a final response given the dataframe\n",
"4. LLM\n",
"\n",
"The pandas output parser specifically is designed to safely execute Python code. It includes a lot of safety checks that may be annoying to write from scratch. This includes only importing from a set of approved modules (e.g. no modules that would alter the file system like `os`), and also making sure that no private/dunder methods are being called."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d47ac723-e1fc-47c2-9736-19deb50d87ae",
"metadata": {},
"outputs": [],
"source": [
"instruction_str = (\n",
" \"1. Convert the query to executable Python code using Pandas.\\n\"\n",
" \"2. The final line of code should be a Python expression that can be called with the `eval()` function.\\n\"\n",
" \"3. The code should represent a solution to the query.\\n\"\n",
" \"4. PRINT ONLY THE EXPRESSION.\\n\"\n",
" \"5. Do not quote the expression.\\n\"\n",
")\n",
"\n",
"pandas_prompt_str = (\n",
" \"You are working with a pandas dataframe in Python.\\n\"\n",
" \"The name of the dataframe is `df`.\\n\"\n",
" \"This is the result of `print(df.head())`:\\n\"\n",
" \"{df_str}\\n\\n\"\n",
" \"Follow these instructions:\\n\"\n",
" \"{instruction_str}\\n\"\n",
" \"Query: {query_str}\\n\\n\"\n",
" \"Expression:\"\n",
")\n",
"response_synthesis_prompt_str = (\n",
" \"Given an input question, synthesize a response from the query results.\\n\"\n",
" \"Query: {query_str}\\n\\n\"\n",
" \"Pandas Instructions (optional):\\n{pandas_instructions}\\n\\n\"\n",
" \"Pandas Output: {pandas_output}\\n\\n\"\n",
" \"Response: \"\n",
")\n",
"\n",
"pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format(\n",
" instruction_str=instruction_str, df_str=df.head(5)\n",
")\n",
"pandas_output_parser = PandasInstructionParser(df)\n",
"response_synthesis_prompt = PromptTemplate(response_synthesis_prompt_str)\n",
"llm = OpenAI(model=\"gpt-3.5-turbo\")"
]
},
{
"cell_type": "markdown",
"id": "1b7907f6-2926-4c83-9541-d089d0b0af88",
"metadata": {},
"source": [
"## Build Query Pipeline\n",
"\n",
"Looks like this:\n",
"input query_str -> pandas_prompt -> llm1 -> pandas_output_parser -> response_synthesis_prompt -> llm2\n",
"\n",
"Additional connections to response_synthesis_prompt: llm1 -> pandas_instructions, and pandas_output_parser -> pandas_output."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3544a6a8-3866-4806-8108-d908bd5d17e6",
"metadata": {},
"outputs": [],
"source": [
"qp = QP(\n",
" modules={\n",
" \"input\": InputComponent(),\n",
" \"pandas_prompt\": pandas_prompt,\n",
" \"llm1\": llm,\n",
" \"pandas_output_parser\": pandas_output_parser,\n",
" \"response_synthesis_prompt\": response_synthesis_prompt,\n", | |
165280 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Query Pipeline Chat Engine\n",
"\n",
"By combining a query pipeline with a memory buffer, we can design our own custom chat engine loop."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-core\n",
"%pip install llama-index-llms-openai\n",
"%pip install llama-index-embeddings-openai\n",
"%pip install llama-index-postprocessor-colbert-rerank\n",
"%pip install llama-index-readers-web"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-...\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Index Construction\n",
"\n",
"As a test, we will index Anthropic's latest documentation about tool/function calling."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.readers.web import BeautifulSoupWebReader\n",
"\n",
"reader = BeautifulSoupWebReader()\n",
"\n",
"documents = reader.load_data(\n",
" [\"https://docs.anthropic.com/claude/docs/tool-use\"]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "If you inspected the document text, you'd notice that there are way too many blank lines, let's clean that up a bit."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"lines = documents[0].text.split(\"\\n\")\n",
"\n",
"# remove sections with more than two empty lines in a row\n",
"fixed_lines = [lines[0]]\n",
"for idx in range(1, len(lines)):\n",
" if lines[idx].strip() == \"\" and lines[idx - 1].strip() == \"\":\n",
" continue\n",
" fixed_lines.append(lines[idx])\n",
"\n",
"documents[0].text = \"\\n\".join(fixed_lines)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, we can create our index using OpenAI embeddings."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"\n",
"index = VectorStoreIndex.from_documents(\n",
" documents,\n",
" embed_model=OpenAIEmbedding(\n",
" model=\"text-embedding-3-large\", embed_batch_size=256\n",
" ),\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "## Query Pipeline Construction\n",
"\n",
    "As a demonstration, let's make a robust query pipeline with HyDE for retrieval and Colbert for reranking."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.query_pipeline import (\n",
" QueryPipeline,\n",
" InputComponent,\n",
" ArgPackComponent,\n",
")\n",
"from llama_index.core.prompts import PromptTemplate\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.postprocessor.colbert_rerank import ColbertRerank\n",
"\n",
"# First, we create an input component to capture the user query\n",
"input_component = InputComponent()\n",
"\n",
"# Next, we use the LLM to rewrite a user query\n",
"rewrite = (\n",
" \"Please write a query to a semantic search engine using the current conversation.\\n\"\n",
" \"\\n\"\n",
" \"\\n\"\n",
" \"{chat_history_str}\"\n",
" \"\\n\"\n",
" \"\\n\"\n",
" \"Latest message: {query_str}\\n\"\n",
" 'Query:\"\"\"\\n'\n",
")\n",
"rewrite_template = PromptTemplate(rewrite)\n",
"llm = OpenAI(\n",
" model=\"gpt-4-turbo-preview\",\n",
" temperature=0.2,\n",
")\n",
"\n",
"# we will retrieve two times, so we need to pack the retrieved nodes into a single list\n",
"argpack_component = ArgPackComponent()\n",
"\n",
"# using that, we will retrieve...\n",
"retriever = index.as_retriever(similarity_top_k=6)\n",
"\n",
"# then postprocess/rerank with Colbert\n",
"reranker = ColbertRerank(top_n=3)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "For generating a response using chat history + retrieved nodes, let's create a custom component."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# then lastly, we need to create a response using the nodes AND chat history\n",
"from typing import Any, Dict, List, Optional\n",
"from llama_index.core.bridge.pydantic import Field\n",
"from llama_index.core.llms import ChatMessage\n",
"from llama_index.core.query_pipeline import CustomQueryComponent\n",
"from llama_index.core.schema import NodeWithScore\n",
"\n",
"DEFAULT_CONTEXT_PROMPT = (\n",
" \"Here is some context that may be relevant:\\n\"\n",
" \"-----\\n\"\n",
" \"{node_context}\\n\"\n",
" \"-----\\n\"\n",
" \"Please write a response to the following question, using the above context:\\n\"\n",
" \"{query_str}\\n\"\n",
")\n",
"\n",
"\n",
"class ResponseWithChatHistory(CustomQueryComponent):\n",
" llm: OpenAI = Field(..., description=\"OpenAI LLM\")\n",
" system_prompt: Optional[str] = Field(\n",
" default=None, description=\"System prompt to use for the LLM\"\n",
" )\n",
" context_prompt: str = Field(\n",
" default=DEFAULT_CONTEXT_PROMPT,\n",
" description=\"Context prompt to use for the LLM\",\n",
" )\n",
"\n",
" def _validate_component_inputs(\n",
" self, input: Dict[str, Any]\n",
" ) -> Dict[str, Any]:\n",
" \"\"\"Validate component inputs during run_component.\"\"\"\n",
" # NOTE: this is OPTIONAL but we show you where to do validation as an example\n",
" return input\n",
"\n",
" @property\n",
" def _input_keys(self) -> set:\n",
" \"\"\"Input keys dict.\"\"\"\n",
" # NOTE: These are required inputs. If you have optional inputs please override\n",
" # `optional_input_keys_dict`\n",
" return {\"chat_history\", \"nodes\", \"query_str\"}\n",
"\n",
" @property\n",
" def _output_keys(self) -> set:\n",
" return {\"response\"}\n",
"\n",
" def _prepare_context(\n",
" self,\n",
" chat_history: List[ChatMessage],\n",
" nodes: List[NodeWithScore],\n",
" query_str: str,\n",
" ) -> List[ChatMessage]:\n",
" node_context = \"\"\n",
" for idx, node in enumerate(nodes):\n",
" node_text = node.get_content(metadata_mode=\"llm\")\n",
" node_context += f\"Context Chunk {idx}:\\n{node_text}\\n\\n\"\n",
"\n",
" formatted_context = self.context_prompt.format(\n", | |
165283 | {
"cells": [
{
"cell_type": "markdown",
"id": "cd032bcb-fefb-48ec-94da-08d49ac26120",
"metadata": {},
"source": [
"# Query Pipeline with Async/Parallel Execution\n",
"\n",
"Here we showcase our query pipeline with async + parallel execution.\n",
"\n",
"We do this by setting up a RAG pipeline that does the following:\n",
"1. Send query to multiple RAG query engines.\n",
"2. Combine results.\n",
"\n",
"In the process we'll also show some nice abstractions for joining results (e.g. our `ArgPackComponent()`)"
]
},
{
"cell_type": "markdown",
"id": "3531eedc-4f65-457e-8844-55fcc1773154",
"metadata": {},
"source": [
"## Load Data\n",
"\n",
"Load in the Paul Graham essay as an example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "462a4f6f",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2a441905-9007-44d6-b71a-6fc3e5023e49",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--2024-01-10 12:31:00-- https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.110.133, 185.199.108.133, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 75042 (73K) [text/plain]\n",
"Saving to: ‘pg_essay.txt’\n",
"\n",
"pg_essay.txt 100%[===================>] 73.28K --.-KB/s in 0.01s \n",
"\n",
"2024-01-10 12:31:00 (6.32 MB/s) - ‘pg_essay.txt’ saved [75042/75042]\n",
"\n"
]
}
],
"source": [
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3533149c-4312-4444-9b45-52afe21731ed",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import SimpleDirectoryReader\n",
"\n",
"reader = SimpleDirectoryReader(input_files=[\"pg_essay.txt\"])\n",
"documents = reader.load_data()"
]
},
{
"cell_type": "markdown",
"id": "6c1d5ff8-ae04-4ea3-bbe0-2c097af71efd",
"metadata": {},
"source": [
"## Setup Query Pipeline\n",
"\n",
"We setup a parallel query pipeline that executes multiple chunk sizes at once, and combines the results."
]
},
{
"cell_type": "markdown",
"id": "63caf998-0a88-4c50-b6a4-2a0c412bde5b",
"metadata": {},
"source": [
"### Define Modules\n",
"\n",
"This includes:\n",
"- LLM\n",
"- Chunk Sizes\n",
"- Query Engines"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01fcbdb2-6747-4e65-b1ce-5d40febccb81",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.query_pipeline import (\n",
" QueryPipeline,\n",
" InputComponent,\n",
" ArgPackComponent,\n",
")\n",
"from typing import Dict, Any, List, Optional\n",
"from llama_index.core.llama_pack import BaseLlamaPack\n",
"from llama_index.core.llms import LLM\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.core import Document, VectorStoreIndex\n",
"from llama_index.core.response_synthesizers import TreeSummarize\n",
"from llama_index.core.schema import NodeWithScore, TextNode\n",
"from llama_index.core.node_parser import SentenceSplitter\n",
"\n",
"\n",
"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
"chunk_sizes = [128, 256, 512, 1024]\n",
"query_engines = {}\n",
"for chunk_size in chunk_sizes:\n",
" splitter = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=0)\n",
" nodes = splitter.get_nodes_from_documents(documents)\n",
" vector_index = VectorStoreIndex(nodes)\n",
" query_engines[str(chunk_size)] = vector_index.as_query_engine(llm=llm)"
]
},
{
"cell_type": "markdown",
"id": "7a87a439-88e6-4130-b28f-45268330d3e4",
"metadata": {},
"source": [
"### Construct Query Pipeline\n",
"\n",
"Connect input to multiple query engines, and join the results."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ff95be2e-517f-4632-a7b8-a2e0dec11d73",
"metadata": {},
"outputs": [],
"source": [
"# construct query pipeline\n",
"p = QueryPipeline(verbose=True)\n",
"module_dict = {\n",
" **query_engines,\n",
" \"input\": InputComponent(),\n",
" \"summarizer\": TreeSummarize(),\n",
" \"join\": ArgPackComponent(\n",
" convert_fn=lambda x: NodeWithScore(node=TextNode(text=str(x)))\n",
" ),\n",
"}\n",
"p.add_modules(module_dict)\n",
"# add links from input to query engine (id'ed by chunk_size)\n",
"for chunk_size in chunk_sizes:\n",
" p.add_link(\"input\", str(chunk_size))\n",
" p.add_link(str(chunk_size), \"join\", dest_key=str(chunk_size))\n",
"p.add_link(\"join\", \"summarizer\", dest_key=\"nodes\")\n",
"p.add_link(\"input\", \"summarizer\", dest_key=\"query_str\")"
]
},
{
"cell_type": "markdown",
"id": "bda05274-09c5-4b56-b2ba-57f445346e73",
"metadata": {},
"source": [
"## Try out Queries\n",
"\n",
"Let's compare the async performance vs. synchronous performance!\n",
"\n",
"In our experiments we get a 2x speedup."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d3e161ce-ef10-446f-acfb-f6d3a1d291bb",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1;3;38;2;155;135;227m> Running modules and inputs in parallel: \n",
"Module key: input. Input: \n",
"input: What did the author do during his time in YC?\n",
"\n",
"\n",
"\u001b[0m\u001b[1;3;38;2;155;135;227m> Running modules and inputs in parallel: \n",
"Module key: 128. Input: \n", | |
165291 | },
{
"cell_type": "code",
"execution_count": null,
"id": "de0e3aa5",
"metadata": {},
"outputs": [],
"source": [
"db = chromadb.PersistentClient(path=\"./chroma_db\")\n",
"chroma_collection = db.get_or_create_collection(\"dense_vectors\")\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
"\n",
"docstore = SimpleDocumentStore.from_persist_path(\"./docstore.json\")\n",
"\n",
"storage_context = StorageContext.from_defaults(\n",
" docstore=docstore, vector_store=vector_store\n",
")\n",
"\n",
"index = VectorStoreIndex(nodes=[], storage_context=storage_context)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llama-index-caVs7DDe-py3.11",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 5
} | |
165314 | "Doc 8 (node score, doc similarity, full similarity): (0.5672766061523489, 0.24827341793941335, 0.4077750120458811)\n",
"Doc 9 (node score, doc similarity, full similarity): (0.5671131641337652, 0.24827341793941335, 0.4076932910365893)\n",
"The LLM interface is a unified interface provided by LlamaIndex for defining Large Language Models (LLMs) from different sources such as OpenAI, Hugging Face, or LangChain. This interface eliminates the need to write the boilerplate code for defining the LLM interface yourself. The LLM interface supports text completion and chat endpoints, as well as streaming and non-streaming endpoints. It also supports both synchronous and asynchronous endpoints.\n",
"\n",
"LLMs are a core component of LlamaIndex and can be used as standalone modules or plugged into other core LlamaIndex modules such as indices, retrievers, and query engines. They are primarily used during the response synthesis step, which occurs after retrieval. Depending on the type of index being used, LLMs may also be used during index construction, insertion, and query traversal.\n",
"\n",
"To use LLMs, you can import the necessary modules and instantiate the LLM object. You can then use the LLM object to generate responses or complete text prompts. LlamaIndex provides examples and code snippets to help you get started with using LLMs.\n",
"\n",
"It's important to note that tokenization plays a crucial role in LLMs. LlamaIndex uses a global tokenizer by default, but if you change the LLM, you may need to update the tokenizer to ensure accurate token counts, chunking, and prompting. LlamaIndex provides instructions on how to set a global tokenizer using libraries like tiktoken or Hugging Face's AutoTokenizer.\n",
"\n",
"Overall, LLMs are powerful tools for building LlamaIndex applications and can be customized within the LlamaIndex abstractions. While LLMs from paid APIs like OpenAI and Anthropic are generally considered more reliable, local open-source models are gaining popularity due to their customizability and transparency. LlamaIndex offers integrations with various LLMs and provides documentation on their compatibility and performance. Contributions to improve the setup and performance of existing LLMs or to add new LLMs are welcome.\n"
]
}
],
"source": [
"response = query_engine.query(query_str)\n",
"print(str(response))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The LLM interface is a unified interface provided by LlamaIndex for defining Large Language Model (LLM) modules. It allows users to easily integrate LLMs from different providers such as OpenAI, Hugging Face, or LangChain into their applications without having to write the boilerplate code for defining the LLM interface themselves.\n",
"\n",
"LLMs are a core component of LlamaIndex and can be used as standalone modules or plugged into other core LlamaIndex modules such as indices, retrievers, and query engines. They are primarily used during the response synthesis step, which occurs after retrieval. Depending on the type of index being used, LLMs may also be used during index construction, insertion, and query traversal.\n",
"\n",
"The LLM interface supports various functionalities, including text completion and chat endpoints. It also provides support for streaming and non-streaming endpoints, as well as synchronous and asynchronous endpoints.\n",
"\n",
"To use LLMs, you can import the necessary modules and make use of the provided functions. For example, you can use the OpenAI module to interact with the gpt-3.5-turbo LLM by calling the `OpenAI()` function. You can then use the `complete()` function to generate completions based on a given prompt.\n",
"\n",
"It's important to note that LlamaIndex uses a global tokenizer called cl100k from tiktoken by default for all token counting. If you change the LLM being used, you may need to update the tokenizer to ensure accurate token counts, chunking, and prompting.\n",
"\n",
"Overall, LLMs and the LLM interface provided by LlamaIndex are essential for building LLM applications and integrating them into the LlamaIndex ecosystem.\n"
]
}
],
"source": [
"base_response = base_query_engine.query(query_str)\n",
"print(str(base_response))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llama_index_v2",
"language": "python",
"name": "llama_index_v2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 4
} | |
165535 | {
"cells": [
{
"cell_type": "markdown",
"id": "6340e329",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/llm/azure_openai.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"id": "8d09c269-526b-4858-979d-d77285c25260",
"metadata": {},
"source": [
"# Azure OpenAI"
]
},
{
"cell_type": "markdown",
"id": "d981ca8e",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "24e9114d",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-azure-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "02fcb2e6",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "markdown",
"id": "5386685e-fa9e-46b4-8bd9-2cdec2d9903e",
"metadata": {},
"source": [
"## Prerequisites"
]
},
{
"cell_type": "markdown",
"id": "5a3491e8-11e8-4548-b6b9-d08246c7ef9b",
"metadata": {},
"source": [
"1. Setup an Azure subscription - you can create one for free [here](https://azure.microsoft.com/en-us/free/cognitive-services/)\n",
"2. Apply for access to Azure OpenAI Service [here](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR7en2Ais5pxKtso_Pz4b1_xUOFA5Qk1UWDRBMjg0WFhPMkIzTzhKQ1dWNyQlQCN0PWcu) \n",
"3. Create a resource in the Azure portal [here](https://portal.azure.com/?microsoft_azure_marketplace_ItemHideKey=microsoft_openai_tip#create/Microsoft.CognitiveServicesOpenAI)\n",
"4. Deploy a model in Azure OpenAI Studio [here](https://oai.azure.com/)\n",
"\n",
"\n",
"You can find more details in [this guide.](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal)\n",
"\n",
"Note down the **\"model name\"** and **\"deployment name\"**, you'll need it when connecting to your LLM."
]
},
{
"cell_type": "markdown",
"id": "14050fdc-890d-464c-89ff-9f444672de1d",
"metadata": {},
"source": [
"## Environment Setup"
]
},
{
"cell_type": "markdown",
"id": "3635cdc5-d27c-48c1-9041-ccb91d239956",
"metadata": {},
"source": [
"### Find your setup information - API base, API key, deployment name (i.e. engine), etc"
]
},
{
"cell_type": "markdown",
"id": "f7b24c58-b8fe-4687-bfa7-a659c2a86d93",
"metadata": {},
"source": [
    "To find the setup information necessary, follow these steps:\n",
"1. Go to the Azure OpenAI Studio [here](https://oai.azure.com/)\n",
"2. Go to the chat or completions playground (depending on which LLM you are setting up)\n",
"3. Click \"view code\" (shown in image below)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aa6cd770-8b22-425d-b5cc-af2613a71c10",
"metadata": {},
"outputs": [
{
"data": {
"",
"text/plain": [
"<IPython.core.display.Image object>"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from IPython.display import Image\n",
"\n",
"Image(filename=\"./azure_playground.png\")"
]
},
{
"cell_type": "markdown",
"id": "271c6132-dc8f-4c5e-a440-471c81bb0302",
"metadata": {},
"source": [
"4. Note down the `api_type`, `api_base`, `api_version`, `engine` (this should be the same as the \"deployment name\" from before), and the `key`"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "434775cb-2ca0-408c-b6f9-a08b3f0c63b9",
"metadata": {},
"outputs": [
{
"data": {
"",
"text/plain": [
"<IPython.core.display.Image object>"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from IPython.display import Image\n",
"\n",
"Image(filename=\"./azure_env.png\")"
]
},
{
"cell_type": "markdown",
"id": "7a5bf13f-4d64-446c-bb0e-fdf3e03d22d4",
"metadata": {},
"source": [
"### Configure environment variables"
]
},
{
"cell_type": "markdown",
"id": "ae5dd044-6c03-4854-b643-84e79a1c05f2",
"metadata": {},
"source": [
"Using Azure deployment of OpenAI models is very similar to normal OpenAI. \n",
"You just need to configure a couple more environment variables.\n",
"\n",
"- `OPENAI_API_VERSION`: set this to `2023-07-01-preview`\n",
" This may change in the future.\n",
"- `AZURE_OPENAI_ENDPOINT`: your endpoint should look like the following\n",
" https://YOUR_RESOURCE_NAME.openai.azure.com/\n",
"- `OPENAI_API_KEY`: your API key"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82233222-5a91-473b-b968-10bf8b7105e7",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"<your-api-key>\"\n",
"os.environ[\n",
" \"AZURE_OPENAI_ENDPOINT\"\n",
"] = \"https://<your-resource-name>.openai.azure.com/\"\n",
"os.environ[\"OPENAI_API_VERSION\"] = \"2023-07-01-preview\""
]
},
{
"cell_type": "markdown",
"id": "a593031b-c872-4360-8775-dff4844ccead",
"metadata": {},
"source": [
"## Use your LLM"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd389e2c-a3d5-4b47-acbe-b22b3da17670",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.llms.azure_openai import AzureOpenAI"
]
},
{
"cell_type": "markdown",
"id": "ae049806-09b3-46fe-b589-2ae2f33beda9",
"metadata": {},
"source": [
"Unlike normal `OpenAI`, you need to pass a `engine` argument in addition to `model`. The `engine` is the name of your model deployment you selected in Azure OpenAI Studio. See previous section on \"find your setup information\" for more details."
]
},
{ | |
165605 | {
"cells": [
{
"cell_type": "markdown",
"id": "3ac9adb4",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/llm/llama_2_llama_cpp.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"id": "368686b4-f487-4dd4-aeff-37823976529d",
"metadata": {},
"source": [
"# LlamaCPP \n",
"\n",
"In this short notebook, we show how to use the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) library with LlamaIndex.\n",
"\n",
"In this notebook, we use the [`llama-2-chat-13b-ggml`](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML) model, along with the proper prompt formatting. \n",
"\n",
    "Note that if you're using a version of `llama-cpp-python` after version `0.1.79`, the model format has changed from `ggmlv3` to `gguf`. Old model files like the one used in this notebook can be converted using scripts in the [`llama.cpp`](https://github.com/ggerganov/llama.cpp) repo. Alternatively, you can download the GGUF version of the model above from [huggingface](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF).\n",
"\n",
"By default, if model_path and model_url are blank, the `LlamaCPP` module will load llama2-chat-13B in either format depending on your version.\n",
"\n",
"## Installation\n",
"\n",
    "To get the best performance out of `LlamaCPP`, it is recommended to install the package so that it is compiled with GPU support. A full guide for installing this way is [here](https://github.com/abetlen/llama-cpp-python#installation-with-openblas--cublas--clblast--metal).\n",
"\n",
"Full MACOS instructions are also [here](https://llama-cpp-python.readthedocs.io/en/latest/install/macos/).\n",
"\n",
"In general:\n",
"- Use `CuBLAS` if you have CUDA and an NVidia GPU\n",
"- Use `METAL` if you are running on an M1/M2 MacBook\n",
"- Use `CLBLAST` if you are running on an AMD/Intel GPU"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aff273be",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-embeddings-huggingface\n",
"%pip install llama-index-llms-llama-cpp"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "40a33749",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n",
"from llama_index.llms.llama_cpp import LlamaCPP\n",
"from llama_index.llms.llama_cpp.llama_utils import (\n",
" messages_to_prompt,\n",
" completion_to_prompt,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "e7927630-0044-41fb-a8a6-8dc3d2adb608",
"metadata": {},
"source": [
"## Setup LLM\n",
"\n",
"The LlamaCPP llm is highly configurable. Depending on the model being used, you'll want to pass in `messages_to_prompt` and `completion_to_prompt` functions to help format the model inputs.\n",
"\n",
"Since the default model is llama2-chat, we use the util functions found in [`llama_index.llms.llama_utils`](https://github.com/jerryjliu/llama_index/blob/main/llama_index/llms/llama_utils.py).\n",
"\n",
"For any kwargs that need to be passed in during initialization, set them in `model_kwargs`. A full list of available model kwargs is available in the [LlamaCPP docs](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.llama.Llama.__init__).\n",
"\n",
"For any kwargs that need to be passed in during inference, you can set them in `generate_kwargs`. See the full list of [generate kwargs here](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.llama.Llama.__call__).\n",
"\n",
"In general, the defaults are a great starting point. The example below shows configuration with all defaults.\n",
"\n",
"As noted above, we're using the [`llama-2-chat-13b-ggml`](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML) model in this notebook which uses the `ggmlv3` model format. If you are running a version of `llama-cpp-python` greater than `0.1.79`, you can replace the `model_url` below with `\"https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf\"`."
]
},
{
"cell_type": "markdown",
"id": "59b27895",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "439960c5",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2640c7a4",
"metadata": {},
"outputs": [],
"source": [
"model_url = \"https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/resolve/main/llama-2-13b-chat.ggmlv3.q4_0.bin\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fa0ec4f-03ff-4e28-957f-b4b99a0faa20",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"llama.cpp: loading model from /Users/rchan/Library/Caches/llama_index/models/llama-2-13b-chat.ggmlv3.q4_0.bin\n",
"llama_model_load_internal: format = ggjt v3 (latest)\n",
"llama_model_load_internal: n_vocab = 32000\n",
"llama_model_load_internal: n_ctx = 3900\n",
"llama_model_load_internal: n_embd = 5120\n",
"llama_model_load_internal: n_mult = 256\n",
"llama_model_load_internal: n_head = 40\n",
"llama_model_load_internal: n_head_kv = 40\n",
"llama_model_load_internal: n_layer = 40\n",
"llama_model_load_internal: n_rot = 128\n",
"llama_model_load_internal: n_gqa = 1\n",
"llama_model_load_internal: rnorm_eps = 5.0e-06\n",
"llama_model_load_internal: n_ff = 13824\n",
"llama_model_load_internal: freq_base = 10000.0\n",
"llama_model_load_internal: freq_scale = 1\n",
"llama_model_load_internal: ftype = 2 (mostly Q4_0)\n",
"llama_model_load_internal: model size = 13B\n",
"llama_model_load_internal: ggml ctx size = 0.11 MB\n",
"llama_model_load_internal: mem required = 6983.72 MB (+ 3046.88 MB per state)\n",
"llama_new_context_with_model: kv self size = 3046.88 MB\n",
"ggml_metal_init: allocating\n", | |
165672 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/embeddings/custom_embeddings.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Custom Embeddings\n",
"LlamaIndex supports embeddings from OpenAI, Azure, and Langchain. But if this isn't enough, you can also implement any embeddings model!\n",
"\n",
"The example below uses Instructor Embeddings ([install/setup details here](https://huggingface.co/hkunlp/instructor-large)), and implements a custom embeddings class. Instructor embeddings work by providing text, as well as \"instructions\" on the domain of the text to embed. This is helpful when embedding text from a very specific and specialized topic.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Install dependencies\n",
"# !pip install InstructorEmbedding torch transformers sentence-transformers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
"openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Custom Embeddings Implementation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Any, List\n",
"from InstructorEmbedding import INSTRUCTOR\n",
"\n",
"from llama_index.core.bridge.pydantic import PrivateAttr\n",
"from llama_index.core.embeddings import BaseEmbedding\n",
"\n",
"\n",
"class InstructorEmbeddings(BaseEmbedding):\n",
" _model: INSTRUCTOR = PrivateAttr()\n",
" _instruction: str = PrivateAttr()\n",
"\n",
" def __init__(\n",
" self,\n",
" instructor_model_name: str = \"hkunlp/instructor-large\",\n",
" instruction: str = \"Represent a document for semantic search:\",\n",
" **kwargs: Any,\n",
" ) -> None:\n",
" super().__init__(**kwargs)\n",
" self._model = INSTRUCTOR(instructor_model_name)\n",
" self._instruction = instruction\n",
"\n",
" @classmethod\n",
" def class_name(cls) -> str:\n",
" return \"instructor\"\n",
"\n",
" async def _aget_query_embedding(self, query: str) -> List[float]:\n",
" return self._get_query_embedding(query)\n",
"\n",
" async def _aget_text_embedding(self, text: str) -> List[float]:\n",
" return self._get_text_embedding(text)\n",
"\n",
" def _get_query_embedding(self, query: str) -> List[float]:\n",
" embeddings = self._model.encode([[self._instruction, query]])\n",
" return embeddings[0]\n",
"\n",
" def _get_text_embedding(self, text: str) -> List[float]:\n",
" embeddings = self._model.encode([[self._instruction, text]])\n",
" return embeddings[0]\n",
"\n",
" def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:\n",
" embeddings = self._model.encode(\n",
" [[self._instruction, text] for text in texts]\n",
" )\n",
" return embeddings"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Usage Example"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n",
"from llama_index.core import Settings"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Download Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Load Documents"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"documents = SimpleDirectoryReader(\"./data/paul_graham/\").load_data()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"load INSTRUCTOR_Transformer\n",
"max_seq_length 512\n"
]
}
],
"source": [
"embed_model = InstructorEmbeddings(embed_batch_size=2)\n",
"\n",
"Settings.embed_model = embed_model\n",
"Settings.chunk_size = 512\n",
"\n",
"# if running for the first time, will download model weights first!\n",
"index = VectorStoreIndex.from_documents(documents)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The author wrote short stories and also worked on programming, specifically on an IBM 1401 computer in 9th grade. They used an early version of Fortran and had to type programs on punch cards. Later on, they got a microcomputer, a TRS-80, and started programming more extensively, writing simple games and a word processor. They initially planned to study philosophy in college but eventually switched to AI.\n"
]
}
],
"source": [
"response = index.as_query_engine().query(\"What did the author do growing up?\")\n",
"print(response)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 4
} | |
165675 | {
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/embeddings/huggingface.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Local Embeddings with HuggingFace\n",
"\n",
"LlamaIndex has support for HuggingFace embedding models, including BGE, Instructor, and more.\n",
"\n",
"Furthermore, we provide utilities to create and use ONNX models using the [Optimum library](https://huggingface.co/docs/transformers/serialization#exporting-a-transformers-model-to-onnx-with-optimumonnxruntime) from HuggingFace."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## HuggingFaceEmbedding\n",
"\n",
"The base `HuggingFaceEmbedding` class is a generic wrapper around any HuggingFace model for embeddings. All [embedding models](https://huggingface.co/models?library=sentence-transformers) on Hugging Face should work. You can refer to the [embeddings leaderboard](https://huggingface.co/spaces/mteb/leaderboard) for more recommendations.\n",
"\n",
"This class depends on the sentence-transformers package, which you can install with `pip install sentence-transformers`.\n",
"\n",
"NOTE: if you were previously using a `HuggingFaceEmbeddings` from LangChain, this should give equivalent results."
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-embeddings-huggingface\n",
"%pip install llama-index-embeddings-instructor"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/loganm/miniconda3/envs/llama-index/lib/python3.11/site-packages/torch/cuda/__init__.py:546: UserWarning: Can't initialize NVML\n",
" warnings.warn(\"Can't initialize NVML\")\n"
]
}
],
"source": [
"from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
"\n",
"# loads BAAI/bge-small-en\n",
"# embed_model = HuggingFaceEmbedding()\n",
"\n",
"# loads BAAI/bge-small-en-v1.5\n",
"embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hello World!\n",
"384\n",
"[-0.030880315229296684, -0.11021008342504501, 0.3917851448059082, -0.35962796211242676, 0.22797748446464539]\n"
]
}
],
"source": [
"embeddings = embed_model.get_text_embedding(\"Hello World!\")\n",
"print(len(embeddings))\n",
"print(embeddings[:5])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## InstructorEmbedding\n",
"\n",
"Instructor Embeddings are a class of embeddings specifically trained to augment their embeddings according to an instruction. By default, queries are given `query_instruction=\"Represent the question for retrieving supporting documents: \"` and text is given `text_instruction=\"Represent the document for retrieval: \"`.\n",
"\n",
"They rely on the `Instructor` and `SentenceTransformers` (version 2.2.2) pip package, which you can install with `pip install InstructorEmbedding` and `pip install -U sentence-transformers==2.2.2`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/loganm/miniconda3/envs/llama-index/lib/python3.11/site-packages/InstructorEmbedding/instructor.py:7: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
" from tqdm.autonotebook import trange\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"load INSTRUCTOR_Transformer\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/loganm/miniconda3/envs/llama-index/lib/python3.11/site-packages/torch/cuda/__init__.py:546: UserWarning: Can't initialize NVML\n",
" warnings.warn(\"Can't initialize NVML\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"max_seq_length 512\n"
]
}
],
"source": [
"from llama_index.embeddings.instructor import InstructorEmbedding\n",
"\n",
"embed_model = InstructorEmbedding(model_name=\"hkunlp/instructor-base\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"768\n",
"[ 0.02155361 -0.06098218 0.01796207 0.05490903 0.01526906]\n"
]
}
],
"source": [
"embeddings = embed_model.get_text_embedding(\"Hello World!\")\n",
"print(len(embeddings))\n",
"print(embeddings[:5])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## OptimumEmbedding\n",
"\n",
    "Optimum is a HuggingFace library for exporting and running HuggingFace models in the ONNX format.\n",
"\n",
"You can install the dependencies with `pip install transformers optimum[exporters]`.\n",
"\n",
"First, we need to create the ONNX model. ONNX models provide improved inference speeds, and can be used across platforms (i.e. in TransformersJS)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/loganm/miniconda3/envs/llama-index/lib/python3.11/site-packages/torch/cuda/__init__.py:546: UserWarning: Can't initialize NVML\n",
" warnings.warn(\"Can't initialize NVML\")\n",
"Framework not specified. Using pt to export to ONNX.\n",
"Using the export variant default. Available variants are:\n",
"\t- default: The default ONNX variant.\n",
"Using framework PyTorch: 2.0.1+cu117\n",
"Overriding 1 configuration item(s)\n",
"\t- use_cache -> False\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"============= Diagnostic Run torch.onnx.export version 2.0.1+cu117 =============\n",
"verbose: False, log level: Level.ERROR\n",
"======================= 0 NONE 0 NOTE 0 WARNING 0 ERROR ========================\n",
"\n", | |
165706 | {
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# LangChain Embeddings\n",
"\n",
"This guide shows you how to use embedding models from [LangChain](https://python.langchain.com/docs/integrations/text_embedding/).\n",
"\n",
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/embeddings/Langchain.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-embeddings-langchain"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings import HuggingFaceEmbeddings\n",
"from llama_index.embeddings.langchain import LangchainEmbedding\n",
"\n",
"lc_embed_model = HuggingFaceEmbeddings(\n",
" model_name=\"sentence-transformers/all-mpnet-base-v2\"\n",
")\n",
"embed_model = LangchainEmbedding(lc_embed_model)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"768 [-0.005906202830374241, 0.04911914840340614, -0.04757878929376602, -0.04320324584841728, 0.02837090566754341, -0.017371710389852524, -0.04422023147344589, -0.019035547971725464, 0.04941621795296669, -0.03839121758937836]\n"
]
}
],
"source": [
"# Basic embedding example\n",
"embeddings = embed_model.get_text_embedding(\n",
" \"It is raining cats and dogs here!\"\n",
")\n",
"print(len(embeddings), embeddings[:10])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llama_index_v2",
"language": "python",
"name": "llama_index_v2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 4
} | |
165710 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/embeddings/OpenAI.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# OpenAI Embeddings"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-embeddings-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-...\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.core import Settings\n",
"\n",
"embed_model = OpenAIEmbedding(embed_batch_size=10)\n",
"Settings.embed_model = embed_model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using OpenAI `text-embedding-3-large` and `text-embedding-3-small`\n",
"\n",
"Note, you may have to update your openai client: `pip install -U openai`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get API key and create embeddings\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"\n",
"embed_model = OpenAIEmbedding(model=\"text-embedding-3-large\")\n",
"\n",
"embeddings = embed_model.get_text_embedding(\n",
" \"Open AI new Embeddings models is great.\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[-0.011500772088766098, 0.02457442320883274, -0.01760469563305378, -0.017763426527380943, 0.029841400682926178]\n"
]
}
],
"source": [
"print(embeddings[:5])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3072\n"
]
}
],
"source": [
"print(len(embeddings))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get API key and create embeddings\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"\n",
"embed_model = OpenAIEmbedding(\n",
" model=\"text-embedding-3-small\",\n",
")\n",
"\n",
"embeddings = embed_model.get_text_embedding(\n",
" \"Open AI new Embeddings models is awesome.\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1536\n"
]
}
],
"source": [
"print(len(embeddings))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Change the dimension of output embeddings\n",
"Note: Make sure you have the latest OpenAI client"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"512\n"
]
}
],
"source": [
"# get API key and create embeddings\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"\n",
"\n",
"embed_model = OpenAIEmbedding(\n",
" model=\"text-embedding-3-large\",\n",
" dimensions=512,\n",
")\n",
"\n",
"embeddings = embed_model.get_text_embedding(\n",
" \"Open AI new Embeddings models with different dimensions is awesome.\"\n",
")\n",
"print(len(embeddings))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 4
} | |
165721 | {
"cells": [
{
"cell_type": "markdown",
"id": "c62c1447-0afb-4fad-8dc6-389949c3496e",
"metadata": {},
"source": [
"# Chat Summary Memory Buffer\n",
"In this demo, we use the new *ChatSummaryMemoryBuffer* to limit the chat history to a certain token length, and iteratively summarize all messages that do not fit in the memory buffer. This can be useful if you want to limit costs and latency (assuming the summarization prompt uses and generates fewer tokens than including the entire history). \n",
"\n",
"The original *ChatMemoryBuffer* gives you the option to truncate the history after a certain number of tokens, which is useful to limit costs and latency, but also removes potentially relevant information from the chat history. \n",
"\n",
    "The newer *ChatSummaryMemoryBuffer* aims to make this a bit more flexible, so the user has more control over which chat_history is retained."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c00a753b-df2c-4164-90c3-76b8a15f74c9",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-openai\n",
"%pip install llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c1f24186-b86e-4580-b7b4-072e719d424f",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "db66e3bc-9791-497b-9e7d-386765dccf74",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.memory import ChatSummaryMemoryBuffer\n",
"from llama_index.core.llms import ChatMessage, MessageRole\n",
"from llama_index.llms.openai import OpenAI as OpenAiLlm\n",
"import tiktoken"
]
},
{
"cell_type": "markdown",
"id": "68e26f76-f819-4e1a-bc47-f2ea855ee189",
"metadata": {},
"source": [
"First, we simulate some chat history that will not fit in the memory buffer in its entirety."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6402621a-4131-465c-b92d-1d9a8e7ee985",
"metadata": {},
"outputs": [],
"source": [
"chat_history = [\n",
" ChatMessage(role=\"user\", content=\"What is LlamaIndex?\"),\n",
" ChatMessage(\n",
" role=\"assistant\",\n",
    "        content=\"LlamaIndex is the leading data framework for building LLM applications\",\n",
" ),\n",
" ChatMessage(role=\"user\", content=\"Can you give me some more details?\"),\n",
" ChatMessage(\n",
" role=\"assistant\",\n",
" content=\"\"\"LlamaIndex is a framework for building context-augmented LLM applications. Context augmentation refers to any use case that applies LLMs on top of your private or domain-specific data. Some popular use cases include the following: \n",
" Question-Answering Chatbots (commonly referred to as RAG systems, which stands for \"Retrieval-Augmented Generation\"), Document Understanding and Extraction, Autonomous Agents that can perform research and take actions\n",
" LlamaIndex provides the tools to build any of these above use cases from prototype to production. The tools allow you to both ingest/process this data and implement complex query workflows combining data access with LLM prompting.\"\"\",\n",
" ),\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "f057a791-9d8e-43e5-b40a-6675b28f6fd0",
"metadata": {},
"source": [
"By supplying an *llm* and *token_limit* for summarization, we create a *ChatSummaryMemoryBuffer* instance."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "67dbb654-4a66-43cf-9f6a-daead87a1084",
"metadata": {},
"outputs": [],
"source": [
"model = \"gpt-4-0125-preview\"\n",
"summarizer_llm = OpenAiLlm(model_name=model, max_tokens=256)\n",
"tokenizer_fn = tiktoken.encoding_for_model(model).encode\n",
"memory = ChatSummaryMemoryBuffer.from_defaults(\n",
" chat_history=chat_history,\n",
" llm=summarizer_llm,\n",
" token_limit=2,\n",
" tokenizer_fn=tokenizer_fn,\n",
")\n",
"\n",
"history = memory.get()"
]
},
{
"cell_type": "markdown",
"id": "4e4e333c-6c33-4e01-b1a8-750d21800076",
"metadata": {},
"source": [
"When printing the history, we can observe that older messages have been summarized."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0821eb89-4164-4a06-b66c-ea2632706e11",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ChatMessage(role=<MessageRole.SYSTEM: 'system'>, content='The user inquired about LlamaIndex, a leading data framework for developing LLM applications. The assistant explained that LlamaIndex is used for building context-augmented LLM applications, giving examples such as Question-Answering Chatbots, Document Understanding and Extraction, and Autonomous Agents. It was mentioned that LlamaIndex provides tools for ingesting and processing data, as well as implementing complex query workflows combining data access with LLM prompting.', additional_kwargs={})]\n"
]
}
],
"source": [
"print(history)"
]
},
{
"cell_type": "markdown",
"id": "fae3efe0-6889-49f7-9f21-6ced186f0609",
"metadata": {},
"source": [
"Let's add some new chat history."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7ddb295c-c5c0-4faf-b0e5-a451d1d26d60",
"metadata": {},
"outputs": [],
"source": [
"new_chat_history = [\n",
" ChatMessage(role=\"user\", content=\"Why context augmentation?\"),\n",
" ChatMessage(\n",
" role=\"assistant\",\n",
" content=\"LLMs offer a natural language interface between humans and data. Widely available models come pre-trained on huge amounts of publicly available data. However, they are not trained on your data, which may be private or specific to the problem you're trying to solve. It's behind APIs, in SQL databases, or trapped in PDFs and slide decks. LlamaIndex provides tooling to enable context augmentation. A popular example is Retrieval-Augmented Generation (RAG) which combines context with LLMs at inference time. Another is finetuning.\",\n",
" ),\n",
" ChatMessage(role=\"user\", content=\"Who is LlamaIndex for?\"),\n",
" ChatMessage(\n",
" role=\"assistant\",\n",
" content=\"LlamaIndex provides tools for beginners, advanced users, and everyone in between. Our high-level API allows beginner users to use LlamaIndex to ingest and query their data in 5 lines of code. For more complex applications, our lower-level APIs allow advanced users to customize and extend any module—data connectors, indices, retrievers, query engines, reranking modules—to fit their needs.\",\n",
" ),\n",
"]\n",
"memory.put(new_chat_history[0])\n",
"memory.put(new_chat_history[1])\n",
"memory.put(new_chat_history[2])\n",
"memory.put(new_chat_history[3])\n",
"history = memory.get()"
]
},
{
"cell_type": "markdown", | |
165847 | {
"cells": [
{
"cell_type": "markdown",
"id": "7ae43f8b",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/customization/llms/SimpleIndexDemo-ChatGPT.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "9c48213d-6e6a-4c10-838a-2a7c710c3a05",
"metadata": {},
"source": [
"# ChatGPT"
]
},
{
"cell_type": "markdown",
"id": "cf6108b0",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "590874bc",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "14790912",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "690a6918-7c75-4f95-9ccc-d2c4a1fe00d7",
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"import sys\n",
"\n",
"logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n",
"logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n",
"\n",
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
"from llama_index.core import Settings\n",
"from llama_index.llms.openai import OpenAI\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "markdown",
"id": "9bc1c58d",
"metadata": {},
"source": [
"#### Download Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18c8d851",
"metadata": {},
"outputs": [],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "50d3b817-b70e-4667-be4f-d3a0fe4bd119",
"metadata": {},
"source": [
"#### Load documents, build the VectorStoreIndex"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "03d1691e-544b-454f-825b-5ee12f7faa8a",
"metadata": {},
"outputs": [],
"source": [
"# load documents\n",
"documents = SimpleDirectoryReader(\"./data/paul_graham\").load_data()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6cc980e8-f4e1-4fad-93f8-ab1bbaa874f3",
"metadata": {},
"outputs": [],
"source": [
"# set global settings config\n",
"llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
"Settings.llm = llm\n",
"Settings.chunk_size = 512"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad144ee7-96da-4dd6-be00-fd6cf0c78e58",
"metadata": {},
"outputs": [],
"source": [
"index = VectorStoreIndex.from_documents(documents)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "b6caf93b-6345-4c65-a346-a95b0f1746c4",
"metadata": {},
"source": [
"#### Query Index"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "83e2905e-3789-4793-82b9-0ac488246824",
"metadata": {},
"source": [
"By default, with the help of langchain's PromptSelector abstraction, we use \n",
"a modified refine prompt tailored for ChatGPT-use if the ChatGPT model is used."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "85466fdf-93f3-4cb1-a5f9-0056a8245a6f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:llama_index.token_counter.token_counter:> [retrieve] Total LLM token usage: 0 tokens\n",
"> [retrieve] Total LLM token usage: 0 tokens\n",
"INFO:llama_index.token_counter.token_counter:> [retrieve] Total embedding token usage: 8 tokens\n",
"> [retrieve] Total embedding token usage: 8 tokens\n",
"INFO:llama_index.token_counter.token_counter:> [get_response] Total LLM token usage: 0 tokens\n",
"> [get_response] Total LLM token usage: 0 tokens\n",
"INFO:llama_index.token_counter.token_counter:> [get_response] Total embedding token usage: 0 tokens\n",
"> [get_response] Total embedding token usage: 0 tokens\n"
]
}
],
"source": [
"query_engine = index.as_query_engine(\n",
" similarity_top_k=3,\n",
" streaming=True,\n",
")\n",
"response = query_engine.query(\n",
" \"What did the author do growing up?\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0262da8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Before college, the author worked on writing short stories and programming on an IBM 1401 using an early version of Fortran. They also worked on programming with microcomputers and eventually created a new dialect of Lisp called Arc. They later realized the potential of publishing essays on the web and began writing and publishing them. The author also worked on spam filters, painting, and cooking for groups."
]
}
],
"source": [
"response.print_response_stream()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ec88df57",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:llama_index.token_counter.token_counter:> [retrieve] Total LLM token usage: 0 tokens\n",
"> [retrieve] Total LLM token usage: 0 tokens\n",
"INFO:llama_index.token_counter.token_counter:> [retrieve] Total embedding token usage: 12 tokens\n",
"> [retrieve] Total embedding token usage: 12 tokens\n",
"INFO:llama_index.token_counter.token_counter:> [get_response] Total LLM token usage: 0 tokens\n",
"> [get_response] Total LLM token usage: 0 tokens\n",
"INFO:llama_index.token_counter.token_counter:> [get_response] Total embedding token usage: 0 tokens\n",
"> [get_response] Total embedding token usage: 0 tokens\n"
]
}
],
"source": [
"query_engine = index.as_query_engine(\n",
" similarity_top_k=5,\n",
" streaming=True,\n",
")\n",
"response = query_engine.query(\n",
" \"What did the author do during his time at RISD?\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null, | |
165853 | {
"cells": [
{
"cell_type": "markdown",
"id": "9af12a30",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/customization/llms/AzureOpenAI.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "ec51f656",
"metadata": {},
"source": [
"# Azure OpenAI"
]
},
{
"cell_type": "markdown",
"id": "ef3d26db",
"metadata": {},
"source": [
    "Azure OpenAI resources unfortunately differ from standard OpenAI resources, as you can't generate embeddings unless you use an embedding model. The regions where these models are available can be found here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models#embeddings-models\n",
"\n",
    "Furthermore, the regions that support embedding models unfortunately don't support the latest versions (*-003) of OpenAI models, so we are forced to use one region for embeddings and another for the text generation."
]
},
{
"cell_type": "markdown",
"id": "710d6708",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "94613811",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-embeddings-azure-openai\n",
"%pip install llama-index-llms-azure-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de32bc14",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b05e71d5",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.llms.azure_openai import AzureOpenAI\n",
"from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n",
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
"import logging\n",
"import sys\n",
"\n",
"logging.basicConfig(\n",
" stream=sys.stdout, level=logging.INFO\n",
") # logging.DEBUG for more verbose output\n",
"logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))"
]
},
{
"cell_type": "markdown",
"id": "fcaaafdf",
"metadata": {},
"source": [
    "Here, we set up the embedding model (for retrieval) and LLM (for text generation).\n",
    "Note that you need not only model names (e.g. \"text-embedding-ada-002\"), but also model deployment names (the ones you chose when deploying the models in Azure).\n",
    "You must pass the deployment name as a parameter when you initialize `AzureOpenAI` and `AzureOpenAIEmbedding`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e2569cb0",
"metadata": {},
"outputs": [],
"source": [
"api_key = \"<api-key>\"\n",
"azure_endpoint = \"https://<your-resource-name>.openai.azure.com/\"\n",
"api_version = \"2023-07-01-preview\"\n",
"\n",
"llm = AzureOpenAI(\n",
" model=\"gpt-35-turbo-16k\",\n",
" deployment_name=\"my-custom-llm\",\n",
" api_key=api_key,\n",
" azure_endpoint=azure_endpoint,\n",
" api_version=api_version,\n",
")\n",
"\n",
"# You need to deploy your own embedding model as well as your own chat completion model\n",
"embed_model = AzureOpenAIEmbedding(\n",
" model=\"text-embedding-ada-002\",\n",
" deployment_name=\"my-custom-embedding\",\n",
" api_key=api_key,\n",
" azure_endpoint=azure_endpoint,\n",
" api_version=api_version,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "72aac5a6-495e-40d2-82d3-bda8688ae919",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import Settings\n",
"\n",
"Settings.llm = llm\n",
"Settings.embed_model = embed_model"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1cf0e9c9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n"
]
}
],
"source": [
"documents = SimpleDirectoryReader(\n",
" input_files=[\"../../data/paul_graham/paul_graham_essay.txt\"]\n",
").load_data()\n",
"index = VectorStoreIndex.from_documents(documents)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "98d9d3fd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-embedding/embeddings?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-llm/chat/completions?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-llm/chat/completions?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n",
"HTTP Request: POST https://test-simon.openai.azure.com//openai/deployments/my-custom-llm/chat/completions?api-version=2023-07-01-preview \"HTTP/1.1 200 OK\"\n", | |
165923 | {
"cells": [
{
"cell_type": "markdown",
"id": "30815a85",
"metadata": {},
"source": [
"# LLM Pydantic Program - NVIDIA"
]
},
{
"cell_type": "markdown",
"id": "311e16cb",
"metadata": {},
"source": [
"This guide shows you how to generate structured data with our `LLMTextCompletionProgram`. Given an LLM as well as an output Pydantic class, generate a structured Pydantic object.\n",
"\n",
"In terms of the target object, you can choose to directly specify `output_cls`, or specify a `PydanticOutputParser` or any other BaseOutputParser that generates a Pydantic object.\n",
"\n",
    "In the examples below, we show you different ways of extracting into the `Album` object (which can contain a list of Song objects)."
]
},
{
"cell_type": "markdown",
"id": "e0611198",
"metadata": {},
"source": [
"## Extract into `Album` class\n",
"\n",
"This is a simple example of parsing an output into an `Album` schema, which can contain multiple songs.\n",
"\n",
"Just pass `Album` into the `output_cls` property on initialization of the `LLMTextCompletionProgram`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "511a8171",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-readers-file llama-index-embeddings-nvidia llama-index-llms-nvidia"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b029b7e6",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# del os.environ['NVIDIA_API_KEY'] ## delete key and reset\n",
"if os.environ.get(\"NVIDIA_API_KEY\", \"\").startswith(\"nvapi-\"):\n",
" print(\"Valid NVIDIA_API_KEY already in environment. Delete to reset\")\n",
"else:\n",
" nvapi_key = getpass.getpass(\"NVAPI Key (starts with nvapi-): \")\n",
" assert nvapi_key.startswith(\n",
" \"nvapi-\"\n",
" ), f\"{nvapi_key[:5]}... is not a valid key\"\n",
" os.environ[\"NVIDIA_API_KEY\"] = nvapi_key"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f7a83b49-5c34-45d5-8cf4-62f348fb1299",
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel\n",
"from typing import List\n",
"from llama_index.core import Settings\n",
"from llama_index.llms.nvidia import NVIDIA\n",
"from llama_index.embeddings.nvidia import NVIDIAEmbedding\n",
"from llama_index.core.program import LLMTextCompletionProgram\n",
"from llama_index.core.program import FunctionCallingProgram"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e4fc4b9",
"metadata": {},
"outputs": [],
"source": [
"llm = NVIDIA()\n",
"\n",
"embedder = NVIDIAEmbedding(model=\"NV-Embed-QA\", truncate=\"END\")\n",
"Settings.embed_model = embedder\n",
"Settings.llm = llm"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d92e739",
"metadata": {},
"outputs": [],
"source": [
"class Song(BaseModel):\n",
" \"\"\"Data model for a song.\"\"\"\n",
"\n",
" title: str\n",
" length_seconds: int\n",
"\n",
"\n",
"class Album(BaseModel):\n",
" \"\"\"Data model for an album.\"\"\"\n",
"\n",
" name: str\n",
" artist: str\n",
" songs: List[Song]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "46c2d509",
"metadata": {},
"outputs": [],
"source": [
"prompt_template_str = \"\"\"\\\n",
"Generate an example album, with an artist and a list of songs. \\\n",
"Using the movie {movie_name} as inspiration.\\\n",
"\"\"\"\n",
"program = LLMTextCompletionProgram.from_defaults(\n",
" output_cls=Album,\n",
" prompt_template_str=prompt_template_str,\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "498370f4",
"metadata": {},
"source": [
    "Run the program to get structured output."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ca490bf8",
"metadata": {},
"outputs": [],
"source": [
"output = program(movie_name=\"The Shining\")"
]
},
{
"cell_type": "markdown",
"id": "40cce83f",
"metadata": {},
"source": [
"The output is a valid Pydantic object that we can then use to call functions/APIs. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "53934d3d",
"metadata": {},
"outputs": [],
"source": [
"output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6401ab8d",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.output_parsers import PydanticOutputParser\n",
"\n",
"program = LLMTextCompletionProgram.from_defaults(\n",
" output_parser=PydanticOutputParser(output_cls=Album),\n",
" prompt_template_str=prompt_template_str,\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1adc5b2b",
"metadata": {},
"outputs": [],
"source": [
"output = program(movie_name=\"Lord of the Rings\")\n",
"output"
]
},
{
"cell_type": "markdown",
"id": "a41391d9",
"metadata": {},
"source": [
"## Define a Custom Output Parser\n",
"\n",
"Sometimes you may want to parse an output your own way into a JSON object. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "60b7b669",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.output_parsers import ChainableOutputParser\n",
"\n",
"\n",
"class CustomAlbumOutputParser(ChainableOutputParser):\n",
" \"\"\"Custom Album output parser.\n",
"\n",
" Assume first line is name and artist.\n",
"\n",
" Assume each subsequent line is the song.\n",
"\n",
" \"\"\"\n",
"\n",
" def __init__(self, verbose: bool = False):\n",
" self.verbose = verbose\n",
"\n",
" def parse(self, output: str) -> Album:\n",
" \"\"\"Parse output.\"\"\"\n",
" if self.verbose:\n",
" print(f\"> Raw output: {output}\")\n",
" lines = output.split(\"\\n\")\n",
" lines = list(filter(None, (line.strip() for line in lines)))\n",
" name, artist = lines[1].split(\",\")\n",
" songs = []\n",
" for i in range(2, len(lines)):\n",
" title, length_seconds = lines[i].split(\",\")\n",
" songs.append(Song(title=title, length_seconds=length_seconds))\n",
"\n",
" return Album(name=name, artist=artist, songs=songs)"
]
},
{
"cell_type": "code",
"execution_count": null, | |
165941 | "Cell \u001b[0;32mIn[31], line 5\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mllama_index\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mvector_stores\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mchroma\u001b[39;00m \u001b[39mimport\u001b[39;00m ChromaVectorStore\n\u001b[1;32m 3\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mchromadb\u001b[39;00m\n\u001b[0;32m----> 5\u001b[0m db \u001b[39m=\u001b[39m chromadb\u001b[39m.\u001b[39;49mPersistentClient(path\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39m./chroma_db2\u001b[39;49m\u001b[39m\"\u001b[39;49m)\n\u001b[1;32m 6\u001b[0m chroma_collection \u001b[39m=\u001b[39m db\u001b[39m.\u001b[39mget_or_create_collection(\u001b[39m\"\u001b[39m\u001b[39mquickstart2\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[1;32m 7\u001b[0m vector_store \u001b[39m=\u001b[39m ChromaVectorStore(chroma_collection\u001b[39m=\u001b[39mchroma_collection)\n",
"File \u001b[0;32m~/giant_change/llama_index/venv/lib/python3.10/site-packages/chromadb/__init__.py:146\u001b[0m, in \u001b[0;36mPersistentClient\u001b[0;34m(path, settings, tenant, database)\u001b[0m\n\u001b[1;32m 143\u001b[0m tenant \u001b[39m=\u001b[39m \u001b[39mstr\u001b[39m(tenant)\n\u001b[1;32m 144\u001b[0m database \u001b[39m=\u001b[39m \u001b[39mstr\u001b[39m(database)\n\u001b[0;32m--> 146\u001b[0m \u001b[39mreturn\u001b[39;00m ClientCreator(tenant\u001b[39m=\u001b[39;49mtenant, database\u001b[39m=\u001b[39;49mdatabase, settings\u001b[39m=\u001b[39;49msettings)\n",
"File \u001b[0;32m~/giant_change/llama_index/venv/lib/python3.10/site-packages/chromadb/api/client.py:139\u001b[0m, in \u001b[0;36mClient.__init__\u001b[0;34m(self, tenant, database, settings)\u001b[0m\n\u001b[1;32m 133\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__init__\u001b[39m(\n\u001b[1;32m 134\u001b[0m \u001b[39mself\u001b[39m,\n\u001b[1;32m 135\u001b[0m tenant: \u001b[39mstr\u001b[39m \u001b[39m=\u001b[39m DEFAULT_TENANT,\n\u001b[1;32m 136\u001b[0m database: \u001b[39mstr\u001b[39m \u001b[39m=\u001b[39m DEFAULT_DATABASE,\n\u001b[1;32m 137\u001b[0m settings: Settings \u001b[39m=\u001b[39m Settings(),\n\u001b[1;32m 138\u001b[0m ) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m--> 139\u001b[0m \u001b[39msuper\u001b[39;49m()\u001b[39m.\u001b[39;49m\u001b[39m__init__\u001b[39;49m(settings\u001b[39m=\u001b[39;49msettings)\n\u001b[1;32m 140\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtenant \u001b[39m=\u001b[39m tenant\n\u001b[1;32m 141\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdatabase \u001b[39m=\u001b[39m database\n",
"File \u001b[0;32m~/giant_change/llama_index/venv/lib/python3.10/site-packages/chromadb/api/client.py:43\u001b[0m, in \u001b[0;36mSharedSystemClient.__init__\u001b[0;34m(self, settings)\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__init__\u001b[39m(\n\u001b[1;32m 39\u001b[0m \u001b[39mself\u001b[39m,\n\u001b[1;32m 40\u001b[0m settings: Settings \u001b[39m=\u001b[39m Settings(),\n\u001b[1;32m 41\u001b[0m ) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 42\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_identifier \u001b[39m=\u001b[39m SharedSystemClient\u001b[39m.\u001b[39m_get_identifier_from_settings(settings)\n\u001b[0;32m---> 43\u001b[0m SharedSystemClient\u001b[39m.\u001b[39;49m_create_system_if_not_exists(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_identifier, settings)\n", | |
165943 | "File \u001b[0;32m~/giant_change/llama_index/venv/lib/python3.10/site-packages/chromadb/config.py:382\u001b[0m, in \u001b[0;36mSystem.instance\u001b[0;34m(self, type)\u001b[0m\n\u001b[1;32m 379\u001b[0m \u001b[39mtype\u001b[39m \u001b[39m=\u001b[39m get_class(fqn, \u001b[39mtype\u001b[39m)\n\u001b[1;32m 381\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mtype\u001b[39m \u001b[39mnot\u001b[39;00m \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_instances:\n\u001b[0;32m--> 382\u001b[0m impl \u001b[39m=\u001b[39m \u001b[39mtype\u001b[39;49m(\u001b[39mself\u001b[39;49m)\n\u001b[1;32m 383\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_instances[\u001b[39mtype\u001b[39m] \u001b[39m=\u001b[39m impl\n\u001b[1;32m 384\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_running:\n",
"File \u001b[0;32m~/giant_change/llama_index/venv/lib/python3.10/site-packages/chromadb/db/impl/sqlite.py:88\u001b[0m, in \u001b[0;36mSqliteDB.__init__\u001b[0;34m(self, system)\u001b[0m\n\u001b[1;32m 84\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_db_file \u001b[39m=\u001b[39m (\n\u001b[1;32m 85\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_settings\u001b[39m.\u001b[39mrequire(\u001b[39m\"\u001b[39m\u001b[39mpersist_directory\u001b[39m\u001b[39m\"\u001b[39m) \u001b[39m+\u001b[39m \u001b[39m\"\u001b[39m\u001b[39m/chroma.sqlite3\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 86\u001b[0m )\n\u001b[1;32m 87\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m os\u001b[39m.\u001b[39mpath\u001b[39m.\u001b[39mexists(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_db_file):\n\u001b[0;32m---> 88\u001b[0m os\u001b[39m.\u001b[39;49mmakedirs(os\u001b[39m.\u001b[39;49mpath\u001b[39m.\u001b[39;49mdirname(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_db_file), exist_ok\u001b[39m=\u001b[39;49m\u001b[39mTrue\u001b[39;49;00m)\n\u001b[1;32m 89\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_conn_pool \u001b[39m=\u001b[39m PerThreadPool(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_db_file)\n\u001b[1;32m 90\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_tx_stack \u001b[39m=\u001b[39m local()\n",
"File \u001b[0;32m~/miniforge3/lib/python3.10/os.py:225\u001b[0m, in \u001b[0;36mmakedirs\u001b[0;34m(name, mode, exist_ok)\u001b[0m\n\u001b[1;32m 223\u001b[0m \u001b[39mreturn\u001b[39;00m\n\u001b[1;32m 224\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 225\u001b[0m mkdir(name, mode)\n\u001b[1;32m 226\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mOSError\u001b[39;00m:\n\u001b[1;32m 227\u001b[0m \u001b[39m# Cannot rely on checking for EEXIST, since the operating system\u001b[39;00m\n\u001b[1;32m 228\u001b[0m \u001b[39m# could give priority to other errors like EACCES or EROFS\u001b[39;00m\n\u001b[1;32m 229\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m exist_ok \u001b[39mor\u001b[39;00m \u001b[39mnot\u001b[39;00m path\u001b[39m.\u001b[39misdir(name):\n",
"\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: './chroma_db2'"
]
}
],
"source": [
"from llama_index.core import StorageContext, VectorStoreIndex\n",
"from llama_index.vector_stores.chroma import ChromaVectorStore\n",
"import chromadb\n",
"\n",
"db = chromadb.PersistentClient(path=\"./chroma_db\")\n",
"chroma_collection = db.get_or_create_collection(\"quickstart2\")\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
"\n",
"object_index = ObjectIndex.from_objects(\n",
" arbitrary_objects,\n",
" index_cls=VectorStoreIndex,\n",
" storage_context=storage_context,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "28cda697",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['llamaindex is an awesome library!']"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"object_retriever = object_index.as_retriever(similarity_top_k=1)\n",
"object_retriever.retrieve(\"llamaindex\")"
]
},
{
"cell_type": "markdown",
"id": "358994af",
"metadata": {},
"source": [
    "Now, let's \"reload\" the index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "61134380", | |
165963 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Controlling Agent Reasoning Loop with Return Direct Tools\n",
"\n",
"All tools have an option for `return_direct` -- if this is set to `True`, and the associated tool is called (without any other tools being called), the agent reasoning loop is ended and the tool output is returned directly.\n",
"\n",
"This can be useful for speeding up response times when you know the tool output is good enough, to avoid the agent re-writing the response, and for ending the reasoning loop."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "This notebook walks through an example where an agent needs to gather information from a user in order to make a restaurant booking."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index-core llama-index-llms-anthropic"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = \"sk-ant-...\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Tools setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from llama_index.core.tools import FunctionTool\n",
"from llama_index.core.bridge.pydantic import BaseModel\n",
"\n",
"# we will store booking under random IDs\n",
"bookings = {}\n",
"\n",
"\n",
"# we will represent and track the state of a booking as a Pydantic model\n",
"class Booking(BaseModel):\n",
" name: Optional[str] = None\n",
" email: Optional[str] = None\n",
" phone: Optional[str] = None\n",
" date: Optional[str] = None\n",
" time: Optional[str] = None\n",
"\n",
"\n",
"def get_booking_state(user_id: str) -> str:\n",
" \"\"\"Get the current state of a booking for a given booking ID.\"\"\"\n",
" try:\n",
" return str(bookings[user_id].dict())\n",
" except:\n",
" return f\"Booking ID {user_id} not found\"\n",
"\n",
"\n",
"def update_booking(user_id: str, property: str, value: str) -> str:\n",
" \"\"\"Update a property of a booking for a given booking ID. Only enter details that are explicitly provided.\"\"\"\n",
" booking = bookings[user_id]\n",
" setattr(booking, property, value)\n",
" return f\"Booking ID {user_id} updated with {property} = {value}\"\n",
"\n",
"\n",
"def create_booking(user_id: str) -> str:\n",
" \"\"\"Create a new booking and return the booking ID.\"\"\"\n",
" bookings[user_id] = Booking()\n",
" return \"Booking created, but not yet confirmed. Please provide your name, email, phone, date, and time.\"\n",
"\n",
"\n",
"def confirm_booking(user_id: str) -> str:\n",
" \"\"\"Confirm a booking for a given booking ID.\"\"\"\n",
" booking = bookings[user_id]\n",
"\n",
" if booking.name is None:\n",
" raise ValueError(\"Please provide your name.\")\n",
"\n",
" if booking.email is None:\n",
" raise ValueError(\"Please provide your email.\")\n",
"\n",
" if booking.phone is None:\n",
" raise ValueError(\"Please provide your phone number.\")\n",
"\n",
" if booking.date is None:\n",
" raise ValueError(\"Please provide the date of your booking.\")\n",
"\n",
" if booking.time is None:\n",
" raise ValueError(\"Please provide the time of your booking.\")\n",
"\n",
" return f\"Booking ID {user_id} confirmed!\"\n",
"\n",
"\n",
"# create tools for each function\n",
"get_booking_state_tool = FunctionTool.from_defaults(fn=get_booking_state)\n",
"update_booking_tool = FunctionTool.from_defaults(fn=update_booking)\n",
"create_booking_tool = FunctionTool.from_defaults(\n",
" fn=create_booking, return_direct=True\n",
")\n",
"confirm_booking_tool = FunctionTool.from_defaults(\n",
" fn=confirm_booking, return_direct=True\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## A user has walked in! Let's help them make a booking"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.llms.anthropic import Anthropic\n",
"from llama_index.core.llms import ChatMessage\n",
"from llama_index.core.agent import FunctionCallingAgent\n",
"\n",
"llm = Anthropic(model=\"claude-3-sonnet-20240229\", temperature=0.1)\n",
"\n",
"user = \"user123\"\n",
"prefix_messages = [\n",
" ChatMessage(\n",
" role=\"system\",\n",
" content=(\n",
" f\"You are now connected to the booking system and helping {user} with making a booking. \"\n",
" \"Only enter details that the user has explicitly provided. \"\n",
" \"Do not make up any details.\"\n",
" ),\n",
" )\n",
"]\n",
"\n",
"agent = FunctionCallingAgent.from_tools(\n",
" tools=[\n",
" get_booking_state_tool,\n",
" update_booking_tool,\n",
" create_booking_tool,\n",
" confirm_booking_tool,\n",
" ],\n",
" llm=llm,\n",
" prefix_messages=prefix_messages,\n",
" max_function_calls=10,\n",
" allow_parallel_tool_calls=False,\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Added user message to memory: Hello! I would like to make a booking, around 5pm?\n",
"=== LLM Response ===\n",
"Okay, let's create a new booking for you. To do that, I'll invoke the create_booking tool with your user ID:\n",
"=== Calling Function ===\n",
"Calling function: create_booking with args: {\"user_id\": \"user123\"}\n",
"=== Function Output ===\n",
"Booking created, but not yet confirmed. Please provide your name, email, phone, date, and time.\n"
]
}
],
"source": [
"response = agent.chat(\"Hello! I would like to make a booking, around 5pm?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Booking created, but not yet confirmed. Please provide your name, email, phone, date, and time.\n"
]
}
],
"source": [
"print(str(response))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "Perfect, we can see the function output was returned directly, with no modification or final LLM call!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Added user message to memory: Sure! My name is Logan, and my email is test@gmail.com\n",
"=== LLM Response ===\n", | |
166142 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NebulaGraph Property Graph Index\n",
"\n",
"NebulaGraph is an open-source distributed graph database built for super large-scale graphs with milliseconds of latency.\n",
"\n",
"If you already have an existing graph, please skip to the end of this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index llama-index-graph-stores-nebula jupyter-nebulagraph"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Docker Setup\n",
"\n",
"To launch NebulaGraph locally, first ensure you have docker installed. Then, you can launch the database with the following docker command.\n",
"\n",
"```bash\n",
"mkdir nebula-docker-compose\n",
"cd nebula-docker-compose\n",
"curl --output docker-compose.yaml https://raw.githubusercontent.com/vesoft-inc/nebula-docker-compose/master/docker-compose-lite.yaml\n",
"docker compose up \n",
"```\n",
"\n",
"After this, you are ready to create your first property graph!\n",
"\n",
"> Other options/details for deploying NebulaGraph can be found in the [docs](https://docs.nebula-graph.io/):\n",
">\n",
"> - [ad-hoc cluster in Google Colab](https://docs.nebula-graph.io/master/4.deployment-and-installation/2.compile-and-install-nebula-graph/8.deploy-nebula-graph-with-lite/).\n",
"> - [Docker Desktop Extension](https://docs.nebula-graph.io/master/2.quick-start/1.quick-start-workflow/).\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1;3;38;2;47;75;124mConnection Pool Created\u001b[0m\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
"Empty DataFrame\n",
"Columns: []\n",
"Index: []"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# load NebulaGraph Jupyter extension to enable %ngql magic\n",
"%load_ext ngql\n",
"# connect to NebulaGraph service\n",
"%ngql --address 127.0.0.1 --port 9669 --user root --password nebula\n",
"# create a graph space(think of a Database Instance) named: llamaindex_nebula_property_graph\n",
"%ngql CREATE SPACE IF NOT EXISTS llamaindex_nebula_property_graph(vid_type=FIXED_STRING(256));"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
"Empty DataFrame\n",
"Columns: []\n",
"Index: []"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# use the graph space, which is similar to \"use database\" in MySQL\n",
"# The space was created in async way, so we need to wait for a while before using it, retry it if failed\n",
"%ngql USE llamaindex_nebula_property_graph;"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Env Setup\n",
"\n",
"We need just a few environment setups to get started."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-proj-...\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import SimpleDirectoryReader\n",
"\n",
"documents = SimpleDirectoryReader(\"./data/paul_graham/\").load_data()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "We choose to use gpt-4o and the local embedding model intfloat/multilingual-e5-large. You can change these to whatever you like by editing the following lines:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-embeddings-huggingface"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import Settings\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
"\n",
"Settings.llm = OpenAI(model=\"gpt-4o\", temperature=0.3)\n",
"Settings.embed_model = HuggingFaceEmbedding(\n",
" model_name=\"intfloat/multilingual-e5-large\"\n",
")\n",
"# Settings.embed_model = OpenAIEmbedding(model_name=\"text-embedding-3-small\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Index Construction"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Prepare property graph store"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [ | |
166381 | "[NodeWithScore(node=TextNode(id_='d18bb1f1-466a-443d-98d9-6217bf71ee5a', embedding=None, metadata={'filename': 'README.md', 'category': 'codebase'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={<NodeRelationship.SOURCE: '1'>: RelatedNodeInfo(node_id='e4c638ce-6757-482e-baed-096574550602', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'filename': 'README.md', 'category': 'codebase'}, hash='3183371414f6a23e9a61e11b45ec45f808b148f9973166cfed62226e3505eb05')}, text='Context\\nLLMs are a phenomenal piece of technology for knowledge generation and reasoning.\\nThey are pre-trained on large amounts of publicly available data.\\nHow do we best augment LLMs with our own private data?\\nWe need a comprehensive toolkit to help perform this data augmentation for LLMs.\\n\\nProposed Solution\\nThat\\'s where LlamaIndex comes in. LlamaIndex is a \"data framework\" to help\\nyou build LLM apps. It provides the following tools:\\n\\nOffers data connectors to ingest your existing data sources and data formats\\n(APIs, PDFs, docs, SQL, etc.)\\nProvides ways to structure your data (indices, graphs) so that this data can be\\neasily used with LLMs.\\nProvides an advanced retrieval/query interface over your data:\\nFeed in any LLM input prompt, get back retrieved context and knowledge-augmented output.\\nAllows easy integrations with your outer application framework\\n(e.g. with LangChain, Flask, Docker, ChatGPT, anything else).\\nLlamaIndex provides tools for both beginner users and advanced users.\\nOur high-level API allows beginner users to use LlamaIndex to ingest and\\nquery their data in 5 lines of code. 
Our lower-level APIs allow advanced users to\\ncustomize and extend any module (data connectors, indices, retrievers, query engines,\\nreranking modules), to fit their needs.', mimetype='text/plain', start_char_idx=1, end_char_idx=1279, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n'), score=0.850998849877966)]\n"
]
}
],
"source": [
"print(\n",
" \"------------- Example Document used to Enrich LLM Context -------------\"\n",
")\n",
"llama_index_example_document = Document.example()\n",
"print(llama_index_example_document)\n",
"\n",
"index = VectorStoreIndex.from_documents([llama_index_example_document])\n",
"\n",
"print(\"\\n------------- Example Query Engine -------------\")\n",
"query_response = index.as_query_engine().query(\"What is llama_index?\")\n",
"print(query_response)\n",
"\n",
"print(\"\\n------------- Example Chat Engine -------------\")\n",
"chat_response = index.as_chat_engine().chat(\n",
" \"What is llama_index?\",\n",
" chat_history=[\n",
" ChatMessage(role=\"system\", content=\"You are an expert on RAG!\")\n",
" ],\n",
")\n",
"print(chat_response)\n",
"\n",
"\n",
"print(\"\\n------------- Example Retriever -------------\")\n",
"retriever_response = index.as_retriever().retrieve(\"What is llama_index?\")\n",
"print(retriever_response)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Log the Index with MLflow\n",
"\n",
"The below code logs a LlamaIndex model with MLflow, allowing you to persist and manage it across different environments. By using MLflow, you can track, version, and reproduce your model reliably. The script logs parameters, an example input, and registers the model under a specific name. The `model_uri` provides a unique identifier for retrieving the model later. This persistence is essential for ensuring consistency and reproducibility in development, testing, and production. Managing the model with MLflow simplifies loading, deployment, and sharing, maintaining an organized workflow.\n",
"\n",
"Key Parameters\n",
"\n",
"* ``engine_type``: defines the pyfunc and spark_udf inference type\n",
    "* ``input_example``: defines the input signature and infers the output signature via a prediction\n",
"* ``registered_model_name``: defines the name of the model in the MLflow model registry"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024/07/24 17:58:27 INFO mlflow.llama_index.serialize_objects: API key(s) will be removed from the global Settings object during serialization to protect against key leakage. At inference time, the key(s) must be passed as environment variables.\n",
"/Users/michael.berk/opt/anaconda3/envs/mlflow-dev/lib/python3.8/site-packages/_distutils_hack/__init__.py:26: UserWarning: Setuptools is replacing distutils.\n",
" warnings.warn(\"Setuptools is replacing distutils.\")\n",
"Successfully registered model 'my_llama_index_vector_store'.\n",
"Created version '1' of model 'my_llama_index_vector_store'.\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "643e7b6936674e469f98d94004f3424a",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading artifacts: 0%| | 0/12 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Unique identifier for the model location for loading: runs:/036936a7ac964f0cb6ab99fa908d6421/llama_index\n"
]
}
],
"source": [
"mlflow.llama_index.autolog() # This is for enabling tracing\n",
"\n",
"with mlflow.start_run() as run:\n",
" mlflow.llama_index.log_model(\n",
" index,\n",
" artifact_path=\"llama_index\",\n",
" engine_type=\"query\", # Defines the pyfunc and spark_udf inference type\n",
" input_example=\"hi\", # Infers signature\n",
" registered_model_name=\"my_llama_index_vector_store\", # Stores an instance in the model registry\n",
" )\n",
"\n",
" run_id = run.info.run_id\n",
" model_uri = f\"runs:/{run_id}/llama_index\"\n",
" print(f\"Unique identifier for the model location for loading: {model_uri}\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load the Index and Perform Inference\n",
"\n",
"The below code demonstrates three core types of inference that can be done with the loaded model.\n",
"\n",
"1. **Load and Perform Inference via LlamaIndex:** This method loads the model using `mlflow.llama_index.load_model` and performs direct querying, chat, or retrieval. It is ideal when you want to leverage the full capabilities of the underlying llama index object.\n",
"2. **Load and Perform Inference via MLflow PyFunc:** This method loads the model using `mlflow.pyfunc.load_model`, enabling model predictions in a generic PyFunc format, with the engine type specified at logging time. It is useful for evaluating the model with `mlflow.evaluate` or deploying the model for serving. \n",
"3. **Load and Perform Inference via MLflow Spark UDF:** This method uses `mlflow.pyfunc.spark_udf` to load the model as a Spark UDF, facilitating distributed inference across large datasets in a Spark DataFrame. It is ideal for handling large-scale data processing and, like with PyFunc inference, only supports the engine type defined when logging.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [ | |
166406 | {
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "2ffc6a2b",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/low_level/oss_ingestion_retrieval.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n"
]
},
{
"cell_type": "markdown",
"id": "dff7db9e-fbf9-4394-9958-35323799a4e3",
"metadata": {},
"source": [
"# Building RAG from Scratch (Open-source only!) \n",
"\n",
"In this tutorial, we show you how to build a data ingestion pipeline into a vector database, and then build a retrieval pipeline from that vector database, from scratch.\n",
"\n",
"Notably, we use a fully open-source stack:\n",
"\n",
"- Sentence Transformers as the embedding model\n",
"- Postgres as the vector store (we support many other [vector stores](https://gpt-index.readthedocs.io/en/stable/module_guides/storing/vector_stores.html) too!)\n",
"- Llama 2 as the LLM (through [llama.cpp](https://github.com/ggerganov/llama.cpp))"
]
},
{
"cell_type": "markdown",
"id": "25764729-40ba-400f-b0f8-08fb9e8bb74a",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"We setup our open-source components.\n",
"1. Sentence Transformers\n",
"2. Llama 2\n",
"3. We initialize postgres and wrap it with our wrappers/abstractions."
]
},
{
"cell_type": "markdown",
"id": "63935557-a11c-4a22-9248-9c746cc89c4c",
"metadata": {},
"source": [
"#### Sentence Transformers"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b7108d3f",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-readers-file pymupdf\n",
"%pip install llama-index-vector-stores-postgres\n",
"%pip install llama-index-embeddings-huggingface\n",
"%pip install llama-index-llms-llama-cpp"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4c08162e-5a48-424c-921f-c9e84a59c72f",
"metadata": {},
"outputs": [],
"source": [
"# sentence transformers\n",
"from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
"\n",
"embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en\")"
]
},
{
"cell_type": "markdown",
"id": "df10089c-917e-4191-a718-0ef7149a6a1e",
"metadata": {},
"source": [
"#### Llama CPP\n",
"\n",
"In this notebook, we use the [`llama-2-chat-13b-ggml`](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML) model, along with the proper prompt formatting. \n",
"\n",
"Check out our [Llama CPP guide](https://gpt-index.readthedocs.io/en/stable/examples/llm/llama_2_llama_cpp.html) for full setup instructions/details."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "85f8a556-9f37-42a3-a88a-f688ad355ee5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: llama-cpp-python in /Users/jerryliu/Programming/gpt_index/.venv/lib/python3.10/site-packages (0.2.7)\n",
"Requirement already satisfied: numpy>=1.20.0 in /Users/jerryliu/Programming/gpt_index/.venv/lib/python3.10/site-packages (from llama-cpp-python) (1.23.5)\n",
"Requirement already satisfied: typing-extensions>=4.5.0 in /Users/jerryliu/Programming/gpt_index/.venv/lib/python3.10/site-packages (from llama-cpp-python) (4.7.1)\n",
"Requirement already satisfied: diskcache>=5.6.1 in /Users/jerryliu/Programming/gpt_index/.venv/lib/python3.10/site-packages (from llama-cpp-python) (5.6.3)\n",
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.3.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.2.1\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
]
}
],
"source": [
"!pip install llama-cpp-python"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3cb975f6-c192-4a26-ae50-e9a319d2a66b",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.llms.llama_cpp import LlamaCPP\n",
"\n",
"# model_url = \"https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/resolve/main/llama-2-13b-chat.ggmlv3.q4_0.bin\"\n",
"model_url = \"https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf\"\n",
"\n",
"llm = LlamaCPP(\n",
" # You can pass in the URL to a GGML model to download it automatically\n",
" model_url=model_url,\n",
" # optionally, you can set the path to a pre-downloaded model instead of model_url\n",
" model_path=None,\n",
" temperature=0.1,\n",
" max_new_tokens=256,\n",
" # llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room\n",
" context_window=3900,\n",
" # kwargs to pass to __call__()\n",
" generate_kwargs={},\n",
" # kwargs to pass to __init__()\n",
" # set to at least 1 to use GPU\n",
" model_kwargs={\"n_gpu_layers\": 1},\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "ba02cfe2-8b51-4e01-a840-d6508c76ade3",
"metadata": {},
"source": [
"#### Initialize Postgres\n",
"\n",
"Using an existing postgres running at localhost, create the database we'll be using.\n",
"\n",
"**NOTE**: Of course there are plenty of other open-source/self-hosted databases you can use! e.g. Chroma, Qdrant, Weaviate, and many more. Take a look at our [vector store guide](https://gpt-index.readthedocs.io/en/stable/module_guides/storing/vector_stores.html).\n",
"\n",
"**NOTE**: You will need to setup postgres on your local system. Here's an example of how to set it up on OSX: https://www.sqlshack.com/setting-up-a-postgresql-database-on-mac/.\n",
"\n", | |
166440 | {
"cells": [
{
"cell_type": "markdown",
"id": "57c676db",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/low_level/ingestion.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"id": "c919f307-07b1-41bd-bc5d-51edd8677983",
"metadata": {},
"source": [
"# Building Data Ingestion from Scratch\n",
"\n",
"In this tutorial, we show you how to build a data ingestion pipeline into a vector database.\n",
"\n",
"We use Pinecone as the vector database.\n",
"\n",
"We will show how to do the following:\n",
"1. How to load in documents.\n",
"2. How to use a text splitter to split documents.\n",
"3. How to **manually** construct nodes from each text chunk.\n",
"4. [Optional] Add metadata to each Node.\n",
"5. How to generate embeddings for each text chunk.\n",
"6. How to insert into a vector database."
]
},
{
"cell_type": "markdown",
"id": "tsHaUeqRpflK",
"metadata": {},
"source": [
"## Pinecone\n",
"\n",
"You will need a [pinecone.io](https://www.pinecone.io/) api key for this tutorial. You can [sign up for free](https://app.pinecone.io/?sessionType=signup) to get a Starter account.\n",
"\n",
"If you create a Starter account, you can name your application anything you like.\n",
"\n",
"Once you have an account, navigate to 'API Keys' in the Pinecone console. You can use the default key or create a new one for this tutorial.\n",
"\n",
"Save your api key and its environment (`gcp_starter` for free accounts). You will need them below."
]
},
{
"cell_type": "markdown",
"id": "92b20306",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7ae74e61",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-embeddings-openai\n",
"%pip install llama-index-vector-stores-pinecone\n",
"%pip install llama-index-llms-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b60e707a",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "markdown",
"id": "22fb9e0a-566b-4f34-b9cf-72193cb51adb",
"metadata": {},
"source": [
"## OpenAI\n",
"\n",
"You will need an [OpenAI](https://openai.com/) api key for this tutorial. Login to your [platform.openai.com](https://platform.openai.com/) account, click on your profile picture in the upper right corner, and choose 'API Keys' from the menu. Create an API key for this tutorial and save it. You will need it below."
]
},
{
"cell_type": "markdown",
"id": "HPwWNeZwgwE8",
"metadata": {},
"source": [
"## Environment\n",
"\n",
"First we add our dependencies."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "CyTVgLfMgmIZ",
"metadata": {},
"outputs": [],
"source": [
"!pip -q install python-dotenv pinecone-client llama-index pymupdf"
]
},
{
"cell_type": "markdown",
"id": "bCwZFn6_iAR1",
"metadata": {},
"source": [
"#### Set Environment Variables\n",
"\n",
"We create a file for our environment variables. Do not commit this file or share it!\n",
"\n",
"Note: Google Colabs will let you create but not open a .env"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "M1l2emfWgjgE",
"metadata": {},
"outputs": [],
"source": [
"dotenv_path = (\n",
" \"env\" # Google Colabs will not let you open a .env, but you can set\n",
")\n",
"with open(dotenv_path, \"w\") as f:\n",
" f.write('PINECONE_API_KEY=\"<your api key>\"\\n')\n",
" f.write('OPENAI_API_KEY=\"<your api key>\"\\n')"
]
},
{
"cell_type": "markdown",
"id": "PWMbn7GooMm5",
"metadata": {},
"source": [
"Set your OpenAI api key, and Pinecone api key and environment in the file we created."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "QOyfIoXAoVGX",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "leZkMBXYiTl-",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(dotenv_path=dotenv_path)"
]
},
{
"cell_type": "markdown",
"id": "bcb486eb-c0b8-40e2-9038-da97aef63139",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"We build an empty Pinecone Index, and define the necessary LlamaIndex wrappers/abstractions so that we can start loading data into Pinecone.\n",
"\n",
"\n",
"Note: Do not save your API keys in the code or add pinecone_env to your repo!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0Izxlt0XkMII",
"metadata": {},
"outputs": [],
"source": [
"from pinecone import Pinecone, Index, ServerlessSpec"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cc739b4d-491f-406d-a0e6-f6b1e8c126dc",
"metadata": {},
"outputs": [],
"source": [
"api_key = os.environ[\"PINECONE_API_KEY\"]\n",
"pc = Pinecone(api_key=api_key)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "Whwu7HqqswIq",
"metadata": {},
"outputs": [],
"source": [
"index_name = \"llamaindex-rag-fs\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "yRKkO4g1sBMl",
"metadata": {},
"outputs": [],
"source": [
"# [Optional] Delete the index before re-running the tutorial.\n",
"# pinecone.delete_index(index_name)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "20ba2f76-29d8-4dc5-b25c-64dcfe9e8d23",
"metadata": {},
"outputs": [],
"source": [
"# dimensions are for text-embedding-ada-002\n",
"if index_name not in pc.list_indexes().names():\n",
" pc.create_index(\n",
" index_name,\n",
" dimension=1536,\n",
" metric=\"euclidean\",\n",
" spec=ServerlessSpec(cloud=\"aws\", region=\"us-east-1\"),\n",
" )"
]
},
{
"cell_type": "code", | |
166462 | "7182e98f-1b8a-4aba-af18-3982b862c794\n",
"2024-05-06 14:00:35.931813\n",
"BaseEmbedding.get_text_embedding_batch-632972aa-3345-49cb-ae2f-46f3166e3afc\n",
"Event type: EmbeddingStartEvent\n",
"{'model_name': 'text-embedding-ada-002', 'embed_batch_size': 100, 'num_workers': None, 'additional_kwargs': {}, 'api_base': 'https://api.openai.com/v1', 'api_version': '', 'max_retries': 10, 'timeout': 60.0, 'default_headers': None, 'reuse_client': True, 'dimensions': None, 'class_name': 'OpenAIEmbedding'}\n",
"-----------------------\n",
"-----------------------\n",
"ba86e41f-cadf-4f1f-8908-8ee90404d668\n",
"2024-05-06 14:00:36.256237\n",
"BaseEmbedding.get_text_embedding_batch-632972aa-3345-49cb-ae2f-46f3166e3afc\n",
"Event type: EmbeddingEndEvent\n",
"['filename: README.md\\ncategory: codebase\\n\\nContext\\nLLMs are a phenomenal piece of technology for knowledge generation and reasoning.\\nThey are pre-trained on large amounts of publicly available data.\\nHow do we best augment LLMs with our own private data?\\nWe need a comprehensive toolkit to help perform this data augmentation for LLMs.\\n\\nProposed Solution\\nThat\\'s where LlamaIndex comes in. LlamaIndex is a \"data framework\" to help\\nyou build LLM apps. It provides the following tools:\\n\\nOffers data connectors to ingest your existing data sources and data formats\\n(APIs, PDFs, docs, SQL, etc.)\\nProvides ways to structure your data (indices, graphs) so that this data can be\\neasily used with LLMs.\\nProvides an advanced retrieval/query interface over your data:\\nFeed in any LLM input prompt, get back retrieved context and knowledge-augmented output.\\nAllows easy integrations with your outer application framework\\n(e.g. with LangChain, Flask, Docker, ChatGPT, anything else).\\nLlamaIndex provides tools for both beginner users and advanced users.\\nOur high-level API allows beginner users to use LlamaIndex to ingest and\\nquery their data in 5 lines of code. Our lower-level APIs allow advanced users to\\ncustomize and extend any module (data connectors, indices, retrievers, query engines,\\nreranking modules), to fit their needs.']\n",
"[-0.005768016912043095, 0.02242799662053585, -0.020438531413674355, -0.040361806750297546, -0.01757599227130413]\n",
"-----------------------\n",
"-----------------------\n",
"06935377-f1e4-4fb9-b866-86f7520dfe2b\n",
"2024-05-06 14:00:36.305798\n",
"BaseQueryEngine.query-a766ae6c-6445-43b4-b1fc-9c29bae99556\n",
"Event type: QueryStartEvent\n",
"Tell me about LLMs?\n",
"-----------------------\n",
"-----------------------\n",
"62608f4f-67a1-4e2c-a653-24a4430529bb\n",
"2024-05-06 14:00:36.305998\n",
"BaseRetriever.retrieve-4e25a2a3-43a9-45e3-a7b9-59f4d54e8f00\n",
"Event type: RetrievalStartEvent\n",
"Tell me about LLMs?\n",
"-----------------------\n",
"-----------------------\n",
"e984c840-919b-4dc7-943d-5c49fbff48b8\n",
"2024-05-06 14:00:36.306265\n",
"BaseEmbedding.get_query_embedding-d30934f4-7bd2-4425-beda-12b5f55bc38b\n",
"Event type: EmbeddingStartEvent\n",
"{'model_name': 'text-embedding-ada-002', 'embed_batch_size': 100, 'num_workers': None, 'additional_kwargs': {}, 'api_base': 'https://api.openai.com/v1', 'api_version': '', 'max_retries': 10, 'timeout': 60.0, 'default_headers': None, 'reuse_client': True, 'dimensions': None, 'class_name': 'OpenAIEmbedding'}\n",
"-----------------------\n",
"-----------------------\n",
"c09fa993-a892-4efe-9f1b-7238ff6e5c62\n",
"2024-05-06 14:00:36.481459\n",
"BaseEmbedding.get_query_embedding-d30934f4-7bd2-4425-beda-12b5f55bc38b\n",
"Event type: EmbeddingEndEvent\n",
"['Tell me about LLMs?']\n",
"[0.00793155562132597, 0.011421983130276203, -0.010342259891331196, -0.03294854983687401, -0.03647972270846367]\n",
"-----------------------\n",
"-----------------------\n",
"b076d239-628d-4b4c-94ed-25aa2ca4b02b\n",
"2024-05-06 14:00:36.484080\n",
"BaseRetriever.retrieve-4e25a2a3-43a9-45e3-a7b9-59f4d54e8f00\n",
"Event type: RetrievalEndEvent\n",
"Tell me about LLMs?\n",
"[NodeWithScore(node=TextNode(id_='8de2b6b2-3fda-4f9b-95a8-a3ced6cfb0e5', embedding=None, metadata={'filename': 'README.md', 'category': 'codebase'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={<NodeRelationship.SOURCE: '1'>: RelatedNodeInfo(node_id='29e2bc8f-b62c-4752-b5eb-11346c5cbe50', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'filename': 'README.md', 'category': 'codebase'}, hash='3183371414f6a23e9a61e11b45ec45f808b148f9973166cfed62226e3505eb05')}, text='Context\\nLLMs are a phenomenal piece of technology for knowledge generation and reasoning.\\nThey are pre-trained on large amounts of publicly available data.\\nHow do we best augment LLMs with our own private data?\\nWe need a comprehensive toolkit to help perform this data augmentation for LLMs.\\n\\nProposed Solution\\nThat\\'s where LlamaIndex comes in. LlamaIndex is a \"data framework\" to help\\nyou build LLM apps. It provides the following tools:\\n\\nOffers data connectors to ingest your existing data sources and data formats\\n(APIs, PDFs, docs, SQL, etc.)\\nProvides ways to structure your data (indices, graphs) so that this data can be\\neasily used with LLMs.\\nProvides an advanced retrieval/query interface over your data:\\nFeed in any LLM input prompt, get back retrieved context and knowledge-augmented output.\\nAllows easy integrations with your outer application framework\\n(e.g. with LangChain, Flask, Docker, ChatGPT, anything else).\\nLlamaIndex provides tools for both beginner users and advanced users.\\nOur high-level API allows beginner users to use LlamaIndex to ingest and\\nquery their data in 5 lines of code. 
Our lower-level APIs allow advanced users to\\ncustomize and extend any module (data connectors, indices, retrievers, query engines,\\nreranking modules), to fit their needs.', start_char_idx=1, end_char_idx=1279, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n'), score=0.807312731672428)]\n",
"-----------------------\n",
"-----------------------\n",
"5e3289be-c597-48e7-ad3f-787722b766ea\n",
"2024-05-06 14:00:36.484436\n", | |
166479 | {
"cells": [
{
"cell_type": "markdown",
"id": "dd006f66",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/node_parsers/topic_parser.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"id": "d617ade9-796f-431f-86ff-6b865e0eb007",
"metadata": {},
"source": [
"# TopicNodeParser\n",
"\n",
"[MedGraphRAG](https://arxiv.org/html/2408.04187) aims to improve the capabilities of LLMs in the medical domain by generating evidence-based results through a novel graph-based Retrieval-Augmented Generation framework, improving safety and reliability in handling private medical data.\n",
"\n",
"`TopicNodeParser` implements an approximate version of the chunking technique described in the paper.\n",
"\n",
"Here is the technique as outlined in the paper:\n",
"\n",
"```\n",
"Large medical documents often contain multiple themes or diverse content. To process these effectively, we first segment the document into data chunks that conform to the context limitations of Large Language Models (LLMs). Traditional methods such as chunking based on token size or fixed characters typically fail to detect subtle shifts in topics accurately. Consequently, these chunks may not fully capture the intended context, leading to a loss in the richness of meaning.\n",
"\n",
"To enhance accuracy, we adopt a mixed method of character separation coupled with topic-based segmentation. Specifically, we utilize static characters (line break symbols) to isolate individual paragraphs within the document. Following this, we apply a derived form of the text for semantic chunking. Our approach includes the use of proposition transfer, which extracts standalone statements from a raw text Chen et al. (2023). Through proposition transfer, each paragraph is transformed into self-sustaining statements. We then conduct a sequential analysis of the document to assess each proposition, deciding whether it should merge with an existing chunk or initiate a new one. This decision is made via a zero-shot approach by an LLM. To reduce noise generated by sequential processing, we implement a sliding window technique, managing five paragraphs at a time. We continuously adjust the window by removing the first paragraph and adding the next, maintaining focus on topic consistency. We set a hard threshold that the longest chunk cannot excess the context length limitation of LLM. After chunking the document, we construct graph on each individual of the data chunk.\n",
"```\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d1c5118",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index llama-index-node-parser-topic"
]
},
{
"cell_type": "markdown",
"id": "12dcc784-f2c6-4c37-8771-57a921ff2eab",
"metadata": {},
"source": [
"## Setup Data\n",
"\n",
"Here we consider a sample text.\n",
"\n",
"Note: The propositions were created by an LLM, which might lead to longer processing times when creating nodes. Exercise caution while experimenting."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7fdcd874",
"metadata": {},
"outputs": [],
"source": [
"text = \"\"\"In this paper, we introduce a novel graph RAG method for applying LLMs to the medical domain, which we refer to as Medical Graph RAG (MedRAG). This technique improves LLM performance in the medical domain by response queries with grounded source citations and clear interpretations of medical terminology, boosting the transparency and interpretability of the results. This approach involves a three-tier hierarchical graph construction method. Initially, we use documents provided by users as our top-level source to extract entities. These entities are then linked to a second level consisting of more basic entities previously abstracted from credible medical books and papers. Subsequently, these entities are connected to a third level—the fundamental medical dictionary graph—that provides detailed explanations of each medical term and their semantic relationships. We then construct a comprehensive graph at the highest level by linking entities based on their content and hierarchical connections. This method ensures that the knowledge can be traced back to its sources and the results are factually accurate.\n",
"\n",
"To respond to user queries, we implement a U-retrieve strategy that combines top-down retrieval with bottom-up response generation. The process begins by structuring the query using predefined medical tags and indexing them through the graphs in a top-down manner. The system then generates responses based on these queries, pulling from meta-graphs—nodes retrieved along with their TopK related nodes and relationships—and summarizing the information into a detailed response. This technique maintains a balance between global context awareness and the contextual limitations inherent in LLMs.\n",
"\n",
"Our medical graph RAG provides Intrinsic source citation can enhance LLM transparency, interpretability, and verifiability. The results provides the provenance, or source grounding information, as it generates each response, and demonstrates that an answer is grounded in the dataset. Having the cited source for each assertion readily available also enables a human user to quickly and accurately audit the LLM’s output directly against the original source material. It is super useful in the field of medicine that security is very important, and each of the reasoning should be evidence-based. By using such a method, we construct an evidence-based Medical LLM that the clinician could easiely check the source of the reasoning and calibrate the model response to ensure the safty usage of llm in the clinical senarios.\n",
"\n",
"To evaluate our medical graph RAG, we implemented the method on several popular open and closed-source LLMs, including ChatGPT OpenAI (2023a) and LLaMA Touvron et al. (2023), testing them across mainstream medical Q&A benchmarks such as PubMedQA Jin et al. (2019), MedMCQA Pal et al. (2022), and USMLE Kung et al. (2023). For the RAG process, we supplied a comprehensive medical dictionary as the foundational knowledge layer, the UMLS medical knowledge graph Lindberg et al. (1993) as the foundamental layer detailing semantic relationships, and a curated MedC-K dataset Wu et al. (2023) —comprising the latest medical papers and books—as the intermediate level of data to simulate user-provided private data. Our experiments demonstrate that our model significantly enhances the performance of general-purpose LLMs on medical questions. Remarkably, it even surpasses many fine-tuned or specially trained LLMs on medical corpora, solely using the RAG approach without additional training.\n",
"\"\"\"\n",
"\n",
"from llama_index.core import Document\n",
"\n",
"documents = [Document(text=text)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "717bd52c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"In this paper, we introduce a novel graph RAG method for applying LLMs to the medical domain, which we refer to as Medical Graph RAG (MedRAG). This technique improves LLM performance in the medical domain by response queries with grounded source citations and clear interpretations of medical terminology, boosting the transparency and interpretability of the results. This approach involves a three-tier hierarchical graph construction method. Initially, we use documents provided by users as our top-level source to extract entities. These entities are then linked to a second level consisting of more basic entities previously abstracted from credible medical books and papers. Subsequently, these entities are connected to a third level—the fundamental medical dictionary graph—that provides detailed explanations of each medical term and their semantic relationships. We then construct a comprehensive graph at the highest level by linking entities based on their content and hierarchical connections. This method ensures that the knowledge can be traced back to its sources and the results are factually accurate.\n",
"\n",
"To respond to user queries, we implement a U-retrieve strategy that combines top-down retrieval with bottom-up response generation. The process begins by structuring the query using predefined medical tags and indexing them through the graphs in a top-down manner. The system then generates responses based on these queries, pulling from meta-graphs—nodes retrieved along with their TopK related nodes and relationships—and summarizing the information into a detailed response. This technique maintains a balance between global context awareness and the contextual limitations inherent in LLMs.\n",
"\n", | |
166495 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/workflow/router_query_engine.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Router Query Engine\n",
"\n",
"`RouterQueryEngine` chooses the most appropriate query engine from multiple options to process a given query.\n",
"\n",
"This notebook walks through implementation of Router Query Engine, using workflows.\n",
"\n",
"Specifically we will implement [RouterQueryEngine](https://docs.llamaindex.ai/en/stable/examples/query_engine/RouterQueryEngine/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install -U llama-index"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-..\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Since workflows are async first, this all runs fine in a notebook. If you were running in your own code, you would want to use `asyncio.run()` to start an async event loop if one isn't already running.\n",
"\n",
"```python\n",
"async def main():\n",
" <async code>\n",
"\n",
"if __name__ == \"__main__\":\n",
" import asyncio\n",
" asyncio.run(main())\n",
"```"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define Events"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.workflow import Event\n",
"from llama_index.core.base.base_selector import SelectorResult\n",
"from typing import Dict, List, Any\n",
"from llama_index.core.base.response.schema import RESPONSE_TYPE\n",
"\n",
"\n",
"class QueryEngineSelectionEvent(Event):\n",
" \"\"\"Result of selecting the query engine tools.\"\"\"\n",
"\n",
" selected_query_engines: SelectorResult\n",
"\n",
"\n",
"class SynthesizeEvent(Event):\n",
" \"\"\"Event for synthesizing the response from different query engines.\"\"\"\n",
"\n",
" result: List[RESPONSE_TYPE]\n",
" selected_query_engines: SelectorResult"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## The Workflow\n",
"\n",
"`selector:`\n",
"\n",
"1. It takes a StartEvent as input and returns a QueryEngineSelectionEvent.\n",
"2. The `LLMSingleSelector`/ `PydanticSingleSelector`/ `PydanticMultiSelector` will select one/ multiple query engine tools.\n",
"\n",
"`generate_responses:`\n",
"\n",
"This function uses the selected query engines to generate responses and returns SynthesizeEvent.\n",
"\n",
"`synthesize_responses:`\n",
"\n",
"This function combines the generated responses and synthesizes the final response if multiple query engines are selected, otherwise returns the single generated response.\n",
"\n",
"\n",
"The steps will use the built-in `StartEvent` and `StopEvent` events.\n",
"\n",
"With our events defined, we can construct our workflow and steps. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.workflow import (\n",
" Context,\n",
" Workflow,\n",
" StartEvent,\n",
" StopEvent,\n",
" step,\n",
")\n",
"\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.core.selectors.utils import get_selector_from_llm\n",
"from llama_index.core.base.response.schema import (\n",
" PydanticResponse,\n",
" Response,\n",
" AsyncStreamingResponse,\n",
")\n",
"from llama_index.core.bridge.pydantic import BaseModel\n",
"from llama_index.core.response_synthesizers import TreeSummarize\n",
"from llama_index.core.schema import QueryBundle\n",
"from llama_index.core import Settings\n",
"\n",
"from IPython.display import Markdown, display\n",
"import asyncio\n",
"\n",
"\n",
"class RouterQueryEngineWorkflow(Workflow):\n",
" @step\n",
" async def selector(\n",
" self, ctx: Context, ev: StartEvent\n",
" ) -> QueryEngineSelectionEvent:\n",
" \"\"\"\n",
" Selects a single/ multiple query engines based on the query.\n",
" \"\"\"\n",
"\n",
" await ctx.set(\"query\", ev.get(\"query\"))\n",
" await ctx.set(\"llm\", ev.get(\"llm\"))\n",
" await ctx.set(\"query_engine_tools\", ev.get(\"query_engine_tools\"))\n",
" await ctx.set(\"summarizer\", ev.get(\"summarizer\"))\n",
"\n",
" llm = Settings.llm\n",
" select_multiple_query_engines = ev.get(\"select_multi\")\n",
" query = ev.get(\"query\")\n",
" query_engine_tools = ev.get(\"query_engine_tools\")\n",
"\n",
" selector = get_selector_from_llm(\n",
" llm, is_multi=select_multiple_query_engines\n",
" )\n",
"\n",
" query_engines_metadata = [\n",
" query_engine.metadata for query_engine in query_engine_tools\n",
" ]\n",
"\n",
" selected_query_engines = await selector.aselect(\n",
" query_engines_metadata, query\n",
" )\n",
"\n",
" return QueryEngineSelectionEvent(\n",
" selected_query_engines=selected_query_engines\n",
" )\n",
"\n",
" @step\n",
" async def generate_responses(\n",
" self, ctx: Context, ev: QueryEngineSelectionEvent\n",
" ) -> SynthesizeEvent:\n",
" \"\"\"Generate the responses from the selected query engines.\"\"\"\n",
"\n",
" query = await ctx.get(\"query\", default=None)\n",
" selected_query_engines = ev.selected_query_engines\n",
" query_engine_tools = await ctx.get(\"query_engine_tools\")\n",
"\n",
" query_engines = [engine.query_engine for engine in query_engine_tools]\n",
"\n",
" print(\n",
" f\"number of selected query engines: {len(selected_query_engines.selections)}\"\n",
" )\n",
"\n",
" if len(selected_query_engines.selections) > 1:\n",
" tasks = []\n",
" for selected_query_engine in selected_query_engines.selections:\n",
" print(\n",
" f\"Selected query engine: {selected_query_engine.index}: {selected_query_engine.reason}\"\n",
" )\n",
" query_engine = query_engines[selected_query_engine.index]\n",
" tasks.append(query_engine.aquery(query))\n",
"\n",
" response_generated = await asyncio.gather(*tasks)\n",
"\n",
" else:\n",
" query_engine = query_engines[\n",
" selected_query_engines.selections[0].index\n",
" ]\n",
"\n",
" print(\n",
"            f\"Selected query engine: {selected_query_engines.selections[0].index}: {selected_query_engines.selections[0].reason}\"\n",
" )\n",
"\n",
" response_generated = [await query_engine.aquery(query)]\n",
"\n",
" return SynthesizeEvent(\n",
" result=response_generated,\n", | |
166694 | {
"cells": [
{
"cell_type": "markdown",
"id": "8ea05db5-944c-4bad-80a6-54841ccc0d42",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/multi_modal/llava_multi_modal_tesla_10q.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n",
"# Retrieval-Augmented Image Captioning\n",
"\n",
"In this example, we show how to leverage [LLaVa + Replicate](https://replicate.com/yorickvp/llava-13b) for image understanding/captioning and retrieve relevant unstructured text and embedded tables from Tesla 10K file according to the image understanding.\n",
"\n",
"1. LlaVa can provide image understanding based on user prompt.\n",
"2. We use Unstructured to parse out the tables, and use LlamaIndex recursive retrieval to index/retrieve tables and texts.\n",
"3. We can leverage the image understanding from Step 1 to retrieve relevant information from knowledge base generated by Step 2 (which is indexed by LlamaIndex)\n",
"\n",
"Context for LLaVA: Large Language and Vision Assistant\n",
"* [Website](https://llava-vl.github.io/)\n",
"* [Paper](https://arxiv.org/abs/2304.08485)\n",
"* [Github](https://github.com/haotian-liu/LLaVA)\n",
"* LLaVA is now supported in llama.cpp with 4-bit / 5-bit quantization support: [See here.](https://github.com/ggerganov/llama.cpp/pull/3436) [Deprecated]\n",
"* LLaVA 13b is now supported in Replicate: [See here.](https://replicate.com/yorickvp/llava-13b)\n",
"\n",
"For LlamaIndex:\n",
"LlaVa+Replicate enables us to run image understanding locally and combine the multi-modal knowledge with our RAG knowledge base system.\n",
"\n",
"TODO:\n",
"Waiting for [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) supporting LlaVa model in python wrapper.\n",
"So LlamaIndex can leverage `LlamaCPP` class for serving LlaVa model directly/locally."
]
},
{
"cell_type": "markdown",
"id": "e6811a8c",
"metadata": {},
"source": [
"## Using Replicate serving LLaVa model through LlamaIndex\n"
]
},
{
"cell_type": "markdown",
"id": "2dc27373",
"metadata": {},
"source": [
"### Build and Run LLaVa models locally through Llama.cpp (Deprecated)\n",
"\n",
"1. git clone [https://github.com/ggerganov/llama.cpp.git](https://github.com/ggerganov/llama.cpp.git)\n",
"2. `cd llama.cpp`. Checkout llama.cpp repo for more details.\n",
"3. `make`\n",
"4. Download Llava models including `ggml-model-*` and `mmproj-model-*` from [this Hugging Face repo](https://huggingface.co/mys/ggml_llava-v1.5-7b/tree/main). Please select one model based on your own local configuration\n",
"5. `./llava` for checking whether llava is running locally"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c13f2559",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-readers-file\n",
"%pip install llama-index-multi-modal-llms-replicate"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "06cbe141-1780-47df-95e5-91ebde44f778",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"UsageError: Line magic function `%` not found.\n"
]
}
],
"source": [
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f72eaf19",
"metadata": {},
"outputs": [],
"source": [
"!pip install unstructured"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8f6499c7-138b-4849-9163-5fa2e3f373c7",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING: CPU random generator seem to be failing, disabling hardware random number generation\n",
"WARNING: RDRND generated: 0xffffffff 0xffffffff 0xffffffff 0xffffffff\n"
]
}
],
"source": [
"from unstructured.partition.html import partition_html\n",
"import pandas as pd\n",
"\n",
"pd.set_option(\"display.max_rows\", None)\n",
"pd.set_option(\"display.max_columns\", None)\n",
"pd.set_option(\"display.width\", None)\n",
"pd.set_option(\"display.max_colwidth\", None)"
]
},
{
"cell_type": "markdown",
"id": "1d3b4020-7e57-4392-ba29-52a14debd822",
"metadata": {},
"source": [
"## Perform Data Extraction from Tesla 10K file\n",
"\n",
"\n",
"In these sections we use Unstructured to parse out the table and non-table elements."
]
},
{
"cell_type": "markdown",
"id": "2249e49b-fea3-424a-9d3a-955c968899a6",
"metadata": {},
"source": [
"### Extract Elements\n",
"\n",
"We use Unstructured to extract table and non-table elements from the 10-K filing."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f416d23-42ba-4ce7-8d10-28e309f591c2",
"metadata": {},
"outputs": [],
"source": [
"!wget \"https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1\" -O tesla_2021_10k.htm\n",
"!wget \"https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf\" -O shanghai.jpg\n",
"!wget \"https://docs.google.com/uc?export=download&id=1PDVCf_CzLWXNnNoRV8CFgoJxv6U0sHAO\" -O tesla_supercharger.jpg"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6e8cbbfd-38df-4499-9bb2-36efdeeed42a",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.readers.file import FlatReader\n",
"from pathlib import Path\n",
"\n",
"reader = FlatReader()\n",
"docs_2021 = reader.load_data(Path(\"tesla_2021_10k.htm\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4765017f-cd26-4d40-8154-3f8c1619d46f",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.node_parser import UnstructuredElementNodeParser\n",
"\n",
"node_parser = UnstructuredElementNodeParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5ee2897f",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n", | |
166866 | "cell_type": "code",
"execution_count": null,
"id": "958515ad-af10-4163-89bb-0ceb12ca48d8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The potential risks associated with the use of Llama 2, as mentioned in the context, include the generation of misinformation and the retrieval of information about topics such as bioterrorism or cybercrime. The models have been tuned to avoid these topics and diminish any capabilities they might have offered for those use cases. However, there is a possibility that the safety tuning of the models may go too far, resulting in an overly cautious approach where the model declines certain requests or responds with too many safety details. Users of Llama 2 and Llama 2-Chat need to be cautious and take extra steps in tuning and deployment to ensure responsible use.\n"
]
}
],
"source": [
"response = query_engine.query(query_str)\n",
"print(str(response))"
]
},
{
"cell_type": "markdown",
"id": "25e672a6-ee16-49f7-a166-2a8a9a298936",
"metadata": {},
"source": [
"## Viewing/Customizing Prompts\n",
"\n",
"First, let's take a look at the query engine prompts, and see how we can customize it."
]
},
{
"cell_type": "markdown",
"id": "4441d2d4-b719-4737-8dac-7696b0fafba4",
"metadata": {},
"source": [
"### View Prompts"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "02253e5f-b2fc-4afd-bfc1-f4e55d78352b",
"metadata": {},
"outputs": [],
"source": [
"# define prompt viewing function\n",
"def display_prompt_dict(prompts_dict):\n",
" for k, p in prompts_dict.items():\n",
" text_md = f\"**Prompt Key**: {k}<br>\" f\"**Text:** <br>\"\n",
" display(Markdown(text_md))\n",
" print(p.get_template())\n",
" display(Markdown(\"<br><br>\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1d88f692-7017-4ab8-a2ef-6dd8d8b132c5",
"metadata": {},
"outputs": [],
"source": [
"prompts_dict = query_engine.get_prompts()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2463322-db25-4a9e-9560-b89a34706a72",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**Prompt Key**: response_synthesizer:text_qa_template<br>**Text:** <br>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Context information is below.\n",
"---------------------\n",
"{context_str}\n",
"---------------------\n",
"Given the context information and not prior knowledge, answer the query.\n",
"Query: {query_str}\n",
"Answer: \n"
]
},
{
"data": {
"text/markdown": [
"<br><br>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"**Prompt Key**: response_synthesizer:refine_template<br>**Text:** <br>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"The original query is as follows: {query_str}\n",
"We have provided an existing answer: {existing_answer}\n",
"We have the opportunity to refine the existing answer (only if needed) with some more context below.\n",
"------------\n",
"{context_msg}\n",
"------------\n",
"Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\n",
"Refined Answer: \n"
]
},
{
"data": {
"text/markdown": [
"<br><br>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_prompt_dict(prompts_dict)"
]
},
{
"cell_type": "markdown",
"id": "ecf5703b-4075-4abe-85d3-83793801a78b",
"metadata": {},
"source": [
"### Customize Prompts\n",
"\n",
"What if we want to do something different than our standard question-answering prompts?\n",
"\n",
"Let's try out the RAG prompt from [LangchainHub](https://smith.langchain.com/hub/rlm/rag-prompt)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f552f06-c72a-4cf4-a131-c694f4908d67",
"metadata": {},
"outputs": [],
"source": [
"# to do this, you need to use the langchain object\n",
"\n",
"from langchain import hub\n",
"\n",
"langchain_prompt = hub.pull(\"rlm/rag-prompt\")"
]
},
{
"cell_type": "markdown",
"id": "912d7e0b-1a6d-4b46-ab2c-8e9079e2532f",
"metadata": {},
"source": [
"One catch is that the template variables in the prompt are different than what's expected by our synthesizer in the query engine:\n",
"- the prompt uses `context` and `question`,\n",
"- we expect `context_str` and `query_str`\n",
"\n",
"This is not a problem! Let's add our template variable mappings to map variables. We use our `LangchainPromptTemplate` to map to LangChain prompts."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ef2bfc3-22c1-41c8-92b9-01914c9bb1d5",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core.prompts import LangchainPromptTemplate\n",
"\n",
"lc_prompt_tmpl = LangchainPromptTemplate(\n",
" template=langchain_prompt,\n",
" template_var_mappings={\"query_str\": \"question\", \"context_str\": \"context\"},\n",
")\n",
"\n",
"query_engine.update_prompts(\n",
" {\"response_synthesizer:text_qa_template\": lc_prompt_tmpl}\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ac909c71-6a63-4923-af0a-406b08ec7731",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**Prompt Key**: response_synthesizer:text_qa_template<br>**Text:** <br>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"input_variables=['question', 'context'] messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['question', 'context'], template=\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\\nQuestion: {question} \\nContext: {context} \\nAnswer:\"))]\n"
] | |
166996 | {
"cells": [
{
"cell_type": "markdown",
"id": "23e5dc2d",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/chat_engine/chat_engine_openai.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"id": "4d3e1610-942d-458e-8379-ebb1fe88ac2c",
"metadata": {},
"source": [
"# Chat Engine - OpenAI Agent Mode"
]
},
{
"cell_type": "markdown",
"id": "3a240df6",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "db0f97f4",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bacff25a",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "e8c1c82b",
"metadata": {},
"source": [
"## Download Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4ad2c64",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--2023-11-20 14:52:58-- https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.110.133, 185.199.108.133, 185.199.109.133, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.110.133|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 75042 (73K) [text/plain]\n",
"Saving to: ‘data/paul_graham/paul_graham_essay.txt’\n",
"\n",
"data/paul_graham/pa 100%[===================>] 73.28K --.-KB/s in 0.02s \n",
"\n",
"2023-11-20 14:52:58 (2.86 MB/s) - ‘data/paul_graham/paul_graham_essay.txt’ saved [75042/75042]\n",
"\n"
]
}
],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"cell_type": "markdown",
"id": "7d3cb595-1bd4-446e-93ba-6e3cfc1d4a29",
"metadata": {},
"source": [
"### Get started in 5 lines of code"
]
},
{
"cell_type": "markdown",
"id": "941f4d3a-b84c-409d-b371-85c33ab7b68f",
"metadata": {},
"source": [
"Load data and build index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a258574-a4d1-42cc-9f1a-bc6f5d4c6a37",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
"from llama_index.llms.openai import OpenAI\n",
"\n",
"# Necessary to use the latest OpenAI models that support function calling API\n",
"llm = OpenAI(model=\"gpt-3.5-turbo-0613\")\n",
"data = SimpleDirectoryReader(input_dir=\"./data/paul_graham/\").load_data()\n",
"index = VectorStoreIndex.from_documents(data)"
]
},
{
"cell_type": "markdown",
"id": "9e0dc626-c877-422f-913a-afd3d3c8cdc8",
"metadata": {},
"source": [
"Configure chat engine"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "37717d64-851d-46c2-b64c-2f5efaaa37f9",
"metadata": {},
"outputs": [],
"source": [
"chat_engine = index.as_chat_engine(chat_mode=\"openai\", llm=llm, verbose=True)"
]
},
{
"cell_type": "markdown",
"id": "d5b86747-22ca-4626-9df7-ff123ac57883",
"metadata": {},
"source": [
"Chat with your data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f09367b-375a-47d3-b0b4-0753b52d834d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"STARTING TURN 1\n",
"---------------\n",
"\n",
"Hello! How can I assist you today?\n"
]
}
],
"source": [
"response = chat_engine.chat(\"Hi\")\n",
"print(response)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5b4b6686-21cd-4974-b767-4ebad4cefb36",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"STARTING TURN 1\n",
"---------------\n",
"\n",
"=== Calling Function ===\n",
"Calling function: query_engine_tool with args: {\n",
" \"input\": \"Who did Paul Graham hand over YC to?\"\n",
"}\n",
"Got output: Paul Graham handed over YC to Sam Altman.\n",
"========================\n",
"\n",
"STARTING TURN 2\n",
"---------------\n",
"\n",
"Paul Graham handed over Y Combinator (YC) to Sam Altman.\n"
]
}
],
"source": [
"response = chat_engine.chat(\n",
" \"Use the tool to answer: Who did Paul Graham hand over YC to?\"\n",
")\n",
"print(response)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "774ad72e-8d9d-48c4-b50b-9eac3b21839e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"STARTING TURN 1\n",
"---------------\n",
"\n",
"=== Calling Function ===\n",
"Calling function: query_engine_tool with args: {\n",
" \"input\": \"Who did Paul Graham hand over YC to?\"\n",
"}\n",
"Got output: Paul Graham handed over YC to Sam Altman.\n",
"========================\n",
"\n",
"STARTING TURN 2\n",
"---------------\n",
"\n",
"\n"
]
}
],
"source": [
"response = chat_engine.stream_chat(\n",
" \"Use the tool to answer: Who did Paul Graham hand over YC to?\"\n",
")\n",
"print(response)"
]
},
{
"cell_type": "markdown", | |
167000 | {
"cells": [
{
"cell_type": "markdown",
"id": "d89ae951",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/chat_engine/chat_engine_best.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"id": "f59565a0-62c0-4048-8a7c-e60fba161cd2",
"metadata": {},
"source": [
"# Chat Engine - Best Mode"
]
},
{
"cell_type": "markdown",
"id": "433ea1f0-86e8-4912-8db8-dfe3d6a00b6d",
"metadata": {},
"source": [
"The default chat engine mode is \"best\", which uses the \"openai\" mode if you are using an OpenAI model that supports the latest function calling API, otherwise uses the \"react\" mode"
]
},
{
"cell_type": "markdown",
"id": "3a29935d",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "97eea6c2",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-anthropic\n",
"%pip install llama-index-llms-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6733b9ad",
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "38a1e5ab",
"metadata": {},
"source": [
"## Download Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4950aa4a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--2024-01-27 12:15:55-- https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt\n",
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 2606:50c0:8001::154, 2606:50c0:8002::154, 2606:50c0:8003::154, ...\n",
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|2606:50c0:8001::154|:443... connected.\n",
"HTTP request sent, awaiting response... 200 OK\n",
"Length: 75042 (73K) [text/plain]\n",
"Saving to: ‘data/paul_graham/paul_graham_essay.txt’\n",
"\n",
"data/paul_graham/pa 100%[===================>] 73.28K --.-KB/s in 0.008s \n",
"\n",
"2024-01-27 12:15:55 (9.38 MB/s) - ‘data/paul_graham/paul_graham_essay.txt’ saved [75042/75042]\n",
"\n"
]
}
],
"source": [
"!mkdir -p 'data/paul_graham/'\n",
"!wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'"
]
},
{
"cell_type": "markdown",
"id": "9b036854-2947-4ce9-b662-a17494f659b0",
"metadata": {},
"source": [
"### Get started in 5 lines of code"
]
},
{
"cell_type": "markdown",
"id": "600cf600-c0d4-4f15-9424-0a2b07c34e77",
"metadata": {},
"source": [
"Load data and build index"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e633c998-2d63-41c5-a678-46d7683e324a",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.llms.anthropic import Anthropic\n",
"\n",
"llm = OpenAI(model=\"gpt-4\")\n",
"data = SimpleDirectoryReader(input_dir=\"./data/paul_graham/\").load_data()\n",
"index = VectorStoreIndex.from_documents(data)"
]
},
{
"cell_type": "markdown",
"id": "6bba0968-611f-465b-b98e-73eb8a5bc4e4",
"metadata": {},
"source": [
"Configure chat engine"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "938190e1-bb7c-4dda-895e-f8351026f8d9",
"metadata": {},
"outputs": [],
"source": [
"chat_engine = index.as_chat_engine(chat_mode=\"best\", llm=llm, verbose=True)"
]
},
{
"cell_type": "markdown",
"id": "b13bb12d-b583-4f35-bede-6d95a023a2fd",
"metadata": {},
"source": [
"Chat with your data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "301220a3-0369-4104-bbbb-7bd084b793fc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Added user message to memory: What are the first programs Paul Graham tried writing?\n",
"=== Calling Function ===\n",
"Calling function: query_engine_tool with args: {\n",
" \"input\": \"What are the first programs Paul Graham tried writing?\"\n",
"}\n",
"Got output: The first programs Paul Graham tried writing were on the IBM 1401 that their school district used for what was then called \"data processing.\" The language he used was an early version of Fortran.\n",
"========================\n",
"\n"
]
}
],
"source": [
"response = chat_engine.chat(\n",
" \"What are the first programs Paul Graham tried writing?\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "50051bbf-ef40-4be0-bb74-b42e944a4c38",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The first programs Paul Graham tried writing were on the IBM 1401. He used an early version of Fortran for these initial programs.\n"
]
}
],
"source": [
"print(response)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
"nbformat_minor": 5
} | |
167009 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/query_engine/knowledge_graph_rag_query_engine.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Knowledge Graph RAG Query Engine\n",
"\n",
"\n",
"## Graph RAG\n",
"\n",
"Graph RAG is a Knowledge-enabled RAG approach to retrieve information from a Knowledge Graph for a given task. Typically, this is to build context based on entities' SubGraph related to the task.\n",
"\n",
"## GraphStore backed RAG vs VectorStore RAG\n",
"\n",
"As we compared how Graph RAG helps in some use cases in [this tutorial](https://gpt-index.readthedocs.io/en/latest/examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.html#id1), it's shown Knowledge Graph as the unique format of information could mitigate several issues caused by the nature of the \"split and embedding\" RAG approach.\n",
"\n",
"## Why Knowledge Graph RAG Query Engine\n",
"\n",
"In Llama Index, there are two scenarios we could apply Graph RAG:\n",
"\n",
"- Build Knowledge Graph from documents with Llama Index, with LLM or even [local models](https://colab.research.google.com/drive/1G6pcR0pXvSkdMQlAK_P-IrYgo-_staxd?usp=sharing), to do this, we should go for `KnowledgeGraphIndex`.\n",
"- Leveraging existing Knowledge Graph, in this case, we should use `KnowledgeGraphRAGQueryEngine`.\n",
"\n",
"> Note, the third query engine that's related to KG in Llama Index is `NL2GraphQuery` or `Text2Cypher`, for either existing KG or not, it could be done with `KnowledgeGraphQueryEngine`."
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Before we start the `Knowledge Graph RAG QueryEngine` demo, let's first get ready for basic preparation of Llama Index."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-azure-openai\n",
"%pip install llama-index-graph-stores-nebula\n",
"%pip install llama-index-llms-openai\n",
"%pip install llama-index-embeddings-azure-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# For OpenAI\n",
"\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
"\n",
"import logging\n",
"import sys\n",
"\n",
"logging.basicConfig(\n",
" stream=sys.stdout, level=logging.INFO\n",
") # logging.DEBUG for more verbose output\n",
"\n",
"\n",
"# define LLM\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.core import Settings\n",
"\n",
"Settings.llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
"Settings.chunk_size = 512"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Azure"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.llms.azure_openai import AzureOpenAI\n",
"from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n",
"\n",
"# For Azure OpenAI\n",
"api_key = \"<api-key>\"\n",
"azure_endpoint = \"https://<your-resource-name>.openai.azure.com/\"\n",
"api_version = \"2023-07-01-preview\"\n",
"\n",
"llm = AzureOpenAI(\n",
" model=\"gpt-35-turbo-16k\",\n",
" deployment_name=\"my-custom-llm\",\n",
" api_key=api_key,\n",
" azure_endpoint=azure_endpoint,\n",
" api_version=api_version,\n",
")\n",
"\n",
"# You need to deploy your own embedding model as well as your own chat completion model\n",
"embed_model = AzureOpenAIEmbedding(\n",
" model=\"text-embedding-ada-002\",\n",
" deployment_name=\"my-custom-embedding\",\n",
" api_key=api_key,\n",
" azure_endpoint=azure_endpoint,\n",
" api_version=api_version,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import Settings\n",
"\n",
"Settings.llm = llm\n",
"Settings.embed_model = embed_model\n",
"Settings.chunk_size = 512"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prepare for NebulaGraph\n",
"\n",
    "We take [NebulaGraphStore](https://gpt-index.readthedocs.io/en/stable/examples/index_structs/knowledge_graph/NebulaGraphKGIndexDemo.html) as an example in this demo; thus, before the next step of performing Graph RAG on an existing KG, let's ensure we have a running NebulaGraph with a defined data schema.\n",
    "\n",
    "This step installs the clients of NebulaGraph, and prepares contexts that define a [NebulaGraph Graph Space](https://docs.nebula-graph.io/3.6.0/1.introduction/2.data-model/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: ipython-ngql in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (0.5)\n",
"Requirement already satisfied: nebula3-python in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (3.4.0)\n",
"Requirement already satisfied: Jinja2 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from ipython-ngql) (3.1.2)\n",
"Requirement already satisfied: pandas in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from ipython-ngql) (2.0.3)\n",
"Requirement already satisfied: httplib2>=0.20.0 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from nebula3-python) (0.22.0)\n",
"Requirement already satisfied: six>=1.16.0 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from nebula3-python) (1.16.0)\n",
"Requirement already satisfied: pytz>=2021.1 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from nebula3-python) (2023.3)\n", | |
167062 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/run-llama/llama_index/blob/main/docs/docs/examples/query_engine/knowledge_graph_query_engine.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Knowledge Graph Query Engine\n",
"\n",
"Creating a Knowledge Graph usually involves specialized and complex tasks. However, by utilizing the Llama Index (LLM), the KnowledgeGraphIndex, and the GraphStore, we can facilitate the creation of a relatively effective Knowledge Graph from any data source supported by [Llama Hub](https://llamahub.ai/).\n",
"\n",
"Furthermore, querying a Knowledge Graph often requires domain-specific knowledge related to the storage system, such as Cypher. But, with the assistance of the LLM and the LlamaIndex KnowledgeGraphQueryEngine, this can be accomplished using Natural Language!\n",
"\n",
"In this demonstration, we will guide you through the steps to:\n",
"\n",
"- Extract and Set Up a Knowledge Graph using the Llama Index\n",
"- Query a Knowledge Graph using Cypher\n",
"- Query a Knowledge Graph using Natural Language"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you're opening this Notebook on colab, you will probably need to install LlamaIndex 🦙."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-readers-wikipedia\n",
"%pip install llama-index-llms-azure-openai\n",
"%pip install llama-index-graph-stores-nebula\n",
"%pip install llama-index-llms-openai\n",
"%pip install llama-index-embeddings-azure-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's first get ready for basic preparation of Llama Index."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# For OpenAI\n",
"\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
"\n",
"import logging\n",
"import sys\n",
"\n",
"logging.basicConfig(\n",
" stream=sys.stdout, level=logging.INFO\n",
") # logging.DEBUG for more verbose output\n",
"\n",
"\n",
"# define LLM\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.core import Settings\n",
"\n",
"Settings.llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
"Settings.chunk_size = 512"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Azure"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.llms.azure_openai import AzureOpenAI\n",
"from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n",
"\n",
"# For Azure OpenAI\n",
"api_key = \"<api-key>\"\n",
"azure_endpoint = \"https://<your-resource-name>.openai.azure.com/\"\n",
"api_version = \"2023-07-01-preview\"\n",
"\n",
"llm = AzureOpenAI(\n",
" model=\"gpt-35-turbo-16k\",\n",
" deployment_name=\"my-custom-llm\",\n",
" api_key=api_key,\n",
" azure_endpoint=azure_endpoint,\n",
" api_version=api_version,\n",
")\n",
"\n",
"# You need to deploy your own embedding model as well as your own chat completion model\n",
"embed_model = AzureOpenAIEmbedding(\n",
" model=\"text-embedding-ada-002\",\n",
" deployment_name=\"my-custom-embedding\",\n",
" api_key=api_key,\n",
" azure_endpoint=azure_endpoint,\n",
" api_version=api_version,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import Settings\n",
"\n",
"Settings.llm = llm\n",
"Settings.embed_model = embed_model\n",
"Settings.chunk_size = 512"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prepare for NebulaGraph\n",
"\n",
    "Before the next step of creating the Knowledge Graph, let's ensure we have a running NebulaGraph with a defined data schema."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: ipython-ngql in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (0.5)\n",
"Requirement already satisfied: nebula3-python in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (3.4.0)\n",
"Requirement already satisfied: pandas in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from ipython-ngql) (2.0.3)\n",
"Requirement already satisfied: Jinja2 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from ipython-ngql) (3.1.2)\n",
"Requirement already satisfied: pytz>=2021.1 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from nebula3-python) (2023.3)\n",
"Requirement already satisfied: future>=0.18.0 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from nebula3-python) (0.18.3)\n",
"Requirement already satisfied: httplib2>=0.20.0 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from nebula3-python) (0.22.0)\n",
"Requirement already satisfied: six>=1.16.0 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from nebula3-python) (1.16.0)\n",
"Requirement already satisfied: pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from httplib2>=0.20.0->nebula3-python) (3.0.9)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from Jinja2->ipython-ngql) (2.1.3)\n",
"Requirement already satisfied: tzdata>=2022.1 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from pandas->ipython-ngql) (2023.3)\n",
"Requirement already satisfied: numpy>=1.20.3 in /Users/loganmarkewich/llama_index/llama-index/lib/python3.9/site-packages (from pandas->ipython-ngql) (1.25.2)\n", | |
167318 | {
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "27bc87b7",
"metadata": {},
"source": [
"# Nebula Graph Store"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bde39e3e",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-openai\n",
"%pip install llama-index-embeddings-openai\n",
"%pip install llama-index-graph-stores-nebula\n",
"%pip install llama-index-llms-azure-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "032264ce",
"metadata": {},
"outputs": [],
"source": [
"# For OpenAI\n",
"\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"INSERT OPENAI KEY\"\n",
"\n",
"import logging\n",
"import sys\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.core import Settings\n",
"\n",
"logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n",
"\n",
"# define LLM\n",
"# NOTE: at the time of demo, text-davinci-002 did not have rate-limit errors\n",
"llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
"\n",
"Settings.llm = llm\n",
"Settings.chunk_size = 512"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fd36e3b",
"metadata": {},
"outputs": [],
"source": [
"# For Azure OpenAI\n",
"import os\n",
"import json\n",
"import openai\n",
"from llama_index.llms.azure_openai import AzureOpenAI\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.core import (\n",
" VectorStoreIndex,\n",
" SimpleDirectoryReader,\n",
" KnowledgeGraphIndex,\n",
")\n",
"\n",
"from llama_index.core import StorageContext\n",
"from llama_index.graph_stores.nebula import NebulaGraphStore\n",
"\n",
"import logging\n",
"import sys\n",
"\n",
"from IPython.display import Markdown, display\n",
"\n",
"logging.basicConfig(\n",
" stream=sys.stdout, level=logging.INFO\n",
") # logging.DEBUG for more verbose output\n",
"logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n",
"\n",
"openai.api_type = \"azure\"\n",
"openai.api_base = \"https://<foo-bar>.openai.azure.com\"\n",
"openai.api_version = \"2022-12-01\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"<your-openai-key>\"\n",
"openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"llm = AzureOpenAI(\n",
" model=\"<foo-bar-model>\",\n",
" engine=\"<foo-bar-deployment>\",\n",
" temperature=0,\n",
" api_key=openai.api_key,\n",
" api_type=openai.api_type,\n",
" api_base=openai.api_base,\n",
" api_version=openai.api_version,\n",
")\n",
"\n",
"# You need to deploy your own embedding model as well as your own chat completion model\n",
"embedding_model = OpenAIEmbedding(\n",
" model=\"text-embedding-ada-002\",\n",
" deployment_name=\"<foo-bar-deployment>\",\n",
" api_key=openai.api_key,\n",
" api_base=openai.api_base,\n",
" api_type=openai.api_type,\n",
" api_version=openai.api_version,\n",
")\n",
"\n",
"Settings.llm = llm\n",
    "Settings.chunk_size = 512\n",
"Settings.embed_model = embedding_model"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "be3f7baa-1c0a-430b-981b-83ddca9e71f2",
"metadata": {},
"source": [
"## Using Knowledge Graph with NebulaGraphStore"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "75f1d565-04e8-41bc-9165-166dc89b6b47",
"metadata": {},
"source": [
"#### Building the Knowledge Graph"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d0b2364-4806-4656-81e7-3f6e4b910b5b",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader\n",
"from llama_index.core import StorageContext\n",
"from llama_index.graph_stores.nebula import NebulaGraphStore\n",
"\n",
"\n",
"from llama_index.llms.openai import OpenAI\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1c297fd3-3424-41d8-9d0d-25fe6310ab62",
"metadata": {},
"outputs": [],
"source": [
"documents = SimpleDirectoryReader(\n",
" \"../../../../examples/paul_graham_essay/data\"\n",
").load_data()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "832b4970",
"metadata": {},
"source": [
"## Prepare for NebulaGraph"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7270af8b",
"metadata": {},
"outputs": [],
"source": [
"%pip install nebula3-python\n",
"\n",
"os.environ[\"NEBULA_USER\"] = \"root\"\n",
"os.environ[\n",
" \"NEBULA_PASSWORD\"\n",
"] = \"<password>\" # replace with your password, by default it is \"nebula\"\n",
"os.environ[\n",
" \"NEBULA_ADDRESS\"\n",
"] = \"127.0.0.1:9669\" # assumed we have NebulaGraph 3.5.0 or newer installed locally\n",
"\n",
"# Assume that the graph has already been created\n",
"# Create a NebulaGraph cluster with:\n",
"# Option 0: `curl -fsSL nebula-up.siwei.io/install.sh | bash`\n",
"# Option 1: NebulaGraph Docker Extension https://hub.docker.com/extensions/weygu/nebulagraph-dd-ext\n",
"# and that the graph space is called \"paul_graham_essay\"\n",
"# If not, create it with the following commands from NebulaGraph's console:\n",
"# CREATE SPACE paul_graham_essay(vid_type=FIXED_STRING(256), partition_num=1, replica_factor=1);\n",
"# :sleep 10;\n",
"# USE paul_graham_essay;\n",
"# CREATE TAG entity(name string);\n",
"# CREATE EDGE relationship(relationship string);\n",
"# CREATE TAG INDEX entity_index ON entity(name(256));\n",
"\n",
"space_name = \"paul_graham_essay\"\n",
"edge_types, rel_prop_names = [\"relationship\"], [\n",
" \"relationship\"\n",
"] # default, could be omit if create from an empty kg\n",
"tags = [\"entity\"] # default, could be omit if create from an empty kg"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f0edbc99",
"metadata": {},
"source": [
"## Instantiate GPTNebulaGraph KG Indexes"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "370fd08f-56ff-4c24-b0c4-c93116a6d482",
"metadata": {},
"outputs": [],
"source": [ | |
167327 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# TiDB Graph Store"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-openai\n",
"%pip install llama-index-graph-stores-tidb\n",
"%pip install llama-index-embeddings-openai\n",
"%pip install llama-index-llms-azure-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# For OpenAI\n",
"\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-xxxxxxx\"\n",
"\n",
"import logging\n",
"import sys\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.core import Settings\n",
"\n",
"logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n",
"\n",
"# define LLM\n",
"llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
"Settings.llm = llm\n",
"Settings.chunk_size = 512"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# For Azure OpenAI\n",
"import os\n",
"import openai\n",
"from llama_index.llms.azure_openai import AzureOpenAI\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"\n",
"import logging\n",
"import sys\n",
"\n",
"logging.basicConfig(\n",
" stream=sys.stdout, level=logging.INFO\n",
") # logging.DEBUG for more verbose output\n",
"logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n",
"\n",
"openai.api_type = \"azure\"\n",
"openai.api_base = \"https://<foo-bar>.openai.azure.com\"\n",
"openai.api_version = \"2022-12-01\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"<your-openai-key>\"\n",
"openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"llm = AzureOpenAI(\n",
" deployment_name=\"<foo-bar-deployment>\",\n",
" temperature=0,\n",
" openai_api_version=openai.api_version,\n",
" model_kwargs={\n",
" \"api_key\": openai.api_key,\n",
" \"api_base\": openai.api_base,\n",
" \"api_type\": openai.api_type,\n",
" \"api_version\": openai.api_version,\n",
" },\n",
")\n",
"\n",
"# You need to deploy your own embedding model as well as your own chat completion model\n",
"embedding_llm = OpenAIEmbedding(\n",
" model=\"text-embedding-ada-002\",\n",
" deployment_name=\"<foo-bar-deployment>\",\n",
" api_key=openai.api_key,\n",
" api_base=openai.api_base,\n",
" api_type=openai.api_type,\n",
" api_version=openai.api_version,\n",
")\n",
"\n",
"Settings.llm = llm\n",
"Settings.embed_model = embedding_llm\n",
"Settings.chunk_size = 512"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using Knowledge Graph with TiDB"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Prepare a TiDB cluster\n",
"\n",
"- [TiDB Cloud](https://tidb.cloud/) [Recommended], a fully managed TiDB service that frees you from the complexity of database operations.\n",
    "- [TiUP](https://docs.pingcap.com/tidb/stable/tiup-overview), use `tiup playground` to create a local TiDB cluster for testing."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Get TiDB connection string\n",
"\n",
"For example: `mysql+pymysql://user:password@host:4000/dbname`, in TiDBGraphStore we use pymysql as the db driver, so the connection string should be `mysql+pymysql://...`.\n",
"\n",
"If you are using a TiDB Cloud serverless cluster with public endpoint, it requires TLS connection, so the connection string should be like `mysql+pymysql://user:password@host:4000/dbname?ssl_verify_cert=true&ssl_verify_identity=true`.\n",
"\n",
"Replace `user`, `password`, `host`, `dbname` with your own values."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Initialize TiDBGraphStore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.graph_stores.tidb import TiDBGraphStore\n",
"\n",
"graph_store = TiDBGraphStore(\n",
" db_connection_string=\"mysql+pymysql://user:password@host:4000/dbname\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Instantiate TiDB KG Indexes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import (\n",
" KnowledgeGraphIndex,\n",
" SimpleDirectoryReader,\n",
" StorageContext,\n",
")\n",
"\n",
"documents = SimpleDirectoryReader(\n",
" \"../../../examples/data/paul_graham/\"\n",
").load_data()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"storage_context = StorageContext.from_defaults(graph_store=graph_store)\n",
"\n",
"# NOTE: can take a while!\n",
"index = KnowledgeGraphIndex.from_documents(\n",
" documents=documents,\n",
" storage_context=storage_context,\n",
" max_triplets_per_chunk=2,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Querying the Knowledge Graph"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"WARNING:llama_index.core.indices.knowledge_graph.retrievers:Index was not constructed with embeddings, skipping embedding usage...\n",
"INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
]
}
],
"source": [
"query_engine = index.as_query_engine(\n",
" include_text=False, response_mode=\"tree_summarize\"\n",
")\n",
"response = query_engine.query(\n",
" \"Tell me more about Interleaf\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"<b>Interleaf was a software company that developed a scripting language and was known for its software products. It was inspired by Emacs and faced challenges due to Moore's law. Over time, Interleaf's prominence declined.</b>"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from IPython.display import Markdown, display\n",
"\n",
"display(Markdown(f\"<b>{response}</b>\"))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python", | |
167469 | {
"cells": [
{
"cell_type": "markdown",
"id": "27bc87b7",
"metadata": {},
"source": [
"# Neo4j Graph Store"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78b60432",
"metadata": {},
"outputs": [],
"source": [
"%pip install llama-index-llms-openai\n",
"%pip install llama-index-graph-stores-neo4j\n",
"%pip install llama-index-embeddings-openai\n",
"%pip install llama-index-llms-azure-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "032264ce",
"metadata": {},
"outputs": [],
"source": [
"# For OpenAI\n",
"\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"API_KEY_HERE\"\n",
"\n",
"import logging\n",
"import sys\n",
"from llama_index.llms.openai import OpenAI\n",
"from llama_index.core import Settings\n",
"\n",
"logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n",
"\n",
"# define LLM\n",
"llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
"Settings.llm = llm\n",
"Settings.chunk_size = 512"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fd36e3b",
"metadata": {},
"outputs": [],
"source": [
"# For Azure OpenAI\n",
"import os\n",
"import json\n",
"import openai\n",
"from llama_index.llms.azure_openai import AzureOpenAI\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.core import (\n",
" VectorStoreIndex,\n",
" SimpleDirectoryReader,\n",
" KnowledgeGraphIndex,\n",
")\n",
"\n",
"import logging\n",
"import sys\n",
"\n",
"from IPython.display import Markdown, display\n",
"\n",
"logging.basicConfig(\n",
" stream=sys.stdout, level=logging.INFO\n",
") # logging.DEBUG for more verbose output\n",
"logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n",
"\n",
"openai.api_type = \"azure\"\n",
"openai.api_base = \"https://<foo-bar>.openai.azure.com\"\n",
"openai.api_version = \"2022-12-01\"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"<your-openai-key>\"\n",
"openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"llm = AzureOpenAI(\n",
" deployment_name=\"<foo-bar-deployment>\",\n",
" temperature=0,\n",
" openai_api_version=openai.api_version,\n",
" model_kwargs={\n",
" \"api_key\": openai.api_key,\n",
" \"api_base\": openai.api_base,\n",
" \"api_type\": openai.api_type,\n",
" \"api_version\": openai.api_version,\n",
" },\n",
")\n",
"\n",
"# You need to deploy your own embedding model as well as your own chat completion model\n",
"embedding_llm = OpenAIEmbedding(\n",
" model=\"text-embedding-ada-002\",\n",
" deployment_name=\"<foo-bar-deployment>\",\n",
" api_key=openai.api_key,\n",
" api_base=openai.api_base,\n",
" api_type=openai.api_type,\n",
" api_version=openai.api_version,\n",
")\n",
"\n",
"Settings.llm = llm\n",
"Settings.embed_model = embedding_llm\n",
"Settings.chunk_size = 512"
]
},
{
"cell_type": "markdown",
"id": "be3f7baa-1c0a-430b-981b-83ddca9e71f2",
"metadata": {},
"source": [
"## Using Knowledge Graph with Neo4jGraphStore"
]
},
{
"cell_type": "markdown",
"id": "75f1d565-04e8-41bc-9165-166dc89b6b47",
"metadata": {},
"source": [
"#### Building the Knowledge Graph"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d0b2364-4806-4656-81e7-3f6e4b910b5b",
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader\n",
"from llama_index.core import StorageContext\n",
"from llama_index.graph_stores.neo4j import Neo4jGraphStore\n",
"\n",
"\n",
"from llama_index.llms.openai import OpenAI\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1c297fd3-3424-41d8-9d0d-25fe6310ab62",
"metadata": {},
"outputs": [],
"source": [
"documents = SimpleDirectoryReader(\n",
" \"../../../../examples/paul_graham_essay/data\"\n",
").load_data()"
]
},
{
"cell_type": "markdown",
"id": "832b4970",
"metadata": {},
"source": [
"## Prepare for Neo4j"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7270af8b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: neo4j in /home/tomaz/anaconda3/envs/snakes/lib/python3.9/site-packages (5.11.0)\n",
"Requirement already satisfied: pytz in /home/tomaz/anaconda3/envs/snakes/lib/python3.9/site-packages (from neo4j) (2023.3)\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install neo4j\n",
"\n",
"username = \"neo4j\"\n",
"password = \"retractor-knot-thermocouples\"\n",
"url = \"bolt://44.211.44.239:7687\"\n",
"database = \"neo4j\""
]
},
{
"cell_type": "markdown",
"id": "f0edbc99",
"metadata": {},
"source": [
"## Instantiate Neo4jGraph KG Indexes"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "370fd08f-56ff-4c24-b0c4-c93116a6d482",
"metadata": {},
"outputs": [],
"source": [
"graph_store = Neo4jGraphStore(\n",
" username=username,\n",
" password=password,\n",
" url=url,\n",
" database=database,\n",
")\n",
"\n",
"storage_context = StorageContext.from_defaults(graph_store=graph_store)\n",
"\n",
"# NOTE: can take a while!\n",
"index = KnowledgeGraphIndex.from_documents(\n",
" documents,\n",
" storage_context=storage_context,\n",
" max_triplets_per_chunk=2,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "c39a0eeb-ef16-4982-8ba8-b37c2c5f4437",
"metadata": {},
"source": [
"#### Querying the Knowledge Graph\n",
"\n",
"First, we can query and send only the triplets to the LLM."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "670300d8-d0a8-4201-bbcd-4a74b199fcdd",
"metadata": {},
"outputs": [
{
"name": "stdout", |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.