| code (string, lengths 161–233k) | apis (list, lengths 1–24) | extract_api (string, lengths 162–68.5k) |
|---|---|---|
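Each row below pairs a code snippet (`code`) with the list of library APIs it calls (`apis`) and an `extract_api` string of tuples locating each call inside the snippet. A minimal sketch of how one such tuple appears to decode, with field meanings inferred from the visible rows rather than from any documented schema:

```python
# One extract_api entry, copied from the first row below; the field names in the
# comments are inferences from the visible data, not a documented schema.
entry = (
    (2944, 2953),                     # character span of the call inside `code`
    'fastapi.FastAPI',                # fully qualified API name
    'FastAPI',                        # call expression as written in the source
    ([], {}),                         # extracted positional and keyword arguments
    '()\n',                           # source text of the argument list
    (2951, 2953),                     # character span of the argument list
    False,                            # True when the import is aliased (`import x as y`), inferred
    'from fastapi import FastAPI\n',  # import statement that binds the name
)
span, api, call, (args, kwargs), arg_text, arg_span, aliased, import_stmt = entry
print(api, span)  # -> fastapi.FastAPI (2944, 2953)
```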
from fastapi import FastAPI
from pydantic import BaseModel
import asyncio
import whisper
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
from llama_index.ingestion import IngestionPipeline
from llama_index.extractors import TitleExtractor, SummaryExtractor
from llama_index.text_sp... | [
"llama_index.vector_stores.AstraDBVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.LangChainLLM",
"llama_index.ingestion.IngestionPipeline",
"llama_index.set_global_service_context",
"llama_index.text_splitter.SentenceSplitter",
"llama_index.embedd... | [((2944, 2953), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2951, 2953), False, 'from fastapi import FastAPI\n'), ((2058, 2120), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (2086, 2120), Fals... |
from llama_index.core.postprocessor import KeywordNodePostprocessor
from llama_index.core.schema import TextNode, NodeWithScore
nodes = [
TextNode(
text="Entry no: 1, <SECRET> - Attack at Dawn"
),
TextNode(
text="Entry no: 2, <RESTRICTED> - Go to point Bravo"
),
TextNode(
te... | [
"llama_index.core.schema.NodeWithScore",
"llama_index.core.schema.TextNode",
"llama_index.core.postprocessor.KeywordNodePostprocessor"
] | [((452, 519), 'llama_index.core.postprocessor.KeywordNodePostprocessor', 'KeywordNodePostprocessor', ([], {'exclude_keywords': "['SECRET', 'RESTRICTED']"}), "(exclude_keywords=['SECRET', 'RESTRICTED'])\n", (476, 519), False, 'from llama_index.core.postprocessor import KeywordNodePostprocessor\n'), ((143, 198), 'llama_i... |
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.llms import Ollama
from llama_index.vector_stores import WeaviateVectorStore
import weaviate
import box
import yaml
def loa... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.WeaviateVectorStore",
"llama_index.llms.Ollama"
] | [((568, 658), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': 'chunk_size', 'llm': 'llm', 'embed_model': 'embed_model'}), '(chunk_size=chunk_size, llm=llm, embed_model=\n embed_model)\n', (596, 658), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'),... |
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import KeywordExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_fro... | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.extractors.KeywordExtractor",
"llama_index.core.node_parser.SentenceSplitter"
] | [((176, 206), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (197, 206), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((247, 291), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), ... |
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import SummaryExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_fro... | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.extractors.SummaryExtractor"
] | [((176, 206), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (197, 206), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((247, 291), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), ... |
import argparse
from typing import Optional, Any
from mlx_lm import load, generate
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.llms import CustomLLM, CompletionResponse, CompletionResponseGen, L... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llms.CompletionResponse",
"llama_index.core.llms.callbacks.llm_completion_callback",
"llama_index.core.llms.LLMMetadata",
"llama_index.core.SimpleDirectoryReader"
] | [((1291, 1316), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1314, 1316), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1715, 1740), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([],... |
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))
print(f"Metadata: {document[0].metadata}")
print(f"Text: {document[0].text}")
| [
"llama_index.readers.file.FlatReader"
] | [((83, 95), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (93, 95), False, 'from llama_index.readers.file import FlatReader\n'), ((124, 158), 'pathlib.Path', 'Path', (['"""files/sample_document1.txt"""'], {}), "('files/sample_document1.txt')\n", (128, 158), False, 'from pathlib import Path\n')] |
import logging
import os
import sys
from llama_index.core import (
StorageContext,
SummaryIndex,
load_index_from_storage,
)
from llama_index.readers.github import GithubRepositoryReader
from uglychain.llm.llama_index import LlamaIndexLLM
from uglychain import Model
from uglygpt.utils.config import config
i... | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.SummaryIndex.from_documents",
"llama_index.readers.github.GithubRepositoryReader"
] | [((340, 360), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (358, 360), False, 'import nest_asyncio\n'), ((361, 419), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (380, 419), False, 'import logging\n')... |
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader('./llama_data').load_data()
index = GPTVectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
print(query_engine.query("Is the Vision Pro an AR device?"))
# save the index
index.storage_context.persist() | [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((138, 183), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (172, 183), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n'), ((80, 117), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./llama_... |
from llama_index import SimpleDirectoryReader
import logging
import sys
from pathlib import Path
from llama_index import download_loader
PandasExcelReader = download_loader("PandasExcelReader")
loader = PandasExcelReader(pandas_config={"header": 0})
documents = loader.load_data(file=Path('src/data/train_data... | [
"llama_index.download_loader"
] | [((164, 200), 'llama_index.download_loader', 'download_loader', (['"""PandasExcelReader"""'], {}), "('PandasExcelReader')\n", (179, 200), False, 'from llama_index import download_loader\n'), ((295, 330), 'pathlib.Path', 'Path', (['"""src/data/train_dataset.xlsx"""'], {}), "('src/data/train_dataset.xlsx')\n", (299, 330)... |
"""
# The core idea of a Multi-Document Agent
The core idea of a Multi-Document Agent is to simulate a knowledgeable assistant that can draw upon information from
multiple separate documents to provide informed, accurate answers to user queries. Unlike a traditional, single-document
agent that can only access and unde... | [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.StorageContext.from_defaults",
"llama_index.agent.FnRetrieverOpenAIAgent.from_retriever",
"llama_index.objects.ObjectIndex.from_objects",
"llama_index.objects.SimpleToolNodeMapping.f... | [((3825, 3838), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (3836, 3838), False, 'from dotenv import load_dotenv\n'), ((15763, 15789), 'fastapi.FastAPI', 'FastAPI', ([], {'lifespan': 'lifespan'}), '(lifespan=lifespan)\n', (15770, 15789), False, 'from fastapi import FastAPI, Response\n'), ((5877, 5918), 'os.p... |
from typing import Dict, List
from pathlib import Path
from llama_index import download_loader
from llama_index import Document
# Add your OpenAI API Key here before running the script.
import os
if "OPENAI_API_KEY" not in os.environ:
raise RuntimeError("Please add the OPENAI_API_KEY environment variable to run t... | [
"llama_index.GPTVectorStoreIndex",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.download_loader"
] | [((497, 534), 'llama_index.download_loader', 'download_loader', (['"""UnstructuredReader"""'], {}), "('UnstructuredReader')\n", (512, 534), False, 'from llama_index import download_loader\n'), ((3192, 3221), 'ray.data.from_items', 'ray.data.from_items', (['all_docs'], {}), '(all_docs)\n', (3211, 3221), False, 'import r... |
import os
import sys
import openai
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, load_index_from_storage
from llama_index.vector_stores import ChromaVectorStore, FaissVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index.embeddings import Hugging... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.vector_stores.ChromaVectorStore"
] | [((367, 378), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (376, 378), False, 'import os\n'), ((962, 1005), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'vector_path'}), '(path=vector_path)\n', (987, 1005), False, 'import chromadb\n'), ((1091, 1145), 'llama_index.vector_stores.ChromaVectorStor... |
import os
from dotenv import load_dotenv
load_dotenv()
openai_key = os.getenv("OPENAI_API_KEY")
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
ServiceContext,
set_global_service_context
)
from llama_index.llms import OpenAI
from llama_index.prompts import PromptTemplate
llm = Open... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.set_global_service_context"
] | [((42, 55), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (53, 55), False, 'from dotenv import load_dotenv\n'), ((69, 96), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (78, 96), False, 'import os\n'), ((316, 375), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'system_prompt': ... |
import os
from llama_index import StorageContext, load_index_from_storage
from dotenv import load_dotenv
from llama_index import VectorStoreIndex, SimpleDirectoryReader
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
LIBRARY_DIRECTORY = os.getenv('LIBRARY_DIRECTORY')
documents = SimpleDire... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((170, 183), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (181, 183), False, 'from dotenv import load_dotenv\n'), ((217, 244), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (226, 244), False, 'import os\n'), ((265, 295), 'os.getenv', 'os.getenv', (['"""LIBRARY_DIRECTORY"""... |
import os
import json
from llmsherpa.readers import LayoutPDFReader
from llmsherpa.readers.layout_reader import LayoutReader
from llama_index.core import Document
from pinecone import Pinecone, ServerlessSpec
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings
from ... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.vector_stores.pinecone.PineconeVectorStore",
"llama_index.core.StorageContext.from_defaults"
] | [((808, 848), 'spacy_llm.logger.setLevel', 'spacy_llm.logger.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (833, 848), False, 'import spacy_llm\n'), ((855, 877), 'spacy_llm.util.assemble', 'assemble', (['"""config.cfg"""'], {}), "('config.cfg')\n", (863, 877), False, 'from spacy_llm.util import assemble\n'),... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import os.path
import re
import sys
import gin
import shutil
import logging
import tempfile
import requests
import subprocess
from pathlib import Path
from urllib.parse import urlparse
from llama_index import ServiceContext, StorageContext
from llama_index imp... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.set_global_service_context",
"llama_index.StorageContext.from_defaults",
"llama_index.SimpleKeywordTableIndex"
] | [((535, 594), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (554, 594), False, 'import logging\n'), ((626, 666), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\... |
from llama_index.core.node_parser import HierarchicalNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))
hierarchical_parser = HierarchicalNodeParser.from_defaults(
chunk_sizes=[128, 64, 32],
... | [
"llama_index.readers.file.FlatReader",
"llama_index.core.node_parser.HierarchicalNodeParser.from_defaults"
] | [((147, 159), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (157, 159), False, 'from llama_index.readers.file import FlatReader\n'), ((247, 332), 'llama_index.core.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': '[128, 64, 32]', 'c... |
from typing_extensions import override
import os
from llama_index.indices.query.query_transform.base import BaseQueryTransform
from llama_index.llms import ChatMessage, MessageRole
from llama_index.llms.base import BaseLLM
from llama_index.llms.llm import MessagesToPromptType
from llama_index.postprocessor.types impor... | [
"llama_index.llms.ChatMessage"
] | [((3960, 4046), 're.compile', 're.compile', (['"""^.*?Expert[^:]*:[\\\\s_*]*|\\\\n\\\\n"""'], {'flags': '(re.MULTILINE | re.IGNORECASE)'}), "('^.*?Expert[^:]*:[\\\\s_*]*|\\\\n\\\\n', flags=re.MULTILINE | re.\n IGNORECASE)\n", (3970, 4046), False, 'import re\n'), ((1480, 1499), 'pydantic.Field', 'Field', ([], {'exclu... |
import logging
from typing import Any, Literal
from llama_index.bridge.pydantic import Field, PrivateAttr
from llama_index.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
Embedding,
)
from llama_index.embeddings.multi_modal_base import MultiModalEmbedding
from llama_index.schema import ImageType
logger = ... | [
"llama_index.bridge.pydantic.Field",
"llama_index.bridge.pydantic.PrivateAttr"
] | [((320, 347), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (337, 347), False, 'import logging\n'), ((1086, 1131), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_EMBED_BATCH_SIZE', 'gt': '(0)'}), '(default=DEFAULT_EMBED_BATCH_SIZE, gt=0)\n', (1091, 1131), False, ... |
import json
from pydantic import create_model
from .utility import CreateOutputModel
"""
The MIT License
Copyright (c) Jerry Liu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restricti... | [
"llama_index.core.response_synthesizers.TreeSummarize"
] | [((15271, 15295), 'json.loads', 'json.loads', (['output_model'], {}), '(output_model)\n', (15281, 15295), False, 'import json\n'), ((15402, 15468), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'verbose': '(True)', 'output_cls': 'OutputModel', 'llm': 'llm_model'}), '(verbose=True, outpu... |
import logging
import os
import sys
from shutil import rmtree
import openai
from llama_index import ServiceContext, SimpleDirectoryReader, TreeIndex
from llama_index.llms.openai import OpenAI
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.std... | [
"llama_index.SimpleDirectoryReader",
"llama_index.TreeIndex.from_documents",
"llama_index.llms.openai.OpenAI"
] | [((194, 252), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (213, 252), False, 'import logging\n'), ((284, 324), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n'... |
from llama_index.core.tools import FunctionTool
import os
note_file = os.path.join('data', 'notes.txt')
def save_notes(note):
if not os.path.exists(note_file):
        open(note_file, 'w').close()
with open(note_file, 'a') as f:
f.writelines([note + '\n'])
return "Note saved"
note_engine = FunctionTool... | [
"llama_index.core.tools.FunctionTool.from_defaults"
] | [((70, 103), 'os.path.join', 'os.path.join', (['"""data"""', '"""notes.txt"""'], {}), "('data', 'notes.txt')\n", (82, 103), False, 'import os\n'), ((308, 432), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'save_notes', 'name': '"""note_saver"""', 'description': '"""Save... |
import boto3
import os
import json
# from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
from langchain_community.llms import Bedrock
from llama_index.node_parser import SimpleNodeParser
from llama_index.embeddings import LangchainEmbedding
from langchain_community.embeddings import ... | [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.OpenAIEmbedding",
"llama_index.VectorStoreIndex",
"llama_index.postprocessor.cohere_rerank.CohereRerank",
"llama_index.Se... | [((974, 1051), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""rag.log"""', 'encoding': '"""utf-8"""', 'level': 'logging.INFO'}), "(filename='rag.log', encoding='utf-8', level=logging.INFO)\n", (993, 1051), False, 'import logging\n'), ((1113, 1128), 'boto3.Session', 'boto3.Session', ([], {}), '()\n'... |
from llama_index.tools import FunctionTool
import os
note_file = os.path.join("data", "notes.txt")
def save_note(note):
if not os.path.exists(note_file):
open(note_file, "w")
with open(note_file, "a") as f:
f.writelines([note + "\n"])
return "note saved"
note_engine = FunctionTool.fro... | [
"llama_index.tools.FunctionTool.from_defaults"
] | [((66, 99), 'os.path.join', 'os.path.join', (['"""data"""', '"""notes.txt"""'], {}), "('data', 'notes.txt')\n", (78, 99), False, 'import os\n'), ((304, 443), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'save_note', 'name': '"""note_saver"""', 'description': '"""this tool ca... |
import asyncio
import json
from typing import Any, Tuple, List
from langchain.base_language import BaseLanguageModel
from langchain.tools import DuckDuckGoSearchResults, BaseTool
from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext
from llama_index.response_synthesizers import ... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.response_synthesizers.TreeSummarize",
"llama_index.download_loader",
"llama_index.LLMPredictor"
] | [((916, 954), 'llama_index.download_loader', 'download_loader', (['"""SimpleWebPageReader"""'], {}), "('SimpleWebPageReader')\n", (931, 954), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((1600, 1622), 'llama_index.LLMPredictor', 'LLMPredictor', (['self.llm'... |
"""
This script demonstrates how to use the llama_index library to create and query a vector store index.
It loads documents from a directory, creates an index, and allows querying the index.
usage: python hello_persist.py "What is the author's name and job now?"
"""
import os
import sys
import argparse
import loggin... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1804, 1870), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query a vector store index."""'}), "(description='Query a vector store index.')\n", (1827, 1870), False, 'import argparse\n'), ((628, 641), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (639, 641), False, 'from doten... |
import streamlit as st
from llama_hub.youtube_transcript import YoutubeTranscriptReader
from llama_hub.youtube_transcript import is_youtube_video
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.prompts import ChatMessage, MessageRole
from llama_index.... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.tools.ToolMetadata",
"llama_index.StorageContext.from_defaults"
] | [((1466, 1499), 'streamlit.session_state.get', 'st.session_state.get', (['"""video_url"""'], {}), "('video_url')\n", (1486, 1499), True, 'import streamlit as st\n'), ((1684, 1749), 'streamlit.header', 'st.header', (['f"""This page has run {st.session_state.counter} times."""'], {}), "(f'This page has run {st.session_st... |
import os
from typing import Any, Optional
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
DEFAULT_TOKEN_JSON_PATH = 'token.json'
DEFAULT_SERVICE_ACCOUNT_JSON_PATH = 'service_account.json'
DEFAULT_CREDENTIALS_JSON_PATH = 'credentials.json'
HEADING_STYLE_TEMPL... | [
"llama_index.core.schema.Document"
] | [((3044, 3098), 'googleapiclient.discovery.build', 'discovery.build', (['"""docs"""', '"""v1"""'], {'credentials': 'credentials'}), "('docs', 'v1', credentials=credentials)\n", (3059, 3098), True, 'import googleapiclient.discovery as discovery\n'), ((4002, 4038), 'os.path.exists', 'os.path.exists', (['self.token_json_p... |
import utils
import os
import openai
import sys
from dotenv import load_dotenv
load_dotenv()
api_key = os.getenv("API_KEY")
openai.api_key = api_key
os.environ['OPENAI_API_KEY'] = api_key
#
# examples
# https://github.com/kevintsai/Building-and-Evaluating-Advanced-RAG-Applications
#
# SimpleDirectoryReader is a class that rea... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.SimpleDirectoryReader"
] | [((82, 95), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (93, 95), False, 'from dotenv import load_dotenv\n'), ((106, 126), 'os.getenv', 'os.getenv', (['"""API_KEY"""'], {}), "('API_KEY')\n", (115, 126), False, 'import os\n'), ((1774, 1825), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-p... |
from collections import ChainMap
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Protocol,
Sequence,
get_args,
runtime_checkable,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponseAsyncGen... | [
"llama_index.core.bridge.pydantic.validator",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.program.utils.get_program_for_llm",
"llama_index.core.base.llms.typ... | [((2866, 2929), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""System prompt for LLM calls."""'}), "(default=None, description='System prompt for LLM calls.')\n", (2871, 2929), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validato... |
import streamlit as st
import pandas as pd
from PIL import Image
from utils import *
import os
os.environ['OPENAI_API_KEY'] = st.secrets['openai_key']
#from llama_index.llms.openai import OpenAI
#from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core import Document
from llama_index.core im... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.llms.openai.OpenAI",
"llama_index.core.Document",
"llama_index.core.PromptTemplate"
] | [((549, 587), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'model': '"""gpt-4"""'}), "(temperature=0.5, model='gpt-4')\n", (555, 587), False, 'from llama_index.llms.openai import OpenAI\n'), ((801, 1019), 'google.oauth2.service_account.Credentials.from_service_account_info', 'service_accoun... |
# Import the necessary libraries
import random
import time
from llama_index.llms import OpenAI
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, StorageContext, set_global_service_context
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import... | [
"llama_index.indices.vector_store.retrievers.VectorIndexRetriever",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.llms.OpenAI",
"llama_index.indices.prompt_helper.PromptHelper",
"llama_index.set_global_service_context",
"llama_index.node_parser.SentenceSplitter"
] | [((855, 895), 'streamlit.title', 'st.title', (['"""🦜🔗 Tourism Assistant Chatbot"""'], {}), "('🦜🔗 Tourism Assistant Chatbot')\n", (863, 895), True, 'import streamlit as st\n'), ((5721, 5781), 'llama_index.set_global_service_context', 'set_global_service_context', (['st.session_state.service_context'], {}), '(st.sess... |
import os
import json
import logging
import sys
import requests
from dotenv import load_dotenv
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from llama_index.core import VectorStoreIndex, Document
from llama_index.tools.brave_search import BraveSearchToolSpec
from llama_index.readers.we... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.Document",
"llama_index.tools.brave_search.BraveSearchToolSpec"
] | [((496, 569), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(0.1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])\n', (501, 569), False, 'from urllib3.util.retry import Retry\n'), ((675, 733), 'logging.basicConfig', 'l... |
import qdrant_client
from llama_index.llms import Ollama
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.vector_stores.qdrant import QdrantVectorStore
# re-initialize the vector store
client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
vector_store = QdrantVectorStore... | [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.llms.Ollama"
] | [((233, 281), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (259, 281), False, 'import qdrant_client\n'), ((303, 361), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': ... |
import os
from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage
from llama_index.readers.file import PDFReader
# def get_index(data, index_name):
# index = None
# if not os.path.exists(index_name):
# print('Building index', index_name)
# index = VectorStoreIn... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.readers.file.PDFReader"
] | [((1404, 1440), 'os.path.join', 'os.path.join', (['"""data"""', '"""Malaysia.pdf"""'], {}), "('data', 'Malaysia.pdf')\n", (1416, 1440), False, 'import os\n'), ((952, 963), 'llama_index.readers.file.PDFReader', 'PDFReader', ([], {}), '()\n', (961, 963), False, 'from llama_index.readers.file import PDFReader\n'), ((1030,... |
from llama_index.retrievers import BaseRetriever
from llama_index import QueryBundle
from llama_index.schema import NodeWithScore
from llama_index.vector_stores import VectorStoreQuery
from typing import List, Sequence, Any
from llama_index.tools import BaseTool, adapt_to_async_tool
from llama_index import Document, Ve... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.tools.adapt_to_async_tool",
"llama_index.Document"
] | [((1143, 1228), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'self._index', 'similarity_top_k': 'self._similarity_top_k'}), '(index=self._index, similarity_top_k=self._similarity_top_k\n )\n', (1163, 1228), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ... |
from typing import List
from fastapi.responses import StreamingResponse
from app.utils.json import json_to_model
from app.utils.index import get_index
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index import VectorStoreIndex
from llama_index.llms.base import MessageRole, ChatMess... | [
"llama_index.llms.base.ChatMessage"
] | [((374, 385), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (383, 385), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((798, 816), 'fastapi.Depends', 'Depends', (['get_index'], {}), '(get_index)\n', (805, 816), False, 'from fastapi import APIRouter, Depends, HTTPException, Re... |
import os
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
BOT_NAME = os.environ["BOT_NAME"]
def construct_index(directory_data, directory_index, force_reload=False):
# check if storage already exists
if not os.path.exists(directory... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((541, 583), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (572, 583), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((871, 928), 'llama_index.StorageContext.from_defaults',... |
from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex
from llama_index.llms import OpenAI, ChatMessage, MessageRole
from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine
from dotenv import load_dotenv
import os
load_dotenv()
vector_index = None
history = []
... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.ChatMessage",
"llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults"
] | [((271, 284), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (282, 284), False, 'from dotenv import load_dotenv\n'), ((379, 425), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)'}), "(model='gpt-3.5-turbo', temperature=0.5)\n", (385, 425), False, 'from llama_inde... |
import streamlit as st
import openai
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import FaissVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index import load_index_from_storage
from llama_index.storage.storage_context import StorageCo... | [
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.query_engine.CitationQueryEngine.from_args",
"llama_index.vector_stores.FaissVectorStore.from_persist_dir",
"llama_index.load_index_from_storage"
] | [((983, 1050), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""', 'page_title': '"""Precedents Database"""'}), "(layout='wide', page_title='Precedents Database')\n", (1001, 1050), True, 'import streamlit as st\n'), ((1056, 1084), 'streamlit.title', 'st.title', (['"""Query Precedents"""'], ... |
import streamlit as st
from dotenv import load_dotenv
load_dotenv()
import os
import tempfile
from llama_index import SimpleDirectoryReader, StorageContext, LLMPredictor
from llama_index import VectorStoreIndex
from llama_index import ServiceContext
from llama_index.embeddings.langchain import LangchainEmbedding
from... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((55, 68), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (66, 68), False, 'from dotenv import load_dotenv\n'), ((860, 890), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (888, 890), False, 'from llama_index import SimpleDirectoryReader, StorageContext, LLMPredic... |
from dotenv import load_dotenv
import os
import streamlit as st
import pandas as pd
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from prompts import ... | [
"llama_index.core.tools.ToolMetadata",
"llama_index.llms.openai.OpenAI",
"llama_index.core.query_engine.PandasQueryEngine",
"llama_index.core.agent.ReActAgent.from_tools"
] | [((468, 481), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (479, 481), False, 'from dotenv import load_dotenv\n'), ((501, 539), 'os.path.join', 'os.path.join', (['"""data"""', '"""population.csv"""'], {}), "('data', 'population.csv')\n", (513, 539), False, 'import os\n'), ((556, 584), 'pandas.read_csv', 'pd.r... |
# general imports
from constants import *
# streamlit imports
import streamlit as st
from utils import *
from streamlit_lottie import st_lottie
# llama index imports
import openai
from llama_index import (
VectorStoreIndex,
download_loader,
ServiceContext,
set_global_service_context,
)
from llama_inde... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.set_global_service_context"
] | [((1017, 1080), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""', 'system_prompt': 'system_prompt'}), "(model='gpt-4-1106-preview', system_prompt=system_prompt)\n", (1023, 1080), False, 'from llama_index.llms import OpenAI\n'), ((1187, 1248), 'llama_index.ServiceContext.from_defaults', 'Se... |
import pathlib
import tempfile
from io import BytesIO
import openai
import streamlit as st
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.chat_engine import ContextChatEngine
from llama_index.llms.openai import OpenAI
from sidebar import sidebar_params
st.set_page_config(p... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI"
] | [((300, 386), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with Documents"""', 'layout': '"""wide"""', 'page_icon': '"""🔥"""'}), "(page_title='Chat with Documents', layout='wide',\n page_icon='🔥')\n", (318, 386), True, 'import streamlit as st\n'), ((383, 414), 'streamlit.title', ... |
import logging
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
# from IPython.display import Markdown, display
from llama_index.node_parser import SentenceSplitter
from ... | [
"llama_index.SimpleDirectoryReader",
"llama_index.node_parser.SentenceSplitter"
] | [((373, 412), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (392, 412), False, 'import logging\n'), ((422, 449), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (439, 449), False, 'import logging\n'), ((740, 790), 'llama_index.Simp... |
from dotenv import load_dotenv
import os
from typing import List
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.settings import Settings
from llama_index.llms.openai import OpenAI
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core import VectorStoreIndex
... | [
"llama_index.llms.openai.OpenAI",
"llama_index.core.embeddings.resolve_embed_model",
"llama_index.core.response.notebook_utils.display_source_node"
] | [((1816, 1829), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1827, 1829), False, 'from dotenv import load_dotenv\n'), ((1862, 1889), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (1868, 1889), False, 'from llama_index.llms.openai import OpenAI\n'), (... |
from fastapi import FastAPI, File, UploadFile, HTTPException
import openai
from dotenv import load_dotenv
import os
import json
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_i... | [
"llama_index.vector_stores.weaviate.WeaviateVectorStore",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader"
] | [((402, 415), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (413, 415), False, 'from dotenv import load_dotenv\n'), ((423, 432), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (430, 432), False, 'from fastapi import FastAPI, File, UploadFile, HTTPException\n'), ((444, 476), 'os.environ.get', 'os.environ.get',... |
from init import *
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.node_parser import SimpleNodeParser
from llama_index import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index import download_loader
class Index:
def __init__(self, dir="data"):
... | [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.VectorStoreIndex"
] | [((653, 727), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(1000)'}), '(llm_predictor=llm_predictor, chunk_size=1000)\n', (681, 727), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((749, 810)... |
def get_agent(list_filters, openai_key, pinecone_key):
import logging
import sys
import os
import pandas as pd
import pinecone
import openai
from llama_index import VectorStoreIndex
from llama_index.vector_stores import PineconeVectorStore
from llama_index.query_engine import Retrieve... | [
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.llms.OpenAI",
"llama_index.tools.ToolMetadata",
"llama_index.llms.ChatMessage",
"llama_index.tools.FunctionTool.from_defaults",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.query... | [((721, 779), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (740, 779), False, 'import logging\n'), ((1006, 1063), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'api_key', 'environment': '"""gcp-starter"""'}), "(a... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script allows you to ask questions about the Alice in Wonderland book.
It uses a GPT-3 model to create a vector index of the book and then
lets you ask questions of the index.
'''
import os
import yaml
import openai
from llama_index import (
GPTVectorSto... | [
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.download_loader",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.f... | [((862, 885), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (880, 885), False, 'import os\n'), ((1376, 1404), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (1391, 1404), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, Sim... |
from typing import Protocol
import html2text
from bs4 import BeautifulSoup
from llama_index import Document
from playwright.sync_api import sync_playwright
class ISitemapParser(Protocol):
def get_all_urls(self, sitemap_url: str) -> list[str]:
...
class IWebScraper(Protocol):
def scrape(self, urls: l... | [
"llama_index.Document"
] | [((1005, 1022), 'playwright.sync_api.sync_playwright', 'sync_playwright', ([], {}), '()\n', (1020, 1022), False, 'from playwright.sync_api import sync_playwright\n'), ((1345, 1387), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_content', '"""html.parser"""'], {}), "(html_content, 'html.parser')\n", (1358, 1387), False,... |
import os
from dotenv import load_dotenv
from IPython.display import Markdown, display
from llama_index.legacy import VectorStoreIndex, ServiceContext
from llama_index.legacy.vector_stores import ChromaVectorStore
from llama_index.legacy.storage.storage_context import StorageContext
from llama_index.legacy.embeddings ... | [
"llama_index.legacy.embeddings.HuggingFaceEmbedding",
"llama_index.legacy.VectorStoreIndex.from_documents",
"llama_index.legacy.storage.storage_context.StorageContext.from_defaults",
"llama_index.legacy.vector_stores.ChromaVectorStore",
"llama_index.legacy.node_parser.SimpleNodeParser",
"llama_index.legac... | [((846, 905), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (865, 905), False, 'import logging\n'), ((1029, 1042), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1040, 1042), False, 'from dotenv import load_... |
import weaviate
from llama_index import StorageContext, SimpleDirectoryReader, ServiceContext, VectorStoreIndex
from llama_index.vector_stores import WeaviateVectorStore
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
import box
import yaml
impor... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.vector_stores.WeaviateVectorStore"
] | [((331, 393), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (354, 393), False, 'import warnings\n'), ((3900, 3963), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model'... |
# Just runs .complete to make sure the LLM is listening
from llama_index.llms import Ollama
from pathlib import Path
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
download_loader,
)
from llama_index.llms import Ollama
from llama_index.storage.storage_context import Storage... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.llms.Ollama"
] | [((406, 435), 'llama_index.download_loader', 'download_loader', (['"""JSONReader"""'], {}), "('JSONReader')\n", (421, 435), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((539, 558), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': 'model'}), '(model=model)\n', (545, 558), Fa... |
from fastapi import FastAPI
from llama_index import ServiceContext
from llama_index import set_global_service_context
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings import GradientEmbedding
from llama_index.llms import GradientBaseModelLLM
from llama_index.vector_stores impo... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.GradientBaseModelLLM",
"llama_index.set_global_service_context",
"llama_index.embeddings.GradientEmbedding"
] | [((439, 448), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (446, 448), False, 'from fastapi import FastAPI\n'), ((1810, 1880), 'llama_index.llms.GradientBaseModelLLM', 'GradientBaseModelLLM', ([], {'base_model_slug': '"""llama2-7b-chat"""', 'max_tokens': '(400)'}), "(base_model_slug='llama2-7b-chat', max_tokens=400)... |
from typing import Optional
from llama_index.storage.kvstore.mongodb_kvstore import MongoDBKVStore
from approaches.index.store.base.cosmos_kv_doc_store import CosmosKVDocumentStore
class CosmosDocumentStore(CosmosKVDocumentStore):
"""Mongo Document (Node) store.
A MongoDB store for Document and Node objects.
... | [
"llama_index.storage.kvstore.mongodb_kvstore.MongoDBKVStore.from_host_and_port",
"llama_index.storage.kvstore.mongodb_kvstore.MongoDBKVStore.from_uri"
] | [((1156, 1193), 'llama_index.storage.kvstore.mongodb_kvstore.MongoDBKVStore.from_uri', 'MongoDBKVStore.from_uri', (['uri', 'db_name'], {}), '(uri, db_name)\n', (1179, 1193), False, 'from llama_index.storage.kvstore.mongodb_kvstore import MongoDBKVStore\n'), ((1667, 1721), 'llama_index.storage.kvstore.mongodb_kvstore.Mo... |
import os
import openai
from llama_index.indices.query.schema import QueryBundle
from llama_index.retrievers import VectorIndexRetriever
from chatgpt_long_term_memory.llama_index_helpers.config import \
RetrieversConfig
from chatgpt_long_term_memory.openai_engine.config import TokenCounterConfig
from chatgpt_long... | [
"llama_index.indices.query.schema.QueryBundle",
"llama_index.retrievers.VectorIndexRetriever"
] | [((388, 419), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (397, 419), False, 'import os\n'), ((3648, 3826), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo-16k"""', 'messages': 'messages', 'max_tokens': 'self.max_tokens... |
from pathlib import Path
from llama_hub.file.unstructured import UnstructuredReader
from pathlib import Path
from llama_index import download_loader
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from dotenv import load_dotenv
import os
from llama_index.node_parser import SimpleNodeParser
import pineco... | [
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((796, 809), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (807, 809), False, 'from dotenv import load_dotenv\n'), ((827, 847), 'os.getenv', 'os.getenv', (['"""api_key"""'], {}), "('api_key')\n", (836, 847), False, 'import os\n'), ((926, 955), 'os.getenv', 'os.getenv', (['"""pinecone_api_key"""'], {}), "('pin... |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/l... | [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), ... |
import chromadb
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.indices.service_context import Service... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.indices.service_context.ServiceContext.from_defaults",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader",
"llama_index.vector_stores.chro... | [((403, 420), 'chromadb.Client', 'chromadb.Client', ([], {}), '()\n', (418, 420), False, 'import chromadb\n'), ((555, 611), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-base-en-v1.5"""'}), "(model_name='BAAI/bge-base-en-v1.5')\n", (575, 611), False,... |
from llama_index.llms import OpenAI
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
import os
documents = SimpleDirectoryReader("./competition").load_data()
os.environ['OPENAI_API_KEY'] = 'sk-QnjWfyoAPGLysSCIfjozT3BlbkFJ4A0TyC0ZzaV... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((346, 403), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-large-en-v1.5"""'}), "(model_name='BAAI/bge-large-en-v1.5')\n", (366, 403), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((411, 457), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'... |
import os
import openai
import tiktoken
import logging
import sys
from dotenv import load_dotenv
from threading import Lock
from llama_index import (SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
StorageContext,
... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.selectors.pydantic_selectors.PydanticSingleSelector.from_defaults",
"llama_index.download_loader",
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.indices.postprocessor.SentenceTra... | [((991, 1049), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1010, 1049), False, 'import logging\n'), ((1162, 1175), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1173, 1175), False, 'from dotenv import load... |
from dotenv import load_dotenv
import os
load_dotenv()
import pinecone
from llama_index import (
download_loader,
SimpleDirectoryReader,
ServiceContext,
StorageContext,
VectorStoreIndex
)
from llama_index.llms import OpenAI
from llama_index.vector_stores import PineconeVectorStore
from llama_i... | [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.postprocessor.SentenceEmbeddingOptimizer",
"llama_index.callbacks.CallbackManager"
] | [((41, 54), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (52, 54), False, 'from dotenv import load_dotenv\n'), ((518, 560), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (535, 560), False, 'from llama_index.callbacks impo... |
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response) | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((134, 176), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (165, 176), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((82, 113), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./... |
from typing import List
from llama_index import Document, TwitterTweetReader
from social_gpt.ingestion.scraper.social_scraper import SocialScraper
class TwitterScraper(SocialScraper):
def scrape(self) -> List[Document]:
TwitterTweetReader() | [
"llama_index.TwitterTweetReader"
] | [((237, 257), 'llama_index.TwitterTweetReader', 'TwitterTweetReader', ([], {}), '()\n', (255, 257), False, 'from llama_index import Document, TwitterTweetReader\n')] |
import os
from typing import List
import googleapiclient
from dotenv import load_dotenv
from llama_index import Document
from progress.bar import IncrementalBar
from youtube_transcript_api import YouTubeTranscriptApi
from social_gpt.ingestion.scraper.social_scraper import SocialScraper
load_dotenv()
YOUTUBE_API_SER... | [
"llama_index.Document"
] | [((290, 303), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (301, 303), False, 'from dotenv import load_dotenv\n'), ((644, 689), 'youtube_transcript_api.YouTubeTranscriptApi.get_transcript', 'YouTubeTranscriptApi.get_transcript', (['video_id'], {}), '(video_id)\n', (679, 689), False, 'from youtube_transcript_a... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import logging
import sys
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# read the documents from the data dir
do... | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((153, 211), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (172, 211), False, 'import logging\n'), ((457, 488), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {}), '(documents)\n', (477, 4... |
from llama_index.readers import SimpleWebPageReader #as of version 0.9.13
from llama_index.readers import WikipediaReader
cities = [
"Los Angeles", "Houston", "Honolulu", "Tucson", "Mexico City",
"Cincinatti", "Chicago"
]
wiki_docs = []
for city in cities:
try:
doc = WikipediaReader().load_data(... | [
"llama_index.readers.WikipediaReader"
] | [((292, 309), 'llama_index.readers.WikipediaReader', 'WikipediaReader', ([], {}), '()\n', (307, 309), False, 'from llama_index.readers import WikipediaReader\n')] |
import dotenv
import os
from llama_index.readers.github import GithubRepositoryReader, GithubClient
from llama_index.core import (VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import ... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.llms.ollama.Ollama",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.PromptTemplate",
"llama_index.readers.github.Git... | [((415, 435), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (433, 435), False, 'import dotenv\n'), ((886, 912), 'llama_index.readers.github.GithubClient', 'GithubClient', (['github_token'], {}), '(github_token)\n', (898, 912), False, 'from llama_index.readers.github import GithubRepositoryReader, Github... |
import sys
import openai
import key
import llama_index
import wikipedia
from llama_index import (
VectorStoreIndex,
get_response_synthesizer,
Document,
SimpleDirectoryReader,
)
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_in... | [
"llama_index.SimpleDirectoryReader",
"llama_index.get_response_synthesizer",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.VectorStoreIndex",
"llama_index.postprocessor.SimilarityPostprocessor"
] | [((884, 911), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['documents'], {}), '(documents)\n', (900, 911), False, 'from llama_index import VectorStoreIndex, get_response_synthesizer, Document, SimpleDirectoryReader\n'), ((925, 979), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'... |
import os
from dotenv import load_dotenv
load_dotenv()
import s3fs
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage
)
# load documents
documents = SimpleDirectoryReader('../../../examples/paul_graham_essay/data/').load_data()
print(len(documents))... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((41, 54), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (52, 54), False, 'from dotenv import load_dotenv\n'), ((329, 371), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (360, 371), False, 'from llama_index import SimpleDirectoryReader,... |
import sys
import logging
import chromadb
import streamlit as st
from llama_index.llms import OpenAI
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index import ServiceCon... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.file.markdown.MarkdownNodeParser.from_defaults",
"llama_index.VectorStoreIn... | [((553, 572), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (570, 572), False, 'import logging\n'), ((5510, 5544), 'llama_index.node_parser.file.markdown.MarkdownNodeParser.from_defaults', 'MarkdownNodeParser.from_defaults', ([], {}), '()\n', (5542, 5544), False, 'from llama_index.node_parser.file.markdow... |
from flask_restx import Resource
from flask import request, render_template, Response
import openai
import os
import json
from llama_index import GPTSimpleVectorIndex
from llama_index import Document
from furl import furl
from PyPDF2 import PdfReader
os.environ["OPENAI_API_KEY"] = "sk-MEVQvovmcLV7uodMC2aTT3BlbkFJRbhfQ... | [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.Document"
] | [((407, 434), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (416, 434), False, 'import os\n'), ((491, 501), 'furl.furl', 'furl', (['link'], {}), '(link)\n', (495, 501), False, 'from furl import furl\n'), ((653, 664), 'furl.furl', 'furl', (['title'], {}), '(title)\n', (657, 664), Fals... |
from __future__ import annotations
import os
import dataclasses
from typing import TYPE_CHECKING, ClassVar
import time
import httpx
from rich import print
from xiaogpt.bot.base_bot import BaseBot, ChatHistoryMixin
from xiaogpt.utils import split_sentences
if TYPE_CHECKING:
import openai
from llama_index.embedd... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.PromptTemplate",
"llama_index.llms.azure_openai.AzureOpenAI",
"llama_index.core.S... | [((1030, 1081), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list', 'init': '(False)'}), '(default_factory=list, init=False)\n', (1047, 1081), False, 'import dataclasses\n'), ((6247, 6286), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (6... |
"""
This file contains functions for loading data into MemGPT's archival storage.
Data can be loaded with the following command, once a load function is defined:
```
memgpt load <data-connector-type> --name <dataset-name> [ADDITIONAL ARGS]
```
"""
from typing import List
from tqdm import tqdm
import typer
from memgp... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.SimpleWebPageReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.readers.database.DatabaseReader",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage"
] | [((586, 599), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (597, 599), False, 'import typer\n'), ((727, 776), 'memgpt.connectors.storage.StorageConnector.get_storage_connector', 'StorageConnector.get_storage_connector', ([], {'name': 'name'}), '(name=name)\n', (765, 776), False, 'from memgpt.connectors.storage impor... |
import logging
import sys
import requests
import os
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
import torch
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
from langchain.embeddings.huggingface import HuggingFac... | [
"llama_index.llms.LlamaCPP"
] | [((511, 570), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (530, 570), False, 'import logging\n'), ((854, 886), 'os.path.join', 'os.path.join', (['"""Data"""', '"""test.pdf"""'], {}), "('Data', 'test.pdf')\n", (866,... |
from llama_index import SimpleDirectoryReader, VectorStoreIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
from pprint import pprint; import IPython
import sys
import os
from pathlib import Path
# Check if the environment variable exists
if "OPENAIKEY" in os.environ:
... | [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((745, 790), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (779, 790), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n'), ((1396, 1411), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (1409, 1411), False, ... |
import argparse
import copy
import logging
import os
import sys
import warnings
from typing import Optional, List, Callable
from langchain.llms import OpenAI
import faiss
import gradio as gr
import torch
import torch.distributed as dist
import transformers
from accelerate import dispatch_model, infer_auto_device_map
fr... | [
"llama_index.PromptHelper",
"llama_index.GPTFaissIndex.load_from_disk",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor"
] | [((13996, 14021), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14019, 14021), False, 'import argparse\n'), ((14291, 14338), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['args.model_path'], {}), '(args.model_path)\n', (14321, 14338), False, 'from transfor... |
from typing import List, Set
from llama_index.core import Document, KnowledgeGraphIndex, StorageContext
from llama_index.core.query_engine import BaseQueryEngine
from llama_index.core import load_index_from_storage
import os
def load_kg_graph_index_storage_context(kg_graph_storage_dir: str) -> StorageContext:
retu... | [
"llama_index.core.KnowledgeGraphIndex.from_documents",
"llama_index.core.StorageContext.from_defaults"
] | [((323, 385), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'kg_graph_storage_dir'}), '(persist_dir=kg_graph_storage_dir)\n', (351, 385), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext\n'), ((782, 818), 'os.path.exists', 'os.pat... |
from typing import Any, List
import tiktoken
from bs4 import BeautifulSoup
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
staticPath = "static"
def encode_string(string: str, encoding_name: str = "p50k_base"):
encoding = tiktoken.get_encoding(encoding_name)
... | [
"llama_index.readers.schema.base.Document"
] | [((283, 319), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (304, 319), False, 'import tiktoken\n'), ((437, 473), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (458, 473), False, 'import tiktoken\n'), ((664, 700), 'tikto... |
import requests, os, time, datetime
from dotenv import load_dotenv
import pandas as pd
import numpy as np
load_dotenv()
from llama_index.indices import VectaraIndex
from llama_index import Document
#setting up secrets for Vectara
VECTARA_CUSTOMER_ID=os.environ["VECTARA_CUSTOMER_ID"]
VECTARA_CORPUS_ID=os.environ["VEC... | [
"llama_index.indices.VectaraIndex"
] | [((106, 119), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (117, 119), False, 'from dotenv import load_dotenv\n'), ((450, 578), 'llama_index.indices.VectaraIndex', 'VectaraIndex', ([], {'vectara_api_key': 'VECTARA_API_KEY', 'vectara_customer_id': 'VECTARA_CUSTOMER_ID', 'vectara_corpus_id': 'VECTARA_CORPUS_ID'... |
from llama_index.prompts import PromptTemplate
ZERO_SHOT_QA_TEMPLATE = (
"<|system|>: Reponds à une question à partir du contexte en français.\n"
"<|user|>: {query_str} {context_str} \n"
"<|Réponse|>:"
)
ZERO_SHOT_PROMPT = PromptTemplate(ZERO_SHOT_QA_TEMPLATE)
ZERO_SHOT_QUESTION_TEMPLATE = """
<|syst... | [
"llama_index.prompts.PromptTemplate"
] | [((237, 274), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['ZERO_SHOT_QA_TEMPLATE'], {}), '(ZERO_SHOT_QA_TEMPLATE)\n', (251, 274), False, 'from llama_index.prompts import PromptTemplate\n'), ((481, 524), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['ZERO_SHOT_QUESTION_TEMPLATE'], {}), '(ZERO_S... |
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import sys
import os
def construct_index(src_path, out_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 512
# set maximum chu... | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.PromptHelper"
] | [((565, 664), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (577, 664), False, 'from llama_index import SimpleDirectoryReader, G... |
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, Document
from llama_index.llms import OpenAI
import openai
from llama_index import SimpleDirectoryReader
st.set_page_config(page_title="Chat with the docs, powered by LlamaIndex")
openai.api_key = st.secrets.openai_key
st.title("C... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI"
] | [((193, 267), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with the docs, powered by LlamaIndex"""'}), "(page_title='Chat with the docs, powered by LlamaIndex')\n", (211, 267), True, 'import streamlit as st\n'), ((309, 365), 'streamlit.title', 'st.title', (['"""Chat with the custom do... |
import streamlit as st
from pathlib import Path
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
download_loader,
)
from llama_index.llms import Ollama
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStor... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.llms.Ollama"
] | [((409, 447), 'streamlit.title', 'st.title', (['"""Confluence Query Interface"""'], {}), "('Confluence Query Interface')\n", (417, 447), True, 'import streamlit as st\n'), ((554, 625), 'streamlit.text_input', 'st.text_input', (['"""Confluence Base URL"""', '"""https://espace.agir.orange.com/"""'], {}), "('Confluence Ba... |
from llama_index import StorageContext, load_index_from_storage, ServiceContext
import gradio as gr
import sys
import os
import logging
from utils import get_automerging_query_engine
from utils import get_sentence_window_query_engine
import configparser
from TTS.api import TTS
from gtts import gTTS
import simpleaudio a... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.base.PromptTemplate",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((1610, 1637), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1635, 1637), False, 'import configparser\n'), ((4094, 4149), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'log_level'}), '(stream=sys.stdout, level=log_level)\n', (4113, 4149), False, 'im... |
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import os
def construct_index(directory_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 2000
# set ma... | [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.PromptHelper"
] | [((577, 676), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (589, 676), False, 'from llama_index import SimpleDirectoryReader, G... |
import os
from llama_index import (
Document,
GPTVectorStoreIndex,
StorageContext,
load_index_from_storage,
)
class OpenAI:
"""
OpenAI class to handle all ChatGPT functions
"""
def __init__(self):
self.indices = {}
def add_document(self, room, context):
"""
... | [
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults",
"llama_index.GPTVectorStoreIndex",
"llama_index.Document"
] | [((475, 492), 'llama_index.Document', 'Document', (['context'], {}), '(context)\n', (483, 492), False, 'from llama_index import Document, GPTVectorStoreIndex, StorageContext, load_index_from_storage\n'), ((827, 852), 'llama_index.Document', 'Document', (['initial_context'], {}), '(initial_context)\n', (835, 852), False... |
import os
import streamlit as st
from dotenv import load_dotenv
from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from biorxiv_manager import BioRxivManager
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
st.title("Ask BioRxiv"... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.PromptHelper"
] | [((238, 251), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (249, 251), False, 'from dotenv import load_dotenv\n'), ((269, 296), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (278, 296), False, 'import os\n'), ((298, 321), 'streamlit.title', 'st.title', (['"""Ask BioRxiv"""'... |
import logging
from llama_index.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
from app.llama_index.index import setup_index
from app.llama_index.query_engine import setup_query_engine
from app.database.crud import get_vectorized_elect... | [
"llama_index.langchain_helpers.agents.tools.LlamaIndexTool",
"llama_index.vector_stores.types.ExactMatchFilter"
] | [((424, 433), 'app.database.database.Session', 'Session', ([], {}), '()\n', (431, 433), False, 'from app.database.database import Session\n'), ((469, 518), 'app.database.crud.get_vectorized_election_programs_from_db', 'get_vectorized_election_programs_from_db', (['session'], {}), '(session)\n', (509, 518), False, 'from... |
import os
from configparser import ConfigParser, SectionProxy
from typing import Any, Type
from llama_index import (
LLMPredictor,
ServiceContext,
VectorStoreIndex,
)
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices imp... | [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.llm_predictor.StructuredLLMPredictor",
"llama_index.llms.openai.OpenAI",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.indices.loading.load_index_from_storage",
"llama_index.embeddings... | [((1023, 1037), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1035, 1037), False, 'from configparser import ConfigParser, SectionProxy\n'), ((2725, 2812), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}),... |
from components.store import get_storage_context
from llama_index import VectorStoreIndex
from llama_index.retrievers import (
VectorIndexRetriever,
)
from models.gpts import get_gpts_by_uuids
def search_gpts(question):
storage_context = get_storage_context()
index = VectorStoreIndex.from_documents([], st... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.VectorIndexRetriever"
] | [((248, 269), 'components.store.get_storage_context', 'get_storage_context', ([], {}), '()\n', (267, 269), False, 'from components.store import get_storage_context\n'), ((282, 350), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[]'], {'storage_context': 'storage_context'}), '([], ... |
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.l... | [
"llama_index.vector_stores.lancedb._to_llama_similarities",
"llama_index.schema.RelatedNodeInfo",
"llama_index.vector_stores.lancedb._to_lance_filter"
] | [((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGI... |
from typing import List
from fastapi.responses import StreamingResponse
from llama_index.chat_engine.types import BaseChatEngine
from app.engine.index import get_chat_engine
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index.llms.base import ChatMessage
from llama_index.llms.types... | [
"llama_index.llms.base.ChatMessage"
] | [((390, 401), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (399, 401), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((636, 660), 'fastapi.Depends', 'Depends', (['get_chat_engine'], {}), '(get_chat_engine)\n', (643, 660), False, 'from fastapi import APIRouter, Depends, HTTPE... |
# Copyright (c) Timescale, Inc. (2023)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in... | [
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.vector_stores.types.MetadataInfo",
"llama_index.set_global_service_context",
"llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_... | [((7098, 7170), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Time machine demo"""', 'page_icon': '"""🧑\u200d💼"""'}), "(page_title='Time machine demo', page_icon='🧑\\u200d💼')\n", (7116, 7170), True, 'import streamlit as st\n'), ((7166, 7195), 'streamlit.markdown', 'st.markdown', (['"""#... |