code | apis | extract_api
|---|---|---|
# Copyright (c) Timescale, Inc. (2023)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in... | [
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.vector_stores.types.MetadataInfo",
"llama_index.set_global_service_context",
"llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_... | [((7098, 7170), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Time machine demo"""', 'page_icon': '"""🧑\u200d💼"""'}), "(page_title='Time machine demo', page_icon='🧑\\u200d💼')\n", (7116, 7170), True, 'import streamlit as st\n'), ((7166, 7195), 'streamlit.markdown', 'st.markdown', (['"""#... |
import logging
from threading import Thread
from typing import Any, List, Optional, Type
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.response.schema import RESPONSE_TYPE, StreamingResponse
from llam... | [
"llama_index.core.tools.ToolOutput",
"llama_index.core.chat_engine.utils.response_gen_from_query_engine",
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.base.llms.types.C... | [((1220, 1247), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1237, 1247), False, 'import logging\n'), ((1579, 1611), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_TEMPLATE'], {}), '(DEFAULT_TEMPLATE)\n', (1593, 1611), False, 'from llama_index.core.prompts... |
from typing import List
from llama_index.readers.base import BaseReader
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
from llama_index.schema import Document
class LyzrYoutubeReader(BaseReader):
def __init__(self) -> None:
try:
from youtube_transcript_api import Y... | [
"llama_index.readers.youtube_transcript.YoutubeTranscriptReader"
] | [((623, 648), 'llama_index.readers.youtube_transcript.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (646, 648), False, 'from llama_index.readers.youtube_transcript import YoutubeTranscriptReader\n')] |
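This row truncates inside the `try` import; below is a minimal sketch of the wrapper pattern it suggests. The `urls` parameter name and the error message are assumptions; `ytlinks` is the argument `YoutubeTranscriptReader.load_data` takes.

```python
from typing import List

from llama_index.readers.base import BaseReader
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
from llama_index.schema import Document


class LyzrYoutubeReader(BaseReader):
    def __init__(self) -> None:
        try:
            # Import check only: fail early if the optional dependency is missing.
            from youtube_transcript_api import YouTubeTranscriptApi  # noqa: F401
        except ImportError:
            raise ImportError(
                "`youtube_transcript_api` not found, "
                "please run `pip install youtube-transcript-api`"
            )

    def load_data(self, urls: List[str]) -> List[Document]:
        # Delegate the actual transcript fetching to the bundled reader.
        loader = YoutubeTranscriptReader()
        return loader.load_data(ytlinks=urls)
```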
import sys
import asyncio
import logging
import warnings
import nest_asyncio
from typing import List, Set
from bs4 import BeautifulSoup, Tag
from typing import List
from llama_index.schema import Document
IS_IPYKERNEL = "ipykernel_launcher" in sys.argv[0]
if IS_IPYKERNEL:
nest_asyncio.apply()
logger = logging.... | [
"llama_index.schema.Document"
] | [((312, 339), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (329, 339), False, 'import logging\n'), ((281, 301), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (299, 301), False, 'import nest_asyncio\n'), ((676, 710), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""htm... |
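This row combines a Jupyter event-loop patch with HTML parsing; a hedged reconstruction of the visible pieces follows. The `html_to_document` helper name is hypothetical, and the truncated parser string is assumed to be `"html.parser"`.

```python
import sys

import nest_asyncio
from bs4 import BeautifulSoup
from llama_index.schema import Document

# In an IPython kernel an event loop is already running, so patch it to
# allow nested asyncio.run()-style calls.
IS_IPYKERNEL = "ipykernel_launcher" in sys.argv[0]
if IS_IPYKERNEL:
    nest_asyncio.apply()


def html_to_document(html: str) -> Document:  # hypothetical helper
    # Strip tags and keep only the visible text, one line per block element.
    soup = BeautifulSoup(html, "html.parser")
    return Document(text=soup.get_text(separator="\n", strip=True))
```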
import logging
from typing import Optional, Union
from llama_index import ServiceContext
from llama_index.callbacks import CallbackManager
from llama_index.embeddings.utils import EmbedType
from llama_index.llms.utils import LLMType
from llama_index.prompts import PromptTemplate
from llama_index.prompts.base import Ba... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.CallbackManager",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.prompts.PromptTemplate"
] | [((409, 436), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (426, 436), False, 'import logging\n'), ((1016, 1120), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(750)', 'chunk_overlap': '(100)', 'callback_manager': 'callb... |
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex
from llama_index.response.pprint_utils import pprint_response
from langchain.chat_models import ChatOpenAI
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.query_engine import SubQuestionQueryEng... | [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.set_global_service_context",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((460, 473), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (471, 473), False, 'from dotenv import load_dotenv\n'), ((503, 561), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (522, 561), False, 'import logging... |
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
set_global_handler
)
import phoenix as px
px.launch_app()
set_global_handler("arize_phoenix")
documents = SimpleDirectoryReader('files').load_data()
index = VectorStoreIndex.from_documents(documents)
qe = index.as_query_engine()
res... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.set_global_handler"
] | [((129, 144), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (142, 144), True, 'import phoenix as px\n'), ((145, 180), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""arize_phoenix"""'], {}), "('arize_phoenix')\n", (163, 180), False, 'from llama_index.core import SimpleDirectoryReader, Vect... |
from llama_index.core import Settings, Document, VectorStoreIndex
from llama_index.core.node_parser import SentenceWindowNodeParser
doc = Document(
text="Sentence 1. Sentence 2. Sentence 3."
)
text_splitter = SentenceWindowNodeParser.from_defaults(
    window_size=2,
window_metadata_key="ContextWindow",
o... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.core.Document"
] | [((138, 190), 'llama_index.core.Document', 'Document', ([], {'text': '"""Sentence 1. Sentence 2. Sentence 3."""'}), "(text='Sentence 1. Sentence 2. Sentence 3.')\n", (146, 190), False, 'from llama_index.core import Settings, Document, VectorStoreIndex\n'), ((213, 348), 'llama_index.core.node_parser.SentenceWindowNodePa... |
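The `from_defaults` call in this row is cut off after `window_metadata_key`; a sketch completing it, assuming the truncated keyword is `original_text_metadata_key` (the `"OriginalSentence"` value is a placeholder).

```python
from llama_index.core import Document
from llama_index.core.node_parser import SentenceWindowNodeParser

doc = Document(text="Sentence 1. Sentence 2. Sentence 3.")
text_splitter = SentenceWindowNodeParser.from_defaults(
    window_size=2,
    window_metadata_key="ContextWindow",
    original_text_metadata_key="OriginalSentence",  # placeholder key name
)
nodes = text_splitter.get_nodes_from_documents([doc])
for node in nodes:
    # Each node's metadata carries the surrounding sentence window.
    print(node.metadata["ContextWindow"])
```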
import asyncio
from llama_index.core import KeywordTableIndex
from llama_index.core import SimpleDirectoryReader
async def retrieve(retriever, query, label):
response = await retriever.aretrieve(query)
print(f"{label} retrieved {str(len(response))} nodes")
async def main():
reader = SimpleDirectoryReader(... | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.KeywordTableIndex.from_documents"
] | [((298, 328), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (319, 328), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((376, 419), 'llama_index.core.KeywordTableIndex.from_documents', 'KeywordTableIndex.from_documents', (['documents'], {}), '(... |
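The async row above stops inside `main()`; this sketch shows how it is presumably driven, with placeholder query strings.

```python
import asyncio

from llama_index.core import KeywordTableIndex, SimpleDirectoryReader


async def retrieve(retriever, query, label):
    response = await retriever.aretrieve(query)
    print(f"{label} retrieved {len(response)} nodes")


async def main():
    documents = SimpleDirectoryReader("files").load_data()
    index = KeywordTableIndex.from_documents(documents)
    retriever = index.as_retriever()
    # Fire both retrievals concurrently instead of awaiting them in sequence.
    await asyncio.gather(
        retrieve(retriever, "first query", "A"),
        retrieve(retriever, "second query", "B"),
    )


asyncio.run(main())
```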
import tiktoken
from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
from llama_index.core.llms.mock import MockLLM
embed_model = MockEmbedding(embed_dim=1536)
llm = MockLLM(max_tokens=256)
token_count... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llms.mock.MockLLM",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.MockEmbedding"
] | [((249, 278), 'llama_index.core.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(1536)'}), '(embed_dim=1536)\n', (262, 278), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((285, 308), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {'max_tokens': '... |
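This row's `token_count...` assignment is cut off; below is a hedged completion wiring `TokenCountingHandler` into the global `Settings`. The mock models make the run free while the handler still tallies tokens; the gpt-3.5-turbo tokenizer choice is an assumption.

```python
import tiktoken
from llama_index.core import (
    MockEmbedding,
    Settings,
    SimpleDirectoryReader,
    VectorStoreIndex,
)
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
from llama_index.core.llms.mock import MockLLM

# Mock LLM and embedding model: no API calls, but token counts still flow.
Settings.embed_model = MockEmbedding(embed_dim=1536)
Settings.llm = MockLLM(max_tokens=256)

token_counter = TokenCountingHandler(
    tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
Settings.callback_manager = CallbackManager([token_counter])

documents = SimpleDirectoryReader("files").load_data()
index = VectorStoreIndex.from_documents(documents)
print("Embedding tokens:", token_counter.total_embedding_token_count)
```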
from typing import Any, List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.cor... | [
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.response_synthesizers.get_response_synthesizer"
] | [((4987, 5055), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (5028, 5055), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm... |
from llama_index.core.postprocessor import KeywordNodePostprocessor
from llama_index.core.schema import TextNode, NodeWithScore
nodes = [
TextNode(
text="Entry no: 1, <SECRET> - Attack at Dawn"
),
TextNode(
text="Entry no: 2, <RESTRICTED> - Go to point Bravo"
),
TextNode(
te... | [
"llama_index.core.schema.NodeWithScore",
"llama_index.core.schema.TextNode",
"llama_index.core.postprocessor.KeywordNodePostprocessor"
] | [((452, 519), 'llama_index.core.postprocessor.KeywordNodePostprocessor', 'KeywordNodePostprocessor', ([], {'exclude_keywords': "['SECRET', 'RESTRICTED']"}), "(exclude_keywords=['SECRET', 'RESTRICTED'])\n", (476, 519), False, 'from llama_index.core.postprocessor import KeywordNodePostprocessor\n'), ((143, 198), 'llama_i... |
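The node list in this row breaks off at the third entry; a sketch of applying the postprocessor follows, with the third node's text a placeholder. Postprocessors operate on `NodeWithScore` objects, as they would inside a query pipeline, and recent llama_index versions need spacy installed for this one.

```python
from llama_index.core.postprocessor import KeywordNodePostprocessor
from llama_index.core.schema import NodeWithScore, TextNode

nodes = [
    TextNode(text="Entry no: 1, <SECRET> - Attack at Dawn"),
    TextNode(text="Entry no: 2, <RESTRICTED> - Go to point Bravo"),
    TextNode(text="Entry no: 3, Nothing to report"),  # placeholder third entry
]
scored = [NodeWithScore(node=n, score=1.0) for n in nodes]

postprocessor = KeywordNodePostprocessor(exclude_keywords=["SECRET", "RESTRICTED"])
for result in postprocessor.postprocess_nodes(scored):
    # Only the entry without an excluded keyword survives.
    print(result.node.text)
```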
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import KeywordExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_fro... | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.extractors.KeywordExtractor",
"llama_index.core.node_parser.SentenceSplitter"
] | [((176, 206), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (197, 206), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((247, 291), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), ... |
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import SummaryExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_fro... | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.extractors.SummaryExtractor"
] | [((176, 206), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (197, 206), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((247, 291), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), ... |
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))
print(f"Metadata: {document[0].metadata}")
print(f"Text: {document[0].text}")
| [
"llama_index.readers.file.FlatReader"
] | [((83, 95), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (93, 95), False, 'from llama_index.readers.file import FlatReader\n'), ((124, 158), 'pathlib.Path', 'Path', (['"""files/sample_document1.txt"""'], {}), "('files/sample_document1.txt')\n", (128, 158), False, 'from pathlib import Path\n')] |
from llama_index.core.node_parser import HierarchicalNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))
hierarchical_parser = HierarchicalNodeParser.from_defaults(
chunk_sizes=[128, 64, 32],
... | [
"llama_index.readers.file.FlatReader",
"llama_index.core.node_parser.HierarchicalNodeParser.from_defaults"
] | [((147, 159), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (157, 159), False, 'from llama_index.readers.file import FlatReader\n'), ((247, 332), 'llama_index.core.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': '[128, 64, 32]', 'c... |
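The hierarchical parser call above is cut off after `chunk_sizes`; this sketch finishes the parse and extracts the leaf level that auto-merging retrieval typically indexes.

```python
from pathlib import Path

from llama_index.core.node_parser import HierarchicalNodeParser, get_leaf_nodes
from llama_index.readers.file import FlatReader

reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))

hierarchical_parser = HierarchicalNodeParser.from_defaults(
    chunk_sizes=[128, 64, 32],  # parent -> child -> grandchild chunk sizes
)
nodes = hierarchical_parser.get_nodes_from_documents(document)
leaf_nodes = get_leaf_nodes(nodes)
print(f"{len(nodes)} nodes total, {len(leaf_nodes)} leaf nodes")
```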
from collections import ChainMap
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Protocol,
Sequence,
get_args,
runtime_checkable,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponseAsyncGen... | [
"llama_index.core.bridge.pydantic.validator",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.program.utils.get_program_for_llm",
"llama_index.core.base.llms.typ... | [((2866, 2929), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""System prompt for LLM calls."""'}), "(default=None, description='System prompt for LLM calls.')\n", (2871, 2929), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validato... |
import os
import json
import logging
import sys
import requests
from dotenv import load_dotenv
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from llama_index.core import VectorStoreIndex, Document
from llama_index.tools.brave_search import BraveSearchToolSpec
from llama_index.readers.we... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.Document",
"llama_index.tools.brave_search.BraveSearchToolSpec"
] | [((496, 569), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(0.1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])\n', (501, 569), False, 'from urllib3.util.retry import Retry\n'), ((675, 733), 'logging.basicConfig', 'l... |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/l... | [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), ... |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/l... | [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), ... |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/l... | [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), ... |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/l... | [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), ... |
from typing import Any, List
import tiktoken
from bs4 import BeautifulSoup
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
staticPath = "static"
def encode_string(string: str, encoding_name: str = "p50k_base"):
encoding = tiktoken.get_encoding(encoding_name)
... | [
"llama_index.readers.schema.base.Document"
] | [((283, 319), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (304, 319), False, 'import tiktoken\n'), ((437, 473), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (458, 473), False, 'import tiktoken\n'), ((664, 700), 'tikto... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/8 14:03
@Author : alexanderwu
@File : document.py
@Desc : Classes and Operations Related to Files in the File System.
"""
from enum import Enum
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from llama_index.cor... | [
"llama_index.core.node_parser.SimpleNodeParser.from_defaults"
] | [((1946, 1965), 'pydantic.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (1951, 1965), False, 'from pydantic import BaseModel, ConfigDict, Field\n'), ((1982, 1999), 'pydantic.Field', 'Field', ([], {'default': '""""""'}), "(default='')\n", (1987, 1999), False, 'from pydantic import BaseModel, ConfigDict... |
from collections.abc import Generator
from typing import Any
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.utils import node_to_metadata_dict
from llama_index.vector_stores.chroma import ChromaVectorStore # type: ignore
def chunk_list(
lst: list[BaseNode], max_ch... | [
"llama_index.core.vector_stores.utils.node_to_metadata_dict"
] | [((2766, 2845), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (2787, 2845), False, 'from llama_index.core.vector_stores.utils import node... |
import os
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index ... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper"
] | [((380, 441), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (394, 441), False, 'import os\n'), ((766, 825), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}... |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.llms.azure_openai import AzureOpe... | [
"llama_index.core.base.llms.types.CompletionResponse"
] | [((443, 456), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (454, 456), False, 'from dotenv import load_dotenv\n'), ((662, 687), 'os.getenv', 'os.getenv', (['"""HF_TOKEN"""', '""""""'], {}), "('HF_TOKEN', '')\n", (671, 687), False, 'import os\n'), ((698, 733), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_TOKEN... |
from dotenv import load_dotenv
load_dotenv()
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store import Simpl... | [
"llama_index.SimpleDirectoryReader",
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.graph_stores.SimpleGraphStore.from_persist_dir",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir",
"llam... | [((31, 44), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (42, 44), False, 'from dotenv import load_dotenv\n'), ((450, 495), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (484, 495), False, 'from llama_index import GPTVectorStoreIn... |
from typing import List
from llama_index.readers.base import BaseReader
from llama_index.schema import Document
from autollm.utils.logging import logger
class LangchainPDFReader(BaseReader):
"""Custom PDF reader that uses langchain's PDFMinerLoader."""
def __init__(self, extract_images: bool = False) -> No... | [
"llama_index.schema.Document.from_langchain_format"
] | [((1131, 1181), 'llama_index.schema.Document.from_langchain_format', 'Document.from_langchain_format', (['langchain_document'], {}), '(langchain_document)\n', (1161, 1181), False, 'from llama_index.schema import Document\n')] |
from rag.agents.interface import Pipeline
from rich.progress import Progress, SpinnerColumn, TextColumn
from typing import Any
from pydantic import create_model
from typing import List
import warnings
import box
import yaml
import timeit
from rich import print
from llama_index.core import SimpleDirectoryReader
from lla... | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.multi_modal_llms.ollama.OllamaMultiModal",
"llama_index.core.output_parsers.PydanticOutputParser"
] | [((512, 574), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (535, 574), False, 'import warnings\n'), ((575, 630), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarnin... |
import functools
import os
import random
import tempfile
import traceback
import asyncio
from collections import defaultdict
import aiohttp
import discord
import aiofiles
import httpx
import openai
import tiktoken
from functools import partial
from typing import List, Optional, cast
from pathlib import Path
from datet... | [
"llama_index.langchain_helpers.agents.IndexToolConfig",
"llama_index.download_loader",
"llama_index.retrievers.TreeSelectLeafRetriever",
"llama_index.GithubRepositoryReader",
"llama_index.langchain_helpers.text_splitter.TokenTextSplitter",
"llama_index.BeautifulSoupWebReader",
"llama_index.langchain_hel... | [((2731, 2770), 'services.environment_service.EnvService.get_max_deep_compose_price', 'EnvService.get_max_deep_compose_price', ([], {}), '()\n', (2768, 2770), False, 'from services.environment_service import EnvService\n'), ((2784, 2813), 'llama_index.download_loader', 'download_loader', (['"""EpubReader"""'], {}), "('... |
import os
from langchain import OpenAI
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex
import sqlalchemy
import time
DatabaseReader = download_loader('DatabaseReader')
databasePath = f'sqlite:///{os.path.dirname(__file__)}/vulns.db... | [
"llama_index.GPTSQLStructStoreIndex",
"llama_index.SQLDatabase",
"llama_index.download_loader"
] | [((223, 256), 'llama_index.download_loader', 'download_loader', (['"""DatabaseReader"""'], {}), "('DatabaseReader')\n", (238, 256), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex\n'), ((372, 410), 'sqlalchemy.create_engine'... |
# https://blog.streamlit.io/build-a-chatbot-with-custom-data-sources-powered-by-llamaindex/
import os
import streamlit as st
from llama_index.core import ServiceContext, Document, SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.huggingface import... | [
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.ollama.Ollama",
"llama_index.core.VectorStoreIndex.from_documents"
] | [((357, 394), 'os.getenv', 'os.getenv', (['"""OLLAMA_HOST"""', '"""localhost"""'], {}), "('OLLAMA_HOST', 'localhost')\n", (366, 394), False, 'import os\n'), ((506, 573), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': '"""zephyr"""', 'base_url': "('http://' + OLLAMA_HOST + ':11434')"}), "(model='zephyr', bas... |
import argparse
import logging
import sys
import re
import os
import argparse
import requests
from pathlib import Path
from urllib.parse import urlparse
from llama_index import ServiceContext, StorageContext
from llama_index import set_global_service_context
from llama_index import VectorStoreIndex, SimpleDirectoryRe... | [
"llama_index.SimpleDirectoryReader",
"llama_index.postprocessor.SentenceTransformerRerank",
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.ChatMessage",
"llama_index.vector_stores.MilvusVectorStore",
"llama_index.llms.OpenAI",
"llama_index.readers.file.flat_reader.FlatReader",
"llama... | [((2448, 2465), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2460, 2465), False, 'import requests\n'), ((2063, 2076), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (2071, 2076), False, 'from urllib.parse import urlparse\n'), ((3032, 3049), 'requests.get', 'requests.get', (['url'], {}), '(... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package #
# Website: https://pygpt.net #
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License ... | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.indices.vector_store.base.VectorStoreIndex"
] | [((3613, 3659), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'path'}), '(persist_dir=path)\n', (3641, 3659), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((3792, 3865), 'llama_index.core.load_index_from_storage', 'load_index_f... |
"""Loads files from GitHub using the LlamaIndex GithubRepositoryReader."""
import os
from typing import ClassVar, Iterable, Optional
from pydantic import Field, field_serializer
from typing_extensions import override
from ..schema import Item
from ..source import Source, SourceSchema
from .llama_index_docs_source imp... | [
"llama_index.core.readers.download_loader"
] | [((1012, 1097), 'pydantic.Field', 'Field', ([], {'description': '"""The GitHub repository to load from. Format: <owner>/<repo>."""'}), "(description='The GitHub repository to load from. Format: <owner>/<repo>.'\n )\n", (1017, 1097), False, 'from pydantic import Field, field_serializer\n'), ((1119, 1214), 'pydantic.F... |
from __future__ import annotations
from typing import Optional
import os
from llama_index.core import ServiceContext
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.core.... | [
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding",
"llama_index.llms.azure_openai.AzureOpenAI",
"llama_index.core.llms.openai_utils.ALL_AVAILABLE_MODELS.update",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.core.llms.openai_utils.CHAT_MODELS.update",
"llama_index.core.llms.OpenAI... | [((948, 998), 'llama_index.core.llms.openai_utils.ALL_AVAILABLE_MODELS.update', 'ALL_AVAILABLE_MODELS.update', (['_EXTENDED_CHAT_MODELS'], {}), '(_EXTENDED_CHAT_MODELS)\n', (975, 998), False, 'from llama_index.core.llms.openai_utils import ALL_AVAILABLE_MODELS, CHAT_MODELS\n'), ((999, 1040), 'llama_index.core.llms.open... |
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
import graphsignal
import logging
import time
import random
load_dotenv()
logging.basicConfig()
logger = logging.getLogger()
logger.set... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.set_global_service_context",
"llama_index.SimpleDirectoryReader"
] | [((244, 257), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (255, 257), False, 'from dotenv import load_dotenv\n'), ((259, 280), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (278, 280), False, 'import logging\n'), ((290, 309), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (307,... |
# Ref https://github.com/amrrs/QABot-LangChain/blob/main/Q%26A_Bot_with_Llama_Index_and_LangChain.ipynb
#from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListInd... | [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((716, 815), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (728, 815), False, 'from llama_index import SimpleDirectoryReader, L... |
# chroma.py
import streamlit as st
import os
import re
from pathlib import Path
import chromadb
from chromadb.config import Settings
from llama_index import GPTVectorStoreIndex, load_index_from_storage
from llama_index.vector_stores import ChromaVectorStore
from utils.model_settings import sentenceTransformers, get_se... | [
"llama_index.load_index_from_storage",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore"
] | [((665, 686), 'utils.model_settings.get_service_context', 'get_service_context', ([], {}), '()\n', (684, 686), False, 'from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model\n'), ((1363, 1418), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collect... |
# /app/src/tools/doc_search.py
import logging
# Primary Components
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from src.utils.config import load_config, setup_environment_variables
from src.utils.embeddi... | [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((384, 411), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (401, 411), False, 'import logging\n'), ((1157, 1170), 'src.utils.config.load_config', 'load_config', ([], {}), '()\n', (1168, 1170), False, 'from src.utils.config import load_config, setup_environment_variables\n'), ((1179, 121... |
import logging
import traceback
from typing import Sequence, List, Optional, Dict
from llama_index import Document
from llama_index.callbacks import CBEventType, CallbackManager
from llama_index.callbacks.schema import EventPayload
from llama_index.node_parser import NodeParser, SimpleNodeParser
from llama_index.node_... | [
"llama_index.utils.get_tqdm_iterable",
"llama_index.callbacks.CallbackManager",
"llama_index.text_splitter.get_default_text_splitter"
] | [((893, 964), 'pydantic.Field', 'Field', ([], {'description': '"""The text splitter to use when splitting documents."""'}), "(description='The text splitter to use when splitting documents.')\n", (898, 964), False, 'from pydantic import Field\n'), ((1008, 1099), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'de... |
# RAG/TAG Tiger - llm.py
# Copyright (c) 2024 Stuart Riffle
# github.com/stuartriffle/ragtag-tiger
import os
import torch
from .files import *
from .lograg import lograg, lograg_verbose, lograg_error
from .timer import TimerUntil
openai_model_default = "gpt-3.5-turbo-instruct"
google_model_default = "models/tex... | [
"llama_index.llms.Gemini",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.PaLM",
"llama_index.llms.OpenAI",
"llama_index.llms.LlamaCPP",
"llama_index.llms.Replicate",
"llama_index.llms.HuggingFaceLLM",
"llama_index.llms.MistralAI",
"llama_index.set_global_service_context",
"llama_in... | [((8029, 8090), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': '"""local"""', 'llm': 'result'}), "(embed_model='local', llm=result)\n", (8057, 8090), False, 'from llama_index import ServiceContext, set_global_service_context\n'), ((1986, 2158), 'llama_index.llms.OpenAI'... |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# ht... | [
"llama_index.utilities.token_counting.TokenCounter",
"llama_index.callbacks.token_counting.get_llm_token_counts",
"llama_index.utils.get_tokenizer"
] | [((1450, 1483), 'contextvars.ContextVar', 'ContextVar', (['"""trace"""'], {'default': 'None'}), "('trace', default=None)\n", (1460, 1483), False, 'from contextvars import ContextVar\n'), ((2085, 2105), 'opentelemetry.trace.get_tracer', 'get_tracer', (['__name__'], {}), '(__name__)\n', (2095, 2105), False, 'from opentel... |
from pathlib import Path
from llama_index import download_loader
ImageReader = download_loader("ImageReader")
# If the image contains key-value pair text, use text_type="key_value"
loader = ImageReader(text_type="key_value")
documents = loader.load_data(file=Path('./receipt.webp'))
print(documents) | [
"llama_index.download_loader"
] | [((80, 110), 'llama_index.download_loader', 'download_loader', (['"""ImageReader"""'], {}), "('ImageReader')\n", (95, 110), False, 'from llama_index import download_loader\n'), ((261, 283), 'pathlib.Path', 'Path', (['"""./receipt.webp"""'], {}), "('./receipt.webp')\n", (265, 283), False, 'from pathlib import Path\n')] |
# https://github.com/jerryjliu/llama_index/blob/main/examples/langchain_demo/LangchainDemo.ipynb
# Using LlamaIndex as a Callable Tool
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import i... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults"
] | [((874, 992), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'truncation': 'only_first', 'max_length': 1024}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'truncation': 'only_first', 'max_length': 1024})\n", (888, 992), False, 'from langch... |
"""This module provides functionality for loading chat prompts.
The main function in this module is `load_chat_prompt`, which loads a chat prompt from a given JSON file.
The JSON file should contain two keys: "system_template" and "human_template", which correspond to the system and user messages respectively.
Typica... | [
"llama_index.llms.ChatMessage",
"llama_index.ChatPromptTemplate"
] | [((626, 653), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (643, 653), False, 'import logging\n'), ((2217, 2237), 'pathlib.Path', 'pathlib.Path', (['f_name'], {}), '(f_name)\n', (2229, 2237), False, 'import pathlib\n'), ((2706, 2734), 'llama_index.ChatPromptTemplate', 'ChatPromptTemplat... |
from llama_index import RssReader
from flask import Flask, request, render_template
import json
# Load template
with open('app/template.md') as f:
template = f.read()
# Get rss content
def get_rss_content(websites:list) -> list:
reader = RssReader()
results = []
for web in range(len(website... | [
"llama_index.RssReader"
] | [((510, 525), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'from flask import Flask, request, render_template\n'), ((250, 261), 'llama_index.RssReader', 'RssReader', ([], {}), '()\n', (259, 261), False, 'from llama_index import RssReader\n'), ((1308, 1341), 'json.dumps', 'json.dumps', ... |
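The feed loop in this row breaks off mid-iteration; a hedged completion that iterates the list directly. The example feed URL is a placeholder, and `urls` is the argument the legacy `RssReader.load_data` takes.

```python
from llama_index import RssReader


def get_rss_content(websites: list) -> list:
    reader = RssReader()
    results = []
    for web in websites:
        # load_data returns one Document per feed entry.
        results.extend(reader.load_data(urls=[web]))
    return results


documents = get_rss_content(["https://news.ycombinator.com/rss"])
print(len(documents))
```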
# Copyright 2023 osiworx
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
#... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.vector_stores.milvus.MilvusVectorStore",
"llama_index.core.storage.storage_context.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader"
] | [((894, 1036), 'llama_index.vector_stores.milvus.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': '"""http://localhost:19530"""', 'port': '(19530)', 'collection_name': '"""llama_index_prompts_large"""', 'dim': '(384)', 'similarity_metric': '"""L2"""'}), "(uri='http://localhost:19530', port=19530, collection_name\n... |
import numpy as np
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.llms.litellm import LiteLLM
from langchain_google_genai import ChatGoogleGenerativeAI
from trulens_eval.feedback.provider.langchain import Langchain
from trulens_eval import Tru, Feedback, TruLlama
from trulens_eva... | [
"llama_index.core.StorageContext.from_defaults",
"llama_index.llms.litellm.LiteLLM"
] | [((519, 570), 'llama_index.llms.litellm.LiteLLM', 'LiteLLM', ([], {'model': '"""gemini/gemini-pro"""', 'temperature': '(0.1)'}), "(model='gemini/gemini-pro', temperature=0.1)\n", (526, 570), False, 'from llama_index.llms.litellm import LiteLLM\n'), ((687, 744), 'langchain_google_genai.ChatGoogleGenerativeAI', 'ChatGoog... |
import os
from dotenv import load_dotenv
from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine
from llama_index.llms.openai import OpenAI
from llama_index.llms.types import ChatMessage, MessageRole
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers i... | [
"llama_index.llms.openai.OpenAI",
"llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.llms.types.ChatMessage",
"llama_index.retrievers.PathwayRetriever"
] | [((443, 456), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (454, 456), False, 'from dotenv import load_dotenv\n'), ((622, 674), 'os.environ.get', 'os.environ.get', (['"""PATHWAY_HOST"""', 'DEFAULT_PATHWAY_HOST'], {}), "('PATHWAY_HOST', DEFAULT_PATHWAY_HOST)\n", (636, 674), False, 'import os\n'), ((932, 977), ... |
# My OpenAI Key
import logging
import os
import sys
from IPython.display import Markdown, display
from llama_index import GPTTreeIndex, SimpleDirectoryReader
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
os.environ["OPENAI_API_KEY"... | [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTTreeIndex.load_from_disk",
"llama_index.GPTTreeIndex.from_documents"
] | [((160, 218), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (179, 218), False, 'import logging\n'), ((324, 351), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (333, 351), False, 'imp... |
from llama_index import Document
import json, os
from llama_index.node_parser import SimpleNodeParser
from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex
from langchain import OpenAI
from llama_index.composability import ComposableGraph
from llama_index.data_structs.node_v2 import Node, Docu... | [
"llama_index.GPTTreeIndex.load_from_disk",
"llama_index.composability.ComposableGraph.from_indices",
"llama_index.GPTTreeIndex",
"llama_index.data_structs.node_v2.Node",
"llama_index.PromptHelper",
"llama_index.composability.ComposableGraph.load_from_disk"
] | [((705, 764), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (717, 764), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((853, 879), 'os.listdir', 'os.listdir', (['... |
import logging
from llama_index import SimpleDirectoryReader
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from IPython.display import Markdown, display
from llama_ind... | [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.node_parser.SentenceSplitter"
] | [((418, 457), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (437, 457), False, 'import logging\n'), ((467, 494), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (484, 494), False, 'import logging\n'), ((2869, 2918), 'embeddings.Emb... |
import os
from llama_index import (
GPTSimpleVectorIndex,
GPTTreeIndex,
GPTKeywordTableIndex,
GPTListIndex,
)
from llama_index import SimpleDirectoryReader, download_loader
from llama_index import (
Document,
LLMPredictor,
PromptHelper,
QuestionAnswerPrompt,
RefinePrompt,
)
from lang... | [
"llama_index.GPTKeywordTableIndex.load_from_disk",
"llama_index.download_loader",
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTListIndex",
"llama_index.GPTTreeIndex",
"llama_index.GPTListIndex.load_from_disk",
"llama_index.RefinePrompt",
"llama_index.QuestionAnswerPrompt",
"lla... | [((601, 638), 'logging.debug', 'logging.debug', (['"""Loading documents..."""'], {}), "('Loading documents...')\n", (614, 638), False, 'import logging\n'), ((2779, 2899), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap', 'embedding_limit', 'chunk_size_limit'], {'separat... |
# This file has been modified by the Nextpy Team in 2023 using AI tools and automation scripts.
# We have rigorously tested these modifications to ensure reliability and performance. Based on successful test results, we are confident in the quality and stability of these changes.
"""Base reader class."""
from abc imp... | [
"llama_index.schema.Document"
] | [((877, 904), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (894, 904), False, 'import logging\n'), ((2439, 2467), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'slack_token'}), '(token=slack_token)\n', (2448, 2467), False, 'from slack_sdk import WebClient\n'), ((2508, 2545), 'slack... |
"""
This is the documentation of the Llama2-7B-chat model from Hugging Face models
This model has 7 billion parameters and was developed by Meta
This is used for QnA purposes on a local machine for testing...
Model hardware config:
- GPU: Nvidia RTX 40 Series (12GB) --> CUDA support
- RAM... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.get_response_synthesizer",
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrieve... | [((1191, 1204), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1202, 1204), False, 'from dotenv import load_dotenv\n'), ((1216, 1237), 'os.getenv', 'os.getenv', (['"""HF_TOKEN"""'], {}), "('HF_TOKEN')\n", (1225, 1237), False, 'import os\n'), ((5540, 5566), 'os.system', 'os.system', (['"""rm -rf Data_*"""'], {}... |
import os
import re
from llama_index import ListIndex
from llama_index import ServiceContext
from llama_index.llms import OpenAI
from llama_index.llms.palm import PaLM
from llama_index.response_synthesizers import get_response_synthesizer
from llama_index.schema import NodeRelationship
from llama_index.schema import R... | [
"llama_index.ListIndex",
"llama_index.response_synthesizers.get_response_synthesizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.schema.RelatedNodeInfo",
"llama_index.llms.palm.PaLM"
] | [((722, 764), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self.llm'}), '(llm=self.llm)\n', (750, 764), False, 'from llama_index import ServiceContext\n'), ((2577, 2626), 'llama_index.ListIndex', 'ListIndex', (['nodes'], {'service_context': 'service_context'}), '(nodes, serv... |
import sys
sys.stdout.reconfigure(encoding="utf-8")
sys.stdin.reconfigure(encoding="utf-8")
import streamlit as st
import streamlit.components.v1 as components
import re
import random
CODE_BUILD_KG = """
# Prepare for GraphStore
os.environ['NEBULA_USER'] = "root"
os.environ['NEBULA_PASSWORD'] = "nebula" # defaul... | [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.LLMPredictor",
"llama_index.graph_stores.NebulaGraphStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.query_engine.KnowledgeGraphQueryEngine",
"llama_index.llms.AzureOpenAI"
] | [((12, 52), 'sys.stdout.reconfigure', 'sys.stdout.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (34, 52), False, 'import sys\n'), ((53, 92), 'sys.stdin.reconfigure', 'sys.stdin.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (74, 92), False, 'import sys\n'), ((2986, 3... |
import datetime
import uuid
from llama_index.core.memory import ChatMemoryBuffer
class Chat:
def __init__(self, model):
self.model = model
if model.id is None:
self.id = str(uuid.uuid4())
else:
self.id = model.id
self.history = ChatMemoryBuffer.from_defau... | [
"llama_index.core.memory.ChatMemoryBuffer.from_defaults"
] | [((293, 341), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(3900)'}), '(token_limit=3900)\n', (323, 341), False, 'from llama_index.core.memory import ChatMemoryBuffer\n'), ((366, 389), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n'... |
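A short usage sketch for the memory buffer this row constructs; the messages are placeholders.

```python
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.memory import ChatMemoryBuffer

memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
memory.put(ChatMessage(role=MessageRole.USER, content="Hello"))
memory.put(ChatMessage(role=MessageRole.ASSISTANT, content="Hi, how can I help?"))
# get() returns the retained history, evicting the oldest turns once
# the 3900-token budget is exceeded.
print(memory.get())
```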
from pathlib import Path
from llama_index import Document, SimpleDirectoryReader, download_loader
from llama_index.query_engine import RetrieverQueryEngine
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores... | [
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.embeddings.openai.OpenA... | [((531, 544), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (542, 544), False, 'from dotenv import load_dotenv\n'), ((1142, 1160), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (1158, 1160), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1366, 1472), 'pi... |
from llama_index.node_parser import SimpleNodeParser
from typing import *
from llama_index.data_structs import Node
import requests
from collections import defaultdict
from llama_index import Document
from config import config
def load_and_parse(all_docs):
documents = []
for file_row in all_docs:
url... | [
"llama_index.node_parser.SimpleNodeParser.from_defaults"
] | [((430, 443), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (441, 443), False, 'from collections import defaultdict\n'), ((1033, 1120), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'config.node_chunk_size', 'chunk_overlap': '(50)'}), '(chu... |
# bring in our LLAMA_CLOUD_API_KEY
import os
from dotenv import load_dotenv
load_dotenv()
import nest_asyncio # noqa: E402
nest_asyncio.apply()
# bring in deps
from llama_parse import LlamaParse # noqa: E402
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader # noqa: E402
# set up parser
llamapar... | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.ollama.Ollama",
"llama_index.embeddings.ollama.OllamaEmbedding"
] | [((76, 89), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (87, 89), False, 'from dotenv import load_dotenv\n'), ((125, 145), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (143, 145), False, 'import nest_asyncio\n'), ((333, 365), 'os.getenv', 'os.getenv', (['"""LLAMA_CLOUD_API_KEY"""'], {}), "('... |
from setup import documents, eval_questions, tru
from utils import get_prebuilt_trulens_recorder, build_automerging_index
#Auto-Merging Retrieval
from llama_index.llms import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
automerging_index = build_automerging_index(
documents,
llm,
embed_mod... | [
"llama_index.llms.OpenAI"
] | [((190, 236), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo', temperature=0.1)\n", (196, 236), False, 'from llama_index.llms import OpenAI\n'), ((258, 372), 'utils.build_automerging_index', 'build_automerging_index', (['documents', 'llm'], {'... |
from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import openai
openai.api_base = "https://api.app4gpt.com/v1"
os.environ["OPENAI_A... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper"
] | [((597, 696), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (609, 696), False, 'from llama_index import SimpleDirectoryReader, L... |
import logging
import sys
# Uncomment to see debug logs
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.vector_stores import MilvusVectorS... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.vector_stores.MilvusVectorStore"
] | [((451, 485), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'overwrite': '(False)'}), '(overwrite=False)\n', (468, 485), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((568, 643), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docu... |
from llama_index.core.llms import ChatMessage
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core.prompts import PromptTemplate
from projectgurukul.custom_models import model_utils
import logging
def get_tinyllama_llm(context_window = 2048, max_new_tokens = 256, system_prompt = ""):
def m... | [
"llama_index.core.prompts.PromptTemplate",
"llama_index.llms.huggingface.HuggingFaceLLM"
] | [((720, 754), 'projectgurukul.custom_models.model_utils.get_device_and_dtype', 'model_utils.get_device_and_dtype', ([], {}), '()\n', (752, 754), False, 'from projectgurukul.custom_models import model_utils\n'), ((857, 942), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (["(f'<|system|>{system_prompt}' + ... |
from functools import reduce
from pathlib import Path
from typing import List
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, ServiceContext, StorageContext, \
load_index_from_storage, LLMPredictor, OpenAIEmbedding, download_loader, Document
from llama_index.indices.base import IndexType
from l... | [
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.readers.file.markdown_reader.MarkdownReader",
"llama_index.GPTVector... | [((793, 827), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (799, 827), False, 'from llama_index.llms import OpenAI\n'), ((884, 931), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-emb... |
from llama_index.multi_modal_llms import GeminiMultiModal
from llama_index.program import MultiModalLLMCompletionProgram
from llama_index.output_parsers import PydanticOutputParser
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from pydantic import BaseModel, Field
from typing_extensions import Annota... | [
"llama_index.multi_modal_llms.GeminiMultiModal",
"llama_index.output_parsers.PydanticOutputParser",
"llama_index.multi_modal_llms.openai.OpenAIMultiModal"
] | [((1607, 1657), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of the damaged part"""'}), "(..., description='Name of the damaged part')\n", (1612, 1657), False, 'from pydantic import BaseModel, Field\n'), ((1676, 1726), 'pydantic.Field', 'Field', (['...'], {'description': '"""Estimated cost of repair"""... |
from llama_index.core.storage.chat_store import SimpleChatStore
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.memory import ChatMemoryBuffer
try:
chat_store = SimpleChatStore.from_persist_path(
persist_path="chat_memory.json"
)
except FileNotFoundError:
chat_store ... | [
"llama_index.core.chat_engine.SimpleChatEngine.from_defaults",
"llama_index.core.storage.chat_store.SimpleChatStore",
"llama_index.core.storage.chat_store.SimpleChatStore.from_persist_path",
"llama_index.core.memory.ChatMemoryBuffer.from_defaults"
] | [((351, 451), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(2000)', 'chat_store': 'chat_store', 'chat_store_key': '"""user_X"""'}), "(token_limit=2000, chat_store=chat_store,\n chat_store_key='user_X')\n", (381, 451), False, 'from llama_index.core.... |
import streamlit as st
import pandas as pd
import os
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from llama_index import (
SimpleDirectoryReader,
VectorSt... | [
"llama_index.llms.LlamaCPP"
] | [((13454, 13514), 'pandas.read_csv', 'pd.read_csv', (['"""src/data/plant_compatibility.csv"""'], {'index_col': '(0)'}), "('src/data/plant_compatibility.csv', index_col=0)\n", (13465, 13514), True, 'import pandas as pd\n'), ((13774, 13825), 'streamlit.session_state.raw_plant_compatibility.to_numpy', 'st.session_state.ra... |
# Derived from example:
# https://gpt-index.readthedocs.io/en/latest/how_to/custom_llms.html
import time
import torch
from langchain.llms.base import LLM
from llama_index import SimpleDirectoryReader, LangchainEmbedding
from llama_index import ListIndex, PromptHelper
from llama_index import LLMPredictor
from transfo... | [
"llama_index.SimpleDirectoryReader",
"llama_index.ListIndex.from_documents",
"llama_index.PromptHelper"
] | [((423, 482), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (435, 482), False, 'from llama_index import ListIndex, PromptHelper\n'), ((1335, 1346), 'time.time', 'time.time', ([], {}), '()\n', (1344, 1346), Fa... |
import os
import json
from tqdm import tqdm
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import TokenTextSplitter
from langchain.document_loaders import UnstructuredAPIFileLoader
from langchain.vectorstores import MyScale, MyScaleSettings
from llama_index import ListIndexRetriev... | [
"llama_index.vector_stores.myscale.MyScaleVectorStore",
"llama_index.ServiceContext",
"llama_index.ListIndexRetriever"
] | [((649, 780), 'langchain.vectorstores.MyScaleSettings', 'MyScaleSettings', ([], {'host': '"""msc-3*****.us-east-1.aws.myscale.com"""', 'port': '(443)', 'username': '"""smatty662"""', 'password': '"""passwd_CAdI******H7GNt"""'}), "(host='msc-3*****.us-east-1.aws.myscale.com', port=443,\n username='smatty662', passwor... |
import requests
from bs4 import BeautifulSoup
from typing import Tuple, Dict, Any
from llama_index import Document
def page_ingest(url) -> Tuple[str, Dict[str, Any]]:
print("url", url)
label = ''
# Fetch the content from url
response = requests.get(url)
# Create a BeautifulSoup object and specif... | [
"llama_index.Document"
] | [((256, 273), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (268, 273), False, 'import requests\n'), ((344, 387), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (357, 387), False, 'from bs4 import BeautifulSoup\n'), ((807, 854), 'llama... |
# create OpenAIAssistantAgent
from pydantic import BaseModel, Field# define pydantic model for auto-retrieval function
from typing import Tuple, List
from llama_index.tools import FunctionTool
from llama_index.agent import OpenAIAssistantAgent
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
... | [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.tools.FunctionTool.from_defaults",
"llama_index.agent.OpenAIAssistantAgent.from_new",
"llama_index.tools.ToolMetadata"
] | [((501, 514), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (512, 514), False, 'from dotenv import load_dotenv\n'), ((861, 909), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (895, 909), False, 'from llama_index import Simple... |
from pathlib import Path
from llama_index import GPTSimpleVectorIndex, download_loader
import sys
def load_document(file):
RDFReader = download_loader("RDFReader")
loader = RDFReader()
return loader.load_data(file=Path(file))
def query(index, prompt):
print("PROMPT:", prompt)
result = index.query(... | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.download_loader",
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((140, 168), 'llama_index.download_loader', 'download_loader', (['"""RDFReader"""'], {}), "('RDFReader')\n", (155, 168), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((620, 650), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['document'], {}), '(document)\n', (640, 650), Fa... |
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr
class ChatbotIndex:
def __init__(self, model_name, directory_path):
self.llm_predictor = LLMPredictor(C... | [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((1293, 1393), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'chatbot.query_response', 'inputs': '"""text"""', 'outputs': '"""text"""', 'title': '"""LocalGPT Chatbot"""'}), "(fn=chatbot.query_response, inputs='text', outputs='text',\n title='LocalGPT Chatbot')\n", (1305, 1393), True, 'import gradio as gr\n'), ((3... |
# Constants
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.prompts import ChatPromptTemplate
from llama_index.llms import OpenAI, ChatMessage, MessageRole
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.node_... | [
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.ChatMessage",
"llama_index.prompts.ChatPromptTemplate",
"llama_index.indices.postproce... | [((744, 788), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'MODEL', 'temperature': 'TEMPERATURE'}), '(model=MODEL, temperature=TEMPERATURE)\n', (750, 788), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((820, 921), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defa... |
import os
import uvicorn
import asyncio
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor, StorageContext
from fastapi.middleware.cors import CORSMiddleware
from langcha... | [
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.LLMPredictor"
] | [((360, 429), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', temperature=0, streaming=True)\n", (370, 429), False, 'from langchain.chat_models import ChatOpenAI\n'), ((446, 467), 'llama_index.LLMPr... |
"""Handles chat interactions for WandBot.
This module contains the Chat class which is responsible for handling chat interactions.
It includes methods for initializing the chat, loading the storage context from an artifact,
loading the chat engine, validating and formatting questions, formatting responses, and getti... | [
"llama_index.callbacks.TokenCountingHandler",
"llama_index.callbacks.WandbCallbackHandler",
"llama_index.llms.ChatMessage",
"llama_index.indices.postprocessor.CohereRerank",
"llama_index.schema.QueryBundle",
"llama_index.llms.generic_utils.messages_to_history_str",
"llama_index.callbacks.trace_method",
... | [((2223, 2243), 'wandbot.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (2233, 2243), False, 'from wandbot.utils import Timer, get_logger, load_service_context\n'), ((2368, 2415), 'llama_index.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['message_templates[:-1]'], {}),... |
import streamlit as st
from llama_index import VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
import chromadb
st.title('Precident')
# load and prime the index
db2 = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db2.get_or_create_collection("quickstart")
vector_store = Chr... | [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.ChromaVectorStore"
] | [((137, 158), 'streamlit.title', 'st.title', (['"""Precident"""'], {}), "('Precident')\n", (145, 158), True, 'import streamlit as st\n'), ((193, 238), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (218, 238), False, 'import chromadb\n'), ((317, ... |
import os
import time
from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context
import llama_index
from Models import Models
from DocumentClass import DocumentClass
class MediawikiLLM:
service_context = None
mediawiki_url = None
api_url ... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.indices.empty.EmptyIndex",
"llama_index.ServiceContext.from_defaults",
"llama_index.set_global_service_context",
"llama_index.load_index_from_storage"
] | [((542, 564), 'DocumentClass.DocumentClass', 'DocumentClass', (['api_url'], {}), '(api_url)\n', (555, 564), False, 'from DocumentClass import DocumentClass\n'), ((795, 870), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""', 'chunk_size': '(1024... |
import os
import shutil
import chromadb
import redis
from llama_index.core.indices import VectorStoreIndex
from llama_index.core.storage import StorageContext
from app.tools import FindEmbeddingsPath
from llama_index.vector_stores.redis import RedisVectorStore
from llama_index.vector_stores.chroma import ChromaVectorS... | [
"llama_index.vector_stores.redis.RedisVectorStore",
"llama_index.core.storage.StorageContext.from_defaults",
"llama_index.vector_stores.chroma.ChromaVectorStore"
] | [((370, 408), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (388, 408), False, 'from app.tools import FindEmbeddingsPath\n'), ((469, 505), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (494, 505), False,... |
#!/usr/bin/env python3
import json
import logging
import re
import requests
import altair as alt
import matplotlib.pyplot as plt
import pandas as pd
import streamlit as st
from datetime import datetime, timedelta
from langchain.llms import OpenAI
from llama_index import GPTVectorStoreIndex, Document, LLMPredictor, S... | [
"llama_index.ServiceContext.from_defaults"
] | [((419, 451), 'logging.getLogger', 'logging.getLogger', (['"""llama_index"""'], {}), "('llama_index')\n", (436, 451), False, 'import logging\n'), ((992, 1102), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'TITLE', 'page_icon': 'ICON', 'layout': '"""centered"""', 'initial_sidebar_state': '"""co... |
# https://gpt-index.readthedocs.io/en/latest/examples/query_engine/sub_question_query_engine.html
# Using LlamaIndex as a Callable Tool
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import i... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults"
] | [((874, 992), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'truncation': 'only_first', 'max_length': 1024}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'truncation': 'only_first', 'max_length': 1024})\n", (888, 992), False, 'from langch... |
from llama_index.core.node_parser import HTMLNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
document = reader.load_data(Path("files/others/sample.html"))
my_tags = ["p", "span"]
html_parser = HTMLNodeParser(tags=my_tags)
nodes = html_parser.get_nodes_from_d... | [
"llama_index.core.node_parser.HTMLNodeParser",
"llama_index.readers.file.FlatReader"
] | [((139, 151), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (149, 151), False, 'from llama_index.readers.file import FlatReader\n'), ((255, 283), 'llama_index.core.node_parser.HTMLNodeParser', 'HTMLNodeParser', ([], {'tags': 'my_tags'}), '(tags=my_tags)\n', (269, 283), False, 'from llama_index.... |
import os
import streamlit as st
from PIL import Image
from llama_index import (
Document,
GPTVectorStoreIndex,
GPTListIndex,
LLMPredictor,
ServiceContext,
SimpleDirectoryReader,
PromptHelper,
StorageContext,
load_index_from_storage,
download_loader,
)
from llama_index.readers.f... | [
"llama_index.SimpleDirectoryReader",
"llama_index.download_loader",
"llama_index.LLMPredictor",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper",
"llama_index.GPTListIndex.from_documents",
"llama_index.Document"
] | [((2706, 2748), 'streamlit.title', 'st.title', (['"""🦙 Llama Index Term Extractor 🦙"""'], {}), "('🦙 Llama Index Term Extractor 🦙')\n", (2714, 2748), True, 'import streamlit as st\n'), ((2749, 3271), 'streamlit.markdown', 'st.markdown', (['"""This demo allows you to upload your own documents (either a screenshot/ima... |
from django.shortcuts import render
from django.views import generic
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
import os
fr... | [
"llama_index.load_index_from_storage"
] | [((817, 835), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (825, 835), False, 'from rest_framework.decorators import api_view\n'), ((545, 585), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (568, 585), False, ... |
from pathlib import Path
from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex
from llama_index.vector_stores import MilvusVectorStore
from llama_index.readers import PDFReader
from llama_index import StorageContext
from pymilvus import MilvusClient
import os
# Define constants for Mi... | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.MilvusVectorStore",
"llama_index.StorageContext.from_defaults",
"llama_index.readers.PDFReader"
] | [((353, 399), 'os.environ.get', 'os.environ.get', (['"""MILVUS_HOST"""', '"""10.97.151.193"""'], {}), "('MILVUS_HOST', '10.97.151.193')\n", (367, 399), False, 'import os\n'), ((414, 452), 'os.environ.get', 'os.environ.get', (['"""MILVUS_PORT"""', '"""19530"""'], {}), "('MILVUS_PORT', '19530')\n", (428, 452), False, 'im... |
from llama_index.llms.ollama import Ollama
from typing import Any, Sequence
from llama_index.core.bridge.pydantic import Field
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
)
from llama_index.core.llms.callbacks import llm_chat... | [
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.llms.callbacks.llm_completion_callback",
"llama_index.core.llms.callbacks.llm_chat_callback"
] | [((396, 473), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '""""""', 'description': '"""Default system message to send to the model."""'}), "(default='', description='Default system message to send to the model.')\n", (401, 473), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((5... |