Spaces:
Sleeping
Sleeping
upload github well compiled files (#1)
Browse files- upload github well compiled files (a26db82602b82ba45fe7e51dd6073c0961edaebd)
- langchain/__init__.py +0 -0
- langchain/manager.py +39 -0
- llama/context.py +61 -0
- llama/index.py +18 -0
- llama/vector_storage.py +18 -0
langchain/__init__.py
ADDED
|
File without changes
|
langchain/manager.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import abstractmethod, ABC
|
| 2 |
+
|
| 3 |
+
from langchain.embeddings.base import Embeddings as LCEmbeddings
|
| 4 |
+
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 5 |
+
from langchain.llms import AzureOpenAI
|
| 6 |
+
from langchain.base_language import BaseLanguageModel
|
| 7 |
+
|
| 8 |
+
from core.lifecycle import Lifecycle
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class BaseLangChainManager(Lifecycle, ABC):
    """Abstract, lifecycle-aware factory for LangChain model objects.

    Concrete subclasses decide which provider backs the embedding model
    and the completion LLM; consumers only see the two getters below.
    """

    def __init__(self) -> None:
        super().__init__()

    @abstractmethod
    def get_embedding(self) -> LCEmbeddings:
        """Return the embedding model used for indexing and querying."""
        ...

    @abstractmethod
    def get_llm(self) -> BaseLanguageModel:
        """Return the language model used for text completion."""
        ...
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class LangChainAzureManager(BaseLangChainManager):
    """LangChain manager backed by an Azure OpenAI deployment.

    Improvements over the original: the commented-out dead kwarg was
    removed, and the model identifier — previously duplicated across
    two keyword arguments — is hoisted into one class constant.
    """

    # Single place to change the Azure deployment/model for completions.
    _COMPLETION_MODEL = "text-davinci-003"

    def __init__(self) -> None:
        super().__init__()

    # Override
    def get_embedding(self) -> LCEmbeddings:
        """Return an OpenAI embeddings client.

        NOTE(review): chunk_size=1 sends one text per request — presumably
        an Azure OpenAI embedding-deployment constraint; confirm before
        raising it.
        """
        return OpenAIEmbeddings(client=None, chunk_size=1)

    # Override
    def get_llm(self) -> BaseLanguageModel:
        """Return an Azure OpenAI completion LLM."""
        return AzureOpenAI(
            deployment_name=self._COMPLETION_MODEL,
            model=self._COMPLETION_MODEL,
            client=None,
        )
|
llama/context.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from llama_index import ServiceContext, LLMPredictor, LangchainEmbedding
|
| 2 |
+
from typing import Optional
|
| 3 |
+
from core.lifecycle import Lifecycle
|
| 4 |
+
from langchain.manager import BaseLangChainManager
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class ServiceContextManager(Lifecycle):
    """Lifecycle wrapper that builds and owns a llama_index ServiceContext."""

    # Populated by do_init(); None until then.
    service_context: Optional[ServiceContext]

    def __init__(self, manager: BaseLangChainManager) -> None:
        super().__init__()
        self.manager = manager
        self.service_context = None

    def get_service_context(self) -> ServiceContext:
        """Return the prepared ServiceContext.

        Raises:
            KeyError: if this manager has not been started yet.
            ValueError: if do_init() has not populated the context.
        """
        # BUG FIX: the original guard was inverted — it raised when the
        # lifecycle WAS started.  The sibling IndexManager.get_index
        # raises when NOT started, which is the intended contract.
        if not self.lifecycle_state.is_started():
            raise KeyError(
                "incorrect lifecycle state: {}".format(self.lifecycle_state.phase)
            )
        if self.service_context is None:
            raise ValueError(
                "service context is not ready, check for lifecycle statement"
            )
        return self.service_context

    def do_init(self) -> None:
        """Build the ServiceContext from the manager's embedding and LLM."""
        # define embedding
        embedding = LangchainEmbedding(self.manager.get_embedding())
        # define LLM
        llm_predictor = LLMPredictor(llm=self.manager.get_llm())
        # configure service context
        self.service_context = ServiceContext.from_defaults(
            llm_predictor=llm_predictor, embed_model=embedding
        )

    def do_start(self) -> None:
        pass

    def do_stop(self) -> None:
        pass

    def do_dispose(self) -> None:
        pass
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class StorageContextManager(Lifecycle):
    """Lifecycle placeholder that records where the dataset lives on disk."""

    def __init__(self, dataset_path: Optional[str] = "./dataset") -> None:
        super().__init__()
        # Directory expected to hold the persisted dataset/index files.
        self.dataset_path = dataset_path

    # All lifecycle hooks are intentionally no-ops for now.
    def do_init(self) -> None:
        pass

    def do_start(self) -> None:
        pass

    def do_stop(self) -> None:
        pass

    def do_dispose(self) -> None:
        pass
|
llama/index.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from core.lifecycle import Lifecycle
|
| 2 |
+
from llama.context import ServiceContextManager
|
| 3 |
+
from llama_index.indices.vector_store import VectorStoreIndex
|
| 4 |
+
from typing import Optional
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class IndexManager(Lifecycle):
    """Lifecycle wrapper that owns the vector-store index."""

    # Set by the lifecycle once the index is built; None before that.
    index: Optional[VectorStoreIndex]

    def __init__(self, context_manager: ServiceContextManager) -> None:
        super().__init__()
        self.index = None
        self.context_manager = context_manager

    def get_index(self) -> Optional[VectorStoreIndex]:
        """Return the index once the manager has been started.

        Raises:
            ValueError: if the lifecycle has not reached the started state.
        """
        # Raise a specific exception instead of the original bare
        # `Exception` (still backward-compatible for callers catching
        # Exception), with a message matching ServiceContextManager's.
        if not self.lifecycle_state.is_started():
            raise ValueError(
                "incorrect lifecycle state: {}".format(self.lifecycle_state.phase)
            )
        return self.index
|
llama/vector_storage.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from core.lifecycle import Lifecycle
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class VectorStorageManager(Lifecycle):
    """Lifecycle stub reserved for future vector-storage wiring."""

    def __init__(self) -> None:
        super().__init__()

    # Every lifecycle hook is currently a deliberate no-op.
    def do_init(self) -> None:
        pass

    def do_start(self) -> None:
        pass

    def do_stop(self) -> None:
        pass

    def do_dispose(self) -> None:
        pass
|