Update models/llamaCustom.py

models/llamaCustom.py  (+6 -7)
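Summary: the PromptHelper construction moves from module level into the LlamaCustom class body, and _identifying_params now returns {"name_of_model": llm_model_name}.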
@@ -36,12 +36,6 @@ NUM_OUTPUT = 525
 # set maximum chunk overlap
 CHUNK_OVERLAP_RATION = 0.2
 
-prompt_helper = PromptHelper(
-    context_window=CONTEXT_WINDOW,
-    num_output=NUM_OUTPUT,
-    chunk_overlap_ratio=CHUNK_OVERLAP_RATION,
-)
-
 llm_model_name = "bigscience/bloom-560m"
 tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
 model = AutoModelForCausalLM.from_pretrained(llm_model_name, config="T5Config")
@@ -71,7 +65,7 @@ class CustomLLM(LLM):
 
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
-        return {"name_of_model":
+        return {"name_of_model": llm_model_name}
 
     @property
     def _llm_type(self) -> str:
@@ -80,6 +74,11 @@ class CustomLLM(LLM):
 @st.cache_resource
 class LlamaCustom:
     # define llm
+    prompt_helper = PromptHelper(
+        context_window=CONTEXT_WINDOW,
+        num_output=NUM_OUTPUT,
+        chunk_overlap_ratio=CHUNK_OVERLAP_RATION,
+    )
     llm_predictor = LLMPredictor(llm=CustomLLM())
     service_context = ServiceContext.from_defaults(
         llm_predictor=llm_predictor, prompt_helper=prompt_helper
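To make the surrounding structure easier to follow, here is a minimal sketch of how the file plausibly fits together after this commit. Only the lines visible in the hunks above are confirmed; the imports, the CONTEXT_WINDOW value, the pipeline setup, and the _call body are assumptions based on the usual custom-LLM pattern for langchain plus the pre-0.10 llama_index API.

from typing import Any, List, Mapping, Optional

import streamlit as st
from langchain.llms.base import LLM
from llama_index import LLMPredictor, PromptHelper, ServiceContext
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

CONTEXT_WINDOW = 2048  # assumed value; defined earlier in the file
NUM_OUTPUT = 525       # confirmed by the first hunk header

# set maximum chunk overlap
CHUNK_OVERLAP_RATION = 0.2

llm_model_name = "bigscience/bloom-560m"
tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
model = AutoModelForCausalLM.from_pretrained(llm_model_name, config="T5Config")


class CustomLLM(LLM):
    # assumed: a transformers text-generation pipeline over the model loaded above
    pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # assumed _call body: generate, then strip the echoed prompt
        text = self.pipeline(prompt, max_new_tokens=NUM_OUTPUT)[0]["generated_text"]
        return text[len(prompt):]

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {"name_of_model": llm_model_name}  # the fix in the second hunk

    @property
    def _llm_type(self) -> str:
        return "custom"  # assumed return value


@st.cache_resource
class LlamaCustom:
    # define llm
    prompt_helper = PromptHelper(
        context_window=CONTEXT_WINDOW,
        num_output=NUM_OUTPUT,
        chunk_overlap_ratio=CHUNK_OVERLAP_RATION,
    )
    llm_predictor = LLMPredictor(llm=CustomLLM())
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor, prompt_helper=prompt_helper
    )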
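For completeness, a hypothetical way the cached wrapper might be consumed from the Streamlit app. Nothing below appears in this commit; the reader, the index class, and the query call are assumptions, and only the service_context wiring comes from the diff.

# Hypothetical usage; assumes a local "data" folder of documents.
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader

documents = SimpleDirectoryReader("data").load_data()
index = GPTVectorStoreIndex.from_documents(
    documents, service_context=LlamaCustom.service_context
)
answer = index.as_query_engine().query("What does this document cover?")
print(answer)

Because prompt_helper, llm_predictor, and service_context are class attributes, they are evaluated once when the class body runs rather than once per LlamaCustom instance.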