Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -34,7 +34,7 @@ list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instru
|
|
| 34 |
]
|
| 35 |
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
|
| 36 |
|
| 37 |
-
|
| 38 |
|
| 39 |
|
| 40 |
@spaces.GPU
|
|
@@ -149,6 +149,7 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
|
|
| 149 |
# model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
|
| 150 |
temperature = temperature,
|
| 151 |
max_new_tokens = max_tokens,
|
|
|
|
| 152 |
top_k = top_k,
|
| 153 |
)
|
| 154 |
else:
|
|
|
|
| 34 |
]
|
| 35 |
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
|
| 36 |
|
| 37 |
+
huggingfacehub_api_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
|
| 38 |
|
| 39 |
|
| 40 |
@spaces.GPU
|
|
|
|
| 149 |
# model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
|
| 150 |
temperature = temperature,
|
| 151 |
max_new_tokens = max_tokens,
|
| 152 |
+
huggingfacehub_api_token = huggingfacehub_api_token
|
| 153 |
top_k = top_k,
|
| 154 |
)
|
| 155 |
else:
|