debug embeddings
Browse files
app.py
CHANGED
|
@@ -67,7 +67,7 @@ def main():
|
|
| 67 |
step=0.05,
|
| 68 |
)
|
| 69 |
|
| 70 |
-
max_output_tokens =
|
| 71 |
|
| 72 |
# Enter LLM Token
|
| 73 |
llm_token = st.text_input(
|
|
@@ -88,7 +88,7 @@ def main():
|
|
| 88 |
# https://docs.llamaindex.ai/en/stable/module_guides/models/llms/
|
| 89 |
Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
|
| 90 |
Settings.num_output = max_output_tokens
|
| 91 |
-
Settings.context_window = 4096 # max possible
|
| 92 |
Settings.embed_model = OpenAIEmbedding()
|
| 93 |
elif provider == 'huggingface':
|
| 94 |
os.environ['HFTOKEN'] = str(llm_token)
|
|
|
|
| 67 |
step=0.05,
|
| 68 |
)
|
| 69 |
|
| 70 |
+
max_output_tokens = 2048
|
| 71 |
|
| 72 |
# Enter LLM Token
|
| 73 |
llm_token = st.text_input(
|
|
|
|
| 88 |
# https://docs.llamaindex.ai/en/stable/module_guides/models/llms/
|
| 89 |
Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
|
| 90 |
Settings.num_output = max_output_tokens
|
| 91 |
+
# Settings.context_window = 4096 # max possible
|
| 92 |
Settings.embed_model = OpenAIEmbedding()
|
| 93 |
elif provider == 'huggingface':
|
| 94 |
os.environ['HFTOKEN'] = str(llm_token)
|