Commit
·
de4c56a
1
Parent(s):
a9e94b5
Update chat model initialization in main.py to use "qwen-qwq-32b" instead of "llama-3.3-70b-versatile".
Browse files
main.py
CHANGED
|
@@ -31,7 +31,7 @@ if not os.environ.get("GROQ_API_KEY"):
|
|
| 31 |
# print(f"GROQ_API_KEY: {os.getenv('GROQ_API_KEY')}")
|
| 32 |
# print(f"HUGGING_FACE_API_KEY: {os.getenv('HUGGING_FACE_API_KEY')}")
|
| 33 |
|
| 34 |
-
llm = init_chat_model("llama-3.3-70b-versatile", model_provider="groq", api_key=os.environ["GROQ_API_KEY"])
|
| 35 |
'''
|
| 36 |
embeddings = HuggingFaceInferenceAPIEmbeddings(
|
| 37 |
api_key = os.getenv('HUGGING_FACE_API_KEY'),
|
|
|
|
| 31 |
# print(f"GROQ_API_KEY: {os.getenv('GROQ_API_KEY')}")
|
| 32 |
# print(f"HUGGING_FACE_API_KEY: {os.getenv('HUGGING_FACE_API_KEY')}")
|
| 33 |
|
| 34 |
+
llm = init_chat_model("qwen-qwq-32b", model_provider="groq", api_key=os.environ["GROQ_API_KEY"])
|
| 35 |
'''
|
| 36 |
embeddings = HuggingFaceInferenceAPIEmbeddings(
|
| 37 |
api_key = os.getenv('HUGGING_FACE_API_KEY'),
|