subashpoudel committed on
Commit
611bb1a
·
1 Parent(s): 1186c31
logs/access.log CHANGED
@@ -1937,3 +1937,9 @@
1937
  2026-02-01 16:45:07,835 | INFO | access_logger | api/main.py:21 | Response status: 200
1938
  2026-02-01 16:46:10,127 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1939
  2026-02-01 16:46:14,731 | INFO | access_logger | api/main.py:21 | Response status: 200
 
 
 
 
 
 
 
1937
  2026-02-01 16:45:07,835 | INFO | access_logger | api/main.py:21 | Response status: 200
1938
  2026-02-01 16:46:10,127 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1939
  2026-02-01 16:46:14,731 | INFO | access_logger | api/main.py:21 | Response status: 200
1940
+ 2026-02-01 16:53:36,037 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1941
+ 2026-02-01 16:53:45,820 | INFO | access_logger | api/main.py:21 | Response status: 200
1942
+ 2026-02-01 16:56:00,092 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1943
+ 2026-02-01 16:56:00,094 | INFO | access_logger | api/main.py:21 | Response status: 422
1944
+ 2026-02-01 16:56:09,838 | INFO | access_logger | api/main.py:19 | Request: POST http://127.0.0.1:8000/api/analytics-chatbot
1945
+ 2026-02-01 16:56:15,071 | INFO | access_logger | api/main.py:21 | Response status: 200
src/genai/utils/models_loader.py CHANGED
@@ -21,7 +21,7 @@ llm_groq_openai = ChatGroq(model="openai/gpt-oss-120b",temperature=0.7)
21
  llm_groq = ChatGroq(model="llama-3.3-70b-versatile",temperature=0)
22
 
23
  llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
24
- llm_gpt = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
25
  llm_gpt_high = ChatOpenAI(model="gpt-5-nano",temperature=0.5)
26
  # encoding_model = tiktoken.encoding_for_model('gpt-4o-mini')
27
  encoding_model = 'encoding_model'
 
21
  llm_groq = ChatGroq(model="llama-3.3-70b-versatile",temperature=0)
22
 
23
  llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
24
+ llm_gpt = ChatOpenAI(model="gpt-4o-mini",temperature=0)
25
  llm_gpt_high = ChatOpenAI(model="gpt-5-nano",temperature=0.5)
26
  # encoding_model = tiktoken.encoding_for_model('gpt-4o-mini')
27
  encoding_model = 'encoding_model'