shiva9876 committed on
Commit
8e1982e
·
verified ·
1 Parent(s): 808a339

Update api/endpoints.py

Browse files
Files changed (1) hide show
  1. api/endpoints.py +156 -156
api/endpoints.py CHANGED
@@ -1,157 +1,157 @@
1
- from fastapi import APIRouter, HTTPException, Request
2
- from api.models import PromptRequest, HistoryRequest
3
- from core.llm import llm
4
- from core.memory import get_memory, conversation_memories
5
- from core.utils import classify_message, process_response
6
- import uuid
7
- import time
8
- from langchain.schema import HumanMessage, AIMessage
9
- from langchain.prompts import PromptTemplate
10
- from langchain.chains import LLMChain, ConversationChain
11
-
12
- router = APIRouter()
13
-
14
- # Prompt templates
15
- CONVERSATION_TEMPLATE = """You are a helpful AI assistant. Have a natural conversation with the user.
16
-
17
- Current conversation:
18
- {history}
19
- Human: {input}
20
- AI:"""
21
-
22
- CODE_GENERATION_TEMPLATE = """You are an expert Python programmer. Generate clean, efficient Python code for the given request.
23
-
24
- Request: {input}
25
-
26
- Provide only the Python code without explanation:
27
- ```python"""
28
-
29
- EXPLANATION_TEMPLATE = """You are a programming tutor. Explain how to solve the programming task clearly without providing code.
30
-
31
- Task: {input}
32
-
33
- Explanation:"""
34
-
35
- BOTH_TEMPLATE = """You are an expert Python programmer and tutor. For the given request, provide both a clear explanation and Python code.
36
-
37
- Request: {input}
38
-
39
- First, provide a clear explanation of the approach:
40
- [EXPLANATION]
41
-
42
- Then, provide the Python code:
43
- ```python"""
44
-
45
- def create_chain(template: str, memory=None):
46
- input_vars = ["input"]
47
- if memory:
48
- input_vars.append("history")
49
- prompt = PromptTemplate(
50
- input_variables=input_vars,
51
- template=template
52
- )
53
- if memory:
54
- return ConversationChain(
55
- llm=llm,
56
- prompt=prompt,
57
- memory=memory,
58
- verbose=True,
59
- output_key="output"
60
- )
61
- else:
62
- return LLMChain(
63
- llm=llm,
64
- prompt=prompt,
65
- verbose=True,
66
- output_key="text"
67
- )
68
-
69
- @router.get("/")
70
- async def root():
71
- return {
72
- "status": "ok",
73
- "title": "LangChain Chat Bot API",
74
- "version": "2.0.0",
75
- "model": llm.model if llm else None,
76
- "description": "Advanced chatbot API using LangChain and Groq models",
77
- "features": ["Conversation Memory", "Code Generation", "Explanations", "Session Management"]
78
- }
79
-
80
- @router.post("/generate/")
81
- async def generate_response(request: PromptRequest):
82
- if llm is None:
83
- raise HTTPException(status_code=503, detail="AI model not initialized. Check server logs for errors during startup (e.g., missing API key).")
84
- try:
85
- session_id = request.session_id or str(uuid.uuid4())
86
- memory = get_memory(session_id, request.max_history)
87
- message_type = classify_message(request.prompt)
88
- response_data = {}
89
- if message_type == "conversation" or request.response_type == "conversation":
90
- chain = create_chain(CONVERSATION_TEMPLATE, memory)
91
- response = chain.predict(input=request.prompt)
92
- response_data = {"response": response.strip(), "message_type": "conversation"}
93
- elif request.response_type == "code":
94
- chain = create_chain(CODE_GENERATION_TEMPLATE)
95
- response = chain.run(input=request.prompt)
96
- response_data = process_response(response, "code")
97
- response_data["message_type"] = "code"
98
- elif request.response_type == "explanation":
99
- chain = create_chain(EXPLANATION_TEMPLATE)
100
- response = chain.run(input=request.prompt)
101
- response_data = process_response(response, "explanation")
102
- response_data["message_type"] = "explanation"
103
- else: # "both"
104
- chain = create_chain(BOTH_TEMPLATE)
105
- response = chain.run(input=request.prompt)
106
- response_data = process_response(response, "both")
107
- response_data["message_type"] = "both"
108
- response_data["session_id"] = session_id
109
- print(f"✅ Generated response for session {session_id}")
110
- return response_data
111
- except Exception as e:
112
- print(f"❌ Error in generate_response: {str(e)}")
113
- raise HTTPException(status_code=500, detail=f"Generation error: {str(e)}")
114
-
115
- @router.post("/clear_history/")
116
- async def clear_history(request: HistoryRequest):
117
- if request.session_id in conversation_memories:
118
- conversation_memories[request.session_id].clear()
119
- return {"status": "success", "message": "Conversation history cleared"}
120
- return {"status": "not_found", "message": "Session ID not found"}
121
-
122
- @router.post("/get_history/")
123
- async def get_history(request: HistoryRequest):
124
- if request.session_id in conversation_memories:
125
- memory = conversation_memories[request.session_id]
126
- messages = memory.chat_memory.messages
127
- history = []
128
- for msg in messages:
129
- if isinstance(msg, HumanMessage):
130
- history.append(f"Human: {msg.content}")
131
- elif isinstance(msg, AIMessage):
132
- history.append(f"AI: {msg.content}")
133
- return {"status": "success", "history": history}
134
- return {"status": "not_found", "message": "Session ID not found"}
135
-
136
- @router.get("/sessions/")
137
- async def get_active_sessions():
138
- return {
139
- "active_sessions": list(conversation_memories.keys()),
140
- "total_sessions": len(conversation_memories)
141
- }
142
-
143
- @router.delete("/sessions/{session_id}")
144
- async def delete_session(session_id: str):
145
- if session_id in conversation_memories:
146
- del conversation_memories[session_id]
147
- return {"status": "success", "message": f"Session {session_id} deleted"}
148
- return {"status": "not_found", "message": "Session ID not found"}
149
-
150
- @router.get("/health")
151
- async def health_check():
152
- return {
153
- "status": "healthy",
154
- "model": llm.model if llm else None,
155
- "active_sessions": len(conversation_memories),
156
- "langchain_version": "0.0.350"
157
  }
 
1
+ from fastapi import APIRouter, HTTPException, Request
2
+ from api.models import PromptRequest, HistoryRequest
3
+ from core.llm import llm, GROQ_MODEL
4
+ from core.memory import get_memory, conversation_memories
5
+ from core.utils import classify_message, process_response
6
+ import uuid
7
+ import time
8
+ from langchain.schema import HumanMessage, AIMessage
9
+ from langchain.prompts import PromptTemplate
10
+ from langchain.chains import LLMChain, ConversationChain
11
+
12
+ router = APIRouter()
13
+
14
+ # Prompt templates
15
+ CONVERSATION_TEMPLATE = """You are a helpful AI assistant. Have a natural conversation with the user.
16
+
17
+ Current conversation:
18
+ {history}
19
+ Human: {input}
20
+ AI:"""
21
+
22
+ CODE_GENERATION_TEMPLATE = """You are an expert Python programmer. Generate clean, efficient Python code for the given request.
23
+
24
+ Request: {input}
25
+
26
+ Provide only the Python code without explanation:
27
+ ```python"""
28
+
29
+ EXPLANATION_TEMPLATE = """You are a programming tutor. Explain how to solve the programming task clearly without providing code.
30
+
31
+ Task: {input}
32
+
33
+ Explanation:"""
34
+
35
+ BOTH_TEMPLATE = """You are an expert Python programmer and tutor. For the given request, provide both a clear explanation and Python code.
36
+
37
+ Request: {input}
38
+
39
+ First, provide a clear explanation of the approach:
40
+ [EXPLANATION]
41
+
42
+ Then, provide the Python code:
43
+ ```python"""
44
+
45
+ def create_chain(template: str, memory=None):
46
+ input_vars = ["input"]
47
+ if memory:
48
+ input_vars.append("history")
49
+ prompt = PromptTemplate(
50
+ input_variables=input_vars,
51
+ template=template
52
+ )
53
+ if memory:
54
+ return ConversationChain(
55
+ llm=llm,
56
+ prompt=prompt,
57
+ memory=memory,
58
+ verbose=True,
59
+ output_key="output"
60
+ )
61
+ else:
62
+ return LLMChain(
63
+ llm=llm,
64
+ prompt=prompt,
65
+ verbose=True,
66
+ output_key="text"
67
+ )
68
+
69
+ @router.get("/")
70
+ async def root():
71
+ return {
72
+ "status": "ok",
73
+ "title": "LangChain Chat Bot",
74
+ "version": "2.0.0",
75
+ "model": GROQ_MODEL,
76
+ "description": "Advanced chatbot using LangChain and Groq models",
77
+ "features": ["Conversation Memory", "Code Generation", "Explanations", "Session Management"]
78
+ }
79
+
80
+ @router.post("/generate/")
81
+ async def generate_response(request: PromptRequest):
82
+ if llm is None:
83
+ raise HTTPException(status_code=503, detail="AI model not initialized. Check server logs for errors during startup (e.g., missing API key).")
84
+ try:
85
+ session_id = request.session_id or str(uuid.uuid4())
86
+ memory = get_memory(session_id, request.max_history)
87
+ message_type = classify_message(request.prompt)
88
+ response_data = {}
89
+ if message_type == "conversation" or request.response_type == "conversation":
90
+ chain = create_chain(CONVERSATION_TEMPLATE, memory)
91
+ response = chain.predict(input=request.prompt)
92
+ response_data = {"response": response.strip(), "message_type": "conversation"}
93
+ elif request.response_type == "code":
94
+ chain = create_chain(CODE_GENERATION_TEMPLATE)
95
+ response = chain.run(input=request.prompt)
96
+ response_data = process_response(response, "code")
97
+ response_data["message_type"] = "code"
98
+ elif request.response_type == "explanation":
99
+ chain = create_chain(EXPLANATION_TEMPLATE)
100
+ response = chain.run(input=request.prompt)
101
+ response_data = process_response(response, "explanation")
102
+ response_data["message_type"] = "explanation"
103
+ else: # "both"
104
+ chain = create_chain(BOTH_TEMPLATE)
105
+ response = chain.run(input=request.prompt)
106
+ response_data = process_response(response, "both")
107
+ response_data["message_type"] = "both"
108
+ response_data["session_id"] = session_id
109
+ print(f"✅ Generated response for session {session_id}")
110
+ return response_data
111
+ except Exception as e:
112
+ print(f"❌ Error in generate_response: {str(e)}")
113
+ raise HTTPException(status_code=500, detail=f"Generation error: {str(e)}")
114
+
115
+ @router.post("/clear_history/")
116
+ async def clear_history(request: HistoryRequest):
117
+ if request.session_id in conversation_memories:
118
+ conversation_memories[request.session_id].clear()
119
+ return {"status": "success", "message": "Conversation history cleared"}
120
+ return {"status": "not_found", "message": "Session ID not found"}
121
+
122
+ @router.post("/get_history/")
123
+ async def get_history(request: HistoryRequest):
124
+ if request.session_id in conversation_memories:
125
+ memory = conversation_memories[request.session_id]
126
+ messages = memory.chat_memory.messages
127
+ history = []
128
+ for msg in messages:
129
+ if isinstance(msg, HumanMessage):
130
+ history.append(f"Human: {msg.content}")
131
+ elif isinstance(msg, AIMessage):
132
+ history.append(f"AI: {msg.content}")
133
+ return {"status": "success", "history": history}
134
+ return {"status": "not_found", "message": "Session ID not found"}
135
+
136
+ @router.get("/sessions/")
137
+ async def get_active_sessions():
138
+ return {
139
+ "active_sessions": list(conversation_memories.keys()),
140
+ "total_sessions": len(conversation_memories)
141
+ }
142
+
143
+ @router.delete("/sessions/{session_id}")
144
+ async def delete_session(session_id: str):
145
+ if session_id in conversation_memories:
146
+ del conversation_memories[session_id]
147
+ return {"status": "success", "message": f"Session {session_id} deleted"}
148
+ return {"status": "not_found", "message": "Session ID not found"}
149
+
150
+ @router.get("/health")
151
+ async def health_check():
152
+ return {
153
+ "status": "healthy",
154
+ "model": llm.model if llm else None,
155
+ "active_sessions": len(conversation_memories),
156
+ "langchain_version": "0.0.350"
157
  }