DronA23 committed on
Commit
8408313
·
verified ·
1 Parent(s): 3e23867

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -81
app.py CHANGED
@@ -1,87 +1,70 @@
1
- import os
2
- import gradio as gr
3
- from helper import generate_reply
 
 
 
 
 
4
 
5
# Fail fast at import time: downstream replies need this key, and on HF Spaces
# it must be configured via the Space's Secrets UI (no dotenv is loaded here).
OPENAI_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_KEY:
    raise ValueError("🔑 Set OPENAI_API_KEY in your Space’s Secrets.")
9
 
10
def respond(user_message, history):
    """Handle one chat turn for the Gradio UI.

    Appends the (user, bot) pair to the running history and returns an
    empty string as the second output so the textbox is cleared.
    """
    conversation = history or []
    conversation.append((user_message, generate_reply(user_message)))
    return conversation, ""
16
 
17
# Custom CSS: dark theme, glassy centered container, gradient user bubbles,
# and styled textbox/send button. The #textbox / #send-btn selectors match
# the elem_id values assigned when the UI is built below.
css = """
body {
  background: #121212;
  color: #e0e0e0;
  font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
.gradio-container {
  max-width: 700px;
  margin: auto;
  padding: 1.5rem;
  backdrop-filter: blur(10px);
  background-color: rgba(30, 30, 46, 0.85);
  border-radius: 16px;
  box-shadow: 0 8px 20px rgba(0,0,0,0.5);
}
.gradio-container h1, .gradio-container h2, .gradio-container h3 {
  color: #ffffff;
  text-shadow: 0px 2px 4px rgba(0,0,0,0.6);
}
/* Chat bubbles */
.chatbot .chat-message.user {
  background: linear-gradient(120deg, #0066ff, #3399ff) !important;
  color: white !important;
  border-radius: 16px !important;
  padding: 0.6rem 1rem !important;
}
.chatbot .chat-message.assistant {
  background: #262635 !important;
  color: #e0e0e0 !important;
  border-radius: 16px !important;
  padding: 0.6rem 1rem !important;
}
/* Textbox and button tweaks */
#textbox textarea {
  background-color: #1e1e2e !important;
  border: 1px solid #333 !important;
  border-radius: 8px !important;
}
#textbox textarea:focus {
  border-color: #0066ff !important;
}
#send-btn {
  background-color: #0066cc !important;
  border-radius: 8px !important;
  padding: 0.6rem 1.2rem !important;
  color: white !important;
  font-weight: bold !important;
}
#send-btn:hover {
  background-color: #005bb5 !important;
}
"""
70
 
71
# Build UI: a single chat column with a textbox + send button. Both pressing
# Enter (txt.submit) and clicking the button (send.click) route through
# respond(), which returns the updated history and "" to clear the textbox.
with gr.Blocks(css=css) as demo:
    gr.Markdown("# 🤖 AI Companion\n*Warm, friendly chat with your bot*")
    chatbot = gr.Chatbot(elem_id="chatbot")  # elem_id lets the CSS above target it
    with gr.Row():
        txt = gr.Textbox(
            placeholder="Type your message here…",
            show_label=False,
            lines=1,
            elem_id="textbox"
        )
        send = gr.Button("Send", elem_id="send-btn")
    txt.submit(respond, [txt, chatbot], [chatbot, txt])
    send.click(respond, [txt, chatbot], [chatbot, txt])

if __name__ == "__main__":
    demo.launch()
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from pydantic import BaseModel
3
+ from typing import List, Tuple, Dict
4
+ import helper
5
+ import dialogue
6
+ import models.linear as linear_model
7
+ import models.logistic as logistic_model
8
+ import judge
9
 
10
# ASGI application object; this is what uvicorn loads via the "app:app" string.
app = FastAPI(title="KindCare Chat API with ML",
              description="Chat + prediction + judge endpoints")
14
 
15
class ChatRequest(BaseModel):
    """Request body for POST /chat."""
    # Latest user utterance.
    message: str
    # Prior (user, bot) turns. Pydantic copies field defaults per instance,
    # so the shared-mutable-default pitfall does not apply here.
    history: List[Tuple[str, str]] = []
 
 
 
18
 
19
class ChatResponse(BaseModel):
    """Response body for POST /chat."""
    # Bot reply to the latest message.
    reply: str
    # Conversation including this turn, suitable to echo back on the next call.
    history: List[Tuple[str, str]]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
class PredictRequest(BaseModel):
    """Feature payload shared by the /predict/* endpoints."""
    # Feature name -> numeric value; exact expected keys are defined by the
    # models.* modules — not visible here, confirm against their docs.
    features: Dict[str, float]
25
+
26
class PredictLinearResponse(BaseModel):
    """Response body for POST /predict/linear."""
    # Numeric regression output.
    prediction: float
    # Model-quality metrics as reported by models.linear.predict.
    metrics: Dict[str, float]
29
+
30
class PredictLogisticRequest(PredictRequest):
    """PredictRequest plus labels for the logistic model's output classes."""
    # Presumably the class labels the prediction is mapped onto — confirm
    # against models.logistic.predict, which receives this list verbatim.
    bins: List[str]
32
+
33
class PredictLogisticResponse(BaseModel):
    """Response body for POST /predict/logistic."""
    # Predicted class label (one of the request's bins, presumably).
    prediction: str
    metrics: Dict[str, float]
36
+
37
class JudgeRequest(BaseModel):
    """Request body for POST /judge/model."""
    # Which model is being judged — NOTE(review): accepted values are decided
    # by judge.judge_model; also, a field named "model_type" triggers pydantic
    # v2's protected-namespace warning ("model_" prefix) — confirm intended.
    model_type: str
    metrics: Dict[str, float]
40
+
41
class JudgeResponse(BaseModel):
    """Verdict returned by POST /judge/model."""
    verdict: str
    # Free-text remarks explaining the verdict.
    comments: List[str]
44
+
45
@app.post("/chat", response_model=ChatResponse)
async def chat(req: ChatRequest):
    """Run one chat turn through dialogue.handle_message.

    Any failure while generating the reply surfaces as HTTP 500 with the
    error text as the detail.
    """
    try:
        bot_reply, updated_history = dialogue.handle_message(req.message, req.history)
        return ChatResponse(reply=bot_reply, history=updated_history)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
52
+
53
@app.post("/predict/linear", response_model=PredictLinearResponse)
async def predict_linear(req: PredictRequest):
    """Run the linear model on the supplied features.

    Returns the numeric prediction plus the model's metrics. Errors from the
    model now become an HTTP 500 with the error text as detail, matching the
    /chat endpoint's policy (previously a bad feature dict produced an
    unhandled exception and an opaque server error).
    """
    try:
        pred, metrics = linear_model.predict(req.features)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return PredictLinearResponse(prediction=pred, metrics=metrics)
57
+
58
@app.post("/predict/logistic", response_model=PredictLogisticResponse)
async def predict_logistic(req: PredictLogisticRequest):
    """Run the logistic model on the supplied features and bins.

    Returns the predicted label plus the model's metrics. Errors from the
    model now become an HTTP 500 with the error text as detail, matching the
    /chat endpoint's policy (previously a bad payload produced an unhandled
    exception and an opaque server error).
    """
    try:
        pred, metrics = logistic_model.predict(req.features, req.bins)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return PredictLogisticResponse(prediction=pred, metrics=metrics)
62
+
63
@app.post("/judge/model", response_model=JudgeResponse)
async def judge_endpoint(req: JudgeRequest):
    """Ask judge.judge_model for a verdict on the reported metrics.

    judge_model must return a mapping with "verdict" and "comments" keys —
    it is splatted into JudgeResponse. Errors (including a malformed result
    mapping) now become an HTTP 500 with the error text as detail, matching
    the /chat endpoint's policy instead of an unhandled exception.
    """
    try:
        result = judge.judge_model(req.model_type, req.metrics)
        return JudgeResponse(**result)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
67
 
68
if __name__ == "__main__":
    # Local dev entry point; in deployment the server is typically launched
    # externally. The "app:app" import string (rather than the app object)
    # keeps uvicorn's reload/workers options usable.
    import uvicorn
    uvicorn.run("app:app", host="0.0.0.0", port=8000)