swayamshetkar commited on
Commit
6fe2567
·
1 Parent(s): b946ba0

new overall train route

Browse files
Files changed (3) hide show
  1. ai/profile_manager.py +64 -0
  2. app.py +132 -1
  3. test.sh +91 -63
ai/profile_manager.py CHANGED
@@ -74,3 +74,67 @@ def update_profile_components(coding_style: Optional[Dict[str, Any]] = None,
74
  pass
75
  raise
76
  return profile
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  pass
75
  raise
76
  return profile
77
+
78
+
79
def update_personal_info(personal_data: str) -> Dict[str, Any]:
    """Persist a new chunk of the user's personal details into the profile.

    Args:
        personal_data: Raw text containing user's personal details.

    Returns:
        The updated profile dictionary.
    """
    profile = load_profile()

    # Make sure the personal_info section exists before appending to it.
    info = profile.setdefault("personal_info", {"entries": [], "raw_data": ""})

    # Record the new text together with when it was received.
    info["entries"].append({
        "timestamp": datetime.datetime.now().isoformat(),
        "content": personal_data,
    })

    # Keep a single concatenated copy of every entry for easy access.
    info["raw_data"] = "\n\n".join(e["content"] for e in info["entries"])

    profile["last_updated"] = datetime.datetime.now().isoformat()

    # Atomic write: dump to a temp file in the same directory, then rename
    # over the target so readers never observe a half-written profile.
    target_dir = os.path.dirname(PROFILE_PATH) or "."
    os.makedirs(target_dir, exist_ok=True)
    fd, tmp_path = tempfile.mkstemp(dir=target_dir)
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as tmp:
            json.dump(profile, tmp, indent=2, ensure_ascii=False)
        os.replace(tmp_path, PROFILE_PATH)
    except Exception:
        # Best-effort cleanup of the orphaned temp file, then re-raise.
        try:
            os.remove(tmp_path)
        except Exception:
            pass
        raise
    return profile
125
+
126
+
127
def get_personal_context() -> str:
    """Retrieve user's personal information as a formatted string for AI context.

    Returns:
        Formatted string of all personal information, or empty string if none exists.
    """
    stored = load_profile().get("personal_info", {}).get("raw_data", "")
    if stored:
        return f"User's Personal Information:\n{stored}"
    return ""
app.py CHANGED
@@ -6,13 +6,22 @@ import os
6
  import threading
7
  import time
8
  import uuid
 
 
 
9
  from collections import OrderedDict, defaultdict
10
 
11
  # Unified LLM import (router selects Groq if available else local llama)
12
  from ai.llm_router import run_llama, init_model, stream_llama
13
 
14
  from ai.code_analyzer import analyze_code
15
- from ai.profile_manager import update_profile, load_profile, update_profile_components
 
 
 
 
 
 
16
  from ai.refactor_engine import suggest_refactor
17
  from ai.prompt_builder import build_persona_prompt
18
  from ai.code_fingerprint import analyze_fingerprint
@@ -25,6 +34,8 @@ from ai.code_memory import save_to_memory
25
  LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")
26
  logging.basicConfig(level=LOG_LEVEL)
27
 
 
 
28
  app = Flask(__name__)
29
  CORS(app)
30
  Compress(app)
@@ -146,10 +157,15 @@ def ask():
146
  skill = meta.get("skill", "moderate_learner")
147
  emotion = meta.get("emotion", "neutral")
148
 
 
 
 
 
149
  SYSTEM_PROMPT = (
150
  "You are CodeMate — an empathetic coding tutor. "
151
  "Explain or answer the question clearly and helpfully. "
152
  f"User context: skill={skill}, emotion={emotion}"
 
153
  )
154
 
155
  # Add user message to history
@@ -201,10 +217,15 @@ def ask_stream():
201
  skill = meta.get("skill", "moderate_learner")
202
  emotion = meta.get("emotion", "neutral")
203
 
 
 
 
 
204
  SYSTEM_PROMPT = (
205
  "You are CodeMate — an empathetic coding tutor. "
206
  "Explain or answer the question clearly and helpfully. "
207
  f"User context: skill={skill}, emotion={emotion}"
 
208
  )
209
 
210
  # Add user message to history
@@ -263,6 +284,10 @@ def code_assist():
263
  style = profile.get("coding_style", {})
264
  fingerprint = profile.get("fingerprint", {})
265
 
 
 
 
 
266
  # Create adaptive persona prompt (this creates a complete prompt string)
267
  prompt_content = build_persona_prompt(query, emotion, skill, style, fingerprint)
268
 
@@ -274,6 +299,7 @@ def code_assist():
274
  "You are CodeMate — an empathetic coding assistant. "
275
  f"User profile: skill={skill}, emotion={emotion}. "
276
  "Provide helpful code examples and explanations."
 
277
  )
278
 
279
  messages = _get_messages(session_id, system_prompt)
@@ -377,6 +403,111 @@ def train():
377
  return jsonify({"error": str(e)}), 500
378
 
379
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
380
  # ---- /get_history ------------------------------------------------
381
  @app.route("/get_history", methods=["POST"])
382
  def get_history():
 
6
  import threading
7
  import time
8
  import uuid
9
+ import datetime
10
+ import tempfile
11
+ import json
12
  from collections import OrderedDict, defaultdict
13
 
14
  # Unified LLM import (router selects Groq if available else local llama)
15
  from ai.llm_router import run_llama, init_model, stream_llama
16
 
17
  from ai.code_analyzer import analyze_code
18
+ from ai.profile_manager import (
19
+ update_profile,
20
+ load_profile,
21
+ update_profile_components,
22
+ update_personal_info,
23
+ get_personal_context
24
+ )
25
  from ai.refactor_engine import suggest_refactor
26
  from ai.prompt_builder import build_persona_prompt
27
  from ai.code_fingerprint import analyze_fingerprint
 
34
  LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")
35
  logging.basicConfig(level=LOG_LEVEL)
36
 
37
+ PROFILE_PATH = os.environ.get("USER_PROFILE_PATH", "./user_data/user_profile.json")
38
+
39
  app = Flask(__name__)
40
  CORS(app)
41
  Compress(app)
 
157
  skill = meta.get("skill", "moderate_learner")
158
  emotion = meta.get("emotion", "neutral")
159
 
160
+ # Get personal context from profile
161
+ personal_context = get_personal_context()
162
+ personal_section = f"\n\n{personal_context}" if personal_context else ""
163
+
164
  SYSTEM_PROMPT = (
165
  "You are CodeMate — an empathetic coding tutor. "
166
  "Explain or answer the question clearly and helpfully. "
167
  f"User context: skill={skill}, emotion={emotion}"
168
+ f"{personal_section}"
169
  )
170
 
171
  # Add user message to history
 
217
  skill = meta.get("skill", "moderate_learner")
218
  emotion = meta.get("emotion", "neutral")
219
 
220
+ # Get personal context from profile
221
+ personal_context = get_personal_context()
222
+ personal_section = f"\n\n{personal_context}" if personal_context else ""
223
+
224
  SYSTEM_PROMPT = (
225
  "You are CodeMate — an empathetic coding tutor. "
226
  "Explain or answer the question clearly and helpfully. "
227
  f"User context: skill={skill}, emotion={emotion}"
228
+ f"{personal_section}"
229
  )
230
 
231
  # Add user message to history
 
284
  style = profile.get("coding_style", {})
285
  fingerprint = profile.get("fingerprint", {})
286
 
287
+ # Get personal context from profile
288
+ personal_context = get_personal_context()
289
+ personal_section = f"\n\n{personal_context}" if personal_context else ""
290
+
291
  # Create adaptive persona prompt (this creates a complete prompt string)
292
  prompt_content = build_persona_prompt(query, emotion, skill, style, fingerprint)
293
 
 
299
  "You are CodeMate — an empathetic coding assistant. "
300
  f"User profile: skill={skill}, emotion={emotion}. "
301
  "Provide helpful code examples and explanations."
302
+ f"{personal_section}"
303
  )
304
 
305
  messages = _get_messages(session_id, system_prompt)
 
403
  return jsonify({"error": str(e)}), 500
404
 
405
 
406
# ---- /train_overall ----------------------------------------------
@app.route("/train_overall", methods=["POST"])
def train_overall():
    """Store user's personal information for personalized AI responses.

    This endpoint stores any personal details, preferences, or background information
    about the user. The AI will use this context in all future conversations.

    Input JSON:
        personal_data: string (required) - text containing personal information
            Examples: "My name is John. I'm 20 years old. I'm studying CS at MIT."
                      "I prefer Python over JavaScript. I work at Google."
                      "My hobbies include gaming and reading sci-fi novels."

    Returns:
        JSON with success message and updated personal info summary.
        400 if personal_data is missing, not a string, or too large.
    """
    try:
        req = request.json or {}
        personal_data = req.get("personal_data", "")

        # Validate the type explicitly: a non-string value (number, list,
        # dict) would otherwise raise AttributeError on .strip() and be
        # reported as a 500 instead of a client error.
        if not isinstance(personal_data, str):
            return jsonify({"error": "personal_data must be a string"}), 400

        personal_data = personal_data.strip()
        if not personal_data:
            return jsonify({"error": "personal_data is required"}), 400

        # Limit size to prevent abuse (max 50KB per entry)
        if len(personal_data) > 50000:
            return jsonify({"error": "personal_data too large (max 50KB)"}), 400

        # Update profile with personal information
        profile = update_personal_info(personal_data)

        # Get summary of stored data
        personal_info = profile.get("personal_info", {})
        entry_count = len(personal_info.get("entries", []))
        total_chars = len(personal_info.get("raw_data", ""))

        return jsonify({
            "message": "Personal information saved successfully",
            "entry_count": entry_count,
            "total_characters": total_chars,
            "preview": personal_data[:200] + ("..." if len(personal_data) > 200 else "")
        })
    except Exception as e:
        logging.exception("/train_overall failed")
        return jsonify({"error": str(e)}), 500
451
+
452
+
453
# ---- /get_personal_info ------------------------------------------
@app.route("/get_personal_info", methods=["GET"])
def get_personal_info():
    """Retrieve all stored personal information.

    Returns:
        JSON with all personal info entries and summary
    """
    try:
        info = load_profile().get("personal_info", {})
        entries = info.get("entries", [])
        raw = info.get("raw_data", "")
        return jsonify({
            "entries": entries,
            "raw_data": raw,
            "entry_count": len(entries),
            "total_characters": len(raw)
        })
    except Exception as e:
        logging.exception("/get_personal_info failed")
        return jsonify({"error": str(e)}), 500
474
+
475
+
476
# ---- /clear_personal_info ----------------------------------------
@app.route("/clear_personal_info", methods=["POST"])
def clear_personal_info():
    """Clear all stored personal information.

    Removes the "personal_info" section from the profile (if present),
    stamps last_updated, and rewrites the profile file atomically.

    Returns:
        JSON with success message
    """
    try:
        profile = load_profile()
        if "personal_info" in profile:
            del profile["personal_info"]
        profile["last_updated"] = datetime.datetime.now().isoformat()

        # Save updated profile atomically. tempfile is already imported at
        # module level in this file; the previous in-function
        # "import tempfile" was redundant and has been removed.
        os.makedirs(os.path.dirname(PROFILE_PATH) or ".", exist_ok=True)
        fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(PROFILE_PATH) or ".")
        try:
            with os.fdopen(fd, "w", encoding="utf-8") as tmp:
                json.dump(profile, tmp, indent=2, ensure_ascii=False)
            os.replace(tmp_path, PROFILE_PATH)
        except Exception:
            # Clean up the orphaned temp file before propagating the error.
            try:
                os.remove(tmp_path)
            except Exception:
                pass
            raise

        return jsonify({"message": "Personal information cleared successfully"})
    except Exception as e:
        logging.exception("/clear_personal_info failed")
        return jsonify({"error": str(e)}), 500
509
+
510
+
511
  # ---- /get_history ------------------------------------------------
512
  @app.route("/get_history", methods=["POST"])
513
  def get_history():
test.sh CHANGED
@@ -1,75 +1,103 @@
1
- #!/bin/bash
2
- # Test all Human-AI Backend Routes
3
- # Author: Swayam | AI Partner: GPT-5
4
 
5
- BASE_URL="https://swayamshetkar-human-ai-backend.hf.space"
 
 
 
 
 
 
 
6
 
7
- divider() {
8
- echo ""
9
- echo "==================== $1 ===================="
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  }
11
 
12
- # 1️⃣ Test / (Home)
13
- divider "Testing /"
14
- curl -s -X GET "$BASE_URL/" | jq
15
 
16
- # 2️⃣ Test /ask
17
- divider "Testing /ask (study mode)"
18
- curl -s -X POST "$BASE_URL/ask" \
19
- -H "Content-Type: application/json" \
20
- -d '{
21
- "query": "Explain recursion in Python",
22
- "metadata": {"skill": "moderate_learner", "emotion": "focused"}
23
- }' | jq
24
 
25
- # 3️⃣ Test /ask_stream
26
- divider "Testing /ask_stream (study mode streaming)"
27
- curl -s -N -X POST "$BASE_URL/ask_stream" \
28
- -H "Content-Type: application/json" \
29
- -d '{
30
- "query": "Explain how binary search works",
31
- "metadata": {"skill": "slow_learner", "emotion": "curious"}
32
- }'
 
 
33
 
34
- # 4️⃣ Test /code_assist
35
- divider "Testing /code_assist (code generation using trained style)"
36
- curl -s -X POST "$BASE_URL/code_assist" \
37
- -H "Content-Type: application/json" \
38
- -d '{
39
- "query": "Write a Python function that sorts a list of numbers.",
40
- "metadata": {"skill": "fast_learner", "emotion": "focused"}
41
- }' | jq
 
 
42
 
43
- # 5️⃣ Test /analyze
44
- divider "Testing /analyze (inline code style update)"
45
- curl -s -X POST "$BASE_URL/analyze" \
46
- -H "Content-Type: application/json" \
47
- -d '{
48
- "code": "def add_numbers(a,b):\\n return a+b"
49
- }' | jq
50
 
51
- # 6️⃣ Test /refactor
52
- divider "Testing /refactor (refactor code in user style)"
53
- curl -s -X POST "$BASE_URL/refactor" \
54
- -H "Content-Type: application/json" \
55
- -d '{
56
- "code": "def add_numbers(a,b):\\n c=a+b\\n return c"
57
- }' | jq
 
 
 
58
 
59
- # 7️⃣ Test /train_profile
60
- divider "Testing /train_profile (train coding style on uploaded files)"
61
- curl -s -X POST "$BASE_URL/train_profile" \
62
- -H "Content-Type: application/json" \
63
- -d '{
64
- "files": [
65
- "def greet(name):\\n print(f\"Hello {name}\")",
66
- "class Person:\\n def __init__(self, name):\\n self.name = name"
67
- ]
68
- }' | jq
 
 
 
69
 
70
- # 8️⃣ Test /profile
71
- divider "Testing /profile (get current trained profile)"
72
- curl -s -X GET "$BASE_URL/profile" | jq
 
 
 
73
 
74
- echo ""
75
- echo "✅ All routes tested successfully!"
 
1
#!/usr/bin/env bash
set -euo pipefail

# Simple integration test script for Human-AI backend routes.
# Tests: /train, /ask, /refactor, /code_assist
# Usage:
#   bash test.sh                          # uses default BASE_URL
#   BASE_URL=http://localhost:7860 bash test.sh
#   bash test.sh http://remote-host:7860
#
# Outputs are summarized; raw JSON stored in temporary files for inspection.

BASE_URL="${1:-${BASE_URL:-https://swayamshetkar-human-aii.hf.space}}"
echo "Using BASE_URL: $BASE_URL"

TMP_DIR="$(mktemp -d)"
cleanup() { rm -rf "$TMP_DIR"; }
trap cleanup EXIT

header() { echo -e "\n==================== $1 ===================="; }

jq_exists() { command -v jq >/dev/null 2>&1; }
pretty() {
  if jq_exists; then
    jq '.'
  else
    cat
  fi
}

fail() { echo "[FAIL] $1" >&2; exit 1; }

check_http() {
  local code="$1"; local name="$2";
  if [[ "$code" -ge 200 && "$code" -lt 300 ]]; then
    echo "[OK] $name (HTTP $code)";
  else
    fail "$name returned HTTP $code";
  fi
}

# 1. /train (profile training)
header "POST /train"
# FIX: the payload previously used \\" which, inside single quotes, sends a
# literal backslash before the closing quote and makes the JSON invalid.
# A JSON-escaped quote is \" (single backslash).
TRAIN_PAYLOAD='{"code": "def greet(name):\n    return f\"Hello, {name}!\""}'
train_resp_file="$TMP_DIR/train.json"
train_status=$(curl -s -o "$train_resp_file" -w '%{http_code}' \
  -H 'Content-Type: application/json' \
  -d "$TRAIN_PAYLOAD" \
  "$BASE_URL/train")
check_http "$train_status" "/train"
echo "Response:"; cat "$train_resp_file" | pretty

# 2. /ask (tutoring answer) - capture session_id
header "POST /ask"
ASK_PAYLOAD='{"query": "Explain what a Python decorator does in one sentence","metadata": {"skill": "intermediate", "emotion": "focused"}}'
ask_resp_file="$TMP_DIR/ask.json"
ask_status=$(curl -s -o "$ask_resp_file" -w '%{http_code}' \
  -H 'Content-Type: application/json' \
  -d "$ASK_PAYLOAD" \
  "$BASE_URL/ask")
check_http "$ask_status" "/ask"
echo "Response:"; cat "$ask_resp_file" | pretty

if jq_exists; then
  SESSION_ID=$(jq -r '.session_id // empty' "$ask_resp_file")
else
  SESSION_ID=$(grep -o '"session_id":"[^"]*"' "$ask_resp_file" | head -n1 | cut -d'"' -f4 || true)
fi
[[ -n "$SESSION_ID" ]] || fail "Could not extract session_id from /ask response"
echo "Captured session_id: $SESSION_ID"

# 3. /refactor
header "POST /refactor"
REFAC_PAYLOAD='{"code": "def add(a,b):\n    return a+b"}'
refac_resp_file="$TMP_DIR/refactor.json"
refac_status=$(curl -s -o "$refac_resp_file" -w '%{http_code}' \
  -H 'Content-Type: application/json' \
  -d "$REFAC_PAYLOAD" \
  "$BASE_URL/refactor")
check_http "$refac_status" "/refactor"
echo "Response:"; cat "$refac_resp_file" | pretty

# 4. /code_assist (reuse session history)
header "POST /code_assist"
ASSIST_PAYLOAD=$(cat <<EOF
{"query": "Give me a Python function to compute Fibonacci using iteration","metadata": {"skill": "intermediate", "emotion": "curious"}, "session_id": "$SESSION_ID"}
EOF
)
assist_resp_file="$TMP_DIR/code_assist.json"
assist_status=$(curl -s -o "$assist_resp_file" -w '%{http_code}' \
  -H 'Content-Type: application/json' \
  -d "$ASSIST_PAYLOAD" \
  "$BASE_URL/code_assist")
check_http "$assist_status" "/code_assist"
echo "Response:"; cat "$assist_resp_file" | pretty

header "SUMMARY"
echo "Train:   $train_status -> $train_resp_file"
echo "Ask:     $ask_status -> $ask_resp_file (session_id=$SESSION_ID)"
echo "Refactor:$refac_status -> $refac_resp_file"
echo "Assist:  $assist_status -> $assist_resp_file"
echo "All tests completed successfully."
103