prashantmatlani committed on
Commit
fdc8455
·
1 Parent(s): da684b4

updated core_logic, app

Browse files
Files changed (2) hide show
  1. app.py +15 -21
  2. core_logic.py +15 -12
app.py CHANGED
@@ -6,6 +6,10 @@ The Interface Skeleton - The code sets up the navigation panel and the multimoda
6
  """
7
 
8
 
 
 
 
 
9
  import gradio as gr
10
  from core_logic import chat_function
11
  from storage import save_chat, load_history, get_chat_content
@@ -14,52 +18,42 @@ with gr.Blocks() as demo:
14
  chat_id_state = gr.State("")
15
 
16
  with gr.Row():
17
- # --- Sidebar ---
18
  with gr.Column(scale=1, variant="secondary"):
19
  gr.Markdown("### 🛠️ Silicon Architect")
20
  new_btn = gr.Button("➕ New Chat", variant="primary")
21
-
22
  history_list = gr.Dataset(
23
  components=[gr.Textbox(visible=False)],
24
  label="Recent Conversations",
25
  samples=load_history()
26
  )
27
 
28
- # --- Main Chat ---
29
  with gr.Column(scale=4):
30
- # FIX: Removed 'multimodal' and 'autoscroll' which caused the TypeError
31
- chatbot = gr.Chatbot(
32
- show_label=False,
33
- height=700
34
- )
35
-
36
- # The input remains multimodal; the chatbot will display
37
- # whatever the input provides automatically.
38
  chat_input = gr.MultimodalTextbox(
39
  interactive=True,
40
  placeholder="Discuss architecture or upload code...",
41
  show_label=False
42
  )
43
 
44
- # --- Event Logic ---
45
  def bot_response(message, history):
46
- # chat_function yields strings; we must return the updated history dict
47
- history.append({"role": "user", "content": message["text"]})
48
- # Placeholder for assistant message
 
 
49
  history.append({"role": "assistant", "content": ""})
50
 
51
- for partial_resp in chat_function(message, history[:-2]):
 
 
52
  history[-1]["content"] = partial_resp
53
  yield history
54
 
 
55
  chat_input.submit(bot_response, [chat_input, chatbot], [chatbot]).then(
56
  lambda h: save_chat(None, h), [chatbot], None
57
  )
58
 
59
  new_btn.click(lambda: ([], ""), None, [chatbot, chat_id_state])
60
 
61
- # Move theme and CSS here for Gradio 6.0
62
- demo.launch(
63
- theme=gr.themes.Soft(),
64
- css="styles.css"
65
- )
 
6
  """
7
 
8
 
9
+ import gradio as gr
10
+ from core_logic import chat_function
11
+ from storage import save_chat, load_history, get_chat_content
12
+
13
  import gradio as gr
14
  from core_logic import chat_function
15
  from storage import save_chat, load_history, get_chat_content
 
18
  chat_id_state = gr.State("")
19
 
20
  with gr.Row():
 
21
  with gr.Column(scale=1, variant="secondary"):
22
  gr.Markdown("### 🛠️ Silicon Architect")
23
  new_btn = gr.Button("➕ New Chat", variant="primary")
 
24
  history_list = gr.Dataset(
25
  components=[gr.Textbox(visible=False)],
26
  label="Recent Conversations",
27
  samples=load_history()
28
  )
29
 
 
30
  with gr.Column(scale=4):
31
+ chatbot = gr.Chatbot(show_label=False, height=700)
 
 
 
 
 
 
 
32
  chat_input = gr.MultimodalTextbox(
33
  interactive=True,
34
  placeholder="Discuss architecture or upload code...",
35
  show_label=False
36
  )
37
 
 
38
def bot_response(message, history):
    """Stream the assistant's reply into the chat history.

    Parameters:
        message: MultimodalTextbox payload — a dict with "text" and "files"
            keys (the "files" list, if any, is consumed by chat_function).
        history: list of {"role": ..., "content": ...} dicts in Gradio's
            "messages" chatbot format.

    Yields:
        The progressively updated history list, so the Chatbot component
        re-renders after every streamed token.
    """
    # 1. Add the user message. .get() guards against a payload with no
    #    "text" key (e.g. a file-only submission), which message["text"]
    #    would turn into a KeyError.
    user_content = message.get("text", "")
    history.append({"role": "user", "content": user_content})

    # 2. Add an empty assistant slot that the stream below fills in.
    history.append({"role": "assistant", "content": ""})

    # 3. Stream the response.
    # FIX: pass history[:-2], not history[:-1]. chat_function builds the
    # current user turn into its own prompt, so including the user message
    # we just appended would send that turn to the model twice.
    for partial_resp in chat_function(message, history[:-2]):
        history[-1]["content"] = partial_resp
        yield history
51
 
52
+ # Event Handlers
53
  chat_input.submit(bot_response, [chat_input, chatbot], [chatbot]).then(
54
  lambda h: save_chat(None, h), [chatbot], None
55
  )
56
 
57
  new_btn.click(lambda: ([], ""), None, [chatbot, chat_id_state])
58
 
59
+ demo.launch(theme=gr.themes.Soft(), css="styles.css")
 
 
 
 
core_logic.py CHANGED
@@ -31,33 +31,36 @@ When a user provides files, analyze the code structure and logic before proposin
31
  """
32
 
33
  def chat_function(message, history):
34
- # message is now a dict: {"text": "...", "files": [...]}
35
  user_text = message.get("text", "")
36
  files = message.get("files", [])
37
 
38
- # Process Files
39
  context_from_files = ""
40
  for f in files:
41
- # f can be a filepath string or a dict depending on upload status
42
  path = f["path"] if isinstance(f, dict) else f
43
  context_from_files += parse_file(path)
44
 
45
- # Research Trigger
46
  if any(keyword in user_text.lower() for keyword in ["search", "docs", "latest"]):
47
  research_context = web_search(user_text)
48
  prompt = f"RESEARCH:\n{research_context}\n\nFILES:\n{context_from_files}\n\nUSER: {user_text}"
49
  else:
50
  prompt = f"FILES:\n{context_from_files}\n\nUSER: {user_text}"
51
 
52
- # Build Gradio 6.0 compliant messages
53
  messages = [{"role": "system", "content": SYSTEM_PROMPT}]
54
- # 'history' is already a list of dicts in Gradio 6
55
- messages.extend(history)
 
 
 
56
  messages.append({"role": "user", "content": prompt})
57
 
58
  response_text = ""
59
- for chunk in client.chat_completion(messages, max_tokens=2048, stream=True, temperature=0.2):
60
- token = chunk.choices[0].delta.content
61
- if token:
62
- response_text += token
63
- yield response_text
 
 
 
 
 
 
31
  """
32
 
33
def chat_function(message, history):
    """Generate a streamed LLM reply for one multimodal chat turn.

    Parameters:
        message: dict with "text" (user prompt) and "files" (uploads;
            each entry is a path string or a dict with a "path" key).
        history: prior turns as {"role", "content"} dicts; forwarded to
            the model after the system prompt.

    Yields:
        The accumulated response text after each streamed token. On any
        streaming failure, yields a single "Architecture Error: ..." string
        instead of raising.
    """
    user_text = message.get("text", "")
    attached = message.get("files", [])

    # Concatenate the parsed contents of every uploaded file.
    file_context = "".join(
        parse_file(item["path"] if isinstance(item, dict) else item)
        for item in attached
    )

    # A research keyword in the user text triggers a web search whose
    # findings are prepended to the prompt.
    lowered = user_text.lower()
    if any(word in lowered for word in ("search", "docs", "latest")):
        findings = web_search(user_text)
        prompt = f"RESEARCH:\n{findings}\n\nFILES:\n{file_context}\n\nUSER: {user_text}"
    else:
        prompt = f"FILES:\n{file_context}\n\nUSER: {user_text}"

    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    # Re-shape each prior turn so only the role/content pair reaches the API.
    messages.extend(
        {"role": turn["role"], "content": turn["content"]} for turn in history
    )
    messages.append({"role": "user", "content": prompt})

    response_text = ""
    try:
        stream = client.chat_completion(
            messages, max_tokens=2048, stream=True, temperature=0.2
        )
        for chunk in stream:
            # Some stream events arrive without a populated choices list;
            # skip those rather than indexing into them.
            if not (hasattr(chunk, 'choices') and len(chunk.choices) > 0):
                continue
            token = chunk.choices[0].delta.content
            if token:
                response_text += token
                yield response_text
    except Exception as e:
        yield f"Architecture Error: {str(e)}"