Mahrukhh commited on
Commit
a7e0859
·
verified ·
1 Parent(s): 4052fe3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -53
app.py CHANGED
@@ -9,7 +9,7 @@ from io import BytesIO
9
  GROQ_API_KEY = os.getenv("GROQ_API_KEY") or "PASTE_YOUR_GROQ_API_KEY_HERE"
10
 
11
  # βœ… Groq LLaMA 3 model endpoint (text-only)
12
- GROQ_MODEL = "llama-3.3-70b-versatile"
13
  GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
14
 
15
  # Store history per session
@@ -18,28 +18,33 @@ history = []
18
  def analyze_image_with_prompt(image, user_prompt):
19
  global history
20
 
 
 
 
 
 
 
21
  # Convert image to base64 (for reference only, model won't see it)
22
  buffered = BytesIO()
23
  image.save(buffered, format="JPEG")
24
  img_base64 = base64.b64encode(buffered.getvalue()).decode()
25
 
26
- # Add system message on first call
27
  if len(history) == 0:
28
  history.append({
29
  "role": "system",
30
  "content": (
31
  "You are a helpful construction engineer bot.\n"
32
- "Users may upload an image, but since you can't see it,\n"
33
- "ask them to describe the damage. Once described, identify:\n"
34
  "- Type of damage\n"
35
  "- Possible causes\n"
36
  "- Recommended tools/materials\n"
37
- "- Estimated repair time.\n"
38
- "Then answer follow-up questions."
39
  )
40
  })
41
 
42
- # Append user input (with note that image was uploaded)
43
  history.append({
44
  "role": "user",
45
  "content": f"(Image uploaded by user)\nPrompt: {user_prompt}"
@@ -53,62 +58,66 @@ def analyze_image_with_prompt(image, user_prompt):
53
  payload = {
54
  "model": GROQ_MODEL,
55
  "messages": history,
56
- "temperature": 0.7
57
  }
58
 
59
- response = requests.post(GROQ_URL, headers=headers, json=payload)
60
-
61
- if response.status_code == 200:
62
  reply = response.json()["choices"][0]["message"]["content"]
63
  history.append({"role": "assistant", "content": reply})
64
  return reply
65
- else:
66
- return f"\u274c Error: {response.status_code}\n{response.text}"
67
 
68
  def reset_session():
69
  global history
70
  history = []
71
- return "Session reset. Upload a new image and ask a question."
72
 
73
  # 🌐 Gradio Interface
74
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
75
- gr.Markdown("""
76
- # πŸ—οΈ Construction Damage Assistant
77
- Upload an image and describe the damage. The AI will help you identify the issue and recommend repairs.
78
- """)
79
-
80
- with gr.Row():
81
- with gr.Column(scale=1):
82
- img_input = gr.Image(
83
- type="pil",
84
- label="Upload Damage Image",
85
- height=300
86
- )
87
-
88
- prompt_input = gr.Textbox(
89
- lines=3,
90
- label="Describe the Problem or Ask a Question",
91
- placeholder="e.g. What type of crack is this? What caused it?"
92
- )
93
-
94
- with gr.Row():
95
- submit_btn = gr.Button("πŸ” Analyze", variant="primary")
96
- reset_btn = gr.Button("πŸ” New Session")
97
-
98
- with gr.Column(scale=2):
99
- output = gr.Textbox(
100
- label="🧠 AI Assistant Response",
101
- lines=18,
102
- show_copy_button=True
103
- )
104
-
105
- submit_btn.click(analyze_image_with_prompt, inputs=[img_input, prompt_input], outputs=[output])
106
- reset_btn.click(reset_session, outputs=[output])
107
-
108
- gr.Markdown("""
109
- <hr>
110
- ⚠️ **Note**: The AI cannot directly analyze images. Please describe what you see.
111
- """)
 
 
112
 
113
  # πŸš€ Launch the app
114
- demo.launch()
 
 
 
9
# API key is read from the environment; the literal placeholder is only a
# reminder and will be rejected by the API if actually sent.
GROQ_API_KEY = os.getenv("GROQ_API_KEY") or "PASTE_YOUR_GROQ_API_KEY_HERE"

# ✅ Groq LLaMA 3 model endpoint (text-only).
# NOTE(review): Groq's published model id is "llama3-70b-8192" — no hyphen
# between "llama" and "3". The previous value "llama-3-70b-8192" is not a
# valid Groq model id and requests using it fail with model_not_found.
GROQ_MODEL = "llama3-70b-8192"
GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"

# Store history per session
 
18
  def analyze_image_with_prompt(image, user_prompt):
19
  global history
20
 
21
+ if image is None:
22
+ return "⚠️ Please upload an image."
23
+
24
+ if not user_prompt.strip():
25
+ return "⚠️ Please describe the damage or ask a question."
26
+
27
  # Convert image to base64 (for reference only, model won't see it)
28
  buffered = BytesIO()
29
  image.save(buffered, format="JPEG")
30
  img_base64 = base64.b64encode(buffered.getvalue()).decode()
31
 
32
+ # Add system message once
33
  if len(history) == 0:
34
  history.append({
35
  "role": "system",
36
  "content": (
37
  "You are a helpful construction engineer bot.\n"
38
+ "The user has uploaded an image (you can't see it).\n"
39
+ "Ask them to describe the visible damage and respond with:\n"
40
  "- Type of damage\n"
41
  "- Possible causes\n"
42
  "- Recommended tools/materials\n"
43
+ "- Estimated repair time."
 
44
  )
45
  })
46
 
47
+ # Add user message
48
  history.append({
49
  "role": "user",
50
  "content": f"(Image uploaded by user)\nPrompt: {user_prompt}"
 
58
  payload = {
59
  "model": GROQ_MODEL,
60
  "messages": history,
61
+ "temperature": 0.5
62
  }
63
 
64
+ try:
65
+ response = requests.post(GROQ_URL, headers=headers, json=payload)
66
+ response.raise_for_status()
67
  reply = response.json()["choices"][0]["message"]["content"]
68
  history.append({"role": "assistant", "content": reply})
69
  return reply
70
+ except Exception as e:
71
+ return f"❌ Error: {str(e)}"
72
 
73
def reset_session():
    """Forget every stored message so the next upload starts a fresh chat.

    Returns a short status string suitable for the output textbox.
    """
    global history
    # Rebind (rather than mutate) so the module-level list is replaced wholesale.
    history = []
    return "πŸ”„ Session reset. Please upload a new image and describe the damage."
77
 
78
# 🌐 Gradio Interface
def build_ui():
    """Assemble the Gradio Blocks app for the damage assistant and return it.

    Launching is left to the caller so the UI can be embedded or started
    separately. Analysis runs whenever the image changes or the prompt is
    submitted; the reset button clears the conversation history.
    """
    with gr.Blocks(theme=gr.themes.Soft()) as app:
        gr.Markdown("""
        # πŸ—οΈ Construction Damage Assistant
        Upload an image of damage and briefly describe it. AI will help you analyze and recommend fixes.
        """)

        with gr.Row():
            with gr.Column(scale=1):
                img_input = gr.Image(
                    type="pil",
                    label="πŸ“· Upload Damage Image",
                    height=300,
                )
                prompt_input = gr.Textbox(
                    lines=3,
                    label="✏️ Describe the Damage or Ask a Question",
                    placeholder="e.g. What type of crack is this? What caused it?",
                )

        output = gr.Textbox(
            label="🧠 AI Assistant Response",
            lines=18,
            show_copy_button=True,
        )

        # Both triggers feed the same handler with the same inputs/output.
        wiring = dict(inputs=[img_input, prompt_input], outputs=output)
        img_input.change(analyze_image_with_prompt, **wiring)
        prompt_input.submit(analyze_image_with_prompt, **wiring)

        with gr.Row():
            reset_btn = gr.Button("πŸ” Reset Session")
        reset_btn.click(reset_session, outputs=output)

        gr.Markdown("""
        <hr>
        ⚠️ <small>The AI cannot see the image β€” your description is essential for accurate help.</small>
        """)

    return app
119
 
120
# πŸš€ Launch the app only when run as a script (not on import).
if __name__ == "__main__":
    build_ui().launch()