Mahrukhh commited on
Commit
487cdc9
·
verified ·
1 Parent(s): 694b9d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -71
app.py CHANGED
@@ -5,49 +5,44 @@ import requests
5
  from PIL import Image
6
  from io import BytesIO
7
 
8
- # πŸ” Set your Groq API key here or use environment variable
9
  GROQ_API_KEY = os.getenv("GROQ_API_KEY") or "PASTE_YOUR_GROQ_API_KEY_HERE"
10
-
11
- # βœ… Groq LLaMA 3 model endpoint (text-only)
12
- GROQ_MODEL = "llama-3-70b-8192"
13
  GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"
14
 
15
- # Store history per session
16
  history = []
17
 
18
  def analyze_image_with_prompt(image, user_prompt):
19
  global history
20
 
21
- if image is None:
22
- return "⚠️ Please upload an image."
23
-
24
- if not user_prompt.strip():
25
- return "⚠️ Please describe the damage or ask a question."
26
 
27
- # Convert image to base64 (for reference only, model won't see it)
28
  buffered = BytesIO()
29
  image.save(buffered, format="JPEG")
30
  img_base64 = base64.b64encode(buffered.getvalue()).decode()
31
 
32
- # Add system message once
33
- if len(history) == 0:
34
  history.append({
35
  "role": "system",
36
  "content": (
37
- "You are a helpful construction engineer bot.\n"
38
- "The user has uploaded an image (you can't see it).\n"
39
- "Ask them to describe the visible damage and respond with:\n"
40
- "- Type of damage\n"
41
- "- Possible causes\n"
42
  "- Recommended tools/materials\n"
43
- "- Estimated repair time."
 
44
  )
45
  })
46
 
47
- # Add user message
48
  history.append({
49
  "role": "user",
50
- "content": f"(Image uploaded by user)\nPrompt: {user_prompt}"
51
  })
52
 
53
  headers = {
@@ -58,66 +53,67 @@ def analyze_image_with_prompt(image, user_prompt):
58
  payload = {
59
  "model": GROQ_MODEL,
60
  "messages": history,
61
- "temperature": 0.5
62
  }
63
 
64
- try:
65
- response = requests.post(GROQ_URL, headers=headers, json=payload)
66
- response.raise_for_status()
67
  reply = response.json()["choices"][0]["message"]["content"]
68
  history.append({"role": "assistant", "content": reply})
69
  return reply
70
- except Exception as e:
71
- return f"❌ Error: {str(e)}"
72
 
73
  def reset_session():
74
  global history
75
  history = []
76
- return "πŸ”„ Session reset. Please upload a new image and describe the damage."
77
 
78
  # 🌐 Gradio Interface
79
- def build_ui():
80
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
81
- gr.Markdown("""
82
- # πŸ—οΈ Construction Damage Assistant
83
- Upload an image of damage and briefly describe it. AI will help you analyze and recommend fixes.
84
- """)
85
-
86
- with gr.Row():
87
- with gr.Column(scale=1):
88
- img_input = gr.Image(
89
- type="pil",
90
- label="πŸ“· Upload Damage Image",
91
- height=300
92
- )
93
-
94
- prompt_input = gr.Textbox(
95
- lines=3,
96
- label="✏️ Describe the Damage or Ask a Question",
97
- placeholder="e.g. What type of crack is this? What caused it?"
98
- )
99
-
100
- output = gr.Textbox(
101
- label="🧠 AI Assistant Response",
102
- lines=18,
103
- show_copy_button=True
104
- )
105
-
106
- img_input.change(analyze_image_with_prompt, inputs=[img_input, prompt_input], outputs=output)
107
- prompt_input.submit(analyze_image_with_prompt, inputs=[img_input, prompt_input], outputs=output)
108
-
109
- with gr.Row():
110
- reset_btn = gr.Button("πŸ” Reset Session")
111
- reset_btn.click(reset_session, outputs=output)
112
-
113
- gr.Markdown("""
114
- <hr>
115
- ⚠️ <small>The AI cannot see the image β€” your description is essential for accurate help.</small>
116
- """)
117
-
118
- return demo
 
 
 
119
 
120
  # πŸš€ Launch the app
121
- if __name__ == "__main__":
122
- ui = build_ui()
123
- ui.launch()
 
5
  from PIL import Image
6
  from io import BytesIO
7
 
8
# 🔐 Groq API key: the environment variable takes precedence; the literal
# string is only a placeholder for local experimentation.
GROQ_API_KEY = os.getenv("GROQ_API_KEY") or "PASTE_YOUR_GROQ_API_KEY_HERE"

# Text-only Groq-hosted LLaMA 3 model, reached via the OpenAI-compatible
# chat-completions endpoint.
GROQ_MODEL = "llama3-8b-8192"
GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"

# 🧠 Conversation history for the (single, global) chat session.
history = []
 
16
  def analyze_image_with_prompt(image, user_prompt):
17
  global history
18
 
19
+ if image is None or not user_prompt.strip():
20
+ return "⚠️ Please upload an image and describe the problem."
 
 
 
21
 
22
+ # Base64 (not used by model, just for completeness)
23
  buffered = BytesIO()
24
  image.save(buffered, format="JPEG")
25
  img_base64 = base64.b64encode(buffered.getvalue()).decode()
26
 
27
+ # Add system message on first use
28
+ if not history:
29
  history.append({
30
  "role": "system",
31
  "content": (
32
+ "You are a helpful construction assistant bot.\n"
33
+ "You cannot see images but users will describe what is shown.\n"
34
+ "Based on their description, provide:\n"
35
+ "- Type of construction damage\n"
36
+ "- Likely cause\n"
37
  "- Recommended tools/materials\n"
38
+ "- Estimated repair time\n"
39
+ "Be concise and professional."
40
  )
41
  })
42
 
 
43
  history.append({
44
  "role": "user",
45
+ "content": f"(Image uploaded)\nProblem description: {user_prompt}"
46
  })
47
 
48
  headers = {
 
53
  payload = {
54
  "model": GROQ_MODEL,
55
  "messages": history,
56
+ "temperature": 0.7
57
  }
58
 
59
+ response = requests.post(GROQ_URL, headers=headers, json=payload)
60
+
61
+ if response.status_code == 200:
62
  reply = response.json()["choices"][0]["message"]["content"]
63
  history.append({"role": "assistant", "content": reply})
64
  return reply
65
+ else:
66
+ return f"❌ Error: {response.status_code}\n{response.text}"
67
 
68
def reset_session():
    """Start a fresh chat session.

    Drops the accumulated global conversation history so the next question
    begins a brand-new conversation, and returns a short confirmation
    message for display in the UI output box.
    """
    global history
    history = []
    return "Session reset. Upload a new image and ask a question."
72
 
73
# 🌐 Gradio interface: wires the handlers above into a single-page app.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🏗️ Construction Damage Assistant

    This assistant helps you understand construction damage.

    **Step 1:** 📤 Upload a picture of the damaged area.
    **Step 2:** 📝 Describe the problem or ask a question about the image.
    **Step 3:** 💡 Get a smart, helpful response.

    ---
    """)

    with gr.Row():
        with gr.Column(scale=1):
            # Left column: user inputs.
            img_input = gr.Image(
                type="pil",
                label="Step 1: Upload a Picture",
                height=300
            )

            prompt_input = gr.Textbox(
                lines=3,
                label="Step 2: Ask a Question / Describe the Damage",
                placeholder="e.g. What type of crack is this? What might have caused it?"
            )

            with gr.Row():
                submit_btn = gr.Button("🔍 Get Answer", variant="primary")
                reset_btn = gr.Button("🔁 New Session")

        with gr.Column(scale=1):
            # Right column: assistant response.
            output = gr.Textbox(
                label="Step 3: Assistant's Answer",
                lines=18,
                show_copy_button=True
            )

    # Event wiring: submit runs the analysis; reset clears the session.
    submit_btn.click(analyze_image_with_prompt, inputs=[img_input, prompt_input], outputs=[output])
    reset_btn.click(reset_session, outputs=[output])

    gr.Markdown("---")
    gr.Markdown("⚠️ **Note:** The AI does not process the image directly. Please describe what you see.")

# 🚀 Launch only when run as a script — guarding prevents the server from
# starting as a side effect of importing this module (e.g. in tests).
if __name__ == "__main__":
    demo.launch()