arshadrana committed
Commit f1e814e · verified · 1 Parent(s): ca2909c

Update app.py

Files changed (1)
  1. app.py +121 -48
app.py CHANGED
@@ -10,70 +10,143 @@ client = None
 
 def initialize_client(api_key=None):
     global client
+    # Fetch the API key from the environment if it's not provided directly
     api_key = os.getenv("TOGETHER_API_KEY")
-    if api_key:
-        client = Together()
-        print("Client initialized successfully.")
-    else:
-        print("API key not found. Please set TOGETHER_API_KEY environment variable.")
+    print(api_key)
+
+    client = Together()
+
 
 def encode_image(image_path):
-    try:
-        with Image.open(image_path) as img:
-            buffered = io.BytesIO()
-            img.save(buffered, format="PNG")
-            return base64.b64encode(buffered.getvalue()).decode("utf-8")
-    except Exception as e:
-        print(f"Error encoding image: {str(e)}")
-        return None
+    with Image.open(image_path) as img:
+        buffered = io.BytesIO()
+        img.save(buffered, format="PNG")
+        return base64.b64encode(buffered.getvalue()).decode("utf-8")
 
-def detect_accident(image_path):
+
+def bot_streaming(message, history):
+    max_new_tokens = 250
+    temperature = 0.7
     if client is None:
-        initialize_client()
-        if client is None:
-            return "Error: Client not initialized."
+        try:
+            initialize_client()
+        except Exception as e:
+            history.append((message, f"Error initializing client: {str(e)}"))
+            yield history
+            return
 
-    prompt = "Determine if the radiator in the image shows signs of being involved in an accident or not."
-    image_base64 = encode_image(image_path)
-
-    if not image_base64:
-        return "Error: Could not encode the image. Please try again with a different one."
-
-    messages = [
-        {"role": "system", "content": prompt},
-        {
-            "role": "user",
-            "content": [
-                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
-            ],
-        },
-    ]
+    prompt = """
+    Determine if the radiator in the image shows signs of being involved in an accident or not
+    """
+
+
+    messages = [{"role": "system", "content": prompt}]
+
+    # Add history to messages
+    for user_msg, assistant_msg in history:
+        if isinstance(user_msg, str):  # Text message
+            messages.append(
+                {"role": "user", "content": [{"type": "text", "text": user_msg}]}
+            )
+        elif isinstance(user_msg, dict):  # Image message
+            image_base64 = encode_image(user_msg["image_path"])
+            messages.append(
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": user_msg.get("text", "")},
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"data:image/png;base64,{image_base64}"
+                            },
+                        },
+                    ],
+                }
+            )
+        messages.append(
+            {"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]}
+        )
+
+    # Prepare the current message
+    user_message_content = []
+    if isinstance(message, dict):
+        if message.get("text"):
+            user_message_content.append({"type": "text", "text": message["text"]})
+        if message.get("files") and len(message["files"]) > 0:
+            image_path = message["files"][0]
+            image_base64 = encode_image(image_path)
+            user_message_content.append(
+                {
+                    "type": "image_url",
+                    "image_url": {"url": f"data:image/png;base64,{image_base64}"},
+                }
+            )
+    elif isinstance(message, str):
+        user_message_content.append({"type": "text", "text": message})
+
+    current_message = {"role": "user", "content": user_message_content}
+    messages.append(current_message)
+
+    # Add the user's message to the history
+    user_display_message = message["text"] if isinstance(message, dict) else message
+    history = history + [(user_display_message, "")]
 
     try:
-        response = client.chat.completions.create(
+        stream = client.chat.completions.create(
             model="meta-llama/Llama-Vision-Free",
             messages=messages,
-            max_tokens=10,
-            temperature=0.5
-        ).choices[0].message["content"].strip()
-
-        # Check if response contains "accident" or "not accident"
-        return "accident" if "accident" in response.lower() else "not accident"
+            max_tokens=max_new_tokens,
+            temperature=temperature,
+            stream=True,
+        )
+
+        response = ""
+        for chunk in stream:
+            if (
+                chunk.choices
+                and chunk.choices[0].delta
+                and chunk.choices[0].delta.content is not None
+            ):
+                response += chunk.choices[0].delta.content
+                # Update the assistant's response in the history
+                history[-1] = (user_display_message, response)
+                yield history
+
+        if not response:
+            history[-1] = (
+                user_display_message,
+                "No response generated. Please try again.",
+            )
+            yield history
 
     except Exception as e:
-        print(f"Error during model call: {str(e)}")
-        return f"Error: Could not process the image. Please try again with a different one."
+        error_message = (
+            "The image is too large. Please try with a smaller image or compress the existing one."
+            if "Request Entity Too Large" in str(e)
+            else f"An error occurred: {str(e)}"
+        )
+        history[-1] = (user_display_message, error_message)
+        yield history
+
 
-# Gradio interface
+# The rest of your Gradio interface code remains the same
 with gr.Blocks() as demo:
     gr.Markdown("# Radiator Accident Detection")
-    gr.Markdown("Upload an image of a radiator to determine if it shows signs of an accident.")
+    gr.Markdown(
+        "Upload an image of a radiator to determine if it shows signs of an accident"
+    )
 
-    image_input = gr.Image(type="filepath")
-    result = gr.Textbox(label="Result")
-    submit = gr.Button("Submit")
+    chatbot = gr.Chatbot()
+    msg = gr.MultimodalTextbox(label="")
+    clear = gr.Button("Clear")
 
-    submit.click(detect_accident, inputs=image_input, outputs=result)
+    msg.submit(
+        bot_streaming,
+        [msg, chatbot],
+        chatbot,
+    )
+    clear.click(lambda: None, None, chatbot, queue=False)
 
 if __name__ == "__main__":
-    demo.launch(debug=True)
+    demo.launch(debug=True)
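
For a quick check of the new bot_streaming generator without launching the Gradio UI, a minimal sketch (the test image filename and the question text below are placeholders; assumes TOGETHER_API_KEY is exported in the environment):

# Minimal smoke test for the updated streaming handler in app.py.
# "sample_radiator.png" is a placeholder path; replace it with a real image.
from app import bot_streaming

# gr.MultimodalTextbox submits a dict with "text" and "files" keys.
message = {
    "text": "Does this radiator show accident damage?",
    "files": ["sample_radiator.png"],
}

# bot_streaming is a generator: each yield is the full chat history, with the
# assistant's partial response filled into the last (user, assistant) pair.
final_history = None
for history in bot_streaming(message, []):
    final_history = history

print(final_history[-1][1])  # fully streamed assistant reply (or the error text)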