arshadrana committed on
Commit 85309cd · verified · 1 Parent(s): 846c8a4

Update app.py

Files changed (1)
  1. app.py +59 -54
app.py CHANGED
@@ -11,80 +11,85 @@ client = None
 def initialize_client(api_key=None):
     global client
     api_key = os.getenv("TOGETHER_API_KEY")
-    if api_key:
-        client = Together()
-        print("Client initialized successfully.")
-    else:
-        print("API key not found. Please set TOGETHER_API_KEY environment variable.")
+    print(api_key)
+    client = Together()
 
 def encode_image(image_path):
-    try:
-        with Image.open(image_path) as img:
-            buffered = io.BytesIO()
-            img.save(buffered, format="PNG")
-            return base64.b64encode(buffered.getvalue()).decode("utf-8")
-    except Exception as e:
-        print(f"Error encoding image: {str(e)}")
-        return None
+    with Image.open(image_path) as img:
+        buffered = io.BytesIO()
+        img.save(buffered, format="PNG")
+        return base64.b64encode(buffered.getvalue()).decode("utf-8")
 
-def detect_accident(image_path):
+def bot_streaming(image_path, history):
+    max_new_tokens = 250
+    temperature = 0.7
     if client is None:
-        initialize_client()
-        if client is None:
-            return "Error: Client not initialized."
+        try:
+            initialize_client()
+        except Exception as e:
+            history.append(("Error initializing client", f"{str(e)}"))
+            yield history
+            return
 
-    prompt = "Analyze the attached radiator image and respond with 'accident' if it shows signs of an accident, or 'not accident' if it does not."
-    image_base64 = encode_image(image_path)
-
-    if not image_base64:
-        return "Error: Could not encode the image. Please try again with a different one."
+    prompt = """
+    Determine if the radiator in the image shows signs of being involved in an accident or not
+    """
 
-    messages = [
-        {"role": "system", "content": prompt},
-        {
-            "role": "user",
-            "content": [
-                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
-            ],
-        },
-    ]
+    messages = [{"role": "system", "content": prompt}]
+
+    # Encode the image and add to messages
+    image_base64 = encode_image(image_path)
+    messages.append({
+        "role": "user",
+        "content": [
+            {
+                "type": "image_url",
+                "image_url": {"url": f"data:image/png;base64,{image_base64}"}
+            }
+        ]
+    })
+    history = history + [("Image uploaded", "")]
 
     try:
-        response = client.chat.completions.create(
+        stream = client.chat.completions.create(
             model="meta-llama/Llama-Vision-Free",
             messages=messages,
-            max_tokens=10,
-            temperature=0.0,
+            max_tokens=max_new_tokens,
+            temperature=temperature,
+            stream=True,
         )
 
-        # Print the raw response for debugging
-        print("Raw response from model:", response)
+        response = ""
+        for chunk in stream:
+            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
+                response += chunk.choices[0].delta.content
+                history[-1] = ("Image uploaded", response)
+                yield history
 
-        # Extract and clean the response
-        response_text = response.choices[0].message.content.strip().lower()
-
-        # Return the expected results
-        if "accident" in response_text:
-            return "accident"
-        elif "not accident" in response_text:
-            return "not accident"
-        else:
-            return f"Error: Unexpected response format. Received: {response_text}"
+        if not response:
+            history[-1] = ("Image uploaded", "No response generated. Please try again.")
+            yield history
 
     except Exception as e:
-        print(f"Error during model call: {str(e)}")
-        return f"Error: Could not process the image. Details: {str(e)}"
+        error_message = (
+            "The image is too large. Please try with a smaller image or compress the existing one."
+            if "Request Entity Too Large" in str(e)
+            else f"An error occurred: {str(e)}"
+        )
+        history[-1] = ("Image uploaded", error_message)
+        yield history
 
-# Gradio interface
+# Set up Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# Radiator Accident Detection")
-    gr.Markdown("Upload an image of a radiator to determine if it shows signs of an accident.")
+    gr.Markdown("Upload an image of a radiator to determine if it shows signs of an accident")
 
-    image_input = gr.Image(type="filepath", label="Upload Radiator Image")
-    result = gr.Textbox(label="Result")
-    submit = gr.Button("Submit")
+    chatbot = gr.Chatbot()
+    img = gr.Image(type="filepath", label="Upload Radiator Image")
+    clear = gr.Button("Clear")
 
-    submit.click(detect_accident, inputs=image_input, outputs=result)
+    img.upload(bot_streaming, inputs=[img, chatbot], outputs=chatbot)
+    clear.click(lambda: None, None, chatbot, queue=False)
 
 if __name__ == "__main__":
     demo.launch(debug=True)
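
For reference, a minimal sketch of exercising the new bot_streaming generator outside Gradio. It assumes TOGETHER_API_KEY is set in the environment; "radiator.png" is a hypothetical local test image, not a file from this repo:

# Hypothetical driver for the bot_streaming generator this commit adds.
# bot_streaming yields the full chat history after each streamed chunk,
# so the last tuple's second element holds the partial model response.
from app import bot_streaming

history = []
for updated in bot_streaming("radiator.png", history):
    user_msg, partial = updated[-1]
    print(partial)  # grows as chunks arrive from the Together stream

Because the handler yields the whole history list each time, the same generator drives both the Gradio Chatbot updates and a plain loop like this one.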