arshadrana commited on
Commit
aec026b
·
verified ·
1 Parent(s): 95f5bc6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -64
app.py CHANGED
@@ -10,89 +10,77 @@ client = None
10
 
11
  def initialize_client(api_key=None):
12
  global client
13
- # Fetch the API key from the environment if it's not provided directly
14
  api_key = os.getenv("TOGETHER_API_KEY")
15
- print(api_key)
16
-
17
- client = Together()
 
 
18
 
19
  def encode_image(image_path):
20
- with Image.open(image_path) as img:
21
- buffered = io.BytesIO()
22
- img.save(buffered, format="PNG")
23
- return base64.b64encode(buffered.getvalue()).decode("utf-8")
 
 
 
 
24
 
25
- def bot_streaming(image_path, history):
26
- max_new_tokens = 350 # Increase for potentially longer responses
27
- temperature = 0.7
28
  if client is None:
29
- try:
30
- initialize_client()
31
- except Exception as e:
32
- history.append(("Error initializing client", f"{str(e)}"))
33
- yield history
34
- return
35
 
36
- prompt = """
37
- Determine if the radiator in the image shows signs of being involved in an accident or not
38
- """
39
-
40
- messages = [{"role": "system", "content": prompt}]
41
-
42
- # Encode the image and add to messages
43
  image_base64 = encode_image(image_path)
44
- messages.append({
45
- "role": "user",
46
- "content": [
47
- {
48
- "type": "image_url",
49
- "image_url": {"url": f"data:image/png;base64,{image_base64}"}
50
- }
51
- ]
52
- })
53
- history = history + [("Image uploaded", "")]
 
 
 
54
 
55
  try:
56
- stream = client.chat.completions.create(
57
  model="meta-llama/Llama-Vision-Free",
58
  messages=messages,
59
- max_tokens=max_new_tokens,
60
- temperature=temperature,
61
- stream=True,
62
  )
63
 
64
- response = ""
65
- for chunk in stream:
66
- if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content is not None:
67
- response += chunk.choices[0].delta.content
68
- history[-1] = ("Image uploaded", response)
69
- yield history
70
-
71
- # Handle incomplete responses
72
- if not response.strip():
73
- history[-1] = ("Image uploaded", "No response generated. Please try again.")
74
- yield history
75
 
76
  except Exception as e:
77
- error_message = (
78
- "The image is too large. Please try with a smaller image or compress the existing one."
79
- if "Request Entity Too Large" in str(e)
80
- else f"An error occurred: {str(e)}"
81
- )
82
- history[-1] = ("Image uploaded", error_message)
83
- yield history
84
 
85
- # Set up Gradio interface
86
  with gr.Blocks() as demo:
87
  gr.Markdown("# Radiator Accident Detection")
88
- gr.Markdown("Upload an image of a radiator to determine if it shows signs of an accident")
89
 
90
- chatbot = gr.Chatbot()
91
- img = gr.Image(type="filepath", label="Upload Radiator Image")
92
- clear = gr.Button("Clear")
93
 
94
- img.upload(bot_streaming, inputs=[img, chatbot], outputs=chatbot)
95
- clear.click(lambda: None, None, chatbot, queue=False)
96
 
97
  if __name__ == "__main__":
98
- demo.launch(share=True, debug=True) # Set share=True to create a public link
 
10
 
11
def initialize_client(api_key=None):
    """Initialize the module-level Together API client.

    Args:
        api_key: Optional explicit API key. When omitted, falls back to the
            TOGETHER_API_KEY environment variable.

    Side effects:
        Sets the global `client` on success; prints a diagnostic either way.
    """
    global client
    # Bug fix: the original unconditionally overwrote the `api_key` argument
    # with the environment variable, so an explicitly passed key was ignored.
    api_key = api_key or os.getenv("TOGETHER_API_KEY")
    if api_key:
        # Pass the key explicitly so the client does not depend on the
        # environment variable also being visible to the SDK.
        client = Together(api_key=api_key)
        print("Client initialized successfully.")
    else:
        print("API key not found. Please set TOGETHER_API_KEY environment variable.")
19
 
20
def encode_image(image_path):
    """Load an image from disk and return it as a base64-encoded PNG string.

    Returns:
        The UTF-8 string holding the base64 encoding of the PNG bytes, or
        None when the file cannot be read or converted.
    """
    try:
        buffer = io.BytesIO()
        with Image.open(image_path) as img:
            # Re-encode as PNG regardless of the source format so the data
            # URL built by the caller is always consistent.
            img.save(buffer, format="PNG")
        raw_png = buffer.getvalue()
        return base64.b64encode(raw_png).decode("utf-8")
    except Exception as e:
        print(f"Error encoding image: {str(e)}")
        return None
29
 
30
def detect_accident(image_path):
    """Classify a radiator image as 'accident' or 'not accident'.

    Args:
        image_path: Filesystem path to the uploaded image.

    Returns:
        'accident' or 'not accident' on success, otherwise a human-readable
        "Error: ..." string (client init, image encoding, or model failure).
    """
    # Lazily initialize the shared Together client on first use.
    if client is None:
        initialize_client()
        if client is None:
            return "Error: Client not initialized."

    # Constrain the model to a two-label answer so the reply is parseable.
    prompt = "Given the image of a radiator, respond with 'accident' if it shows signs of an accident or 'not accident' if it does not."

    image_base64 = encode_image(image_path)
    if not image_base64:
        return "Error: Could not encode the image. Please try again with a different one."

    messages = [
        {"role": "system", "content": prompt},
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}},
            ],
        },
    ]

    try:
        response = client.chat.completions.create(
            model="meta-llama/Llama-Vision-Free",
            messages=messages,
            max_tokens=10,  # Keep this low to limit the response
            temperature=0.0,  # Reduce randomness
        )

        # Normalize the reply before validating. The original exact-string
        # comparison rejected otherwise-valid answers such as "Accident."
        # or 'accident' (trailing punctuation / quoting from the model).
        response_text = response.choices[0].message.content.strip().lower()
        response_text = response_text.strip("'\".! ")
        if response_text in ("accident", "not accident"):
            return response_text
        return "Error: Unexpected response format."

    except Exception as e:
        print(f"Error during model call: {str(e)}")
        return f"Error: Could not process the image. Details: {str(e)}"
 
 
 
 
 
73
 
74
# Gradio interface
# Minimal UI: image upload -> Submit -> text verdict produced by detect_accident.
with gr.Blocks() as demo:
    gr.Markdown("# Radiator Accident Detection")
    gr.Markdown("Upload an image of a radiator to determine if it shows signs of an accident.")

    # "filepath" mode hands detect_accident a path on disk (matches its signature).
    image_input = gr.Image(type="filepath")
    result = gr.Textbox(label="Result")
    submit = gr.Button("Submit")

    # Wire the button: run detect_accident(image_input) and show its string in `result`.
    submit.click(detect_accident, inputs=image_input, outputs=result)

if __name__ == "__main__":
    # debug=True surfaces tracebacks in the console while developing.
    demo.launch(debug=True)