sergioska committed on
Commit
5f53f40
·
1 Parent(s): b49b1bf

fix load image

Browse files
Files changed (1) hide show
  1. app.py +16 -13
app.py CHANGED
@@ -37,7 +37,8 @@ if uploaded_image_file is not None:
37
  st.title('Upload an image file to detection')
38
 
39
  uploaded_image_zero_file = st.file_uploader("Choose an image file (zero)")
40
- texts = st.text_input('apple', 'eggs')
 
41
  if uploaded_image_zero_file is not None:
42
  with NamedTemporaryFile() as temp:
43
  temp.write(uploaded_image_zero_file.getvalue())
@@ -45,15 +46,17 @@ if uploaded_image_zero_file is not None:
45
  image = Image.open(uploaded_image_zero_file)
46
  outputImage = np.array(image)
47
  st.image(outputImage)
48
- inputs = processor(text=texts, images=image, return_tensors="pt")
49
- outputs = model(**inputs)
50
- target_sizes = torch.Tensor([image.size[::-1]])
51
- results = processor.post_process_object_detection(outputs=outputs, threshold=0.1, target_sizes=target_sizes)
52
- i = 0 # Retrieve predictions for the first image for the corresponding text queries
53
- text = texts[i]
54
- boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]
55
- st.write(results)
56
- # Print detected objects and rescaled box coordinates
57
- for box, score, label in zip(boxes, scores, labels):
58
- box = [round(i, 2) for i in box.tolist()]
59
- print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")
 
 
 
37
  st.title('Upload an image file to detection')
38
 
39
  uploaded_image_zero_file = st.file_uploader("Choose an image file (zero)")
40
+ texts = st.text_input('apple, eggs')
41
+ image = ''
42
  if uploaded_image_zero_file is not None:
43
  with NamedTemporaryFile() as temp:
44
  temp.write(uploaded_image_zero_file.getvalue())
 
46
  image = Image.open(uploaded_image_zero_file)
47
  outputImage = np.array(image)
48
  st.image(outputImage)
49
+
50
+ if st.button('check tags'):
51
+ inputs = processor(text=texts, images=image, return_tensors="pt")
52
+ outputs = model(**inputs)
53
+ target_sizes = torch.Tensor([image.size[::-1]])
54
+ results = processor.post_process_object_detection(outputs=outputs, threshold=0.1, target_sizes=target_sizes)
55
+ i = 0 # Retrieve predictions for the first image for the corresponding text queries
56
+ text = texts[i]
57
+ boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]
58
+ st.write(results)
59
+ # Print detected objects and rescaled box coordinates
60
+ for box, score, label in zip(boxes, scores, labels):
61
+ box = [round(i, 2) for i in box.tolist()]
62
+ print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")