Spaces:
Sleeping
Sleeping
Daniel Cerda Escobar
committed on
Commit
·
11d0e57
1
Parent(s):
837f8a9
Utils file
Browse files
utils.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy
|
| 2 |
+
import sahi.predict
|
| 3 |
+
import sahi.utils
|
| 4 |
+
import PyPDF4
|
| 5 |
+
from pdf2image import convert_from_path
|
| 6 |
+
from PIL import Image
|
| 7 |
+
|
| 8 |
+
# Working directory for intermediate files — presumably page renders / crops;
# not referenced anywhere in this chunk, confirm against callers before removing.
TEMP_DIR = "temp"
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def sahi_yolov8m_inference(
    image,
    detection_model,
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.2,
    overlap_width_ratio=0.2,
    image_size=640,
    #postprocess_type="GREEDYNMM",
    #postprocess_match_metric="IOS",
    postprocess_match_threshold=0.5,
    #postprocess_class_agnostic=False,
):
    """Run SAHI object detection on *image* twice and return both renderings.

    Pass 1 is standard whole-image inference; pass 2 is sliced (tiled)
    inference with the given slice geometry. Each result is drawn onto a
    copy of the input and returned as a PIL image.

    Returns:
        tuple: (standard-inference image, sliced-inference image).
    """

    def _draw(result):
        # Overlay the predicted boxes on a fresh numpy copy of the input
        # and convert the annotated array back to a PIL image.
        rendered = sahi.utils.cv.visualize_object_predictions(
            image=numpy.array(image),
            object_prediction_list=result.object_prediction_list,
        )
        return Image.fromarray(rendered["image"])

    # The model's inference resolution is configured on the model object.
    detection_model.image_size = image_size

    # Pass 1: standard (whole-image) inference.
    standard_result = sahi.predict.get_prediction(
        image=image, detection_model=detection_model
    )

    # Pass 2: sliced inference over overlapping tiles.
    sliced_result = sahi.predict.get_sliced_prediction(
        image=image,
        detection_model=detection_model,
        slice_height=slice_height,
        slice_width=slice_width,
        overlap_height_ratio=overlap_height_ratio,
        overlap_width_ratio=overlap_width_ratio,
        #postprocess_type=postprocess_type,
        #postprocess_match_metric=postprocess_match_metric,
        postprocess_match_threshold=postprocess_match_threshold,
        #postprocess_class_agnostic=postprocess_class_agnostic,
    )

    return _draw(standard_result), _draw(sliced_result)
|
| 57 |
+
|
| 58 |
+
def convert_pdf_file(
    path,
    dpi=300,
    image_width=4961,
    image_heigth=3508,
    grayscale=True,
    image_height=None,
):
    """Rasterize a PDF with pdf2image, sized to match the first page's orientation.

    The first page's media box decides whether the output is rendered as
    landscape (width x height) or portrait (height x width).

    Args:
        path: Filesystem path to the PDF.
        dpi: Rasterization resolution passed to pdf2image.
        image_width: Target long-edge pixel size (default matches A3 @ 300 dpi).
        image_heigth: Deprecated misspelling of ``image_height``; kept for
            backward compatibility with existing callers.
        grayscale: Render in grayscale when True.
        image_height: Target short-edge pixel size; when given it takes
            precedence over ``image_heigth``.

    Returns:
        The value of ``pdf2image.convert_from_path`` — a list of PIL images,
        one per page.
    """
    # Prefer the correctly spelled keyword; fall back to the legacy name.
    if image_height is None:
        image_height = image_heigth

    # Read only the first page's media box to decide orientation; close the
    # file before pdf2image re-opens the path.
    # NOTE(review): this ignores any /Rotate entry on the page — confirm the
    # input PDFs are stored unrotated.
    with open(path, 'rb') as pdf_file:
        reader = PyPDF4.PdfFileReader(pdf_file, strict=False)
        media_box = reader.getPage(0).mediaBox
        landscape = media_box.getWidth() > media_box.getHeight()

    size = (image_width, image_height) if landscape else (image_height, image_width)
    return convert_from_path(path, dpi=dpi, size=size, grayscale=grayscale)
|