File size: 4,053 Bytes
2c6b75e
 
 
57e2463
 
 
 
f18b773
 
57e2463
2c6b75e
f18b773
 
9d50c24
23c3cd4
f18b773
2c6b75e
 
b19ba23
 
0857b98
2c6b75e
 
 
 
 
 
 
 
 
 
 
0857b98
2c6b75e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23c3cd4
2c6b75e
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import os

import gradio as gr
from PIL import Image, ImageDraw
from io import BytesIO
import boto3

# Start an AWS lab session and copy the session credentials (AWS CLI).
#
# SECURITY: hard-coded AWS credentials must never be committed to source
# control — anyone with the file gains account access. The literals below
# are kept only as a fallback for the original (short-lived, now expired)
# lab session; set the standard AWS environment variables instead.
ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY_ID', 'ASIA47CRWWK5XKZMPTTZ')
SECRET_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', 'Xn/8P+HXMq8aQF2ZjKQw78vPOQ4eQFTA7x+dppOE')
SESSION_TOKEN = os.environ.get('AWS_SESSION_TOKEN', 'IQoJb3JpZ2luX2VjENb//////////wEaCXVzLXdlc3QtMiJIMEYCIQDYWsAhaEVjsOLcA0MIzQ2qRnLojp4GF2PpAJvK3NSCNgIhALfrOzfHBkfhpe2j/8N3r4ojUY8EVTSdmx4wMLrJj2LbKqMCCG8QABoMODkxMzc3MDcwNzc5IgyEYmwqgac9JNnYvyQqgALV5+iV04/MXnA9qCZCT87zVoUtq9xyYoAUh2T7PiitBU5dN+nEYojC1C8r4TD0Lbfe/9S20V8srAMwet6XEsybHIHu92FjVFHlzTlPYM3w0PiJg1km/X/4BEUo1J5htZSDThEL00euy4zOnXH93Xyia+tezh79kXYlVYkD7AC3WLxvQoPvfgOjiQXrSp6qYpdSvMCDZPSG2Sw48OpYH8BVyBaGhkan2pVmBKFWl2Jwh3HdTuNaoQff5XaW+syNXglWgeI7I1Mty0IbbiF3qCVAFiqtX2dHu6ol7TcMAUWcI1wubkDsPUVMbPOSWjiDOH/Gxr/gnPem4mveAWGqrgRKMOmwvMAGOpwB7K2oqI+KxwP01iHMT4GsI6b+L/a/HarsJr7kML43zlxQz53gFxmlD55HPDvlDBiKm7lEm8PGSOXzOkn28mW5Ro0Yd4W+z5vHpddWMDUnE/2V9Q9vvoKfXHOGUAQVjhqCZPINn8g39TUAyK35v1HgBVxQy/anthk7KSSJR9cSbluOBp47gPDSNKEyepRXJps4TZDn2tVUfPUrTMBd')

# Shared AWS clients and demo configuration.
s3_client = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN)
rekognition = boto3.client('rekognition', region_name='us-east-1', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN)
s3BucketName = 'eac5'          # bucket holding the sample images and uploads
rutes = ["biblioteca.jpg", "skyline_tarragona.jpg"]  # sample object keys

def procesar(input_image):
    """Upload an image to S3, detect labels with Rekognition, and annotate it.

    Parameters:
        input_image: local file path of the image; the same string is used
            as the S3 object key.

    Returns:
        A tuple ``(labels_text, annotated_image)`` where ``labels_text`` is
        one "<name> <confidence>%" entry per line and ``annotated_image`` is
        a PIL image with a red box drawn around each detected instance.
    """
    # Mirror the local file into the bucket so Rekognition can read it.
    s3_client.upload_file(input_image, s3BucketName, input_image)

    # Fetch the bytes back from S3 so the image we annotate is exactly the
    # object Rekognition analyses. (Was hard-coded to "eac5"; use the
    # shared constant so the bucket is configured in one place.)
    image_file = s3_client.get_object(Bucket=s3BucketName, Key=input_image)
    image_bytes = image_file['Body'].read()
    image = Image.open(BytesIO(image_bytes))

    response = rekognition.detect_labels(
        Image={
            'S3Object': {
                'Bucket': s3BucketName,
                'Name': input_image,
            }
        })

    resultats = []
    draw = ImageDraw.Draw(image)

    for label in response["Labels"]:
        # Collect the label name and confidence for the results textbox.
        resultats.append('%s %d%%' % (label["Name"], label["Confidence"]))

        # Draw a bounding box for each detected instance of this label.
        # Rekognition returns coordinates as ratios of image dimensions.
        for instance in label["Instances"]:
            bounding_box = instance["BoundingBox"]
            left = int(bounding_box["Left"] * image.width)
            top = int(bounding_box["Top"] * image.height)
            width = int(bounding_box["Width"] * image.width)
            height = int(bounding_box["Height"] * image.height)
            draw.rectangle([left, top, left + width, top + height],
                           outline="red", width=2)

    return "\n".join(resultats), image

def _load_s3_image(key):
    """Download *key* from the demo bucket and return it as a PIL image."""
    obj = s3_client.get_object(Bucket=s3BucketName, Key=key)
    return Image.open(BytesIO(obj['Body'].read()))

# Preload the two sample images (the duplicated get_object/read/open
# sequences are factored into _load_s3_image). NOTE(review): these images
# are never referenced by the UI below, which passes the file names in
# `rutes` to gr.Examples instead — kept for parity with the original.
image1 = _load_s3_image("biblioteca.jpg")
image2 = _load_s3_image("skyline_tarragona.jpg")


# Shared results textbox; created here and rendered inside the layout below.
text_output = gr.Textbox(lines=10, label="Results")

# Assemble the Gradio front-end. Widget creation order inside gr.Blocks
# determines the on-screen layout, so the structure is preserved exactly:
# inputs on the left, results on the right.
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown(
        """
        Label Detection
        """)
    with gr.Row():
        with gr.Column():
            # Left column: the image under analysis plus the two ways of
            # providing one (bucket samples or a local upload).
            photo_input = gr.Image(type='filepath')
            with gr.Row():
                sample_picker = gr.Examples(
                    rutes,
                    inputs=[photo_input],
                    fn=procesar,
                    outputs=[text_output, photo_input],
                    cache_examples=True,
                    label="Choose a sample image",
                )
                uploader = gr.UploadButton(
                    "Use your own image",
                    file_types=["image"],
                    file_count="single",
                )
        with gr.Column():
            # Right column: the shared results textbox created earlier.
            text_output.render()
        # Run label detection whenever the user uploads a file.
        uploader.upload(procesar, uploader, [text_output, photo_input])

demo.launch()