Krishnan Kumar committed on
Commit
d024405
·
1 Parent(s): 158940f

Add app file

Browse files
Files changed (1) hide show
  1. app.py +104 -0
app.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # https://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # [START aiplatform_predict_custom_trained_model_sample]
16
+ from typing import Dict, List, Union
17
+
18
+ from google.cloud import aiplatform
19
+ from google.protobuf import json_format
20
+ from google.protobuf.struct_pb2 import Value
21
import os

# NOTE(review): hardcoded service-account key path checked in next to the app —
# consider supplying GOOGLE_APPLICATION_CREDENTIALS via the environment instead
# of committing a key file. TODO confirm deployment expectations.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = './key-int.json'
23
def predict_custom_trained_model_sample(
    project: str,
    endpoint_id: str,
    instances: Union[Dict, List[Dict]],
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    """Send a prediction request to a deployed Vertex AI endpoint.

    Args:
        project: GCP project number (or ID) that owns the endpoint.
        endpoint_id: Numeric ID of the deployed Vertex AI endpoint.
        instances: A single instance dict, or a list of instance dicts.
            Each instance must conform to the deployed model's prediction
            input schema.
        location: Region the endpoint is deployed in.
        api_endpoint: Regional Vertex AI API host; must match `location`.

    Returns:
        The first prediction from the response — a google.protobuf.Value
        representation of the model's output.

    Raises:
        ValueError: If `instances` is an empty list.
    """
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for
    # multiple requests.
    client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)
    # Normalize to a list; isinstance (not an exact type comparison) so
    # list subclasses are handled correctly.
    if not isinstance(instances, list):
        instances = [instances]
    # Fail early with a clear message instead of an IndexError on the
    # `predictions[0]` return below.
    if not instances:
        raise ValueError("At least one instance is required.")
    instances = [
        json_format.ParseDict(instance_dict, Value()) for instance_dict in instances
    ]
    parameters = json_format.ParseDict({}, Value())
    endpoint = client.endpoint_path(
        project=project, location=location, endpoint=endpoint_id
    )
    response = client.predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    # The predictions are a google.protobuf.Value representation of the
    # model's predictions.
    predictions = response.predictions
    for prediction in predictions:
        print(" prediction:", dict(prediction))
    return predictions[0]

# [END aiplatform_predict_custom_trained_model_sample]
61
+
62
+
63
+ import base64
64
+ import os
65
+ from datetime import datetime
66
+ from io import BytesIO
67
+ import numpy as np
68
+
69
+ import requests
70
+ from google.cloud import aiplatform
71
+ from PIL import Image
72
+
73
def download_image(url):
    """Fetch an image over HTTP and return it as an RGB PIL Image.

    Args:
        url: HTTP(S) URL of the image to download.

    Returns:
        A PIL.Image.Image converted to RGB mode.

    Raises:
        requests.HTTPError: If the server responds with an error status.
        requests.Timeout: If the request exceeds the timeout.
    """
    # Timeout prevents the app from hanging forever on a dead host; the
    # original call had none.
    response = requests.get(url, timeout=30)
    # Surface HTTP errors directly rather than letting Image.open fail on
    # an HTML error page with a confusing message.
    response.raise_for_status()
    return Image.open(BytesIO(response.content)).convert("RGB")
77
def image_to_base64(image, format="JPEG"):
    """Encode an image array as a base64 string.

    Accepts either a float array with values in [0, 1] or a uint8 array in
    [0, 255] — gradio's image component yields uint8 arrays by default.

    Args:
        image: numpy array (H, W[, C]) holding the image pixels.
        format: Image format passed to PIL (default "JPEG").

    Returns:
        Base64-encoded string of the serialized image bytes.
    """
    arr = np.asarray(image)
    # Bug fix: the original unconditionally multiplied by 255, which wraps
    # around (modular overflow) for uint8 input and corrupts the image.
    # Only rescale float-valued images.
    if arr.dtype != np.uint8:
        arr = (arr * 255).astype(np.uint8)
    image_pil = Image.fromarray(arr)

    buffer = BytesIO()
    image_pil.save(buffer, format=format)
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
87
+ import gradio as gr
88
+
89
def predict(image, text):
    """Gradio handler: send the image and text prompt to the Vertex AI endpoint.

    Args:
        image: Image array from the gradio image input.
        text: Text prompt from the gradio text input.

    Returns:
        The model's answer string, or an explanatory message when the
        prompt is missing or the response is malformed.
    """
    # Truthiness check covers both the empty string and None (the original
    # len(text) == 0 raised TypeError on None).
    if not text:
        return "No prompt provided"
    response = predict_custom_trained_model_sample(
        instances=[{"image": image_to_base64(image), "text": text}],
        project="1018963165306",
        endpoint_id="5638185676072550400",
        location="us-central1",
    )
    result = dict(response)
    print(result)
    # Guard against a malformed response instead of raising a bare KeyError
    # into the gradio UI.
    if "answer" not in result:
        return "Model response did not contain an answer"
    return result["answer"]
100
+
101
+
102
+
103
# Simple Gradio UI: an image upload plus a text prompt in, answer text out.
# share=True exposes a public gradio.live tunnel in addition to localhost.
demo = gr.Interface(fn=predict, inputs=["image", "text"], outputs="text")
demo.launch(share=True)