Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,19 +1,17 @@
|
|
| 1 |
import streamlit as st
import tensorflow as tf
from tensorflow import keras
import numpy as np
from PIL import Image
import io
import cv2
import openai
import os

# Set up OpenAI API key.
# SECURITY: an API key was previously hard-coded here. Any key committed to a
# repository is compromised and must be revoked. Read it from the environment
# instead; deployment sets OPENAI_API_KEY (e.g. as a Space secret).
openai.api_key = os.environ.get("OPENAI_API_KEY", "")

# Load pre-trained models:
# - Haar cascade face detector shipped with OpenCV.
# - Keras CNN emotion classifier (48x48 grayscale input — see analyze_emotion).
face_detection_model = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
emotion_model = keras.models.load_model('path_to_emotion_model.h5')  # You'll need to train or download this model
|
| 17 |
|
| 18 |
def detect_face(image):
|
| 19 |
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
|
@@ -31,20 +29,26 @@ def analyze_symmetry(face):
|
|
| 31 |
symmetry_score = 1 - (np.sum(diff) / (255 * height * (width//2)))
|
| 32 |
return symmetry_score
|
| 33 |
|
| 34 |
-
def analyze_emotion(face):
    """Classify the dominant facial emotion in a face crop.

    Parameters:
        face: face image as a numpy array — assumed BGR uint8 (OpenCV
            convention, since it is converted with COLOR_BGR2GRAY); confirm
            with the caller.

    Returns:
        One of the seven label strings below, chosen by the highest-scoring
        output of the module-level ``emotion_model``.
    """
    labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
    # The model expects a normalized batch of one 48x48 single-channel image.
    resized = cv2.resize(face, (48, 48))
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    batch = np.reshape(gray, [1, 48, 48, 1]) / 255.0
    scores = emotion_model.predict(batch)
    return labels[np.argmax(scores)]
|
| 41 |
-
|
| 42 |
def analyze_aesthetic_quality(image):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
prompt = f"Analyze the aesthetic quality of this image, focusing on lighting, composition, and overall visual appeal. Provide a brief description and a score out of 10."
|
| 44 |
response = openai.ChatCompletion.create(
|
| 45 |
model="gpt-4-vision-preview",
|
| 46 |
messages=[
|
| 47 |
-
{
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
]
|
| 49 |
)
|
| 50 |
return response.choices[0].message.content
|
|
@@ -59,21 +63,16 @@ def compare_photos(image1, image2):
|
|
| 59 |
symmetry1 = analyze_symmetry(face1)
|
| 60 |
symmetry2 = analyze_symmetry(face2)
|
| 61 |
|
| 62 |
-
emotion1 = analyze_emotion(face1)
|
| 63 |
-
emotion2 = analyze_emotion(face2)
|
| 64 |
-
|
| 65 |
aesthetic1 = analyze_aesthetic_quality(image1)
|
| 66 |
aesthetic2 = analyze_aesthetic_quality(image2)
|
| 67 |
|
| 68 |
return {
|
| 69 |
"Image 1": {
|
| 70 |
"Symmetry": symmetry1,
|
| 71 |
-
"Emotion": emotion1,
|
| 72 |
"Aesthetic Quality": aesthetic1
|
| 73 |
},
|
| 74 |
"Image 2": {
|
| 75 |
"Symmetry": symmetry2,
|
| 76 |
-
"Emotion": emotion2,
|
| 77 |
"Aesthetic Quality": aesthetic2
|
| 78 |
}
|
| 79 |
}
|
|
@@ -104,12 +103,10 @@ def main():
|
|
| 104 |
with col1:
|
| 105 |
st.write("Image 1")
|
| 106 |
st.write(f"Symmetry: {results['Image 1']['Symmetry']:.2f}")
|
| 107 |
-
st.write(f"Emotion: {results['Image 1']['Emotion']}")
|
| 108 |
st.write(f"Aesthetic Quality: {results['Image 1']['Aesthetic Quality']}")
|
| 109 |
with col2:
|
| 110 |
st.write("Image 2")
|
| 111 |
st.write(f"Symmetry: {results['Image 2']['Symmetry']:.2f}")
|
| 112 |
-
st.write(f"Emotion: {results['Image 2']['Emotion']}")
|
| 113 |
st.write(f"Aesthetic Quality: {results['Image 2']['Aesthetic Quality']}")
|
| 114 |
|
| 115 |
st.write("Note: Beauty is subjective, and this tool is for entertainment purposes only.")
|
|
|
|
| 1 |
import streamlit as st
import tensorflow as tf
import numpy as np
from PIL import Image
import cv2
import openai
import os
import io
import base64  # used by analyze_aesthetic_quality; was missing, causing NameError

# Set up OpenAI API key.
# SECURITY: an API key was previously hard-coded here. Any key committed to a
# repository is compromised and must be revoked. Read it from the environment
# instead; deployment sets OPENAI_API_KEY (e.g. as a Space secret).
openai.api_key = os.environ.get("OPENAI_API_KEY", "")

# Load pre-trained model for face detection (Haar cascade bundled with OpenCV).
face_detection_model = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
|
|
|
|
| 15 |
|
| 16 |
def detect_face(image):
|
| 17 |
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
|
|
|
| 29 |
symmetry_score = 1 - (np.sum(diff) / (255 * height * (width//2)))
|
| 30 |
return symmetry_score
|
| 31 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
def analyze_aesthetic_quality(image):
    """Ask an OpenAI vision model to rate the aesthetic quality of an image.

    Parameters:
        image: numpy array image passed to ``PIL.Image.fromarray`` — PIL
            assumes RGB channel order, while upstream code in this file uses
            OpenCV (BGR); NOTE(review): colors may be swapped here — confirm.

    Returns:
        The model's free-text response (description plus a score out of 10).
    """
    # Fix: base64 is used below but is not imported at module level in this
    # version of the file, which made this function raise NameError at call
    # time. Import locally so the fix is self-contained.
    import base64

    # Convert the image to a byte stream (in-memory PNG).
    img_byte_arr = io.BytesIO()
    Image.fromarray(image).save(img_byte_arr, format='PNG')
    img_byte_arr = img_byte_arr.getvalue()

    # Encode the image for the data-URL payload.
    encoded_image = base64.b64encode(img_byte_arr).decode('ascii')

    # Plain literal — the original used an f-string with no placeholders.
    prompt = "Analyze the aesthetic quality of this image, focusing on lighting, composition, and overall visual appeal. Provide a brief description and a score out of 10."
    # NOTE(review): openai.ChatCompletion is the legacy (<1.0) SDK surface and
    # "gpt-4-vision-preview" is a deprecated model name — confirm the pinned
    # openai package version before deploying.
    response = openai.ChatCompletion.create(
        model="gpt-4-vision-preview",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{encoded_image}"}}
                ]
            }
        ]
    )
    return response.choices[0].message.content
|
|
|
|
| 63 |
symmetry1 = analyze_symmetry(face1)
|
| 64 |
symmetry2 = analyze_symmetry(face2)
|
| 65 |
|
|
|
|
|
|
|
|
|
|
| 66 |
aesthetic1 = analyze_aesthetic_quality(image1)
|
| 67 |
aesthetic2 = analyze_aesthetic_quality(image2)
|
| 68 |
|
| 69 |
return {
|
| 70 |
"Image 1": {
|
| 71 |
"Symmetry": symmetry1,
|
|
|
|
| 72 |
"Aesthetic Quality": aesthetic1
|
| 73 |
},
|
| 74 |
"Image 2": {
|
| 75 |
"Symmetry": symmetry2,
|
|
|
|
| 76 |
"Aesthetic Quality": aesthetic2
|
| 77 |
}
|
| 78 |
}
|
|
|
|
| 103 |
with col1:
|
| 104 |
st.write("Image 1")
|
| 105 |
st.write(f"Symmetry: {results['Image 1']['Symmetry']:.2f}")
|
|
|
|
| 106 |
st.write(f"Aesthetic Quality: {results['Image 1']['Aesthetic Quality']}")
|
| 107 |
with col2:
|
| 108 |
st.write("Image 2")
|
| 109 |
st.write(f"Symmetry: {results['Image 2']['Symmetry']:.2f}")
|
|
|
|
| 110 |
st.write(f"Aesthetic Quality: {results['Image 2']['Aesthetic Quality']}")
|
| 111 |
|
| 112 |
st.write("Note: Beauty is subjective, and this tool is for entertainment purposes only.")
|