Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,124 +1,147 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import spaces
|
| 3 |
from transformers import pipeline
|
| 4 |
-
import fitz # PyMuPDF for PDF
|
| 5 |
|
| 6 |
# -------------------------------
|
| 7 |
-
# Models
|
| 8 |
# -------------------------------
|
|
|
|
| 9 |
text_detector = pipeline("text-classification", model="roberta-base-openai-detector")
|
|
|
|
|
|
|
| 10 |
image_analyzer = pipeline("image-classification", model="microsoft/resnet-50")
|
|
|
|
|
|
|
| 11 |
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device=-1)
|
| 12 |
|
| 13 |
# -------------------------------
|
| 14 |
-
#
|
| 15 |
# -------------------------------
|
| 16 |
@spaces.GPU
|
| 17 |
-
def
|
| 18 |
-
if not user_text.strip():
|
| 19 |
-
return "⚠️ Please provide some text."
|
| 20 |
-
result = text_detector(user_text)
|
| 21 |
-
label = result[0]['label']
|
| 22 |
-
score = round(result[0]['score'] * 100, 2)
|
| 23 |
-
if label.lower() == "fake":
|
| 24 |
-
return f"🌌 **Prophecy:** {score}% AI‑generated 🤖✨"
|
| 25 |
-
return f"☀️ **Prophecy:** {score}% Human‑written 🧑💻"
|
| 26 |
|
| 27 |
-
|
| 28 |
-
def oracle_image(user_img):
|
| 29 |
-
if user_img is None:
|
| 30 |
-
return "⚠️ Please upload an image!"
|
| 31 |
-
result = image_analyzer(user_img)
|
| 32 |
-
label = result[0]['label']
|
| 33 |
-
score = round(result[0]['score'] * 100, 2)
|
| 34 |
-
return f"🖼️ Oracle’s Sight: {score}% resemblance to **{label}**"
|
| 35 |
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
return f"🔊 Transcript: *{transcript}*\n\n🌌 Prophecy: {score}% AI‑generated 🤖"
|
| 46 |
-
return f"🔊 Transcript: *{transcript}*\n\n☀️ Prophecy: {score}% Human‑written 🧑💻"
|
| 47 |
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
|
| 63 |
# -------------------------------
|
| 64 |
-
# UI (
|
| 65 |
# -------------------------------
|
| 66 |
with gr.Blocks(css="""
|
|
|
|
| 67 |
body {
|
| 68 |
background: linear-gradient(135deg, #fbc2eb, #a6c1ee);
|
| 69 |
font-family: 'Trebuchet MS', sans-serif;
|
| 70 |
color: #2c003e;
|
| 71 |
text-align: center;
|
| 72 |
}
|
| 73 |
-
|
|
|
|
|
|
|
| 74 |
font-size: 3em !important;
|
| 75 |
color: #ff0080;
|
| 76 |
text-shadow: 2px 2px 8px #00000055;
|
|
|
|
| 77 |
}
|
| 78 |
-
|
| 79 |
font-size: 1.4em !important;
|
| 80 |
color: #4b0082;
|
|
|
|
| 81 |
}
|
|
|
|
|
|
|
| 82 |
textarea, input, .gr-textbox {
|
| 83 |
font-size: 1.2em !important;
|
| 84 |
}
|
|
|
|
|
|
|
| 85 |
.result-box {
|
| 86 |
-
background: rgba(255,255,255,0.
|
| 87 |
border-radius: 20px;
|
| 88 |
padding: 20px;
|
| 89 |
-
margin-top:
|
| 90 |
font-size: 1.4em;
|
| 91 |
color: #222;
|
| 92 |
box-shadow: 0px 4px 15px rgba(0,0,0,0.3);
|
| 93 |
text-align: left;
|
|
|
|
| 94 |
}
|
| 95 |
""") as demo:
|
| 96 |
|
| 97 |
-
|
| 98 |
-
gr.
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
|
|
|
|
|
|
| 123 |
|
| 124 |
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import spaces
|
| 3 |
from transformers import pipeline
|
| 4 |
+
import fitz # PyMuPDF for PDF reading
|
| 5 |
|
| 6 |
# -------------------------------
|
| 7 |
+
# Load Models
|
| 8 |
# -------------------------------
|
| 9 |
+
# Text AI detector
|
| 10 |
text_detector = pipeline("text-classification", model="roberta-base-openai-detector")
|
| 11 |
+
|
| 12 |
+
# Image classifier (placeholder for "AI detection", but works publicly)
|
| 13 |
image_analyzer = pipeline("image-classification", model="microsoft/resnet-50")
|
| 14 |
+
|
| 15 |
+
# Whisper tiny for audio transcription
|
| 16 |
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device=-1)
|
| 17 |
|
| 18 |
# -------------------------------
|
| 19 |
+
# Oracle Function
|
| 20 |
# -------------------------------
|
| 21 |
@spaces.GPU
|
| 22 |
+
def oracle_prophecy(user_text, user_img, user_audio, user_pdf):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
+
prophecy = ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
|
| 26 |
+
# TEXT check
|
| 27 |
+
if user_text and user_text.strip():
|
| 28 |
+
result = text_detector(user_text)
|
| 29 |
+
label = result[0]['label']
|
| 30 |
+
score = round(result[0]['score'] * 100, 2)
|
| 31 |
+
if label.lower() == "fake":
|
| 32 |
+
prophecy += f"📜 Text: 🌌 **Prophecy:** {score}% AI‑generated 🤖✨\n\n"
|
| 33 |
+
else:
|
| 34 |
+
prophecy += f"📜 Text: ☀️ **Prophecy:** {score}% Human‑written 🧑💻\n\n"
|
|
|
|
|
|
|
| 35 |
|
| 36 |
+
# IMAGE check
|
| 37 |
+
if user_img is not None:
|
| 38 |
+
result = image_analyzer(user_img)
|
| 39 |
+
label = result[0]['label']
|
| 40 |
+
score = round(result[0]['score'] * 100, 2)
|
| 41 |
+
prophecy += f"🖼️ Image: Oracle’s Sight → {score}% match with **{label}** 🌠\n\n"
|
| 42 |
+
|
| 43 |
+
# AUDIO check
|
| 44 |
+
if user_audio is not None:
|
| 45 |
+
transcript = asr_pipe(user_audio)["text"]
|
| 46 |
+
result = text_detector(transcript)
|
| 47 |
+
label = result[0]['label']
|
| 48 |
+
score = round(result[0]['score'] * 100, 2)
|
| 49 |
+
if label.lower() == "fake":
|
| 50 |
+
prophecy += f"🔊 Voice transcript: *{transcript}* — 🌌 {score}% AI‑generated 🤖\n\n"
|
| 51 |
+
else:
|
| 52 |
+
prophecy += f"🔊 Voice transcript: *{transcript}* — ☀️ {score}% Human‑spoken 🧑💻\n\n"
|
| 53 |
+
|
| 54 |
+
# PDF check
|
| 55 |
+
if user_pdf is not None:
|
| 56 |
+
doc = fitz.open(user_pdf)
|
| 57 |
+
text = "".join([page.get_text() for page in doc])
|
| 58 |
+
if text.strip():
|
| 59 |
+
result = text_detector(text[:800]) # only analyze first 800 chars
|
| 60 |
+
label = result[0]['label']
|
| 61 |
+
score = round(result[0]['score'] * 100, 2)
|
| 62 |
+
if label.lower() == "fake":
|
| 63 |
+
prophecy += f"📑 PDF: 🌌 Prophecy: {score}% AI‑generated 🤖\n\n"
|
| 64 |
+
else:
|
| 65 |
+
prophecy += f"📑 PDF: ☀️ Prophecy: {score}% Human‑authored 🧑💻\n\n"
|
| 66 |
+
else:
|
| 67 |
+
prophecy += "📑 PDF: ⚠️ No readable text found.\n\n"
|
| 68 |
+
|
| 69 |
+
if prophecy.strip() == "":
|
| 70 |
+
prophecy = "⚠️ Please provide text, image, audio, or PDF for the Oracle."
|
| 71 |
+
|
| 72 |
+
return f"<div class='result-box'>{prophecy}</div>"
|
| 73 |
|
| 74 |
# -------------------------------
|
| 75 |
+
# Gradio UI (Theme like Awesome‑Pet‑Talk)
|
| 76 |
# -------------------------------
|
| 77 |
with gr.Blocks(css="""
|
| 78 |
+
/* Background Gradient */
|
| 79 |
body {
|
| 80 |
background: linear-gradient(135deg, #fbc2eb, #a6c1ee);
|
| 81 |
font-family: 'Trebuchet MS', sans-serif;
|
| 82 |
color: #2c003e;
|
| 83 |
text-align: center;
|
| 84 |
}
|
| 85 |
+
|
| 86 |
+
/* Title + subtitle */
|
| 87 |
+
#title {
|
| 88 |
font-size: 3em !important;
|
| 89 |
color: #ff0080;
|
| 90 |
text-shadow: 2px 2px 8px #00000055;
|
| 91 |
+
margin-top: 20px;
|
| 92 |
}
|
| 93 |
+
#subtitle {
|
| 94 |
font-size: 1.4em !important;
|
| 95 |
color: #4b0082;
|
| 96 |
+
margin-bottom: 20px;
|
| 97 |
}
|
| 98 |
+
|
| 99 |
+
/* Bigger inputs */
|
| 100 |
textarea, input, .gr-textbox {
|
| 101 |
font-size: 1.2em !important;
|
| 102 |
}
|
| 103 |
+
|
| 104 |
+
/* Results Box */
|
| 105 |
.result-box {
|
| 106 |
+
background: rgba(255,255,255,0.95);
|
| 107 |
border-radius: 20px;
|
| 108 |
padding: 20px;
|
| 109 |
+
margin-top: 25px;
|
| 110 |
font-size: 1.4em;
|
| 111 |
color: #222;
|
| 112 |
box-shadow: 0px 4px 15px rgba(0,0,0,0.3);
|
| 113 |
text-align: left;
|
| 114 |
+
white-space: pre-line;
|
| 115 |
}
|
| 116 |
""") as demo:
|
| 117 |
|
| 118 |
+
# Title
|
| 119 |
+
gr.HTML("<div id='title'>🔮 Oracle of Truth 🔮</div>")
|
| 120 |
+
gr.HTML("<div id='subtitle'>✨ Offer your text, image, voice, or PDF — receive the Oracle's prophecy ✨</div>")
|
| 121 |
+
|
| 122 |
+
gr.Markdown("""
|
| 123 |
+
👋 Welcome, Seeker!
|
| 124 |
+
|
| 125 |
+
Place your offerings before the Oracle:
|
| 126 |
+
- 📜 Paste *text* (essay, blog, email, story)
|
| 127 |
+
- 🖼️ Upload an *image*
|
| 128 |
+
- 🔊 Speak with your *voice*
|
| 129 |
+
- 📑 Upload a *PDF*
|
| 130 |
+
|
| 131 |
+
Then click **Reveal Prophecy ✨** and watch the Oracle’s vision unfold.
|
| 132 |
+
""")
|
| 133 |
+
|
| 134 |
+
with gr.Row():
|
| 135 |
+
with gr.Column():
|
| 136 |
+
txt_in = gr.Textbox(lines=6, label="📝 Text Offering")
|
| 137 |
+
img_in = gr.Image(type="filepath", label="🖼️ Image Offering")
|
| 138 |
+
aud_in = gr.Audio(sources=["microphone"], type="filepath", label="🎤 Voice Offering")
|
| 139 |
+
pdf_in = gr.File(file_types=[".pdf"], label="📄 PDF Offering")
|
| 140 |
+
btn = gr.Button("✨ Reveal Prophecy", variant="primary")
|
| 141 |
+
|
| 142 |
+
with gr.Column():
|
| 143 |
+
result = gr.HTML(label="Oracle's Prophecy")
|
| 144 |
+
|
| 145 |
+
btn.click(oracle_prophecy, inputs=[txt_in, img_in, aud_in, pdf_in], outputs=result)
|
| 146 |
|
| 147 |
demo.launch()
|