Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,83 +1,82 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import spaces
|
| 3 |
-
import
|
| 4 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
| 5 |
|
| 6 |
# -------------------------------
|
| 7 |
-
# Load
|
| 8 |
# -------------------------------
|
| 9 |
-
model_id = "Qwen/Qwen2.5-7B-Instruct"
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
model_id,
|
| 14 |
-
torch_dtype=torch.float16,
|
| 15 |
-
device_map="auto",
|
| 16 |
-
trust_remote_code=True
|
| 17 |
-
)
|
| 18 |
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
model=model,
|
| 22 |
-
tokenizer=tokenizer,
|
| 23 |
-
device_map="auto"
|
| 24 |
-
)
|
| 25 |
|
| 26 |
# -------------------------------
|
| 27 |
-
#
|
| 28 |
# -------------------------------
|
| 29 |
-
@spaces.GPU
def life_genie(user_input: str) -> str:
    """Answer a user's question in the voice of a witty magical genie.

    Returns the raw generated text from the Qwen pipeline (which
    includes the prompt prefix, as `generated_text` is returned as-is).
    """
    # Guard clause: empty / whitespace-only questions get a friendly nudge.
    if not user_input.strip():
        return "๐ง Please ask me something magical!"

    genie_prompt = (
        "You are a witty magical genie. Answer creatively.\n"
        f"User: {user_input}\nGenie:"
    )
    # Sampled decoding for varied, creative replies.
    completion = qwen_pipe(genie_prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
    return completion[0]["generated_text"]
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
|
| 42 |
-
# -------------------------------
|
| 43 |
-
# Voice Wizard: speech โ Genie reply
|
| 44 |
-
# -------------------------------
|
| 45 |
@spaces.GPU
|
| 46 |
-
def
|
| 47 |
-
if
|
| 48 |
-
return "
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
output = qwen_pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
|
| 54 |
-
return f"โจ Voice Transcription: {transcript}\n\n๐ฎ Wizard Replies:\n{output[0]['generated_text']}"
|
| 55 |
|
| 56 |
# -------------------------------
|
| 57 |
# Gradio UI
|
| 58 |
# -------------------------------
|
| 59 |
with gr.Blocks(css="""
|
| 60 |
-
body {
|
| 61 |
-
background: linear-gradient(135deg, #
|
| 62 |
font-family: Trebuchet MS, sans-serif;
|
|
|
|
| 63 |
}
|
|
|
|
| 64 |
""") as demo:
|
| 65 |
|
| 66 |
-
gr.HTML("<h1 style='text-align:center;
|
| 67 |
-
gr.Markdown("
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
btn_chat.click(life_genie, inputs=txt_in, outputs=txt_out)
|
| 75 |
|
| 76 |
-
#
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
|
|
|
|
|
|
|
|
|
| 82 |
|
| 83 |
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import spaces
|
| 3 |
+
from transformers import pipeline
|
|
|
|
| 4 |
|
| 5 |
# -------------------------------
# Load lightweight AI detection models
# -------------------------------

# Text AI detector: sequence classifier scoring text as human- vs
# AI-written (downstream code checks for a "fake" label — see detect_text).
text_detector = pipeline("text-classification", model="roberta-base-openai-detector")

# Image AI detector: image classifier whose top label is reported verbatim
# to the user (presumably real vs AI-generated — TODO confirm label set).
image_detector = pipeline("image-classification", model="oripress/ai-image-detector")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
# -------------------------------
|
| 16 |
+
# Functions for Oracle of Truth
|
| 17 |
# -------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
|
| 19 |
+
@spaces.GPU
def detect_text(user_text: str):
    """Divine whether *user_text* was written by a human or an AI.

    Returns a themed verdict string carrying the detector's confidence
    as a percentage.
    """
    # Empty / whitespace-only input: prompt the user instead of calling the model.
    if not user_text.strip():
        return "๐ง Please provide some text for the Oracle to examine."

    prediction = text_detector(user_text)[0]
    verdict = prediction['label']
    confidence = round(prediction['score'] * 100, 2)

    # A "fake" label means the detector thinks the text is AI-generated.
    if verdict.lower() == "fake":
        return f"๐ Oracleโs Vision: {confidence}% chance this was conjured by AI magic ๐คโจ"
    return f"โ๏ธ Oracleโs Vision: {confidence}% chance this was written by a mortal hand ๐งโ๐ป"
|
| 30 |
|
|
|
|
|
|
|
|
|
|
| 31 |
@spaces.GPU
def detect_image(user_img):
    """Divine whether an uploaded image is real or AI-generated.

    Returns a themed verdict string with the detector's top label and
    its confidence as a percentage.
    """
    # No file uploaded yet: ask for one rather than invoking the model.
    if user_img is None:
        return "๐ผ๏ธ Please upload an image for the Oracle to divine."

    top_prediction = image_detector(user_img)[0]
    label = top_prediction['label']
    confidence = round(top_prediction['score'] * 100, 2)
    return f"๐ Oracleโs Gaze: {confidence}% chance this is {label}."
|
|
|
|
|
|
|
| 39 |
|
| 40 |
# -------------------------------
# Gradio UI
# -------------------------------
with gr.Blocks(css="""
body {
    background: linear-gradient(135deg, #0f2027, #203a43, #2c5364);
    font-family: Trebuchet MS, sans-serif;
    color: white;
}
h1, h2, h3 { color: #ffcc00; }
""") as demo:

    gr.HTML("<h1 style='text-align:center;'>๐ฎ Oracle of Truth ๐ฎ</h1>")
    gr.Markdown("""
    Welcome to the **Chamber of Truth** โจ.
    Offer your **text** or **image** to the Oracle,
    and it will divine whether it was born of **human hands** โ๏ธ
    or conjured by **AI sorcery** ๐ค๐.
    """)

    # Text tab: free-form text in, Oracle verdict out.
    with gr.Tab("๐ Text Divination"):
        txt_in = gr.Textbox(label="Enter Text", lines=5, placeholder="Paste some text here...")
        btn_txt = gr.Button("๐ Ask the Oracle")
        txt_out = gr.Textbox(label="Oracle's Verdict", lines=4)
        btn_txt.click(detect_text, inputs=txt_in, outputs=txt_out)

    # Image tab: uploaded image (as a filepath) in, Oracle verdict out.
    with gr.Tab("๐ผ๏ธ Image Divination"):
        img_in = gr.Image(type="filepath", label="Upload an Image")
        btn_img = gr.Button("๐ Ask the Oracle")
        img_out = gr.Textbox(label="Oracle's Verdict", lines=2)
        btn_img.click(detect_image, inputs=img_in, outputs=img_out)

    # Preloaded examples feeding the text tab's input box.
    gr.Examples(
        examples=[
            ["Once upon a time in a galaxy far away, a hero rose against the odds."],
            ["In this paper, we analyze the performance of Transformer models on natural language inference tasks."]
        ],
        inputs=txt_in,
        label="Try sample texts"
    )

demo.launch()
|