# Third-party ML / UI stack: gradio for the web UI, transformers+torch for
# the detector models, pandas for tabulating per-class probabilities.
import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification, pipeline

import os
from numpy import exp
import pandas as pd
from PIL import Image
import urllib.request
import uuid
# Per-process identifier used to namespace temp image files written to disk.
uid=uuid.uuid4()
| |
|
# Hugging Face model ids evaluated by this demo (three AI-vs-real detectors).
models=[
    "Nahrawy/AIorNot",
    "umm-maybe/AI-image-detector",
    "Organika/sdxl-detector",

]

# One image-classification pipeline per model, loaded once at startup.
# Each model is also re-loaded per call in aiornot0/1/2 via Auto* classes.
pipe0 = pipeline("image-classification", f"{models[0]}")
pipe1 = pipeline("image-classification", f"{models[1]}")
pipe2 = pipeline("image-classification", f"{models[2]}")
| | |
| |
|
# Shared accumulator: every classifier appends its {label: score} dict here so
# tot_prob() can average the "Real" probabilities across all six runs.
fin_sum=[]

def image_classifier0(image):
    """Classify *image* with pipe0 and return {"AI": score, "Real": score}.

    Also appends the result dict to the shared ``fin_sum`` accumulator.

    NOTE(review): scores are assigned to the fixed labels by *rank order* of
    the pipeline output (highest score -> "AI"), not by the model's own label
    names — preserved from the original behavior; confirm against the model card.
    """
    labels = ["AI", "Real"]
    outputs = pipe0(image)
    # zip() stops at the shorter sequence, so a pipeline returning more than
    # two entries no longer raises IndexError (the original indexed labels[idx]).
    results = {label: out["score"] for label, out in zip(labels, outputs)}
    fin_sum.append(results)
    return results
def image_classifier1(image):
    """Classify *image* with pipe1 and return {"AI": score, "Real": score}.

    Also appends the result dict to the shared ``fin_sum`` accumulator.

    NOTE(review): scores are assigned to the fixed labels by *rank order* of
    the pipeline output (highest score -> "AI"), not by the model's own label
    names — preserved from the original behavior; confirm against the model card.
    """
    labels = ["AI", "Real"]
    outputs = pipe1(image)
    # zip() stops at the shorter sequence, so a pipeline returning more than
    # two entries no longer raises IndexError (the original indexed labels[idx]).
    results = {label: out["score"] for label, out in zip(labels, outputs)}
    fin_sum.append(results)
    return results
def image_classifier2(image):
    """Classify *image* with pipe2 and return {"AI": score, "Real": score}.

    Also appends the result dict to the shared ``fin_sum`` accumulator.

    NOTE(review): scores are assigned to the fixed labels by *rank order* of
    the pipeline output (highest score -> "AI"), not by the model's own label
    names — preserved from the original behavior; confirm against the model card.
    """
    labels = ["AI", "Real"]
    outputs = pipe2(image)
    # zip() stops at the shorter sequence, so a pipeline returning more than
    # two entries no longer raises IndexError (the original indexed labels[idx]).
    results = {label: out["score"] for label, out in zip(labels, outputs)}
    fin_sum.append(results)
    return results
| |
|
def softmax(vector):
    """Return the softmax of *vector* (numpy array or torch tensor).

    Subtracting the maximum element before exponentiating prevents overflow
    for large logits without changing the result (softmax is shift-invariant);
    the original ``exp(vector)`` produced inf/nan for logits around 1000.
    """
    shifted = exp(vector - vector.max())
    return shifted / shifted.sum()
| |
|
| | |
| |
|
def aiornot0(image):
    """Run models[0] on *image* via the raw feature-extractor/model API.

    Returns (HTML summary update, {"AI": p, "Real": p}) and appends the
    probability dict to the shared ``fin_sum`` accumulator.

    For this model, logit index 0 = AI and index 1 = Real.
    """
    labels = ["AI", "Real"]
    mod = models[0]
    # Loaded on every call — simple but slow; kept to preserve original behavior.
    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    # Renamed from `input` to avoid shadowing the builtin.
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probability = softmax(logits)
    px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
<h1>This image is likely: {label}</h1><br><h3>

Probabilities:<br>
Real: {px[1][0]}<br>
AI: {px[0][0]}"""
    # Iterating the DataFrame yields its column labels (0, 1), matching the
    # positional indices used by the original enumerate() loop.
    results = {labels[idx]: px[idx][0] for idx in px}
    fin_sum.append(results)
    return gr.HTML.update(html_out), results
def aiornot1(image):
    """Run models[1] on *image* via the raw feature-extractor/model API.

    Returns (HTML summary update, {"AI": p, "Real": p}) and appends the
    probability dict to the shared ``fin_sum`` accumulator.

    For this model, logit index 0 = AI and index 1 = Real.
    """
    labels = ["AI", "Real"]
    mod = models[1]
    # Loaded on every call — simple but slow; kept to preserve original behavior.
    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    # Renamed from `input` to avoid shadowing the builtin.
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probability = softmax(logits)
    px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
<h1>This image is likely: {label}</h1><br><h3>

Probabilities:<br>
Real: {px[1][0]}<br>
AI: {px[0][0]}"""
    # Iterating the DataFrame yields its column labels (0, 1), matching the
    # positional indices used by the original enumerate() loop.
    results = {labels[idx]: px[idx][0] for idx in px}
    fin_sum.append(results)
    return gr.HTML.update(html_out), results
def aiornot2(image):
    """Run models[2] on *image* via the raw feature-extractor/model API.

    Returns (HTML summary update, {"Real": p, "AI": p}) and appends the
    probability dict to the shared ``fin_sum`` accumulator.

    Unlike aiornot0/1, this model's label order is reversed:
    logit index 0 = Real and index 1 = AI.
    """
    labels = ["Real", "AI"]
    mod = models[2]
    # Loaded on every call — simple but slow; kept to preserve original behavior.
    feature_extractor = AutoFeatureExtractor.from_pretrained(mod)
    model = AutoModelForImageClassification.from_pretrained(mod)
    # Renamed from `input` to avoid shadowing the builtin.
    inputs = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probability = softmax(logits)
    px = pd.DataFrame(probability.numpy())
    prediction = logits.argmax(-1).item()
    label = labels[prediction]
    html_out = f"""
<h1>This image is likely: {label}</h1><br><h3>

Probabilities:<br>
Real: {px[0][0]}<br>
AI: {px[1][0]}"""
    # Iterating the DataFrame yields its column labels (0, 1), matching the
    # positional indices used by the original enumerate() loop.
    results = {labels[idx]: px[idx][0] for idx in px}
    fin_sum.append(results)
    return gr.HTML.update(html_out), results
| |
|
def load_url(url):
    """Fetch *url* into a session-scoped temp file and open it as a PIL image.

    Returns a ``(image, message)`` pair; on any failure the image is None and
    the message carries the error as HTML.
    """
    tmp_path = f"{uid}tmp_im.png"
    try:
        urllib.request.urlretrieve(f"{url}", tmp_path)
        image = Image.open(tmp_path)
        mes = "Image Loaded"
    except Exception as e:
        image = None
        mes = f"Image not Found<br>Error: {e}"
    return image, mes
| |
|
def tot_prob():
    """Average the "Real" probability across the six accumulated results.

    Returns {"Real": ..., "AI": ...} (string-formatted floats, as the UI
    expects) once all six classifiers have appended to ``fin_sum``; returns
    None while results are still pending.  Each click handler re-runs this
    after every individual classifier, so early calls legitimately see a
    partially filled accumulator.
    """
    # Not all six handlers have reported yet — nothing to show.  The original
    # reached the same outcome by letting fin_sum[k] raise IndexError.
    if len(fin_sum) < 6:
        return None
    try:
        fin_out = sum(d["Real"] for d in fin_sum[:6]) / 6
        return {
            "Real": f"{fin_out}",
            "AI": f"{1 - fin_out}",
        }
    except Exception as e:
        # A result dict may lack a "Real" key (e.g. a single-entry pipeline
        # output); log and show nothing rather than crash the UI callback.
        # The original had a dead `pass` before the print — removed.
        print(e)
        return None
def fin_clear():
    """Empty the shared ``fin_sum`` accumulator ahead of a fresh detection run."""
    del fin_sum[:]
    return None
| |
|
def upd(image):
    """Round-trip *image* through a uniquely named PNG on disk and return the
    freshly loaded copy."""
    print (image)
    tmp_name = f"{uuid.uuid4()}-vid_tmp_proc.png"
    image.save(tmp_name)
    return Image.open(tmp_name)
| |
|
| | |
# ---- Gradio UI (legacy Blocks API: gr.Box / gr.HTML.update) ----
with gr.Blocks() as app:
    gr.Markdown("""<center><h1>AI Image Detector<br><h4>(Test Demo - accuracy varies by model)""")
    with gr.Column():
        inp = gr.Image(type='pil')            # image under test
        in_url=gr.Textbox(label="Image URL")  # optional: fetch image from a URL
        with gr.Row():
            load_btn=gr.Button("Load URL")    # fills `inp` via load_url()
            btn = gr.Button("Detect AI")      # fires all six classifiers below
        mes = gr.HTML("""""")                 # status / error message from load_url
    with gr.Group():
        with gr.Row():
            # Averaged "Real"/"AI" probability from tot_prob(), updated after
            # each individual classifier finishes.
            fin=gr.Label(label="Final Probability")
        with gr.Row():
            # One labelled box per model, linking to its Hugging Face page.
            with gr.Box():
                lab0 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[0]}'>{models[0]}</a></b>""")
                nun0 = gr.HTML("""""")
            with gr.Box():
                lab1 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[1]}'>{models[1]}</a></b>""")
                nun1 = gr.HTML("""""")
            with gr.Box():
                lab2 = gr.HTML(f"""<b>Testing on Model: <a href='https://huggingface.co/{models[2]}'>{models[2]}</a></b>""")
                nun2 = gr.HTML("""""")

        # Row 1: outputs of the raw-model path (aiornot0/1/2): an HTML summary
        # plus a Label with the per-class probabilities.
        with gr.Row():
            with gr.Box():
                n_out0=gr.Label(label="Output")
                outp0 = gr.HTML("""""")
            with gr.Box():
                n_out1=gr.Label(label="Output")
                outp1 = gr.HTML("""""")
            with gr.Box():
                n_out2=gr.Label(label="Output")
                outp2 = gr.HTML("""""")
        # Row 2: outputs of the pipeline path (image_classifier0/1/2).
        with gr.Row():
            with gr.Box():
                n_out3=gr.Label(label="Output")
                outp3 = gr.HTML("""""")
            with gr.Box():
                n_out4=gr.Label(label="Output")
                outp4 = gr.HTML("""""")
            with gr.Box():
                n_out5=gr.Label(label="Output")
                outp5 = gr.HTML("""""")
    # Hidden components; not wired to any event below (appear unused here).
    hid_box=gr.Textbox(visible=False)
    hid_im = gr.Image(type="pil",visible=False)
    def echo(inp):
        # Identity passthrough helper; not referenced by any listener below.
        return inp

    # First click handler clears the shared accumulator so the six classifier
    # handlers (all bound to the same button) start from an empty fin_sum.
    btn.click(fin_clear,None,fin,show_progress=False)
    load_btn.click(load_url,in_url,[inp,mes])

    # Each classifier re-runs tot_prob afterwards; it only yields a value once
    # all six results have been appended.
    btn.click(aiornot0,[inp],[outp0,n_out0]).then(tot_prob,None,fin,show_progress=False)
    btn.click(aiornot1,[inp],[outp1,n_out1]).then(tot_prob,None,fin,show_progress=False)
    btn.click(aiornot2,[inp],[outp2,n_out2]).then(tot_prob,None,fin,show_progress=False)

    btn.click(image_classifier0,[inp],[n_out3]).then(tot_prob,None,fin,show_progress=False)
    btn.click(image_classifier1,[inp],[n_out4]).then(tot_prob,None,fin,show_progress=False)
    btn.click(image_classifier2,[inp],[n_out5]).then(tot_prob,None,fin,show_progress=False)

app.launch(show_api=False,max_threads=24)