"""Streamlit app: classify an uploaded turquoise photo as real or fake.

Uses a ViT image-classification model fine-tuned for turquoise
authenticity, hosted on the Hugging Face Hub.
"""

import requests  # NOTE(review): unused here — kept in case other tooling relies on it
import torch
import streamlit as st
from PIL import Image
from transformers import AutoModelForImageClassification, AutoImageProcessor

st.set_page_config(page_title="Turquoise Classification")

MAX_FILE_SIZE = 5 * 1024 * 1024  # 5MB upload cap enforced below

st.write("## Real or fake turquoise?")
st.write(
    ":dog: Try uploading an image of a turquoise to check :grin:"
)

repo_name = "dzhao114/vit-base-patch16-224-finetuned-turquoise"


@st.cache_resource
def _load_model_assets(repo: str):
    """Load processor and model once per server process.

    Streamlit re-executes this script on every widget interaction;
    without caching, the model would be re-downloaded/re-built each
    rerun. ``st.cache_resource`` memoizes the heavyweight objects.
    """
    processor = AutoImageProcessor.from_pretrained(repo)
    classifier = AutoModelForImageClassification.from_pretrained(repo)
    classifier.eval()  # inference-only: disable dropout/batch-norm training behavior
    return processor, classifier


image_processor, model = _load_model_assets(repo_name)


def inference(upload) -> None:
    """Classify an uploaded image file and render the prediction.

    Args:
        upload: A file-like object (Streamlit ``UploadedFile``) openable
            by PIL.

    Side effects: displays the image and the predicted label via Streamlit.
    """
    image = Image.open(upload)
    st.image(image)
    # Model expects RGB; uploads may be RGBA/grayscale, so convert first.
    encoding = image_processor(image.convert("RGB"), return_tensors="pt")
    with torch.no_grad():  # no gradients needed for inference
        outputs = model(**encoding)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    st.write("Predicted:", model.config.id2label[predicted_class_idx])


my_upload = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])

if my_upload is not None:
    if my_upload.size > MAX_FILE_SIZE:
        st.error("The uploaded file is too large. Please upload an image smaller than 5MB.")
    else:
        inference(my_upload)