hamzenium committed on
Commit
c8c46cf
·
verified ·
1 Parent(s): da3a45e

Upload 6 files

Browse files
Files changed (6) hide show
  1. config.json +32 -0
  2. main.py +39 -0
  3. makefile +27 -0
  4. model.safetensors +3 -0
  5. preprocessor_config.json +23 -0
  6. requirements.txt +9 -0
config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "ViTForImageClassification"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.0,
6
+ "encoder_stride": 16,
7
+ "hidden_act": "gelu",
8
+ "hidden_dropout_prob": 0.0,
9
+ "hidden_size": 768,
10
+ "id2label": {
11
+ "0": "real",
12
+ "1": "fake"
13
+ },
14
+ "image_size": 224,
15
+ "initializer_range": 0.02,
16
+ "intermediate_size": 3072,
17
+ "label2id": {
18
+ "fake": 1,
19
+ "real": 0
20
+ },
21
+ "layer_norm_eps": 1e-12,
22
+ "model_type": "vit",
23
+ "num_attention_heads": 12,
24
+ "num_channels": 3,
25
+ "num_hidden_layers": 12,
26
+ "patch_size": 16,
27
+ "pooler_act": "tanh",
28
+ "pooler_output_size": 768,
29
+ "qkv_bias": true,
30
+ "torch_dtype": "float32",
31
+ "transformers_version": "4.50.3"
32
+ }
main.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from PIL import Image
3
+ import torch
4
+ from transformers import AutoImageProcessor, ViTForImageClassification
5
+ from fastapi import FastAPI, File, UploadFile
6
+ from io import BytesIO
7
+ import shutil
8
+
9
+ model_path = "./best_model"
10
+
11
+ app = FastAPI()
12
+
13
+ processor = AutoImageProcessor.from_pretrained(
14
+ model_path,
15
+ local_files_only=True,
16
+ use_fast=True
17
+ )
18
+
19
+ model = ViTForImageClassification.from_pretrained(
20
+ model_path,
21
+ local_files_only=True,
22
+ id2label={"0": "real", "1": "fake"},
23
+ label2id={"real": 0, "fake": 1}
24
+ )
25
+
def predict_image(image: Image.Image) -> str:
    """Classify a PIL image as "real" or "fake" with the global ViT model.

    Args:
        image: an RGB PIL image (any size; the processor resizes it).

    Returns:
        The predicted label string ("real" or "fake").
    """
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model(**inputs)
    pred_id = torch.argmax(outputs.logits, dim=-1).item()
    # transformers' PretrainedConfig normalizes id2label keys to int, so
    # the original lookup with str(pred_id) raises KeyError; accept
    # either key type to stay robust across transformers versions.
    id2label = model.config.id2label
    return id2label.get(pred_id, id2label.get(str(pred_id)))
33
+
@app.post("/predict/")
async def upload_file(file: UploadFile = File(...)):
    """Accept an uploaded image file and return its real/fake prediction."""
    raw_bytes = await file.read()
    # Decode the upload into an RGB PIL image before classification.
    pil_image = Image.open(BytesIO(raw_bytes)).convert("RGB")
    label = predict_image(pil_image)
    return {"filename": file.filename, "prediction": label}
makefile ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
VENV_NAME = venv
PYTHON = $(VENV_NAME)/bin/python

# Declare every non-file target so make never skips one because a file
# of the same name exists. (The original .PHONY listed a nonexistent
# "all" target and omitted build/test.)
.PHONY: build setup install train test infer clean

build: setup install

setup:
	python3 -m venv $(VENV_NAME)

install:
	$(PYTHON) -m pip install --upgrade pip
	$(PYTHON) -m pip install -r requirements.txt

train:
	$(PYTHON) train.py

test:
	$(PYTHON) test.py

infer:
	# Run uvicorn from the venv so the app uses the installed deps
	# (a bare `uvicorn` would pick up whatever is on the host PATH).
	$(PYTHON) -m uvicorn main:app --reload

clean:
	rm -rf $(VENV_NAME)
	rm -rf __pycache__
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11dc93bbdaa2b9c05e3566d57cfe9d3821fdf01743c4212636b8fa2a90c723d9
3
+ size 343223968
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_convert_rgb": null,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.5,
8
+ 0.5,
9
+ 0.5
10
+ ],
11
+ "image_processor_type": "ViTImageProcessor",
12
+ "image_std": [
13
+ 0.5,
14
+ 0.5,
15
+ 0.5
16
+ ],
17
+ "resample": 2,
18
+ "rescale_factor": 0.00392156862745098,
19
+ "size": {
20
+ "height": 224,
21
+ "width": 224
22
+ }
23
+ }
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ torchvision
3
+ transformers
4
+ uvicorn
5
+ fastapi
6
+ pillow
7
+ numpy
8
+ scikit-learn
9
+ scipy