sharu committed on
Commit
029f7dd
·
verified ·
1 Parent(s): 9365701

Upload 9 files

Browse files
config.json ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "SiglipForImageClassification"
4
+ ],
5
+ "id2label": {
6
+ "0": "Fake",
7
+ "1": "Real"
8
+ },
9
+ "initializer_factor": 1.0,
10
+ "label2id": {
11
+ "Fake": 0,
12
+ "Real": 1
13
+ },
14
+ "model_type": "siglip",
15
+ "problem_type": "single_label_classification",
16
+ "text_config": {
17
+ "attention_dropout": 0.0,
18
+ "hidden_act": "gelu_pytorch_tanh",
19
+ "hidden_size": 768,
20
+ "intermediate_size": 3072,
21
+ "layer_norm_eps": 1e-06,
22
+ "max_position_embeddings": 64,
23
+ "model_type": "siglip_text_model",
24
+ "num_attention_heads": 12,
25
+ "num_hidden_layers": 12,
26
+ "projection_size": 768,
27
+ "torch_dtype": "float32",
28
+ "vocab_size": 256000
29
+ },
30
+ "torch_dtype": "float32",
31
+ "transformers_version": "4.50.0",
32
+ "vision_config": {
33
+ "attention_dropout": 0.0,
34
+ "hidden_act": "gelu_pytorch_tanh",
35
+ "hidden_size": 768,
36
+ "image_size": 224,
37
+ "intermediate_size": 3072,
38
+ "layer_norm_eps": 1e-06,
39
+ "model_type": "siglip_vision_model",
40
+ "num_attention_heads": 12,
41
+ "num_channels": 3,
42
+ "num_hidden_layers": 12,
43
+ "patch_size": 16,
44
+ "torch_dtype": "float32"
45
+ }
46
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7db42002064e2722f38d3ad5c42441a9a4cfc5da8f6dbe2ff92ab9055a2d09eb
3
+ size 371567992
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fe1ea2babd281d0538c4ce855dfca04962c8ddec3e93bb7aeb40973c8172762
3
+ size 686555770
preprocessor_config.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_convert_rgb": null,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.5,
8
+ 0.5,
9
+ 0.5
10
+ ],
11
+ "image_processor_type": "SiglipImageProcessor",
12
+ "image_std": [
13
+ 0.5,
14
+ 0.5,
15
+ 0.5
16
+ ],
17
+ "processor_class": "SiglipProcessor",
18
+ "resample": 2,
19
+ "rescale_factor": 0.00392156862745098,
20
+ "size": {
21
+ "height": 224,
22
+ "width": 224
23
+ }
24
+ }
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88acce3cfb3e8de5af4f824e7ecfcd91bf0344fb9c417988b4d078e64bcaa34a
3
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7a07120c819f89245e4dc484312c0ddc4f1ffbcf3788890e1ed108cac2bcf04
3
+ size 1064
testing.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import AutoImageProcessor, SiglipForImageClassification
3
+ from PIL import Image
4
+ import torch
5
+
6
+ # -------------------------------------------------------
7
+ # Load model once at startup
8
+ # -------------------------------------------------------
9
+ model_path = r"C:\Users\Sharulatha\Documents\hackathon\deepfake-detector-model-v1\checkpoint-625"
10
+
11
+ st.title("🕵️ Deepfake Detector")
12
+
13
+ @st.cache_resource
14
+ def load_model():
15
+ model = SiglipForImageClassification.from_pretrained(model_path)
16
+ processor = AutoImageProcessor.from_pretrained(model_path)
17
+ return model, processor
18
+
19
+ model, processor = load_model()
20
+
21
+ # Label mapping
22
+ id2label = {"0": "fake", "1": "real"}
23
+
24
+ # -------------------------------------------------------
25
+ # File uploader
26
+ # -------------------------------------------------------
27
+ uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
28
+
29
+ if uploaded_file is not None:
30
+ image = Image.open(uploaded_file).convert("RGB")
31
+ st.image(image, caption="Uploaded Image", use_column_width=True)
32
+
33
+ # Preprocess and predict
34
+ inputs = processor(images=image, return_tensors="pt")
35
+
36
+ with torch.no_grad():
37
+ outputs = model(**inputs)
38
+ probs = torch.nn.functional.softmax(outputs.logits, dim=1).squeeze().tolist()
39
+
40
+ prediction = {id2label[str(i)]: round(probs[i], 3) for i in range(len(probs))}
41
+
42
+ st.subheader("Prediction")
43
+ st.json(prediction)
trainer_state.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 625,
3
+ "best_metric": 0.1934153288602829,
4
+ "best_model_checkpoint": "siglip2-finetune-full/checkpoint-625",
5
+ "epoch": 1.0,
6
+ "eval_steps": 500,
7
+ "global_step": 625,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.8,
14
+ "grad_norm": 16.568904876708984,
15
+ "learning_rate": 6.249999999999999e-07,
16
+ "loss": 0.3408,
17
+ "step": 500
18
+ },
19
+ {
20
+ "epoch": 1.0,
21
+ "eval_accuracy": 0.9169958497924896,
22
+ "eval_loss": 0.1934153288602829,
23
+ "eval_model_preparation_time": 0.0027,
24
+ "eval_runtime": 479.9201,
25
+ "eval_samples_per_second": 41.672,
26
+ "eval_steps_per_second": 5.209,
27
+ "step": 625
28
+ }
29
+ ],
30
+ "logging_steps": 500,
31
+ "max_steps": 1250,
32
+ "num_input_tokens_seen": 0,
33
+ "num_train_epochs": 2,
34
+ "save_steps": 500,
35
+ "stateful_callbacks": {
36
+ "TrainerControl": {
37
+ "args": {
38
+ "should_epoch_stop": false,
39
+ "should_evaluate": false,
40
+ "should_log": false,
41
+ "should_save": true,
42
+ "should_training_stop": false
43
+ },
44
+ "attributes": {}
45
+ }
46
+ },
47
+ "total_flos": 1.6750261609304187e+18,
48
+ "train_batch_size": 32,
49
+ "trial_name": null,
50
+ "trial_params": null
51
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8fe67ed70f3fccd73e38b764ae5cbe4db6218d9f6b3fa5d2f439dc363351786
3
+ size 5304