schrilax committed on
Commit
d7afb92
·
1 Parent(s): 43750fc

initial commit

Browse files
.DS_Store ADDED
Binary file (6.15 kB). View file
 
1.png ADDED
2.png ADDED
3.png ADDED
app.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import numpy as np  # BUG FIX: `np` was used below but never imported
import torch
from transformers import AutoFeatureExtractor, SegformerForSemanticSegmentation

# Feature extractor and fine-tuned SegFormer weights are stored alongside
# the app in `saved_model_files/` (see committed config/preprocessor JSON).
extractor = AutoFeatureExtractor.from_pretrained('saved_model_files')

# Class index -> human-readable class name for the 5 segmentation classes.
labels = {0: 'road/sidewalk/path', 1: 'human', 2: 'vehicles', 3: 'other objects', 4: 'nature and background'}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# BUG FIX: the original used `enumerate(labels)` over the dict, which yields
# (position, key) pairs, so id2label mapped "0" -> 0 instead of
# "0" -> 'road/sidewalk/path' (this corruption is visible in the committed
# config.json). Iterating `.items()` yields the actual class-name strings.
model = SegformerForSemanticSegmentation.from_pretrained(
    'saved_model_files',
    num_labels=len(labels),
    id2label={str(i): name for i, name in labels.items()},
    label2id={name: str(i) for i, name in labels.items()},
    ignore_mismatched_sizes=True)
model.eval()
model.to(device)

# Display color (RGB) for each class id, row-indexed by class; hoisted out of
# `classify` so the array is built once, not per request.
COLORS = np.array([[128, 0, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 0, 0]])


def classify(im):
    """Segment an outdoor-scene image and return a color-coded class map.

    Args:
        im: input image as provided by the Gradio 'image' component
            (assumed RGB array/PIL image — the extractor handles conversion).

    Returns:
        An (H, W, 3) uint-valued numpy array where each pixel carries the
        display color of its predicted class. Note H, W are the model's
        output resolution, not necessarily the input's.
    """
    inputs = extractor(images=im, return_tensors='pt').to(device)
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model(**inputs)
    # logits: (1, num_labels, H, W) -> per-pixel argmax over the class axis.
    classes = outputs.logits[0].cpu().numpy().argmax(axis=0)
    return COLORS[classes]


# BUG FIX: the original built a bare Interface first and immediately
# overwrote it with this fully-configured one; the dead construction is gone.
interface = gr.Interface(fn=classify,
                         inputs='image',
                         outputs='image',
                         examples=['1.png', '2.png', '3.png'],
                         title='Image Segmentation App',
                         description='Perform segmentation on pictures of outdoor scenes',
                         flagging_dir='flagged_examples/')

interface.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ transformers
3
+ torch
saved_model_files/.DS_Store ADDED
Binary file (6.15 kB). View file
 
saved_model_files/config.json ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "nvidia/segformer-b0-finetuned-ade-512-512",
3
+ "architectures": [
4
+ "SegformerForSemanticSegmentation"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "classifier_dropout_prob": 0.1,
8
+ "decoder_hidden_size": 256,
9
+ "depths": [
10
+ 2,
11
+ 2,
12
+ 2,
13
+ 2
14
+ ],
15
+ "downsampling_rates": [
16
+ 1,
17
+ 4,
18
+ 8,
19
+ 16
20
+ ],
21
+ "drop_path_rate": 0.1,
22
+ "hidden_act": "gelu",
23
+ "hidden_dropout_prob": 0.0,
24
+ "hidden_sizes": [
25
+ 32,
26
+ 64,
27
+ 160,
28
+ 256
29
+ ],
30
+ "id2label": {
31
+ "0": 0,
32
+ "1": 1,
33
+ "2": 2,
34
+ "3": 3,
35
+ "4": 4
36
+ },
37
+ "image_size": 224,
38
+ "initializer_range": 0.02,
39
+ "label2id": {
40
+ "0": "0",
41
+ "1": "1",
42
+ "2": "2",
43
+ "3": "3",
44
+ "4": "4"
45
+ },
46
+ "layer_norm_eps": 1e-06,
47
+ "mlp_ratios": [
48
+ 4,
49
+ 4,
50
+ 4,
51
+ 4
52
+ ],
53
+ "model_type": "segformer",
54
+ "num_attention_heads": [
55
+ 1,
56
+ 2,
57
+ 5,
58
+ 8
59
+ ],
60
+ "num_channels": 3,
61
+ "num_encoder_blocks": 4,
62
+ "patch_sizes": [
63
+ 7,
64
+ 3,
65
+ 3,
66
+ 3
67
+ ],
68
+ "reshape_last_stage": true,
69
+ "semantic_loss_ignore_index": 255,
70
+ "sr_ratios": [
71
+ 8,
72
+ 4,
73
+ 2,
74
+ 1
75
+ ],
76
+ "strides": [
77
+ 4,
78
+ 2,
79
+ 2,
80
+ 2
81
+ ],
82
+ "torch_dtype": "float32",
83
+ "transformers_version": "4.22.2"
84
+ }
saved_model_files/preprocessor_config.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "do_resize": true,
4
+ "feature_extractor_type": "SegformerFeatureExtractor",
5
+ "image_mean": [
6
+ 0.485,
7
+ 0.456,
8
+ 0.406
9
+ ],
10
+ "image_std": [
11
+ 0.229,
12
+ 0.224,
13
+ 0.225
14
+ ],
15
+ "reduce_labels": true,
16
+ "resample": 2,
17
+ "size": 512
18
+ }
saved_model_files/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7ab4a2411f4ab1d7e7de03fe9cc1a342a251fd60b1b7a9e87a6b4b672cdfe6a
3
+ size 14933025