Snehhaa23 committed
Commit cf64866 · 1 Parent(s): 29510bb

Update README.md

Files changed (1):
  README.md +73 -0
README.md CHANGED
@@ -1,3 +1,76 @@
 ---
+tags:
+- image-to-text
+- image-captioning
 license: apache-2.0
+widget:
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/burger.jpg
+  example_title: burger
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg
+  example_title: Football Match
+- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg
+  example_title: Airport
 ---
+
+# nlpconnect/vit-gpt2-image-captioning
+
+This is an image-captioning model trained by @ydshieh in [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/image-captioning); it is the PyTorch version of [this checkpoint](https://huggingface.co/ydshieh/vit-gpt2-coco-en-ckpts).
+
+
+# Sample running code
+
+```python
+from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
+import torch
+from PIL import Image
+
+# Load the model, image processor, and tokenizer from the Hub.
+model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+
+# Run on GPU when available.
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
+# Caption generation settings: up to 16 tokens, beam search with 4 beams.
+max_length = 16
+num_beams = 4
+gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
+
+
+def predict_step(image_paths):
+    # Open each image and convert to RGB (the ViT encoder expects 3 channels).
+    images = []
+    for image_path in image_paths:
+        i_image = Image.open(image_path)
+        if i_image.mode != "RGB":
+            i_image = i_image.convert(mode="RGB")
+        images.append(i_image)
+
+    # Preprocess the batch and move the pixel tensors to the model's device.
+    pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values
+    pixel_values = pixel_values.to(device)
+
+    # Generate token ids and decode them into caption strings.
+    output_ids = model.generate(pixel_values, **gen_kwargs)
+    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
+    return [pred.strip() for pred in preds]
+
+
+predict_step(['doctor.e16ba4e4.jpg'])  # ['a woman in a hospital bed with a woman in a hospital bed']
+```
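+
+The helper above takes local file paths. Below is a minimal sketch of the same steps for a remote image, assuming the objects from the block above (`model`, `feature_extractor`, `tokenizer`, `device`, `gen_kwargs`) are already in scope and that the `requests` library is installed; the URL is the "burger" widget sample from the front matter, chosen here only for illustration:
+
+```python
+import requests
+
+# Assumed example input: the "burger" widget image from the card's front matter.
+url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/burger.jpg"
+image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+
+# Same preprocessing and generation as predict_step, for a single image.
+pixel_values = feature_extractor(images=[image], return_tensors="pt").pixel_values.to(device)
+with torch.no_grad():  # inference only, so skip gradient tracking
+    output_ids = model.generate(pixel_values, **gen_kwargs)
+print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip())
+```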
+
+# Sample running code using the transformers pipeline
+
+```python
+from transformers import pipeline
+
+image_to_text = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
+
+image_to_text("https://ankur3107.github.io/assets/images/image-captioning-example.png")
+# [{'generated_text': 'a soccer game with a player jumping to catch the ball '}]
+```
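+
+The pipeline call also accepts generation parameters. A hedged sketch, assuming the image-to-text pipeline forwards `generate_kwargs` to `model.generate` (as in recent transformers releases), reproducing the beam-search settings used earlier:
+
+```python
+# generate_kwargs is forwarded to model.generate by the pipeline.
+image_to_text(
+    "https://ankur3107.github.io/assets/images/image-captioning-example.png",
+    generate_kwargs={"max_length": 16, "num_beams": 4},
+)
+```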