bishalbose294 committed on
Commit
b61b120
·
1 Parent(s): e12e90f
Files changed (1) hide show
  1. imageCaptionGPT.py +2 -2
imageCaptionGPT.py CHANGED
@@ -1,5 +1,5 @@
1
  import pickle, os, io, re, gc
2
- from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
3
  from keras.models import Model, load_model
4
  from keras.applications.inception_v3 import InceptionV3, preprocess_input
5
  import numpy as np
@@ -40,7 +40,7 @@ device='cpu'
40
  encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
41
  decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
42
  model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
43
- feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint, cache_dir=hf_model)
44
  tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint, cache_dir=hf_model)
45
  model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint, cache_dir=hf_model)
46
 
 
1
  import pickle, os, io, re, gc
2
+ from transformers import AutoTokenizer, VisionEncoderDecoderModel, ViTImageProcessor
3
  from keras.models import Model, load_model
4
  from keras.applications.inception_v3 import InceptionV3, preprocess_input
5
  import numpy as np
 
40
  encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
41
  decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
42
  model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
43
+ feature_extractor = ViTImageProcessor.from_pretrained(encoder_checkpoint, cache_dir=hf_model)
44
  tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint, cache_dir=hf_model)
45
  model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint, cache_dir=hf_model)
46