peshk1n committed on
Commit
f525983
·
verified ·
1 Parent(s): c61736d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -27
app.py CHANGED
@@ -457,7 +457,7 @@ optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
457
  coca_model.compile(optimizer)
458
 
459
  save_dir = "models/"
460
- model_name = "coca_v1"
461
  coca_model.load_weights(f"{save_dir}/{model_name}.weights.h5")
462
 
463
 
@@ -584,44 +584,90 @@ def has_repeated_ngrams(seq, n=2):
584
  return len(ngrams) != len(set(ngrams))
585
 
586
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
587
  def generate_caption_coca(image):
588
  img_processed = load_and_preprocess_image(image)
 
 
589
  _, cap_features = coca_model.encoder.predict(img_processed, verbose=0)
590
 
591
- beams = [([word_index[start_token]], 0.0)]
 
 
 
592
 
593
- for _ in range(max_length):
594
- new_beams = []
595
- for seq, log_prob in beams:
596
- if seq[-1] == word_index[end_token]:
597
- new_beams.append((seq, log_prob))
598
- continue
599
 
600
- text_input = np.zeros((1, max_length), dtype=np.int32)
601
- text_input[0, :len(seq)] = seq
 
 
 
602
 
603
- predictions = coca_model.decoder.predict([text_input, cap_features], verbose=0)
604
- _, logits = predictions
605
- logits = logits[0, len(seq)-1, :] / temperature
606
- probs = np.exp(logits - np.max(logits))
607
- probs /= probs.sum()
608
 
609
- top_k = np.argpartition(probs, -beam_width)[-beam_width:]
610
- for token in top_k:
611
- new_seq = seq + [token]
612
- new_log_prob = (log_prob * len(seq) + np.log(probs[token])) / (len(seq) + 1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
613
 
614
- if has_repeated_ngrams(new_seq, n=2):
615
- new_log_prob -= 0.5
616
 
617
- new_beams.append((new_seq, new_log_prob))
618
 
619
- beams = sorted(new_beams, key=lambda x: x[1], reverse=True)[:beam_width]
620
- if all(beam[0][-1] == word_index[end_token] for beam in beams):
621
- break
622
 
623
- best_seq = max(beams, key=lambda x: x[1])[0]
624
- return " ".join(index_word[i] for i in best_seq if i not in {word_index[start_token], word_index[end_token]})
625
 
626
 
627
  def generate_caption_rnn(image):
 
# Finalize the model with the optimizer defined above, then restore the
# trained weights from disk.
coca_model.compile(optimizer)

# Weights file layout: "<save_dir>/<model_name>.weights.h5".
save_dir = "models/"
model_name = "coca"
coca_model.load_weights(f"{save_dir}/{model_name}.weights.h5")
 
463
 
 
584
  return len(ngrams) != len(set(ngrams))
585
 
586
 
587
# Per-channel normalization constants applied after scaling to [0, 1].
image_mean = [0.5, 0.5, 0.5]
image_std = [0.5, 0.5, 0.5]


def load_and_preprocess_image(img):
    """Prepare a decoded image array for the CoCa encoder.

    Resizes to (img_size, img_size), scales pixels to [0, 1],
    standardizes with image_mean/image_std, moves channels first,
    and prepends a batch axis.
    """
    # JPEG decoding is intentionally bypassed; callers supply an
    # already-decoded array.
    # (previously: img = tf.image.decode_jpeg(img, channels=3))
    tensor = tf.convert_to_tensor(img)
    tensor = tf.image.resize(tensor, (img_size, img_size))
    tensor = tensor / 255.0

    tensor = (tensor - image_mean) / image_std
    # NOTE(review): channels-first (C, H, W) layout — confirm the encoder
    # actually expects this ordering.
    tensor = tf.transpose(tensor, perm=[2, 0, 1])

    return np.expand_dims(tensor, axis=0)
600
+
601
+
602
def generate_caption_coca(image):
    """Greedy-decode a caption for *image* with the CoCa model.

    Encodes the image once, then feeds the decoder one step at a time,
    always taking the argmax token, stopping at the end token or when
    the token buffer is full. Start/end tokens are stripped from the
    returned caption.
    """
    img_processed = load_and_preprocess_image(image)
    # NOTE(review): load_and_preprocess_image already prepends a batch
    # axis, so this second expand_dims yields a rank-5 input — confirm
    # the encoder really expects that extra dimension.
    img_processed = np.expand_dims(img_processed, axis=0)

    _, cap_features = coca_model.encoder.predict(img_processed, verbose=0)

    start_id = word_index[start_token]
    end_id = word_index[end_token]
    tokens = [start_id]
    # NOTE(review): np.zeros defaults to float64 for these token ids —
    # verify the decoder accepts/casts this dtype.
    decoder_input = np.zeros((1, sentence_length - 1))

    for step in range(sentence_length - 1):
        decoder_input[0, :len(tokens)] = tokens

        _, logits = coca_model.decoder.predict(
            [decoder_input, cap_features],
            verbose=0,
        )
        predicted = np.argmax(logits[0, step, :])

        tokens.append(predicted)
        # Stop on the end token, or once the buffer cannot hold more.
        if predicted == end_id or len(tokens) >= (sentence_length - 1):
            break

    specials = {word_index[start_token], word_index[end_token]}
    return " ".join(index_word[t] for t in tokens if t not in specials)
632
+
633
+ # def generate_caption_coca(image):
634
+ # img_processed = load_and_preprocess_image(image)
635
+ # _, cap_features = coca_model.encoder.predict(img_processed, verbose=0)
636
+
637
+ # beams = [([word_index[start_token]], 0.0)]
638
+
639
+ # for _ in range(max_length):
640
+ # new_beams = []
641
+ # for seq, log_prob in beams:
642
+ # if seq[-1] == word_index[end_token]:
643
+ # new_beams.append((seq, log_prob))
644
+ # continue
645
+
646
+ # text_input = np.zeros((1, max_length), dtype=np.int32)
647
+ # text_input[0, :len(seq)] = seq
648
+
649
+ # predictions = coca_model.decoder.predict([text_input, cap_features], verbose=0)
650
+ # _, logits = predictions
651
+ # logits = logits[0, len(seq)-1, :] / temperature
652
+ # probs = np.exp(logits - np.max(logits))
653
+ # probs /= probs.sum()
654
+
655
+ # top_k = np.argpartition(probs, -beam_width)[-beam_width:]
656
+ # for token in top_k:
657
+ # new_seq = seq + [token]
658
+ # new_log_prob = (log_prob * len(seq) + np.log(probs[token])) / (len(seq) + 1)
659
 
660
+ # if has_repeated_ngrams(new_seq, n=2):
661
+ # new_log_prob -= 0.5
662
 
663
+ # new_beams.append((new_seq, new_log_prob))
664
 
665
+ # beams = sorted(new_beams, key=lambda x: x[1], reverse=True)[:beam_width]
666
+ # if all(beam[0][-1] == word_index[end_token] for beam in beams):
667
+ # break
668
 
669
+ # best_seq = max(beams, key=lambda x: x[1])[0]
670
+ # return " ".join(index_word[i] for i in best_seq if i not in {word_index[start_token], word_index[end_token]})
671
 
672
 
673
  def generate_caption_rnn(image):