shrusti333 committed on
Commit
3134e5e
·
1 Parent(s): e1fd8c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -22
app.py CHANGED
@@ -6,17 +6,15 @@ from sklearn.feature_extraction.text import CountVectorizer
6
  from tensorflow.keras.models import Model
7
  from tensorflow.keras import models
8
  from tensorflow.keras.layers import Input,LSTM,Dense
9
- #translation
10
  input_texts_translation=['again.', 'arrive.', 'bathroom.', 'believe.', 'can.', 'deaf.', 'fine.', 'go.', 'hello.', 'help.', 'home.', 'how.', 'hungury.', 'sorry.', 'call.', 'later.', 'learn.', 'like.', 'live.', 'meet.', 'my.', 'name.', 'nice.', 'no.', 'please.', 'see.', 'share.', 'sign.', 'slow.', 'takecare.', 'talk.', 'thank you.', 'time.', 'understand.', 'we.', 'what.', 'when.', 'where.', 'who.', 'yes.', 'you.', 'you.', 'you.', 'you.', 'name.', 'good.', 'everning.', 'night.', 'how you.', 'you name what.', 'my name.', 'you live where.', 'i.', 'live.', 'you help.', 'yes help me.', 'you understand.', 'i hungry.', 'good everning.', 'good night.', 'pleased.', 'nice meet you.', 'i fine.', 'home arrive when.', 'where bathroom.', 'believe my.', 'call.', 'call.','deaf.', 'i call later.', 'i deaf.', 'what time.', 'i sorry.', 'on my.', 'my.', 'believe.', 'learn.', 'learn.', 'what are you learning.', 'you learn sign where.', 'i go home.', 'i.', 'i.', 'see you later.', 'meet.', 'meet.', 'we meet.', 'like.', 'i like.', 'talk later.', 'later.', 'later.']
11
  input_characters_translation=[' ', '.', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'y']
12
- target_characters_translation=['\t', '\n', ' ', '.', 'ं', 'आ', 'उ', 'क', 'ख', 'ग', 'घ', 'च', 'ज', 'झ', 'ट', 'ड', 'ण', 'त', 'द', 'न', 'प', 'फ', 'ब', 'भ', 'म', 'य', 'र', 'ल', 'ळ', 'व', 'श', 'स', 'ह', 'ा', 'ि', 'ी', 'ु', 'ू', 'े', 'ो', '्']
13
-
14
 
15
  num_en_chars_translation = 24
16
- num_dec_chars_translation = 41
17
 
18
  max_input_length_translation = 22
19
- max_target_length_translation = 37
20
 
21
  cv_translation=CountVectorizer(binary=True,tokenizer=lambda txt: txt.split(),stop_words=None,analyzer='char')
22
 
@@ -26,8 +24,8 @@ enc_outputs_translation, state_h_enc_translation, state_c_enc_translation = mode
26
 
27
  en_model_translation = Model(model_translation.input[0], [state_h_enc_translation, state_c_enc_translation])
28
 
29
- dec_state_input_h_translation = Input(shape=(256,))
30
- dec_state_input_c_translation = Input(shape=(256,))
31
  dec_states_inputs_translation = [dec_state_input_h_translation, dec_state_input_c_translation]
32
 
33
  dec_lstm_translation = model_translation.layers[3]
@@ -41,39 +39,37 @@ dec_outputs_translation = dec_dense_translation(dec_outputs_translation)
41
  dec_model_translation = Model(
42
  [model_translation.input[1]] + dec_states_inputs_translation, [dec_outputs_translation] + dec_states_translation
43
  )
 
44
  def decode_sequence_translation(input_seq):
45
-
46
  reverse_target_char_index_translation = dict(enumerate(target_characters_translation))
47
 
48
  states_value_translation = en_model_translation.predict(input_seq)
49
 
50
-
51
  co_translation=cv_translation.fit(target_characters_translation)
52
  target_seq_translation=np.array([co_translation.transform(list("\t")).toarray().tolist()],dtype="float32")
53
 
54
-
55
  stop_condition = False
56
-
57
  decoded_sentence_translation = ""
58
 
59
  while not stop_condition:
60
-
61
  output_chars_translation, h_translation, c_translation = dec_model_translation.predict([target_seq_translation] + states_value_translation)
62
 
63
-
64
  char_index_translation = np.argmax(output_chars_translation[0, -1, :])
65
  text_char_translation = reverse_target_char_index_translation[char_index_translation]
66
  decoded_sentence_translation += text_char_translation
67
-
68
  if text_char_translation == "\n" or len(decoded_sentence_translation) > max_target_length_translation:
69
  stop_condition = True
70
-
71
  target_seq_translation = np.zeros((1, 1, num_dec_chars_translation))
72
  target_seq_translation[0, 0, char_index_translation] = 1.0
73
  states_value_translation = [h_translation, c_translation]
74
-
75
  return decoded_sentence_translation
76
-
77
  def bagofcharacter_translation(input_t):
78
  cv_translation=CountVectorizer(binary=True,tokenizer=lambda txt:
79
  txt.split(),stop_words=None,analyzer='char')
@@ -87,6 +83,7 @@ def bagofcharacter_translation(input_t):
87
  en_in_data[0].append(pad_en)
88
 
89
  return np.array(en_in_data,dtype="float32")
 
90
  #transliteration
91
 
92
  input_characters_transliteration=[' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u','v', 'w', 'x', 'y', 'z']
@@ -96,7 +93,6 @@ num_dec_chars_transliteration = 68
96
  max_input_length_transliteration = 21
97
  max_target_length_transliteration = 20
98
  cv_transliteration=CountVectorizer(binary=True,tokenizer=lambda txt: txt.split(),stop_words=None,analyzer='char')
99
-
100
  model_transliteration = models.load_model("s2s_transliteration")
101
 
102
  enc_outputs_transliteration, state_h_enc_transliteration, state_c_enc_transliteration = model_transliteration.layers[2].output
@@ -106,7 +102,6 @@ en_model_transliteration = Model(model_transliteration.input[0], [state_h_enc_tr
106
  dec_state_input_h_transliteration = Input(shape=(256,), name="input_6")
107
  dec_state_input_c_transliteration = Input(shape=(256,), name="input_7")
108
  dec_states_inputs_transliteration = [dec_state_input_h_transliteration, dec_state_input_c_transliteration]
109
-
110
  dec_lstm_transliteration = model_transliteration.layers[3]
111
  dec_outputs_transliteration, state_h_dec_transliteration, state_c_dec_transliteration = dec_lstm_transliteration(
112
  model_transliteration.input[1], initial_state=dec_states_inputs_transliteration
@@ -114,7 +109,6 @@ dec_outputs_transliteration, state_h_dec_transliteration, state_c_dec_transliter
114
  dec_states_transliteration = [state_h_dec_transliteration, state_c_dec_transliteration]
115
  dec_dense_transliteration = model_transliteration.layers[4]
116
  dec_outputs_transliteration = dec_dense_transliteration(dec_outputs_transliteration)
117
-
118
  dec_model_transliteration = Model(
119
  [model_transliteration.input[1]] + dec_states_inputs_transliteration, [dec_outputs_transliteration] + dec_states_transliteration
120
  )
@@ -162,6 +156,7 @@ def bagofcharacter_transliteration(input_t):
162
  def translate_to_Konkani(sent):
163
 
164
  input_text = sent.split(',')
 
165
  output_texts=""
166
  for x in input_text:
167
  term=x+"."
@@ -183,5 +178,4 @@ def translate_to_Konkani(sent):
183
  return output_texts
184
 
185
  iface = gr.Interface(fn=translate_to_Konkani, inputs="text", outputs="text")
186
- iface.launch()
187
- #
 
6
  from tensorflow.keras.models import Model
7
  from tensorflow.keras import models
8
  from tensorflow.keras.layers import Input,LSTM,Dense
 
9
  input_texts_translation=['again.', 'arrive.', 'bathroom.', 'believe.', 'can.', 'deaf.', 'fine.', 'go.', 'hello.', 'help.', 'home.', 'how.', 'hungury.', 'sorry.', 'call.', 'later.', 'learn.', 'like.', 'live.', 'meet.', 'my.', 'name.', 'nice.', 'no.', 'please.', 'see.', 'share.', 'sign.', 'slow.', 'takecare.', 'talk.', 'thank you.', 'time.', 'understand.', 'we.', 'what.', 'when.', 'where.', 'who.', 'yes.', 'you.', 'you.', 'you.', 'you.', 'name.', 'good.', 'everning.', 'night.', 'how you.', 'you name what.', 'my name.', 'you live where.', 'i.', 'live.', 'you help.', 'yes help me.', 'you understand.', 'i hungry.', 'good everning.', 'good night.', 'pleased.', 'nice meet you.', 'i fine.', 'home arrive when.', 'where bathroom.', 'believe my.', 'call.', 'call.','deaf.', 'i call later.', 'i deaf.', 'what time.', 'i sorry.', 'on my.', 'my.', 'believe.', 'learn.', 'learn.', 'what are you learning.', 'you learn sign where.', 'i go home.', 'i.', 'i.', 'see you later.', 'meet.', 'meet.', 'we meet.', 'like.', 'i like.', 'talk later.', 'later.', 'later.']
10
  input_characters_translation=[' ', '.', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'y']
11
+ target_characters_translation=['\t', '\n', ' ', 'ं', 'आ', 'उ', 'क', 'ख', 'ग', 'घ', 'च', 'ज', 'झ', 'ट', 'ड', 'ण', 'त', 'द', 'न', 'प', 'फ', 'ब', 'भ', 'म', 'य', 'र', 'ल', 'ळ', 'व', 'श', 'स', 'ह', 'ा', 'ि', 'ी', 'ु', 'ू', 'े', 'ो', '्']
 
12
 
13
  num_en_chars_translation = 24
14
+ num_dec_chars_translation = 40
15
 
16
  max_input_length_translation = 22
17
+ max_target_length_translation = 36
18
 
19
  cv_translation=CountVectorizer(binary=True,tokenizer=lambda txt: txt.split(),stop_words=None,analyzer='char')
20
 
 
24
 
25
  en_model_translation = Model(model_translation.input[0], [state_h_enc_translation, state_c_enc_translation])
26
 
27
+ dec_state_input_h_translation = Input(shape=(256,),name="input_3")
28
+ dec_state_input_c_translation = Input(shape=(256,),name="input_4")
29
  dec_states_inputs_translation = [dec_state_input_h_translation, dec_state_input_c_translation]
30
 
31
  dec_lstm_translation = model_translation.layers[3]
 
39
  dec_model_translation = Model(
40
  [model_translation.input[1]] + dec_states_inputs_translation, [dec_outputs_translation] + dec_states_translation
41
  )
42
+
43
def decode_sequence_translation(input_seq):
    """Greedily decode one translated sentence from an encoded input.

    Runs the encoder on ``input_seq`` to get the initial LSTM states, then
    feeds the decoder one one-hot character at a time — starting from the
    "\t" start token — appending the argmax character at each step until
    the model emits "\n" or the output exceeds
    ``max_target_length_translation``.
    """
    # Map decoder output indices back to target-alphabet characters.
    # NOTE(review): this assumes CountVectorizer's (sorted-vocabulary)
    # feature order matches enumerate() order over
    # target_characters_translation — confirm against training code.
    index_to_char = dict(enumerate(target_characters_translation))

    # Encoder produces the initial [h, c] state pair for the decoder.
    states = en_model_translation.predict(input_seq)

    # One-hot encode the start-of-sequence token "\t" as the first input.
    vectorizer = cv_translation.fit(target_characters_translation)
    target_seq = np.array(
        [vectorizer.transform(list("\t")).toarray().tolist()],
        dtype="float32",
    )

    decoded = ""
    while True:
        output_chars, h, c = dec_model_translation.predict(
            [target_seq] + states
        )

        # Greedy choice: most probable next character at the last timestep.
        char_index = int(np.argmax(output_chars[0, -1, :]))
        next_char = index_to_char[char_index]
        decoded += next_char

        # Stop on end-of-sequence marker or length overrun.
        if next_char == "\n" or len(decoded) > max_target_length_translation:
            break

        # Feed the chosen character back in as the next decoder input.
        target_seq = np.zeros((1, 1, num_dec_chars_translation))
        target_seq[0, 0, char_index] = 1.0
        states = [h, c]

    return decoded
 
73
  def bagofcharacter_translation(input_t):
74
  cv_translation=CountVectorizer(binary=True,tokenizer=lambda txt:
75
  txt.split(),stop_words=None,analyzer='char')
 
83
  en_in_data[0].append(pad_en)
84
 
85
  return np.array(en_in_data,dtype="float32")
86
+
87
  #transliteration
88
 
89
  input_characters_transliteration=[' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u','v', 'w', 'x', 'y', 'z']
 
93
  max_input_length_transliteration = 21
94
  max_target_length_transliteration = 20
95
  cv_transliteration=CountVectorizer(binary=True,tokenizer=lambda txt: txt.split(),stop_words=None,analyzer='char')
 
96
  model_transliteration = models.load_model("s2s_transliteration")
97
 
98
  enc_outputs_transliteration, state_h_enc_transliteration, state_c_enc_transliteration = model_transliteration.layers[2].output
 
102
  dec_state_input_h_transliteration = Input(shape=(256,), name="input_6")
103
  dec_state_input_c_transliteration = Input(shape=(256,), name="input_7")
104
  dec_states_inputs_transliteration = [dec_state_input_h_transliteration, dec_state_input_c_transliteration]
 
105
  dec_lstm_transliteration = model_transliteration.layers[3]
106
  dec_outputs_transliteration, state_h_dec_transliteration, state_c_dec_transliteration = dec_lstm_transliteration(
107
  model_transliteration.input[1], initial_state=dec_states_inputs_transliteration
 
109
  dec_states_transliteration = [state_h_dec_transliteration, state_c_dec_transliteration]
110
  dec_dense_transliteration = model_transliteration.layers[4]
111
  dec_outputs_transliteration = dec_dense_transliteration(dec_outputs_transliteration)
 
112
  dec_model_transliteration = Model(
113
  [model_transliteration.input[1]] + dec_states_inputs_transliteration, [dec_outputs_transliteration] + dec_states_transliteration
114
  )
 
156
  def translate_to_Konkani(sent):
157
 
158
  input_text = sent.split(',')
159
+
160
  output_texts=""
161
  for x in input_text:
162
  term=x+"."
 
178
  return output_texts
179
 
180
  iface = gr.Interface(fn=translate_to_Konkani, inputs="text", outputs="text")
181
+ iface.launch()