Files changed (1):
  1. app.py (+90, -3)
app.py CHANGED
@@ -1,8 +1,95 @@
1
  import gradio as gr
2
 
3
def greet(name):
    """Return a friendly greeting for *name*.

    Uses an f-string instead of chained ``+`` concatenation (same output).
    """
    return f"Hello {name}!!"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
# Wire the greeter into a minimal text-in/text-out Gradio demo and serve it.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
 
1
  import gradio as gr
2
 
3
# ---- Inference-time models rebuilt from the trained seq2seq translator ----
# NOTE(review): `models`, `Model` and `Input` are used here but never imported
# in this paste — presumably `from tensorflow.keras ...`; confirm in the full file.

# Load the trained end-to-end translation model.
model_translation = models.load_model("shrusti333/translator/model_translation")

# Encoder half: layer 2 is the encoder LSTM; discard its sequence output and
# keep only the hidden/cell states to seed the decoder.
enc_outputs_translation, state_h_enc_translation, state_c_enc_translation = model_translation.layers[2].output
en_model_translation = Model(model_translation.input[0], [state_h_enc_translation, state_c_enc_translation])

# Decoder half: fresh Input tensors for hidden/cell state (latent dim 256)
# so the decoder can be stepped one character at a time.
dec_state_input_h_translation = Input(shape=(256,))
dec_state_input_c_translation = Input(shape=(256,))
dec_states_inputs_translation = [dec_state_input_h_translation, dec_state_input_c_translation]

# Layer 3 is the decoder LSTM; run it from the injected states.
dec_lstm_translation = model_translation.layers[3]
dec_outputs_translation, state_h_dec_translation, state_c_dec_translation = dec_lstm_translation(
    model_translation.input[1], initial_state=dec_states_inputs_translation
)
dec_states_translation = [state_h_dec_translation, state_c_dec_translation]

# Layer 4 is the dense output head over the target alphabet.
dec_dense_translation = model_translation.layers[4]
dec_outputs_translation = dec_dense_translation(dec_outputs_translation)

# Stand-alone decoder: (char input + states in) -> (char probabilities + states out).
dec_model_translation = Model(
    [model_translation.input[1]] + dec_states_inputs_translation,
    [dec_outputs_translation] + dec_states_translation,
)
29
 
30
+
31
def decode_sequence_translation(input_seq):
    """Greedily decode one translated sentence from an encoded input sequence.

    input_seq: one-hot encoded source sentence, as produced by
    bagofcharacter_translation. Returns the decoded target string, which
    ends with "\n" or is truncated once it exceeds
    max_target_length_translation characters.
    """
    # index -> character for the target alphabet.
    reverse_target_char_index_translation = dict(enumerate(target_characters_translation))

    # Encoder states seed the decoder.
    states_value_translation = en_model_translation.predict(input_seq)

    # Seed the decoder with a one-hot "\t" start token.
    # FIX(review): the original built this via a module-level `cv_translation`
    # CountVectorizer that is never defined at module scope (it is local to
    # bagofcharacter_translation), so this line raised NameError. We build the
    # same one-hot row directly, mirroring the per-step update at the bottom of
    # the loop — assumes target_characters_translation is the sorted alphabet
    # the vectorizer would have produced; TODO confirm.
    target_seq_translation = np.zeros(
        (1, 1, len(target_characters_translation)), dtype="float32"
    )
    target_seq_translation[0, 0, list(target_characters_translation).index("\t")] = 1.0

    stop_condition = False
    # Every predicted character is appended to the decoded sentence.
    decoded_sentence_translation = ""

    while not stop_condition:
        # Predict the next-character distribution; keep the returned hidden and
        # cell states for the following step.
        output_chars_translation, h_translation, c_translation = dec_model_translation.predict(
            [target_seq_translation] + states_value_translation
        )

        # Greedy pick: take the most probable character.
        char_index_translation = np.argmax(output_chars_translation[0, -1, :])
        text_char_translation = reverse_target_char_index_translation[char_index_translation]
        decoded_sentence_translation += text_char_translation

        # Exit condition: stop character "\n", or the length cap was exceeded.
        if text_char_translation == "\n" or len(decoded_sentence_translation) > max_target_length_translation:
            stop_condition = True

        # Feed the chosen character back as the next decoder input.
        target_seq_translation = np.zeros((1, 1, num_dec_chars_translation))
        target_seq_translation[0, 0, char_index_translation] = 1.0
        states_value_translation = [h_translation, c_translation]

    return decoded_sentence_translation
66
+
67
def bagofcharacter_translation(input_t):
    """One-hot encode *input_t* character by character and pad it to the
    fixed input length.

    Returns a float32 array of shape
    (1, max_input_length_translation, len(input_characters_translation)).
    """
    vectorizer = CountVectorizer(
        binary=True,
        tokenizer=lambda txt: txt.split(),
        stop_words=None,
        analyzer='char',
    )
    fitted = vectorizer.fit(input_characters_translation)

    # One row per character of the input text.
    encoded = fitted.transform(list(input_t)).toarray().tolist()

    # Pad with a row that one-hots index 0 (same padding row as the original);
    # an empty range makes this a no-op when the text is already long enough.
    pad_row = [1] + [0] * (len(input_characters_translation) - 1)
    for _ in range(max_input_length_translation - len(input_t)):
        encoded.append(pad_row)

    return np.array([encoded], dtype="float32")
80
+
81
+
82
def translate_to_Konkani(sent):
    """Translate a comma-separated list of phrases to Konkani.

    Each phrase that (with "." appended) appears in the known corpus
    input_texts_translation is run through the seq2seq decoder; unknown
    phrases pass through unchanged. Pieces are joined with a leading
    space each, exactly as the original accumulator produced.
    """
    pieces = []
    for phrase in sent.split(','):
        # Membership is checked on the raw phrase + "." while the encoded
        # text is lowercased — preserved as-is from the original.
        if phrase + "." in input_texts_translation:
            encoded = bagofcharacter_translation(phrase.lower() + ".")
            phrase = decode_sequence_translation(encoded)
        pieces.append(phrase)
    return "".join(" " + piece for piece in pieces)
92
+
93
# Expose the translator as a simple text-in/text-out Gradio app and serve it.
iface = gr.Interface(fn=translate_to_Konkani, inputs="text", outputs="text")
iface.launch()