Update README.md
Browse files
README.md
CHANGED
|
@@ -67,7 +67,7 @@ import re
|
|
| 67 |
model = from_pretrained_keras("vrclc/transliteration")
|
| 68 |
```
|
| 69 |
|
| 70 |
-
### Define Tokens:
|
| 71 |
|
| 72 |
```
|
| 73 |
source_tokens = list('abcdefghijklmnopqrstuvwxyz ')
|
|
@@ -91,6 +91,9 @@ target_tokens = [
|
|
| 91 |
]
|
| 92 |
target_tokenizer = Tokenizer(char_level=True, filters='')
|
| 93 |
target_tokenizer.fit_on_texts(target_tokens)
|
|
|
|
|
|
|
|
|
|
| 94 |
```
|
| 95 |
|
| 96 |
### Wrapper script to split input sentences into words before passing them to the model
|
|
|
|
| 67 |
model = from_pretrained_keras("vrclc/transliteration")
|
| 68 |
```
|
| 69 |
|
| 70 |
+
### Define Tokens and Input Sequence Length:
|
| 71 |
|
| 72 |
```
|
| 73 |
source_tokens = list('abcdefghijklmnopqrstuvwxyz ')
|
|
|
|
| 91 |
]
|
| 92 |
target_tokenizer = Tokenizer(char_level=True, filters='')
|
| 93 |
target_tokenizer.fit_on_texts(target_tokens)
|
| 94 |
+
|
| 95 |
+
max_seq_length = model.get_layer("encoder_input").input_shape[0][1]
|
| 96 |
+
|
| 97 |
```
|
| 98 |
|
| 99 |
### Wrapper script to split input sentences into words before passing them to the model
|