Spaces:
updating sidebar
app.py CHANGED
@@ -2,7 +2,7 @@ import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 # from huggingface_hub import snapshot_download
 
-page = st.sidebar.selectbox("Model ", ["
+page = st.sidebar.selectbox("Model ", ["Finetuned on News data", "Pretrained GPT2"])
 
 def load_model(model_name):
     with st.spinner('Waiting for the model to load.....'):
@@ -15,21 +15,16 @@ def load_model(model_name):
 seed = st.sidebar.text_input('Starting text', 'ආයුබෝවන්')
 seq_num = st.sidebar.number_input('Number of sentences to generate ', 1, 20, 5)
 max_len = st.sidebar.number_input('Length of the sentence ', 5, 300, 100)
+gen_bt = st.sidebar.button('Generate')
 
-if page == "Finetuned on News data":
-
-    st.
-    st.markdown('This model has been finetuned Sinhala-gpt2 model with 6000 news articles(~12MB)')
-
-    # seed = st.text_input('Starting text', 'ආයුබෝවන්')
-    # seq_num = st.number_input('Number of sentences to generate ', 1, 20, 5)
-    # max_len = st.number_input('Length of the sentence ', 5, 300, 100)
+if page == 'Pretrained GPT2':
+    st.title('Sinhala Text generation with GPT2')
+    st.markdown('A simple demo using Sinhala-gpt2 model trained during hf-flax week')
 
-
-    model, tokenizer = load_model('keshan/sinhala-gpt2-newswire')
+    model, tokenizer = load_model('flax-community/Sinhala-gpt2')
 
 
-    if
+    if gen_bt:
         try:
             with st.spinner('Generating...'):
                 generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
@@ -38,14 +33,14 @@ if page == "Finetuned on News data":
         except Exception as e:
             st.exception(f'Exception: {e}')
 else:
-
-    st.
-
-
-    model, tokenizer = load_model('
+
+    st.title('Sinhala Text generation with Finetuned GPT2')
+    st.markdown('This model has been finetuned Sinhala-gpt2 model with 6000 news articles(~12MB)')
+
+    model, tokenizer = load_model('keshan/sinhala-gpt2-newswire')
 
 
-    if
+    if gen_bt:
         try:
             with st.spinner('Generating...'):
                 generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
@@ -53,7 +48,7 @@ else:
             st.write(seqs)
         except Exception as e:
             st.exception(f'Exception: {e}')
-
+
 
 st.markdown('____________')
 st.markdown('by Keshan with Flax Community')
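
For context, after this commit app.py drives both models from one sidebar: a model selector, the seed text and length inputs, and a single Generate button that gates generation in both branches. The diff hides the body of load_model and the actual generator call, so the sketch below is only a minimal reading of the resulting flow, not the verbatim file: the from_pretrained calls, the do_sample flag, and the folding of the two per-branch generation blocks into one are assumptions.

    # Minimal sketch of app.py after this commit. The load_model body and the
    # generation call are not visible in the diff; those parts are assumptions.
    import streamlit as st
    from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

    page = st.sidebar.selectbox("Model ", ["Finetuned on News data", "Pretrained GPT2"])

    def load_model(model_name):
        with st.spinner('Waiting for the model to load.....'):
            # Assumed body (hidden in the diff): load tokenizer and model by name.
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = AutoModelForCausalLM.from_pretrained(model_name)
        return model, tokenizer

    seed = st.sidebar.text_input('Starting text', 'ආයුබෝවන්')
    seq_num = st.sidebar.number_input('Number of sentences to generate ', 1, 20, 5)
    max_len = st.sidebar.number_input('Length of the sentence ', 5, 300, 100)
    gen_bt = st.sidebar.button('Generate')

    if page == 'Pretrained GPT2':
        st.title('Sinhala Text generation with GPT2')
        model, tokenizer = load_model('flax-community/Sinhala-gpt2')
    else:
        st.title('Sinhala Text generation with Finetuned GPT2')
        model, tokenizer = load_model('keshan/sinhala-gpt2-newswire')

    # The commit keeps a separate `if gen_bt:` block in each branch; they are
    # folded into one here only to keep the sketch short.
    if gen_bt:
        try:
            with st.spinner('Generating...'):
                generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
                # Assumed call: sampling is required when num_return_sequences > 1.
                seqs = generator(seed, max_length=max_len, do_sample=True,
                                 num_return_sequences=seq_num)
            st.write(seqs)
        except Exception as e:
            st.exception(e)

    st.markdown('____________')
    st.markdown('by Keshan with Flax Community')

Declaring gen_bt once in the sidebar means both branches respond to the same button click, which is why the commit adds the button at the top instead of creating one inside each branch.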