Spaces:
Runtime error
Runtime error
| import streamlit as st | |
| import time | |
| from transformers import pipeline | |
| import torch | |
# Page title for the text-generation demo.
st.markdown('## Text-generation gpt-ya from Breadlicker45')
# NOTE(review): these two bare assignments were never read anywhere in the
# script -- they look like pipeline() keyword arguments pasted at module
# level by mistake.  Kept as named flags for reference; pass them into
# pipeline(...) explicitly if remote code / auth is actually required.
trust_remote_code = True
use_auth_token = True
def get_model(model_name=None):
    """Build a greedy text-generation pipeline.

    Parameters
    ----------
    model_name : str | None
        Hub id of the model to load.  Defaults to the module-level
        ``model`` chosen via the radio widget, so the existing
        ``get_model()`` call site keeps working unchanged.

    Returns
    -------
    transformers.Pipeline
        A text-generation pipeline with sampling disabled
        (``do_sample=False``), i.e. deterministic decoding.
    """
    return pipeline('text-generation',
                    model=model if model_name is None else model_name,
                    do_sample=False)
col1, col2 = st.columns([2, 1])
with st.sidebar:
    st.markdown('## Model Parameters')
    max_length = st.slider('Max text length', 0, 500, 80)
    num_beams = st.slider('N° tree beams search', 1, 15, 2)
    # BUG FIX: the original passed key={'True': True, 'False': False}.
    # ``key`` is a widget-identity *string*, not a value mapping, so the
    # selectbox returned the raw string 'True'/'False', which was then fed
    # to the generator as ``early_stopping``.  Convert the selection to a
    # real bool here instead.
    early_stopping = st.selectbox(
        'Early stopping text generation',
        ('True', 'False'), index=0) == 'True'
    no_ngram_repeat = st.slider('Max repetition limit', 1, 5, 2)
with col1:
    # Free-form prompt; defaults to a small example question.
    prompt = st.text_area('Your prompt here',
                          '''What is the meaning of life?''')
with col2:
    select_model = st.radio(
        "Select the model to use:",
        ('gpt-ya', 'gpt-ya-1-1', 'gpt-ya-1-1-160M'), index=2)
    # Radio label -> Hugging Face Hub model id.  A lookup table replaces
    # the original if/elif chain; the radio widget only ever yields one of
    # these three labels.
    _MODEL_IDS = {
        'gpt-ya': 'breadlicker45/gpt-ya',
        'gpt-ya-1-1': 'BreadAi/gpt-YA-1-1_70M',
        'gpt-ya-1-1-160M': 'BreadAi/gpt-YA-1-1_160M',
    }
    model = _MODEL_IDS[select_model]
with st.spinner('Loading Model... (This may take a while)'):
    generator = get_model()
st.success('Model loaded correctly!')

gen = st.info('Generating text...')
# Run generation with the sidebar-selected decoding parameters.
answer = generator(prompt, max_length=max_length,
                   no_repeat_ngram_size=no_ngram_repeat,
                   early_stopping=early_stopping, num_beams=num_beams,
                   do_sample=False)
gen.empty()

text = answer[0]['generated_text']
# Typewriter effect: reveal one more character every 40 ms.
# BUG FIX: the original looped ``for i in range(len(lst))`` and sliced
# ``lst[0:i]``, so the slice end never reached len(lst) and the final
# character of the generated text was never displayed.  The slice end now
# runs from 1 through len(text) inclusive.
placeholder = st.empty()
for end in range(1, len(text) + 1):
    placeholder.markdown("#### %s" % text[0:end])
    time.sleep(0.04)