import os

import streamlit as st
from huggingface_hub import login
from transformers import AutoTokenizer, pipeline

# Hugging Face access token for downloading gated/private model weights.
# Expected in the environment (e.g. set via deployment secrets).
hf_token = os.getenv("HF_MODEL_TOKEN")

# Authenticate with the Hugging Face Hub so the model checkpoint below can be
# fetched. NOTE(review): if HF_MODEL_TOKEN is unset, hf_token is None and
# login() will fail — confirm the variable is always provided in deployment.
login(token=hf_token)
|
|
@st.cache_resource
def load_pipe():
    """Load the GBERT token-classification pipeline and its tokenizer.

    Cached by Streamlit (`st.cache_resource`) so the model is downloaded and
    initialized only once per server process.

    Returns:
        tuple: (token-classification pipeline, AutoTokenizer) for the
        checkpoint below.
    """
    checkpoint = "MSey/_table_CaBERT_0003_gbert-base_fl32_checkpoint-15852"
    ner_pipe = pipeline("token-classification", model=checkpoint)
    tok = AutoTokenizer.from_pretrained(checkpoint)
    return ner_pipe, tok
|
|
# Load (cached) pipeline and tokenizer once per session.
# NOTE(review): `tokenizer` is currently unused below — kept for interface
# stability / future use.
pipe, tokenizer = load_pipe()

st.header("Test Environment for GBERT Ca Model")
user_input = st.text_input("Enter your Prompt here:", "")

if user_input:
    with st.spinner('Generating response...'):
        # Run token classification on the raw user text.
        response = pipe(user_input)
        st.write("Response:")
        # Build one "word<TAB>label" line per predicted entity.
        # Join once instead of quadratic `+=` string concatenation.
        lines = [f"{entity['word']}\t{entity['entity']}\n" for entity in response]
        tuples = "".join(lines)
        st.text(tuples)