Spaces: Runtime error
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

# Load the model and tokenizer from the current working directory
model_directory = "."
model = AutoModelForSequenceClassification.from_pretrained(model_directory)
tokenizer = AutoTokenizer.from_pretrained(model_directory)
# Note: from_pretrained() expects a directory or Hub repo id, not a bare file
# path, so the two commented-out lines below would fail as written:
# model = AutoModelForSequenceClassification.from_pretrained("pytorch_model_ethics8multilable_acc8997.bin")
# tokenizer = AutoTokenizer.from_pretrained("tokenizer.json")
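# For from_pretrained(".") to succeed, the repository needs the standard file
# names next to app.py (illustrative layout; the exact tokenizer files vary):
#   ./config.json
#   ./pytorch_model.bin            (or model.safetensors)
#   ./tokenizer.json, ./tokenizer_config.json, ./special_tokens_map.json
# A checkpoint saved under a custom name, such as the .bin file mentioned
# above, must be renamed to pytorch_model.bin first.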
# Set up the inference pipeline
# (return_all_scores=True is deprecated in newer transformers releases in
# favor of top_k=None; it is kept here to match the result handling below)
inference_pipeline = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    return_all_scores=True,
)
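# For reference, with return_all_scores=True the pipeline returns, for a single
# input string, a list with one element: a list of dicts, one per label
# (scores here are illustrative only):
# [[{'label': 'LABEL_0', 'score': 0.98}, {'label': 'LABEL_1', 'score': 0.01}, ...]]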
# Streamlit UI
st.title("Text Ethics Analysis")
st.write("Enter text below and it will be checked for ethical issues against 8 criteria (texts of 100 characters or fewer are recommended).")
st.write("Criteria: IMMORAL_NONE (no issue), CRIME, SEXUAL, HATE, DISCRIMINATION, CENSURE, ABUSE, VIOLENCE")

# Text input
example_sentence = st.text_area("Enter text", value="")
if st.button("Analyze"):
    # Run inference
    results = inference_pipeline(example_sentence)

    # Map the pipeline's generic LABEL_i names back to the 8 criteria
    id2label = {
        0: 'IMMORAL_NONE', 1: 'CRIME', 2: 'SEXUAL', 3: 'HATE',
        4: 'DISCRIMINATION', 5: 'CENSURE', 6: 'ABUSE', 7: 'VIOLENCE'
    }

    st.write("Analysis results:")
    # Multi-label interpretation: each label is judged independently
    # against a 0.5 decision threshold
    for result in results[0]:
        label = id2label[int(result['label'].split('_')[-1])]
        score = result['score']
        prediction = 1 if score > 0.5 else 0
        st.write(f"{label}: {'YES' if prediction else 'NO'} (probability: {score:.4f})")
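Since the Space status shows "Runtime error", the usual suspect with this layout is the model-loading step rather than the Streamlit code. A minimal smoke test, run outside Streamlit from the repository root, can confirm the checkpoint loads; this is only a sketch assuming the standard file names listed above are in place:

from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# hypothetical check script, run from the repository root
model = AutoModelForSequenceClassification.from_pretrained(".")
tokenizer = AutoTokenizer.from_pretrained(".")
clf = pipeline("text-classification", model=model, tokenizer=tokenizer, return_all_scores=True)
print(clf("sample text")[0])  # expect 8 {'label': ..., 'score': ...} dicts

If that passes, the app can be started locally with streamlit run app.py; on Spaces it is launched automatically.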