# NOTE(review): the lines below were Hugging Face Spaces page chrome captured
# with the source (status: "Runtime error", file size 1,851 bytes, commit
# hashes 6950142/27a08ca/e0e0dce/f94793f, line-number gutter). Converted to a
# comment so the script parses; not part of the program.
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
# --- Model and tokenizer setup --------------------------------------------
# Load the fine-tuned sequence-classification model and its tokenizer from
# the current working directory (expects config.json, the model weights, and
# tokenizer files alongside this script).
# Fix: the original comment here was split across two lines by an encoding/
# extraction error, leaving bare (non-comment) text in code position — a
# SyntaxError. Merged into a proper comment and translated to English.
model_directory = "."
model = AutoModelForSequenceClassification.from_pretrained(model_directory)
tokenizer = AutoTokenizer.from_pretrained(model_directory)

# Text-classification pipeline that reports a score for every label
# (multi-label output, one dict per label).
# NOTE(review): `return_all_scores=True` is deprecated in recent transformers
# releases in favour of `top_k=None`; it is kept here because the downstream
# display code indexes the nested `results[0]` shape this flag produces —
# migrate both together.
inference_pipeline = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    return_all_scores=True,
)
# --- Streamlit UI ----------------------------------------------------------
# NOTE(review): the original Korean UI strings were mangled by an encoding
# round-trip, and several literals were split mid-string by the extraction
# (a SyntaxError). The fragments are rejoined here exactly as captured; the
# underlying mojibake cannot be reliably decoded, so it is left as-is.
st.title("ํ์คํธ ์ค๋ฆฌ์ฑ ๋ถ์")
st.write('''์๋์ ํ์คํธ๋ฅผ ์๋ ฅํ๋ฉด 8๊ฐ์ง ๊ธฐ์ค์ ๋ฐ๋ผ ์ค๋ฆฌ์ ๋ฌธ์ ๋ฅผ ํ์งํด ์ค๋๋ค(ํ์คํธ๋ 100์ ์ด๋ด ๊ถ์ฅ)''')
st.write('๊ธฐ์ค: IMMORAL_NONE(๋ฌธ์ ์์), CRIME(๋ฒ์ฃ), SEXUAL(์ ์ ), HATE(ํ์ค), DISCRIMINATION(์ฐจ๋ณ), CENSURE(๋น๋), ABUSE(์์ค), VIOLENCE(ํญ๋ ฅ)')

# Input area for the sentence to analyse.
example_sentence = st.text_area("ํ์คํธ๋ฅผ ์๋ ฅํ์ธ์", value="")

if st.button("๋ถ์"):
    # Run inference. Depending on the transformers version / pipeline flags,
    # a single-string call returns either the nested [[{label, score}, ...]]
    # (return_all_scores=True) or the flat [{label, score}, ...] (top_k=None).
    # Normalise to the flat per-label list so either shape works.
    results = inference_pipeline(example_sentence)
    label_scores = results[0] if results and isinstance(results[0], list) else results

    # Map the model's positional labels ("LABEL_<i>") to human-readable names.
    id2label = {
        0: 'IMMORAL_NONE(๋ฌธ์ ์์)', 1: 'CRIME(๋ฒ์ฃ)', 2: 'SEXUAL(์ ์ )', 3: 'HATE(ํ์ค)',
        4: 'DISCRIMINATION(์ฐจ๋ณ)', 5: 'CENSURE(๋น๋)', 6: 'ABUSE(์์ค)', 7: 'VIOLENCE(ํญ๋ ฅ)'
    }
    st.write("๋ถ์ ๊ฒฐ๊ณผ:")
    for result in label_scores:
        # Pipeline labels look like "LABEL_3"; the trailing index keys id2label.
        label = id2label[int(result['label'].split('_')[-1])]
        score = result['score']
        # Multi-label thresholding: each label is flagged independently at 0.5.
        prediction = 1 if score > 0.5 else 0
        st.write(f"{label}: {'YES' if prediction else 'NO'} (ํ๋ฅ : {score:.4f})")