import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
# ๋ชจ๋ธ ๋ฐ ํ† ํฌ๋‚˜์ด์ € ๋กœ๋“œ
model_directory = "." # ํ˜„์žฌ ์ž‘์—… ๋””๋ ‰ํ† ๋ฆฌ๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋ชจ๋ธ ๋กœ๋“œ
model = AutoModelForSequenceClassification.from_pretrained(model_directory)
tokenizer = AutoTokenizer.from_pretrained(model_directory)
# model = AutoModelForSequenceClassification.from_pretrained("pytorch_model_ethics8multilable_acc8997.bin")
# tokenizer = AutoTokenizer.from_pretrained("tokenizer.json")
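# Note: from_pretrained() expects a directory (or Hub repo id) containing the
# standard files (config.json plus the weights, and the tokenizer files), not a
# bare .bin or .json path as in the commented-out lines above. Optionally,
# wrapping the loading in a function decorated with @st.cache_resource keeps
# Streamlit from reloading the model on every script rerun.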
# Set up the inference pipeline (on GPU if available, otherwise CPU)
inference_pipeline = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    return_all_scores=True,  # deprecated in newer transformers; top_k=None is the equivalent
    device=0 if torch.cuda.is_available() else -1,  # uses the torch import above
)
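# With return_all_scores=True, a single input string yields a list holding one
# inner list of {'label': 'LABEL_i', 'score': float} dicts (one per class),
# which is why the loop further down iterates over results[0].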
# Streamlit UI
st.title("Text Ethics Analysis")
st.write("Enter text below and it will be checked for ethical issues against 8 criteria (texts of 100 characters or fewer are recommended).")
st.write("Criteria: IMMORAL_NONE (no issue), CRIME, SEXUAL (suggestive), HATE, DISCRIMINATION, CENSURE (blame), ABUSE (profanity), VIOLENCE")
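# Assumption: the saved model was trained for multi-label classification, so
# each of the 8 labels is scored independently and judged against a 0.5
# threshold below, rather than picking a single best class.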
# Text input
example_sentence = st.text_area("Enter text", value="")
if st.button("Analyze"):
    if not example_sentence.strip():
        st.warning("Please enter some text to analyze.")
        st.stop()
    # Run inference
    results = inference_pipeline(example_sentence)
    # Interpret the results (each label predicted against a 0.5 threshold)
    id2label = {
        0: 'IMMORAL_NONE (no issue)', 1: 'CRIME', 2: 'SEXUAL (suggestive)', 3: 'HATE',
        4: 'DISCRIMINATION', 5: 'CENSURE (blame)', 6: 'ABUSE (profanity)', 7: 'VIOLENCE'
    }
st.write("๋ถ„์„ ๊ฒฐ๊ณผ:")
for result in results[0]:
label = id2label[int(result['label'].split('_')[-1])]
score = result['score']
prediction = 1 if score > 0.5 else 0
st.write(f"{label}: {'YES' if prediction else 'NO'} (ํ™•๋ฅ : {score:.4f})")