"""
OpenAI Embedding ๊ธฐ๋ฐ˜ ๋…์ฐฝ์„ฑ ์ธก์ • (Gradio GUI)

์‚ฌ์šฉ๋ฒ•:
    pip install gradio openai numpy nltk
    python OpenAI_Originality_GUI.py
"""

import numpy as np
import gradio as gr
from openai import OpenAI
from nltk.tokenize import sent_tokenize, word_tokenize
import nltk

# Download NLTK tokenizer data on first run
try:
    nltk.data.find('tokenizers/punkt_tab')
except LookupError:
    nltk.download('punkt_tab')


def cosine_distance(v1, v2):
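    """Cosine distance between two vectors: 1 - cosine similarity."""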
    dot = np.dot(v1, v2)
    norm = np.linalg.norm(v1) * np.linalg.norm(v2)
    similarity = dot / norm if norm > 0 else 0
    return 1 - similarity


def get_embeddings(client, texts, model="text-embedding-3-large"):
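    """Embed a batch of texts with one OpenAI embeddings call; return one vector per text."""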
    response = client.embeddings.create(input=texts, model=model)
    return [item.embedding for item in response.data]


def calculate_sem_div(client, text):
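    """Semantic diversity: mean pairwise cosine distance between sentence embeddings."""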
    sentences = sent_tokenize(text)
    if len(sentences) < 2:
        return 0.0, sentences
    embeddings = get_embeddings(client, sentences)
    distances = []
    for i in range(len(sentences)):
        for j in range(i):
            dist = cosine_distance(embeddings[i], embeddings[j])
            distances.append(dist)
    return np.mean(distances), sentences


def calculate_lex_div(text):
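    """Lexical diversity (type-token ratio): unique alphabetic tokens / total tokens."""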
    tokens = word_tokenize(text.lower())
    tokens = [t for t in tokens if t.isalpha()]
    if len(tokens) == 0:
        return 0.0, 0, 0
    unique_tokens = set(tokens)
    return len(unique_tokens) / len(tokens), len(unique_tokens), len(tokens)


def analyze_originality(api_key, passage_a, passage_b):
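    """Score each passage as 0.5 * sem_div + 0.5 * lex_div and report the comparison."""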
    if not api_key.strip():
        return "Error: please enter your OpenAI API key."
    if not passage_a.strip() or not passage_b.strip():
        return "Error: please enter both passages."

    try:
        client = OpenAI(api_key=api_key.strip())

        # Analyze Passage A
        sem_div_a, sentences_a = calculate_sem_div(client, passage_a)
        lex_div_a, unique_a, total_a = calculate_lex_div(passage_a)
        score_a = 0.50 * sem_div_a + 0.50 * lex_div_a

        # Analyze Passage B
        sem_div_b, sentences_b = calculate_sem_div(client, passage_b)
        lex_div_b, unique_b, total_b = calculate_lex_div(passage_b)
        score_b = 0.50 * sem_div_b + 0.50 * lex_div_b

        # ์ฐจ์ด ๊ณ„์‚ฐ
        diff = score_a - score_b
        lower_score = min(score_a, score_b)
        diff_percent = (abs(diff) / lower_score) * 100 if lower_score > 0 else 0

        # ํŒ์ •
        if diff_percent < 5:
            judgment = "๋น„์Šทํ•จ"
        elif diff_percent < 10:
            judgment = "์ฐจ์ด ์žˆ์Œ"
        elif diff_percent < 15:
            judgment = "์œ ์˜๋ฏธํ•œ ์ฐจ์ด"
        else:
            judgment = "ํ™•์‹คํ•œ ์ฐจ์ด"

        # Build the result text
        result = f"""
{'='*50}
                 Analysis Results
{'='*50}

{'Metric':<15} {'Passage A':>15} {'Passage B':>15}
{'-'*50}
{'Sentences':<15} {len(sentences_a):>15} {len(sentences_b):>15}
{'Unique words':<15} {unique_a:>15} {unique_b:>15}
{'Total words':<15} {total_a:>15} {total_b:>15}
{'-'*50}
{'sem_div':<15} {sem_div_a:>15.4f} {sem_div_b:>15.4f}
{'lex_div':<15} {lex_div_a:>15.4f} {lex_div_b:>15.4f}
{'Originality':<15} {score_a:>15.4f} {score_b:>15.4f}

{'='*50}
                 Score Difference
{'='*50}
Score difference: {abs(diff):.4f}
Difference ratio: {diff_percent:.1f}%
Verdict: {judgment}

{'='*50}
                  Final Verdict
{'='*50}
"""
        if diff_percent < 5:
            result += f"The two passages are similarly original (difference {diff_percent:.1f}%)"
        elif diff > 0:
            result += f"Passage A is more original (difference {diff_percent:.1f}%, {judgment})"
        else:
            result += f"Passage B is more original (difference {diff_percent:.1f}%, {judgment})"

        return result

    except Exception as e:
        return f"Error: {str(e)}"


# Gradio interface
demo = gr.Interface(
    fn=analyze_originality,
    inputs=[
        gr.Textbox(label="OpenAI API Key", type="password", placeholder="sk-..."),
        gr.Textbox(label="Passage A", lines=8, placeholder="์ฒซ ๋ฒˆ์งธ ๋‹จ๋ฝ ์ž…๋ ฅ..."),
        gr.Textbox(label="Passage B", lines=8, placeholder="๋‘ ๋ฒˆ์งธ ๋‹จ๋ฝ ์ž…๋ ฅ...")
    ],
    outputs=gr.Textbox(label="Analysis Results", lines=25),
    title="OpenAI Embedding Originality Analysis",
    description="Compares the originality of two passages. (sem_div 50% + lex_div 50%)",
    flagging_mode="never"  # requires a recent Gradio; older releases use allow_flagging="never"
)

if __name__ == "__main__":
    demo.launch()