File size: 2,220 Bytes
5d5df43
 
1d355ad
fabb74d
 
 
22032f3
 
fabb74d
 
79f7c7e
 
 
122580d
fabb74d
 
 
 
1855eaa
fabb74d
 
 
5d5df43
fabb74d
d623fff
 
 
cb8f0ba
d623fff
5d5df43
 
5803d11
 
 
 
fabb74d
39688bd
5d5df43
5803d11
fabb74d
5803d11
fabb74d
217a78d
fabb74d
 
5803d11
fabb74d
feb03cf
fabb74d
 
 
 
 
e39486f
 
940b028
5d5df43
 
940b028
5803d11
 
 
 
 
39688bd
03d0703
fabb74d
8683182
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import gradio as gr
import google.generativeai as genai
import os
import time

# Configure the Google Gemini API key.
# Fail fast with an actionable message instead of a bare KeyError so the
# operator immediately knows which environment variable is missing.
_api_key = os.environ.get("GEMINI_API_KEY")
if not _api_key:
    raise RuntimeError("GEMINI_API_KEY environment variable is not set")
genai.configure(api_key=_api_key)

# Generation parameters for the Gemini model.
generation_config = {
    "temperature": 1,  # high temperature: favor varied, creative ideas
    "top_p": 0.8,
    "top_k": 64,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}

# Shared model instance used by generate_solution below.
model = genai.GenerativeModel(
    model_name="gemini-2.0-flash",
    generation_config=generation_config,
)

# ์‹œ์Šคํ…œ ํ”„๋กฌํ”„ํŠธ
SYSTEM_PROMPT = """
๋‹น์‹ ์€ ์ฐฝ์˜์ ์ธ ๋ฐœ๋ช…๊ฐ€์ด์ž ๋งˆ์ดํฌ๋กœ๋น„ํŠธ ์ „๋ฌธ๊ฐ€์ž…๋‹ˆ๋‹ค. Python ์ฝ”๋”ฉ ์ „๋ฌธ๊ฐ€์ž…๋‹ˆ๋‹ค.
์‚ฌ์šฉ์ž๊ฐ€ ์„ ํƒํ•œ ๋งˆ์ดํฌ๋กœ๋น„ํŠธ ์„ผ์„œ ์กฐํ•ฉ์„ ๊ธฐ๋ฐ˜์œผ๋กœ, 
ํฅ๋ฏธ๋กญ๊ณ  ์‹ค์šฉ์ ์ธ ๋ฐœ๋ช…ํ’ˆ ์•„์ด๋””์–ด๋ฅผ 3๊ฐ€์ง€ ์ด์ƒ ์ œ์•ˆํ•ด์ฃผ์„ธ์š”. 
์ตœ๋Œ€ํ•œ ๋‹ค์–‘ํ•œ ๋ถ„์•ผ์˜ ์‚ฌ๋ก€๋ฅผ ์ œ๊ณตํ•ด์•ผ ํ•˜๋ฉฐ, ํŠน์ดํ•˜๊ณ  ์ฐฝ์˜์ ์ธ ์•„์ด๋””์–ด๋ฅผ ์šฐ์„ ์ ์œผ๋กœ ์ œ์‹œํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค.
์ž…๋ ฅํ•  ๋•Œ๋งˆ๋‹ค ๋‹ค๋ฅธ ๋ฐœ๋ช…ํ’ˆ์„ ์ถœ๋ ฅํ•ด์•ผ ํ•ฉ๋‹ˆ๋‹ค.

**์‘๋‹ต ํ˜•์‹:**
1. **๋ฌธ์ œ ์ƒํ™ฉ ๋ถ„์„:** (์ž…๋ ฅ๋œ ๋ฌธ์ œ ์ƒํ™ฉ์— ๋Œ€ํ•œ ๋ถ„์„)
2. **ํ•ด๊ฒฐ ๋ฐฉ์•ˆ:** (๋งˆ์ดํฌ๋กœ๋น„ํŠธ๋ฅผ ์ด์šฉํ•œ ํ•ด๊ฒฐ ๋ฐฉ์•ˆ ์ œ์‹œ)
3. **ํ™œ์šฉ ์„ผ์„œ:** (ํ•„์š”ํ•œ ์„ผ์„œ ๋ชฉ๋ก)
4. **์ƒ์„ธ ์„ค๋ช…:** (ํ•ด๊ฒฐ ๋ฐฉ์•ˆ์— ๋Œ€ํ•œ ์ž์„ธํ•œ ์„ค๋ช…, ๋งˆ์ดํฌ๋กœ๋น„ํŠธ ๋™์ž‘ ๋ฐฉ์‹, ํ™œ์šฉ ์˜ˆ์‹œ ํฌํ•จ)
"""


def generate_solution(problem_situation):
    """Stream a micro:bit-based solution proposal for a problem description.

    Args:
        problem_situation: Free-form user text describing the problem.

    Yields:
        The accumulated response text after each streamed chunk, so the
        Gradio output textbox updates progressively like live typing.
    """

    prompt = [
        SYSTEM_PROMPT,
        f"**문제 상황:** {problem_situation}",
    ]

    response = model.generate_content(prompt, stream=True)
    collected_text = ""
    for token in response:
        # A streamed chunk with no candidates/parts (e.g. safety-filtered)
        # raises ValueError on .text access; skip it instead of crashing
        # the whole stream mid-generation.
        try:
            chunk = token.text
        except ValueError:
            continue
        collected_text += chunk
        yield collected_text
        time.sleep(0.03)  # brief pause for a smoother typing effect in the UI


# Gradio ์ธํ„ฐํŽ˜์ด์Šค ์„ค์ •
iface = gr.Interface(
    fn=generate_solution,
    inputs=gr.Textbox(lines=5, label="๋ฌธ์ œ ์ƒํ™ฉ"),
    outputs=gr.Textbox(lines=15, label="๋งˆ์ดํฌ๋กœ๋น„ํŠธ ํ•ด๊ฒฐ ๋ฐฉ์•ˆ"),
    title="๋งˆ์ดํฌ๋กœ๋น„ํŠธ ๋ฌธ์ œ ํ•ด๊ฒฐ ๋„์šฐ๋ฏธ",
    description="๋ฌธ์ œ ์ƒํ™ฉ์„ ์ž…๋ ฅํ•˜๋ฉด, ๋งˆ์ดํฌ๋กœ๋น„ํŠธ๋ฅผ ํ™œ์šฉํ•œ ํ•ด๊ฒฐ ๋ฐฉ์•ˆ์„ ์ œ์•ˆํ•ด์ค๋‹ˆ๋‹ค.",
)

# ์ธํ„ฐํŽ˜์ด์Šค ์‹คํ–‰
iface.launch()