ll7098ll committed on
Commit
cd9df3f
·
verified ·
1 Parent(s): 165679f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -62
app.py CHANGED
@@ -1,74 +1,54 @@
1
  import os
2
- import google.generativeai as genai
3
- import gradio as gr
4
  import time
5
- import asyncio # asyncio ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์ž„ํฌํŠธ
6
 
7
- genai.configure(api_key=os.environ["GEMINI_API_KEY"])
 
8
 
9
- # ๋ชจ๋ธ ์ƒ์„ฑ ํ•จ์ˆ˜ (๋งค ์š”์ฒญ๋งˆ๋‹ค ์ƒˆ๋กœ์šด ๋ชจ๋ธ ์ƒ์„ฑ)
10
- def create_model():
11
- generation_config = {
12
  "temperature": 1,
13
  "top_p": 0.95,
14
  "top_k": 64,
15
  "max_output_tokens": 15000,
16
- }
17
-
18
- safety_settings = [
19
- {
20
- "category": "HARM_CATEGORY_HARASSMENT",
21
- "threshold": "BLOCK_NONE"
22
- },
23
- {
24
- "category": "HARM_CATEGORY_HATE_SPEECH",
25
- "threshold": "BLOCK_NONE"
26
- },
27
- {
28
- "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
29
- "threshold": "BLOCK_NONE"
30
- },
31
- {
32
- "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
33
- "threshold": "BLOCK_NONE"
34
- },
35
- ]
36
-
37
- return genai.GenerativeModel(model_name="gemini-1.5-pro",
38
- generation_config=generation_config,
39
- safety_settings=safety_settings)
40
-
41
- # ๋น„๋™๊ธฐ์ ์œผ๋กœ ๊ต์œก ๊ณผ์ • ์ƒ์„ฑ
42
- async def generate_curriculum(prompt):
43
- model = create_model() # ์ƒˆ๋กœ์šด ๋ชจ๋ธ ์ธ์Šคํ„ด์Šค ์ƒ์„ฑ
44
-
45
- prompt_parts = [
46
- "Concept-Based Curriculum Expert, ๊ฐœ๋…๊ธฐ๋ฐ˜๊ต์œก๊ณผ์ • ์„ค๊ณ„ ์ „๋ฌธ๊ฐ€. ์ดˆ๋“ฑํ•™๊ต ๊ต์œก๊ณผ์ • ๋ฒ”์œ„ ๋‚ด์—์„œ ๊ฐœ๋…๊ธฐ๋ฐ˜ ๊ต์œก๊ณผ์ • ๋ฐ ์ˆ˜์—…์„ ์ฒด๊ณ„์ ์œผ๋กœ ์„ค๊ณ„.",
47
- "์„ฑ์ทจ๊ธฐ์ค€ ์„ฑ์ทจ ๊ธฐ์ค€ ๋ฐ ์ˆ˜์—… ๋ชฉํ‘œ",
48
- "๊ฐœ๋…๊ธฐ๋ฐ˜ ๊ต์œก๊ณผ์ • 1. ํ•ต์‹ฌ ์•„์ด๋””์–ด(์ผ๋ฐ˜ํ™”๋œ ์ง€์‹)\n\n2. ๋งคํฌ๋กœ ๊ฐœ๋…(๊ฐœ๋…์  ๋ Œ์ฆˆ), ๋งˆ์ดํฌ๋กœ ๊ฐœ๋…(๊ต๊ณผ ๊ฐœ๋…)\n\n3. ์‚ฌ์‹ค์  ์ง€์‹, ์ฃผ์ œ, ๊ฐœ๋…, ์›๋ฆฌ ๋ฐ ์ผ๋ฐ˜ํ™”, ์ด๋ก \n\n4. ์ŠคํŠธ๋žœ๋“œ์™€ ์ฐจ์‹œ๋ณ„ ํ•™์Šตํ™œ๋™\n\n5. ํ‰๊ฐ€ ๋‚ด์šฉ ๋ฐ ๋ฐฉ๋ฒ•, ํ‰๊ฐ€ ๋ฃจ๋ธŒ๋ฆญ(์ƒ, ์ค‘, ํ•˜ ํ‰์–ด ์˜ˆ์‹œ ํฌํ•จ)\n\n6. ๊ฐœ๋…์  ์ดํ•ด๋ฅผ ์œ„ํ•œ ํŒ",
49
- prompt,
50
- "๊ฐœ๋…๊ธฐ๋ฐ˜ ๊ต์œก๊ณผ์ • ",
51
- ]
52
-
53
- full_text = ""
54
- yield full_text # ์ดˆ๊ธฐ ๋นˆ ํ…์ŠคํŠธ ์ถœ๋ ฅ
55
-
56
- try:
57
- response = model.generate_content(prompt_parts, stream=True)
58
- for chunk in response:
59
- full_text += chunk.text
60
- yield full_text
61
- await asyncio.sleep(0.05) # asyncio.sleep() ์‚ฌ์šฉ
62
- except Exception as e:
63
- yield f"์—๋Ÿฌ ๋ฐœ์ƒ: {str(e)}"
64
 
65
- # ์ธํ„ฐํŽ˜์ด์Šค ์„ค์ • (queue() ํ•จ์ˆ˜ ์ถ”๊ฐ€)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  iface = gr.Interface(
67
- fn=generate_curriculum,
68
- inputs=gr.Textbox(lines=1, label="์„ฑ์ทจ๊ธฐ์ค€ ์ž…๋ ฅ"),
69
- outputs=gr.Textbox(lines=20, label="๊ฐœ๋…๊ธฐ๋ฐ˜ ๊ต์œก๊ณผ์ •"),
70
- title="๊ฐœ๋…๊ธฐ๋ฐ˜ ๊ต์œก๊ณผ์ • ์ฑ—๋ด‡",
71
- description="์„ฑ์ทจ๊ธฐ์ค€์„ ์ž…๋ ฅํ•˜๋ฉด ๊ฐœ๋…๊ธฐ๋ฐ˜ ๊ต์œก๊ณผ์ •์„ ์„ค๊ณ„ํ•ฉ๋‹ˆ๋‹ค."
 
 
 
 
72
  )
73
 
74
- iface.queue().launch() # queue() ํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ๋น„๋™๊ธฐ์ ์œผ๋กœ ์‹คํ–‰
 
 
1
  import os
 
 
2
  import time
 
3
 
4
+ import gradio as gr
5
+ import google.generativeai as genai
6
 
7
+ # Gemini API ์„ค์ •
8
+ genai.configure(api_key=os.environ["GEMINI_API_KEY"])
9
+ generation_config = {
10
  "temperature": 1,
11
  "top_p": 0.95,
12
  "top_k": 64,
13
  "max_output_tokens": 15000,
14
+ "response_mime_type": "text/plain",
15
+ }
16
+ model = genai.GenerativeModel(
17
+ model_name="gemini-1.5-pro",
18
+ generation_config=generation_config,
19
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
+ def generate_response(input_text, progress=gr.Progress(track_time=True)):
22
+ """Gemini API๋ฅผ ํ˜ธ์ถœํ•˜์—ฌ ์‘๋‹ต์„ ์ƒ์„ฑํ•˜๋Š” ํ•จ์ˆ˜ (์‹ค์‹œ๊ฐ„ ์ถœ๋ ฅ)"""
23
+ prompt = "\n".join([
24
+ "Concept-based inquiry learning expert",
25
+ "ํ•™์Šต์ฃผ์ œ ํ•™์Šต์ฃผ์ œ",
26
+ "์ฃผ์ œ์— ๋Œ€ํ•œ ๊ฐœ๋…๊ธฐ๋ฐ˜ ํƒ๊ตฌํ•™์Šต ๊ตฌ์ฒด์  ์ˆ˜์—… ๊ณ„ํš 0. ํ•™์Šต์ฃผ์ œ์— ๋Œ€ํ•œ ๋งคํฌ๋กœ ๊ฐœ๋…(๊ฐœ๋…์  ๋ Œ์ฆˆ), ๋งˆ์ดํฌ๋กœ ๊ฐœ๋…(๊ต๊ณผ๊ฐœ๋…), ์ผ๋ฐ˜ํ™”(๊ฐœ๋…์  ์ดํ•ด)\n\n1. ๊ด€๊ณ„๋งบ๊ธฐ: ์ง€์  ์ •์„œ์  ์ˆ˜์—…์ฐธ์—ฌ, ์‚ฌ์ „์ง€์‹ ์œ ๋„\n\n2. ์ง‘์ค‘ํ•˜๊ธฐ: ๊ต๊ณผ๊ฐœ๋… ์กฐ์‚ฌ, ๊ด€๋ จ ์‚ฌ์‹ค์  ์˜ˆ ์ค‘ 1-2๊ฐœ ์†Œ๊ฐœํ•˜๊ธฐ\n\n3. ์กฐ์‚ฌํ•˜๊ธฐ: ๊ฐœ๋… ๊ด€๋ จ ์‚ฌ๋ก€๋“ค ์กฐ์‚ฌํ•˜๊ธฐ, ์‚ฌ๋ก€ ์ถ”๊ฐ€์ œ์‹œ๋กœ ๊ฐœ๋…์ดํ•ด ํ™•์žฅํ•˜๊ธฐ \n\n4. ์กฐ์ง ๋ฐ ์ •๋ฆฌํ•˜๊ธฐ: ์‚ฌ์‹ค์  ๊ฐœ๋…์  ์ˆ˜์ค€์—์„œ ์ƒ๊ฐ ๊ตฌ์„ฑํ•˜๊ธฐ, ๋‹ค๋ฅธ ์ž๋ฃŒ๋กœ, ๋‹ค๋ฅธ ๋ฐฉ๋ฒ•์œผ๋กœ, ๊ต๊ณผ๋กœ ๊ฐœ๋…๊ณผ ์ƒ๊ฐ ๋‚˜ํƒ€๋‚ด๊ธฐ\n\n5. ์ผ๋ฐ˜ํ™”: ์‚ฌ์‹ค์  ์˜ˆ์‹œ์—์„œ ํŒจํ„ด ๋ฐœ๊ฒฌํ•˜๊ณ , ์—ฐ๊ฒฐ์„ฑ ์ฐพ๊ธฐ, ์ผ๋ฐ˜ํ™” ๋ช…๋ฃŒํ™”ํ•˜๊ธฐ\n\n6.์ „์ด: ์ผ๋ฐ˜ํ™” ์œ ํšจ์„ฑ๊ฒ€์ฆํ•˜๊ณ  ์ •๋‹นํ™”ํ•˜๊ธฐ, ์ƒˆ๋กœ์šด ์ƒํ™ฉ์— ์ ์šฉ, ํ•™์Šต์— ๋Œ€ํ•ด ์˜๋ฏธ์žˆ๋Š” ํ–‰๋™ ์ทจํ•˜๊ธฐ, ์˜ˆ์ธก, ๊ฐ€์ •์„ ํ˜•์„ฑํ•˜๊ธฐ ์œ„ํ•ด ๊ฒฝํ—˜๊ณผ ์ดํ•ด ํ™œ์šฉํ•˜๊ธฐ\n\n7. ์„ฑ์ฐฐํ•˜๊ธฐ: ํ•™์Šต์ฃผ์ฒด์ž„์„ ์ธ์‹ํ•˜๊ธฐ, ์ž์‹  ํ•™์Šต๊ณผ์ • ๊ณ„ํšํ•˜๊ณ  ํ†ต์ œํ•˜๊ธฐ, ๊ณผ์ • ํ‰๊ฐ€ํ•˜๊ธฐ\n\n8. ํ‰๊ฐ€ ๊ณ„ํš ๋ฐ ์ˆ˜ํ–‰๊ณผ์ œ(GRASPS), ํ‰๊ฐ€ ๋ฃจ๋ธŒ๋ฆญ(์ƒ, ์ค‘, ํ•˜ ํ‰์–ด)\n\n9. ํ•™์ƒ์˜ ํƒ๊ตฌ๋ฅผ ์œ„ํ•œ ํŒ",
27
+ "ํ•™์Šต์ฃผ์ œ ๋น›์˜ ์ง์ง„",
28
+ "์ฃผ์ œ์— ๋Œ€ํ•œ ๊ฐœ๋…๊ธฐ๋ฐ˜ ํƒ๊ตฌํ•™์Šต ๊ตฌ์ฒด์  ์ˆ˜์—… ๊ณ„ํš ",
29
+ input_text
30
+ ])
31
+
32
+ response = ""
33
+ for chunk in model.generate_content([prompt], stream=True):
34
+ if chunk.text:
35
+ response += chunk.text
36
+ # ์•ฝ๊ฐ„์˜ ์ง€์—ฐ์„ ์ถ”๊ฐ€ํ•˜์—ฌ ํƒ€์ดํ•‘ ํšจ๊ณผ๋ฅผ ์—ฐ์ถœํ•ฉ๋‹ˆ๋‹ค.
37
+ time.sleep(0.05)
38
+ yield response
39
+
40
+ # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์„ค์ •
41
  iface = gr.Interface(
42
+ fn=generate_response,
43
+ inputs="text",
44
+ outputs="text",
45
+ title="๊ฐœ๋… ๊ธฐ๋ฐ˜ ํƒ๊ตฌ ํ•™์Šต ์ฑ—๋ด‡",
46
+ description="ํ•™์Šต ์ฃผ์ œ์— ๋Œ€ํ•œ ๊ฐœ๋… ๊ธฐ๋ฐ˜ ํƒ๊ตฌ ํ•™์Šต ์ˆ˜์—… ๊ณ„ํš์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. (์‹ค์‹œ๊ฐ„ ์ถœ๏ฟฝ๏ฟฝ๏ฟฝ)",
47
+ examples=[
48
+ ["์•ˆ๋…•ํ•˜์„ธ์š”!"],
49
+ ["๋น›์˜ ์ง์ง„์— ๋Œ€ํ•œ ์ˆ˜์—… ๊ณ„ํš์„ ์„ธ์šฐ๊ณ  ์‹ถ์–ด์š”."],
50
+ ],
51
  )
52
 
53
+ # ์ฑ—๋ด‡ ์‹คํ–‰
54
+ iface.launch()