Update app.py
app.py
CHANGED
@@ -1,74 +1,54 @@
import os
-import google.generativeai as genai
-import gradio as gr
import time
-import asyncio  # import the asyncio library

-#
-
-
"temperature": 1,
|
| 13 |
"top_p": 0.95,
|
| 14 |
"top_k": 64,
|
| 15 |
"max_output_tokens": 15000,
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
-    },
-    {
-        "category": "HARM_CATEGORY_HATE_SPEECH",
-        "threshold": "BLOCK_NONE"
-    },
-    {
-        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-        "threshold": "BLOCK_NONE"
-    },
-    {
-        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-        "threshold": "BLOCK_NONE"
-    },
-]
-def create_model():
-    return genai.GenerativeModel(model_name="gemini-1.5-pro",
-                                 generation_config=generation_config,
-                                 safety_settings=safety_settings)
-
-# Generate the curriculum asynchronously
-async def generate_curriculum(prompt):
-    model = create_model()  # create a new model instance
-
-    prompt_parts = [
-        "Concept-Based Curriculum Expert, a specialist in designing concept-based curricula. Systematically designs concept-based curricula and lessons within the scope of the elementary school curriculum.",
-        "Achievement standards: achievement standards and lesson objectives",
-        "Concept-based curriculum: 1. Core ideas (generalized knowledge)\n\n2. Macro concepts (conceptual lens), micro concepts (subject concepts)\n\n3. Factual knowledge, topics, concepts, principles and generalizations, theories\n\n4. Learning activities by strand and dimension\n\n5. Assessment content and methods, assessment rubric (including high/middle/low student examples)\n\n6. A table to support conceptual understanding",
-        prompt,
-        "Concept-based curriculum",
-    ]
-
-    full_text = ""
-    yield full_text  # emit the initial empty text
-
-    try:
-        response = model.generate_content(prompt_parts, stream=True)
-        for chunk in response:
-            full_text += chunk.text
-            yield full_text
-            await asyncio.sleep(0.05)  # use asyncio.sleep()
-    except Exception as e:
-        yield f"Error occurred: {str(e)}"

-
iface = gr.Interface(
-    fn=
-    inputs=
-    outputs=
-    title="
-    description="
)

-
import os
import time

+import gradio as gr
+import google.generativeai as genai

+# Configure the Gemini API
+genai.configure(api_key=os.environ["GEMINI_API_KEY"])
+generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 15000,
+    "response_mime_type": "text/plain",
+}
+model = genai.GenerativeModel(
+    model_name="gemini-1.5-pro",
+    generation_config=generation_config,
+)

+def generate_response(input_text, progress=gr.Progress(track_tqdm=True)):
+    """Call the Gemini API and generate a response (streamed output)."""
+    prompt = "\n".join([
+        "Concept-based inquiry learning expert",
+        "Learning topic: learning topic",
+        "Concrete concept-based inquiry lesson plan for the topic: 0. Macro concepts (conceptual lens), micro concepts (subject concepts), and generalizations (conceptual understandings) for the learning topic\n\n1. Engaging: intellectual and emotional engagement with the lesson, eliciting prior knowledge\n\n2. Focusing: investigating the subject concepts, introducing one or two related factual examples\n\n3. Investigating: researching cases related to the concepts, expanding conceptual understanding as each case is added\n\n4. Organizing: constructing ideas at the factual and conceptual levels; representing concepts and ideas with other materials, other methods, and through the subject\n\n5. Generalizing: discovering patterns in the factual examples, finding connections, articulating generalizations\n\n6. Transferring: testing and justifying the generalizations, applying them to new situations, taking meaningful action on the learning, using experience and understanding to make predictions and form hypotheses\n\n7. Reflecting: recognizing oneself as the agent of learning, planning and monitoring one's own learning process, evaluating the process\n\n8. Assessment plan and performance task (GRASPS), assessment rubric (high, middle, low)\n\n9. A table to support student inquiry",
+        "Learning topic: straight-line propagation of light",
+        "Concrete concept-based inquiry lesson plan for the topic",
+        input_text
+    ])
+
+    response = ""
+    for chunk in model.generate_content([prompt], stream=True):
+        if chunk.text:
+            response += chunk.text
+            # Add a slight delay to create a typing effect.
+            time.sleep(0.05)
+            yield response
+
+# Gradio interface setup
iface = gr.Interface(
+    fn=generate_response,
+    inputs="text",
+    outputs="text",
+    title="Concept-Based Inquiry Learning Chatbot",
+    description="Generates a concept-based inquiry learning lesson plan for a learning topic (streamed in real time).",
+    examples=[
+        ["Hello!"],
+        ["I want to create a lesson plan about the straight-line propagation of light."],
+    ],
)

+# Launch the chatbot
+iface.launch()
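A note on the streaming behaviour of the new version: generate_response is a generator, so gr.Interface rewrites the output textbox with the progressively longer string on every yield. Below is a minimal sketch of the same accumulate-and-yield pattern with the Gemini call replaced by a local stub, so it can be tried without an API key; fake_chunks and generate_response_stub are illustrative names, not part of this commit.

import time
import gradio as gr

def fake_chunks(text, size=8):
    # Stand-in for model.generate_content(..., stream=True): yields short pieces of text.
    for i in range(0, len(text), size):
        yield text[i:i + size]

def generate_response_stub(input_text):
    # Same pattern as generate_response in the commit: accumulate, pause, yield.
    response = ""
    for piece in fake_chunks(f"Lesson plan for: {input_text}\n1. Engage\n2. Focus\n3. Investigate"):
        response += piece
        time.sleep(0.05)  # typing effect, as in the commit
        yield response    # each yield replaces the textbox contents

if __name__ == "__main__":
    gr.Interface(fn=generate_response_stub, inputs="text", outputs="text").launch()

Running the real app also requires the GEMINI_API_KEY environment variable (for example as a Space secret), since genai.configure reads os.environ["GEMINI_API_KEY"] at import time.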