CSB261 commited on
Commit
b4cda3d
·
verified ·
1 Parent(s): 225b5d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -572
app.py CHANGED
@@ -1,581 +1,44 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
- import openai
4
- import anthropic
5
- from typing import Optional
6
 
7
- #############################
8
- # [기본코드] - 수정/삭제 불가
9
- #############################
10
 
11
- # 제거할 모델들을 MODELS 사전에서 제외
12
- MODELS = {
13
- "Zephyr 7B Beta": "HuggingFaceH4/zephyr-7b-beta",
14
- "Meta Llama 3.1 8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
15
- "Meta-Llama 3.1 70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
16
- "Microsoft": "microsoft/Phi-3-mini-4k-instruct",
17
- "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
18
- "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
19
- "Aya-23-35B": "CohereForAI/aya-23-35B"}
20
 
21
- # Cohere Command R+ 모델 ID 정의
22
- COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"
23
 
24
- def get_client(model_name, hf_token):
25
- """
26
- 모델 이름에 맞춰 InferenceClient 생성.
27
- hf_token을 UI에서 입력받은 값으로 사용하도록 변경.
28
- """
29
- if not hf_token:
30
- raise ValueError("HuggingFace API 토큰이 필요합니다.")
31
 
32
- if model_name in MODELS:
33
- model_id = MODELS[model_name]
34
- elif model_name == "Cohere Command R+":
35
- model_id = COHERE_MODEL
36
- else:
37
- raise ValueError("유효하지 않은 모델 이름입니다.")
38
- return InferenceClient(model_id, token=hf_token)
39
 
 
 
40
 
41
- def respond_hf_qna(
42
- question: str,
43
- model_name: str,
44
- max_tokens: int,
45
- temperature: float,
46
- top_p: float,
47
- system_message: str,
48
- hf_token: str
49
- ):
50
- """
51
- HuggingFace 모델(Zephyr 등)에 대해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
52
- """
53
- try:
54
- client = get_client(model_name, hf_token)
55
- except ValueError as e:
56
- return f"오류: {str(e)}"
57
-
58
- messages = [
59
- {"role": "system", "content": system_message},
60
- {"role": "user", "content": question}
61
- ]
62
-
63
- try:
64
- response = client.chat_completion(
65
- messages,
66
- max_tokens=max_tokens,
67
- temperature=temperature,
68
- top_p=top_p,
69
- stream=False,
70
- )
71
- assistant_message = response.choices[0].message.content
72
- return assistant_message
73
-
74
- except Exception as e:
75
- return f"오류가 발생했습니다: {str(e)}"
76
-
77
-
78
- def respond_cohere_qna(
79
- question: str,
80
- system_message: str,
81
- max_tokens: int,
82
- temperature: float,
83
- top_p: float,
84
- hf_token: str
85
- ):
86
- """
87
- Cohere Command R+ 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
88
- """
89
- model_name = "Cohere Command R+"
90
- try:
91
- client = get_client(model_name, hf_token)
92
- except ValueError as e:
93
- return f"오류: {str(e)}"
94
-
95
- messages = [
96
- {"role": "system", "content": system_message},
97
- {"role": "user", "content": question}
98
- ]
99
-
100
- try:
101
- response_full = client.chat_completion(
102
- messages,
103
- max_tokens=max_tokens,
104
- temperature=temperature,
105
- top_p=top_p,
106
- )
107
- assistant_message = response_full.choices[0].message.content
108
- return assistant_message
109
- except Exception as e:
110
- return f"오류가 발생했습니다: {str(e)}"
111
-
112
-
113
- def respond_chatgpt_qna(
114
- question: str,
115
- system_message: str,
116
- max_tokens: int,
117
- temperature: float,
118
- top_p: float,
119
- openai_token: str
120
- ):
121
- """
122
- ChatGPT(OpenAI) 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
123
- """
124
- if not openai_token:
125
- return "OpenAI API 토큰이 필요합니다."
126
-
127
- openai.api_key = openai_token
128
-
129
- messages = [
130
- {"role": "system", "content": system_message},
131
- {"role": "user", "content": question}
132
- ]
133
-
134
- try:
135
- response = openai.ChatCompletion.create(
136
- model="gpt-4o-mini", # 필요한 경우 변경
137
- messages=messages,
138
- max_tokens=max_tokens,
139
- temperature=temperature,
140
- top_p=top_p,
141
- )
142
- assistant_message = response.choices[0].message['content']
143
- return assistant_message
144
- except Exception as e:
145
- return f"오류가 발생했습니다: {str(e)}"
146
-
147
-
148
- def respond_deepseek_qna(
149
- question: str,
150
- system_message: str,
151
- max_tokens: int,
152
- temperature: float,
153
- top_p: float,
154
- deepseek_token: str
155
- ):
156
- """
157
- DeepSeek 모델을 이용해 한 번의 질문(question)에 대한 답변을 반환하는 함수.
158
- """
159
- if not deepseek_token:
160
- return "DeepSeek API 토큰이 필요합니다."
161
-
162
- openai.api_key = deepseek_token
163
- openai.api_base = "https://api.deepseek.com/v1"
164
-
165
- messages = [
166
- {"role": "system", "content": system_message},
167
- {"role": "user", "content": question}
168
- ]
169
-
170
- try:
171
- response = openai.ChatCompletion.create(
172
- model="deepseek-chat",
173
- messages=messages,
174
- max_tokens=max_tokens,
175
- temperature=temperature,
176
- top_p=top_p,
177
- )
178
- assistant_message = response.choices[0].message['content']
179
- return assistant_message
180
- except Exception as e:
181
- return f"오류가 발생했습니다: {str(e)}"
182
-
183
-
184
- def respond_claude_qna(
185
- question: str,
186
- system_message: str,
187
- max_tokens: int,
188
- temperature: float,
189
- top_p: float,
190
- claude_api_key: str
191
- ) -> str:
192
- """
193
- Claude API를 사용한 개선된 응답 생성 함수
194
- """
195
- if not claude_api_key:
196
- return "Claude API 토큰이 필요합니다."
197
-
198
- try:
199
- client = anthropic.Anthropic(api_key=claude_api_key)
200
 
201
- # 메시지 생성
202
- message = client.messages.create(
203
- model="claude-3-haiku-20240307",
204
- max_tokens=max_tokens,
205
- temperature=temperature,
206
- system=system_message,
207
- messages=[
208
- {
209
- "role": "user",
210
- "content": question
211
- }
212
- ]
213
- )
214
 
215
- return message.content[0].text
216
-
217
- except anthropic.APIError as ae:
218
- return f"Claude API 오류: {str(ae)}"
219
- except anthropic.RateLimitError:
220
- return "요청 한도를 초했습니다. 잠시 후 다시 시도해주세요."
221
- except Exception as e:
222
- return f"예상치 못한 오류가 발생했습니다: {str(e)}"
223
-
224
-
225
- #############################
226
- # [기본코드] UI 부분 - 수정/삭제 불가
227
- #############################
228
-
229
- with gr.Blocks() as demo:
230
- gr.Markdown("# LLM 플레이그라운드")
231
-
232
- # 토큰 비밀번호 입력창
233
- with gr.Row():
234
- token_password_box = gr.Textbox(
235
- label="토큰 비밀번호",
236
- type="password",
237
- placeholder="비밀번호를 입력하세요...",
238
- lines=1
239
- )
240
-
241
- # 토큰 입력창 (초기에는 숨김)
242
- with gr.Row(visible=False) as token_row:
243
- hf_token_box = gr.Textbox(
244
- label="HuggingFace 토큰",
245
- type="password",
246
- placeholder="HuggingFace API 토큰을 입력하세요..."
247
- )
248
- openai_token_box = gr.Textbox(
249
- label="OpenAI 토큰",
250
- type="password",
251
- placeholder="OpenAI API 토큰을 입력하세요..."
252
- )
253
- claude_token_box = gr.Textbox(
254
- label="Claude 토큰",
255
- type="password",
256
- placeholder="Claude API 토큰을 입력하세요...",
257
- show_copy_button=False
258
- )
259
- deepseek_token_box = gr.Textbox(
260
- label="DeepSeek 토큰",
261
- type="password",
262
- placeholder="DeepSeek API 토큰을 입력하세요..."
263
- )
264
-
265
- # 말투 바꾸기 탭
266
- with gr.Tab("말투 바꾸기"):
267
- tone_radio = gr.Radio(
268
- choices=["친근하게", "일반적인", "전문적인"],
269
- label="말투 선택",
270
- value="일반적인"
271
- )
272
-
273
- # 참조글 입력 탭
274
- with gr.Tab("참조글 입력"):
275
- ref_input1 = gr.Textbox(label="참조글 1", lines=5)
276
- ref_input2 = gr.Textbox(label="참조글 2", lines=5)
277
- ref_input3 = gr.Textbox(label="참조글 3", lines=5)
278
-
279
- #################
280
- # 일반 모델 탭
281
- #################
282
- with gr.Tab("일반 모델"):
283
- # 모델명 선택
284
- model_name = gr.Radio(
285
- choices=list(MODELS.keys()),
286
- label="Language Model (HuggingFace)",
287
- value="Zephyr 7B Beta"
288
- )
289
-
290
- # 입력1 ~ 입력5 (세로로 하나씩)
291
- input1 = gr.Textbox(label="입력1", lines=1)
292
- input2 = gr.Textbox(label="입력2", lines=1)
293
- input3 = gr.Textbox(label="입력3", lines=1)
294
- input4 = gr.Textbox(label="입력4", lines=1)
295
- input5 = gr.Textbox(label="입력5", lines=1)
296
-
297
- # 결과
298
- answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
299
-
300
- # 고급 설정 - System Message를 Max Tokens 위로 이동
301
- with gr.Accordion("고급 설정 (일반 모델)", open=False):
302
- system_message = gr.Textbox(
303
- value="""반드시 한글로 답변할 것.
304
- 너는 최고의 비서이다.
305
- 내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.
306
- """,
307
- label="System Message",
308
- lines=3
309
- )
310
- max_tokens = gr.Slider(minimum=0, maximum=2000, value=500, step=100, label="Max Tokens")
311
- temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
312
- top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
313
-
314
- submit_button = gr.Button("전송")
315
-
316
- def merge_and_call_hf(i1, i2, i3, i4, i5, m_name, mt, temp, top_p_, sys_msg, hf_token):
317
- # 입력1~5를 공백 기준으로 합쳐서 question 구성
318
- question = " ".join([i1, i2, i3, i4, i5])
319
- return respond_hf_qna(
320
- question=question,
321
- model_name=m_name,
322
- max_tokens=mt,
323
- temperature=temp,
324
- top_p=top_p_,
325
- system_message=sys_msg,
326
- hf_token=hf_token
327
- )
328
-
329
- submit_button.click(
330
- fn=merge_and_call_hf,
331
- inputs=[
332
- input1, input2, input3, input4, input5,
333
- model_name,
334
- max_tokens,
335
- temperature,
336
- top_p,
337
- system_message,
338
- hf_token_box
339
- ],
340
- outputs=answer_output
341
- )
342
-
343
- #################
344
- # Cohere Command R+ 탭
345
- #################
346
- with gr.Tab("Cohere Command R+"):
347
- cohere_input1 = gr.Textbox(label="입력1", lines=1)
348
- cohere_input2 = gr.Textbox(label="입력2", lines=1)
349
- cohere_input3 = gr.Textbox(label="입력3", lines=1)
350
- cohere_input4 = gr.Textbox(label="입력4", lines=1)
351
- cohere_input5 = gr.Textbox(label="입력5", lines=1)
352
-
353
- cohere_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
354
-
355
- with gr.Accordion("고급 설정 (Cohere)", open=False):
356
- cohere_system_message = gr.Textbox(
357
- value="""반드시 한글로 답변할 것.
358
- 너는 최고의 비서이다.
359
- 내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.
360
- """,
361
- label="System Message",
362
- lines=3
363
- )
364
- cohere_max_tokens = gr.Slider(minimum=100, maximum=10000, value=4000, step=100, label="Max Tokens")
365
- cohere_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
366
- cohere_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
367
-
368
- cohere_submit_button = gr.Button("전송")
369
-
370
- def merge_and_call_cohere(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, hf_token):
371
- question = " ".join([i1, i2, i3, i4, i5])
372
- return respond_cohere_qna(
373
- question=question,
374
- system_message=sys_msg,
375
- max_tokens=mt,
376
- temperature=temp,
377
- top_p=top_p_,
378
- hf_token=hf_token
379
- )
380
-
381
- cohere_submit_button.click(
382
- fn=merge_and_call_cohere,
383
- inputs=[
384
- cohere_input1, cohere_input2, cohere_input3, cohere_input4, cohere_input5,
385
- cohere_system_message,
386
- cohere_max_tokens,
387
- cohere_temperature,
388
- cohere_top_p,
389
- hf_token_box
390
- ],
391
- outputs=cohere_answer_output
392
- )
393
-
394
- #################
395
- # ChatGPT 탭
396
- #################
397
- with gr.Tab("gpt-4o-mini"):
398
- chatgpt_input1 = gr.Textbox(label="입력1", lines=1)
399
- chatgpt_input2 = gr.Textbox(label="입력2", lines=1)
400
- chatgpt_input3 = gr.Textbox(label="입력3", lines=1)
401
- chatgpt_input4 = gr.Textbox(label="입력4", lines=1)
402
- chatgpt_input5 = gr.Textbox(label="입력5", lines=1)
403
-
404
- chatgpt_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
405
-
406
- with gr.Accordion("고급 설정 (ChatGPT)", open=False):
407
- chatgpt_system_message = gr.Textbox(
408
- value="""반드시 한글로 답변할 것.
409
- 너는 ChatGPT, OpenAI에서 개발한 언어 모델이다.
410
- 내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
411
- """,
412
- label="System Message",
413
- lines=3
414
- )
415
- chatgpt_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
416
- chatgpt_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
417
- chatgpt_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
418
-
419
- chatgpt_submit_button = gr.Button("전송")
420
-
421
- def merge_and_call_chatgpt(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, openai_token):
422
- question = " ".join([i1, i2, i3, i4, i5])
423
- return respond_chatgpt_qna(
424
- question=question,
425
- system_message=sys_msg,
426
- max_tokens=mt,
427
- temperature=temp,
428
- top_p=top_p_,
429
- openai_token=openai_token
430
- )
431
-
432
- chatgpt_submit_button.click(
433
- fn=merge_and_call_chatgpt,
434
- inputs=[
435
- chatgpt_input1, chatgpt_input2, chatgpt_input3, chatgpt_input4, chatgpt_input5,
436
- chatgpt_system_message,
437
- chatgpt_max_tokens,
438
- chatgpt_temperature,
439
- chatgpt_top_p,
440
- openai_token_box
441
- ],
442
- outputs=chatgpt_answer_output
443
- )
444
-
445
- #################
446
- # Claude 탭
447
- #################
448
- with gr.Tab("claude-3-haiku"):
449
- claude_input1 = gr.Textbox(label="입력1", lines=1)
450
- claude_input2 = gr.Textbox(label="입력2", lines=1)
451
- claude_input3 = gr.Textbox(label="입력3", lines=1)
452
- claude_input4 = gr.Textbox(label="입력4", lines=1)
453
- claude_input5 = gr.Textbox(label="입력5", lines=1)
454
-
455
- claude_answer_output = gr.Textbox(label="결과", interactive=False, lines=5)
456
-
457
- with gr.Accordion("고급 설정 (Claude)", open=False):
458
- claude_system_message = gr.Textbox(
459
- label="System Message",
460
- value="""반드시 한글로 답변할 것.
461
- 너는 Anthropic에서 개발한 클로드이다.
462
- 최대한 정확하고 친절하게 답변하라.""",
463
- lines=3
464
- )
465
- claude_max_tokens = gr.Slider(
466
- minimum=100,
467
- maximum=4000,
468
- value=2000,
469
- step=100,
470
- label="Max Tokens"
471
- )
472
- claude_temperature = gr.Slider(
473
- minimum=0.1,
474
- maximum=2.0,
475
- value=0.7,
476
- step=0.05,
477
- label="Temperature"
478
- )
479
- claude_top_p = gr.Slider(
480
- minimum=0.1,
481
- maximum=1.0,
482
- value=0.95,
483
- step=0.05,
484
- label="Top-p"
485
- )
486
-
487
- claude_submit_button = gr.Button("전송")
488
-
489
- def merge_and_call_claude(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, claude_key):
490
- question = " ".join([i1, i2, i3, i4, i5])
491
- return respond_claude_qna(
492
- question=question,
493
- system_message=sys_msg,
494
- max_tokens=mt,
495
- temperature=temp,
496
- top_p=top_p_,
497
- claude_api_key=claude_key
498
- )
499
-
500
- claude_submit_button.click(
501
- fn=merge_and_call_claude,
502
- inputs=[
503
- claude_input1, claude_input2, claude_input3, claude_input4, claude_input5,
504
- claude_system_message,
505
- claude_max_tokens,
506
- claude_temperature,
507
- claude_top_p,
508
- claude_token_box
509
- ],
510
- outputs=claude_answer_output
511
- )
512
-
513
- #################
514
- # DeepSeek 탭
515
- #################
516
- with gr.Tab("DeepSeek-V3"):
517
- deepseek_input1 = gr.Textbox(label="입력1", lines=1)
518
- deepseek_input2 = gr.Textbox(label="입력2", lines=1)
519
- deepseek_input3 = gr.Textbox(label="입력3", lines=1)
520
- deepseek_input4 = gr.Textbox(label="입력4", lines=1)
521
- deepseek_input5 = gr.Textbox(label="입력5", lines=1)
522
-
523
- deepseek_answer_output = gr.Textbox(label="결과", lines=5, interactive=False)
524
-
525
- with gr.Accordion("고급 설정 (DeepSeek)", open=False):
526
- deepseek_system_message = gr.Textbox(
527
- value="""반드시 한글로 답변할 것.
528
- 너는 DeepSeek-V3, 최고의 언어 모델이다.
529
- 내가 요구하는 것을 최대한 자세하고 정확하게 답변하라.
530
- """,
531
- label="System Message",
532
- lines=3
533
- )
534
- deepseek_max_tokens = gr.Slider(minimum=100, maximum=4000, value=2000, step=100, label="Max Tokens")
535
- deepseek_temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature")
536
- deepseek_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P")
537
-
538
- deepseek_submit_button = gr.Button("전송")
539
-
540
- def merge_and_call_deepseek(i1, i2, i3, i4, i5, sys_msg, mt, temp, top_p_, deepseek_token):
541
- question = " ".join([i1, i2, i3, i4, i5])
542
- return respond_deepseek_qna(
543
- question=question,
544
- system_message=sys_msg,
545
- max_tokens=mt,
546
- temperature=temp,
547
- top_p=top_p_,
548
- deepseek_token=deepseek_token
549
- )
550
-
551
- deepseek_submit_button.click(
552
- fn=merge_and_call_deepseek,
553
- inputs=[
554
- deepseek_input1, deepseek_input2, deepseek_input3, deepseek_input4, deepseek_input5,
555
- deepseek_system_message,
556
- deepseek_max_tokens,
557
- deepseek_temperature,
558
- deepseek_top_p,
559
- deepseek_token_box
560
- ],
561
- outputs=deepseek_answer_output
562
- )
563
-
564
- # 토큰 비밀번호 입력 후 토큰 입력창 표시
565
- def toggle_token_row(password):
566
- if password == "123":
567
- return gr.Row.update(visible=True)
568
- else:
569
- return gr.Row.update(visible=False)
570
-
571
- token_password_box.submit(
572
- fn=toggle_token_row,
573
- inputs=token_password_box,
574
- outputs=token_row
575
- )
576
-
577
- #############################
578
- # 메인 실행부
579
- #############################
580
- if __name__ == "__main__":
581
- demo.launch()
 
1
+ from flask import Flask, render_template, request
 
 
 
 
2
 
3
+ app = Flask(__name__)
 
 
4
 
5
+ # 모델에 대한 처리 함수 (가상)
6
def 일반모델_처리(text):
    """Dummy handler for the general model: echo the input with a label."""
    return "일반모델 처리 결과: {}".format(text)
 
 
 
 
 
 
8
 
9
def 코히어모델_처리(text):
    """Dummy handler for the Cohere model: echo the input with a label."""
    return "코히어모델 처리 결과: {}".format(text)
11
 
12
def 지피티모델_처리(text):
    """Dummy handler for the GPT model: echo the input with a label."""
    return "지피티모델 처리 결과: {}".format(text)
 
 
 
 
 
14
 
15
def 클로드모델_처리(text):
    """Dummy handler for the Claude model: echo the input with a label."""
    return "클로드모델 처리 결과: {}".format(text)
 
 
 
 
 
17
 
18
def 딥시크모델_처리(text):
    """Dummy handler for the DeepSeek model: echo the input with a label."""
    return "딥시크모델 처리 결과: {}".format(text)
20
 
21
@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the main page.

    GET:  render the input form (index.html).
    POST: run every model handler on the submitted 'reference_text'
          form field and render the results page (result.html).
    """
    if request.method == 'POST':
        # NOTE: indexing request.form raises a 400 Bad Request if the
        # 'reference_text' field is missing from the submitted form.
        input_text = request.form['reference_text']

        # Run all model handlers on the same input.
        일반모델_결과 = 일반모델_처리(input_text)
        코히어모델_결과 = 코히어모델_처리(input_text)
        지피티모델_결과 = 지피티모델_처리(input_text)
        클로드모델_결과 = 클로드모델_처리(input_text)
        딥시크모델_결과 = 딥시크모델_처리(input_text)

        # Pass the results to result.html.
        # BUG FIX: the Claude keyword was misspelled '클로드모델_결='
        # (missing '과'), so the template variable '클로드모델_결과'
        # was never populated; it now matches the other four kwargs.
        return render_template('result.html',
                               일반모델_결과=일반모델_결과,
                               코히어모델_결과=코히어모델_결과,
                               지피티모델_결과=지피티모델_결과,
                               클로드모델_결과=클로드모델_결과,
                               딥시크모델_결과=딥시크모델_결과)

    return render_template('index.html')
42
+
43
# Script entry point: start Flask's built-in development server.
# NOTE(review): debug=True enables the interactive debugger and
# auto-reloader — suitable for local development only, not production.
if __name__ == '__main__':
    app.run(debug=True)