kijeoung commited on
Commit
8b5a323
·
verified ·
1 Parent(s): 08d8888

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -1
app.py CHANGED
@@ -1,10 +1,46 @@
1
  import gradio as gr
2
  import os
 
3
 
4
  #############################
5
  # [블로그 생성기 - Cohere Command R+]
6
  #############################
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  # 블로그 생성 함수 정의
9
  def generate_blog(tone, ref1, ref2, ref3):
10
  """
@@ -19,7 +55,7 @@ def generate_blog(tone, ref1, ref2, ref3):
19
  # 프롬프트 구성
20
  prompt = f"말투: {tone}\n참조문 1: {ref1}\n참조문 2: {ref2}\n참조문 3: {ref3}"
21
 
22
- # 응답 생성 (respond_cohere_qna는 별도 함수로 가정)
23
  return respond_cohere_qna(
24
  question=prompt,
25
  system_message=SYSTEM_MESSAGE,
 
1
  import gradio as gr
2
  import os
3
+ from huggingface_hub import InferenceClient
4
 
5
  #############################
6
  # [블로그 생성기 - Cohere Command R+]
7
  #############################
8
 
9
# Cohere Command R+ model ID on the HuggingFace Hub
COHERE_MODEL = "CohereForAI/c4ai-command-r-plus-08-2024"
# Default system prompt (Korean: "Please write a blog post.")
SYSTEM_MESSAGE = "블로그 글을 작성해주세요."
# Default sampling parameters forwarded to chat_completion
MAX_TOKENS = 4000
TEMPERATURE = 0.7
TOP_P = 0.95
15
+
16
def get_client(hf_token):
    """Build a HuggingFace InferenceClient bound to the Cohere model.

    Args:
        hf_token: HuggingFace API token used to authenticate the client.

    Returns:
        An ``InferenceClient`` targeting ``COHERE_MODEL``.

    Raises:
        ValueError: if *hf_token* is empty or ``None``.
    """
    if hf_token:
        return InferenceClient(COHERE_MODEL, token=hf_token)
    raise ValueError("HuggingFace API 토큰이 필요합니다.")
24
+
25
def respond_cohere_qna(question, system_message, max_tokens, temperature, top_p, hf_token):
    """Ask the Cohere model a single question and return its reply text.

    Builds a two-message chat (system prompt + user question), forwards the
    sampling parameters to the inference endpoint, and extracts the content
    of the first returned choice.

    Args:
        question: The user prompt to answer.
        system_message: System-role instruction prepended to the chat.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        hf_token: HuggingFace API token (validated by ``get_client``).

    Returns:
        The assistant's reply as a string.
    """
    inference = get_client(hf_token)

    # Assemble the chat turn by turn.
    chat = [{"role": "system", "content": system_message}]
    chat.append({"role": "user", "content": question})

    result = inference.chat_completion(
        messages=chat,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    first_choice = result.choices[0]
    return first_choice.message.content
43
+
44
  # 블로그 생성 함수 정의
45
  def generate_blog(tone, ref1, ref2, ref3):
46
  """
 
55
  # 프롬프트 구성
56
  prompt = f"말투: {tone}\n참조문 1: {ref1}\n참조문 2: {ref2}\n참조문 3: {ref3}"
57
 
58
+ # 응답 생성
59
  return respond_cohere_qna(
60
  question=prompt,
61
  system_message=SYSTEM_MESSAGE,