Browen0311 commited on
Commit
e5cd568
·
verified ·
1 Parent(s): 34c33dc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -1
app.py CHANGED
@@ -29,7 +29,108 @@ MODEL_CONFIGS = {
29
  }
30
  }
31
 
32
- [... 其他函數保持不變 ...]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
  # 模型選擇切換函數
35
  def update_model_choices(provider):
 
29
  }
30
  }
31
 
32
def get_llm_response(prompt, provider, model):
    """Send *prompt* to the configured LLM provider and return the reply text.

    Args:
        prompt: User-level prompt text (the essay-grading request).
        provider: "openai" routes to the OpenAI client; anything else
            falls through to the Groq client.
        model: Model identifier; also keys into MODEL_CONFIGS for
            per-model keyword arguments.

    Returns:
        The assistant message content (str) from the first choice.
    """
    # Both providers consume the same chat-message structure, so build it once.
    messages = [
        {"role": "system", "content": "你是一位資深的國文作文評閱委員,請依據提供的評分規準進行評分。"},
        {"role": "user", "content": prompt},
    ]

    if provider == "openai":
        response = openai_client.chat.completions.create(
            model=model,
            messages=messages,
            **MODEL_CONFIGS["openai"][model],
        )
        return response.choices[0].message.content

    # Non-"openai" providers are treated as Groq, mirroring the original branch.
    completion = groq_client.chat.completions.create(
        model=model,
        messages=messages,
        **MODEL_CONFIGS["groq"][model],
        stream=False,
        top_p=1,
        stop=None,
    )
    return completion.choices[0].message.content
57
def evaluate_essay(message, additional_prompt, provider, model):
    """Grade an essay on four weighted criteria via the selected LLM.

    For each criterion the LLM is asked for a letter grade (三等九級制) and a
    short comment; grades are mapped to numeric scores, weighted, and summed.

    Args:
        message: The essay text to grade.
        additional_prompt: Optional extra grading instructions ('' / None allowed).
        provider: LLM provider name, forwarded to get_llm_response.
        model: Model identifier, forwarded to get_llm_response.

    Returns:
        (history, markdown) where history is a list of (speaker, text) chat
        tuples and markdown is a gr.Markdown with the total score, or an
        error pair if grading fails.
    """
    if not message.strip():
        return [], gr.Markdown("### 請輸入作文內容進行評分")

    criteria = {
        '題旨發揮': {'weight': 0.4, 'max_score': 40},
        '資料掌握': {'weight': 0.2, 'max_score': 20},
        '結構安排': {'weight': 0.2, 'max_score': 20},
        '字句運用': {'weight': 0.2, 'max_score': 20}
    }

    grade_scores = {
        'A+': 95, 'A': 90, 'A-': 85,
        'B+': 80, 'B': 75, 'B-': 70,
        'C+': 65, 'C': 60, 'C-': 55,
        '0': 0
    }

    try:
        history = []
        total_score = 0
        history.append(("作文內容:", message))
        history.append(("", f"正在使用 {provider} ({model}) 進行評分分析..."))

        all_feedback = {}

        for criterion, details in criteria.items():
            prompt = f"""評估以下作文的{criterion}(權重{details['weight']*100}%):

作文內容:
{message}

{additional_prompt if additional_prompt else ''}

請依三等九級制(A+、A、A-、B+、B、B-、C+、C、C-)評分,並提供簡短評語。
如果是缺考、未作答、完全文不對題或作答內容完全照抄試題,請給予0分。

請按以下格式回覆:
等第:
評語:"""

            result = get_llm_response(prompt, provider, model)
            grade = '0'
            comment = ""

            # BUGFIX: the prompt asks for full-width colons (等第:), but the
            # old code only split on ASCII ':' — replies using ':' were never
            # parsed and silently scored 0. Normalize ':' to ':' per line so
            # one split handles both. Also, do NOT lowercase the whole reply
            # (the old result.lower()) — the grade is uppercased explicitly,
            # and lowering corrupted the case of the extracted comment.
            for raw_line in result.split('\n'):
                line = raw_line.replace(':', ':')
                if '等第:' in line:
                    grade_text = line.split(':')[-1].strip().upper()
                    if grade_text in grade_scores:
                        grade = grade_text
                elif '評語:' in line:
                    comment = line.split(':')[-1].strip()

            # Grade → percentage → weighted contribution out of max_score.
            weighted_score = (grade_scores[grade] / 100) * details['max_score']
            total_score += weighted_score

            feedback = f"### {criterion}\n"
            feedback += f"- **等第**:{grade}\n"
            feedback += f"- **得分**:{weighted_score:.1f}/{details['max_score']}\n"
            feedback += f"- **評語**:{comment}\n"

            all_feedback[criterion] = feedback

        # Emit per-criterion feedback in the fixed criteria order.
        for criterion in criteria:
            history.append(("", all_feedback[criterion]))

        total_score_display = f"""
# 總評分結果
## 使用模型:{provider} ({model})
## 總分:{total_score:.1f}/100
"""

        return history, gr.Markdown(total_score_display)

    except Exception as e:
        # Surface the failure in the chat pane rather than crashing the UI.
        return [("", f"評分過程發生錯誤:{str(e)}")], gr.Markdown("### ❌ 評分失敗")
134
 
135
  # 模型選擇切換函數
136
  def update_model_choices(provider):