Upload 4 files
app.py CHANGED
@@ -55,7 +55,7 @@ safety_settings = [
     {
         "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
         "threshold": "BLOCK_NONE"
-    }
+    }
 ]
 
 class APIKeyManager:
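This hunk shows only the last entry of safety_settings. For context, the google-generativeai SDK takes a list of category/threshold dicts; below is a minimal sketch of how a fully permissive list like this one is usually assembled. Only HARM_CATEGORY_DANGEROUS_CONTENT is visible above, so the other category names are assumptions (they are the standard Gemini harm categories, not necessarily the exact set in app.py):

# Sketch: a "block nothing" safety_settings list for google.generativeai.
# Assumption: app.py covers the four standard harm categories; only the
# last one actually appears in this hunk.
HARM_CATEGORIES = [
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
]

safety_settings = [
    {"category": category, "threshold": "BLOCK_NONE"}
    for category in HARM_CATEGORIES
]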
@@ -121,7 +121,7 @@ GEMINI_MODELS = [
 
 @app.route('/')
 def index():
-    main_content = "Moonfanz Reminiproxy v2.
+    main_content = "Moonfanz Reminiproxy v2.2.0 2025-01-11"
     html_template = """
     <!DOCTYPE html>
     <html>
@@ -285,8 +285,13 @@ def chat_completions():
     hint = "流式" if stream else "非流"
     logger.info(f"\n{model} [{hint}] → {current_api_key[:11]}...")
 
-    gemini_history, user_message, error_response = func.process_messages_for_gemini(messages)
-
+    gemini_history, user_message, system_instruction, error_response = func.process_messages_for_gemini(messages)
+    # r_g = json.dumps(gemini_history, indent=4, ensure_ascii=False).replace('\\n', '\n')
+    # r_u = json.dumps(user_message, indent=4, ensure_ascii=False).replace('\\n', '\n')
+    # r_s = json.dumps(system_instruction, indent=4, ensure_ascii=False).replace('\\n', '\n')
+    # logger.info(f"历史对话: {r_g}")
+    # logger.info(f"用户消息: {r_u}")
+    # logger.info(f"系统指令: {r_s}")
     if error_response:
         logger.error(f"处理输入消息时出错↙\n {error_response}")
         return jsonify(error_response), 400
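The commented-out additions are debug logging for the three structures returned by func.process_messages_for_gemini: 历史对话 (conversation history), 用户消息 (user message), and 系统指令 (system instruction). json.dumps(..., ensure_ascii=False) keeps Chinese text readable in the log, and replacing the escaped '\n' with a real newline lets multi-line prompts print as actual lines. A standalone sketch of the same trick, with a hypothetical payload:

import json
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def dump_readable(obj) -> str:
    # ensure_ascii=False leaves non-ASCII (e.g. Chinese) characters as-is;
    # swapping the two-character escape '\n' for a real newline makes
    # multi-line message bodies print as lines instead of one long string.
    return json.dumps(obj, indent=4, ensure_ascii=False).replace('\\n', '\n')

# Hypothetical payload in the Gemini history format used by this proxy.
history = [{"role": "user", "parts": [{"text": "line one\nline two"}]}]
logger.info("history: %s", dump_readable(history))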
@@ -310,7 +315,8 @@ def chat_completions():
     gen_model = genai.GenerativeModel(
         model_name=model,
         generation_config=generation_config,
-        safety_settings=safety_settings
+        safety_settings=safety_settings,
+        system_instruction=system_instruction
     )
 
     try:
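The second added argument is the point of this commit: system_instruction is a constructor parameter of genai.GenerativeModel in recent google-generativeai releases, and it accepts either a plain string or a content dict like the {"parts": [{"text": ...}]} value that func.process_messages_for_gemini now returns. A minimal sketch, with a placeholder API key and model name:

import google.generativeai as genai

# Placeholders for illustration; the proxy rotates real keys via APIKeyManager.
genai.configure(api_key="YOUR_API_KEY")

gen_model = genai.GenerativeModel(
    model_name="gemini-1.5-pro",
    generation_config={"temperature": 0.7},
    safety_settings=[
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
    ],
    # A plain string works here too; the proxy passes the
    # {"parts": [{"text": ...}]} dict built in func.py.
    system_instruction={"parts": [{"text": "You are a helpful assistant."}]},
)

response = gen_model.generate_content("Hello")
print(response.text)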
@@ -416,17 +422,16 @@ def chat_completions():
     try:
         text_content = response.text
     except (AttributeError, IndexError, TypeError, ValueError) as e:
-        if "response.candidates
-            logger.error(f"
+        if "response.candidates" in str(e) or "response.text" in str(e):
+            logger.error(f"用户输入被AI安全过滤器阻止")
             return jsonify({
                 'error': {
-                    'message': '
+                    'message': '用户输入被AI安全过滤器阻止',
                     'type': 'prompt_blocked_error',
                     'details': str(e)
                 }
             }), 400
         else:
-            logger.error(f"AI响应处理失败")
             return jsonify({
                 'error': {
                     'message': 'AI响应处理失败',
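Background on why this handler matches on the exception text: in the google-generativeai SDK, response.text raises ValueError when the prompt was blocked and there is no candidate part to read, and the raised message mentions response.candidates. The two Chinese strings mean "the user input was blocked by the AI safety filter" (用户输入被AI安全过滤器阻止) and "failed to process the AI response" (AI响应处理失败). A sketch of a more direct check via prompt_feedback, which avoids string-matching on the exception message (attribute names are from the SDK; the falsy test assumes BLOCK_REASON_UNSPECIFIED compares false):

class PromptBlockedError(Exception):
    """Raised when Gemini refuses the prompt outright (safety block)."""

def extract_text(response):
    # Check the block reason before touching response.text, since .text
    # raises ValueError when no candidate content exists.
    feedback = getattr(response, "prompt_feedback", None)
    if feedback is not None and getattr(feedback, "block_reason", None):
        raise PromptBlockedError(f"Prompt blocked: {feedback.block_reason}")
    return response.text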
func.py CHANGED
@@ -37,19 +37,29 @@ def authenticate_request(request):
 def process_messages_for_gemini(messages):
     gemini_history = []
     errors = []
+    system_instruction_text = ""
+    is_system_phase = True
     for message in messages:
         role = message.get('role')
         content = message.get('content')
 
         if isinstance(content, str):
-            if role == 'system':
-
-
-
-
-                gemini_history.append({"role": "model", "parts": [content]})
+            if is_system_phase and role == 'system':
+                if system_instruction_text:
+                    system_instruction_text += "\n" + content
+                else:
+                    system_instruction_text = content
             else:
-
+                is_system_phase = False
+
+                if role == 'user':
+                    gemini_history.append({"role": "user", "parts": [{"text": content}]})
+                elif role == 'system':
+                    gemini_history.append({"role": "user", "parts": [{"text": content}]})
+                elif role == 'assistant':
+                    gemini_history.append({"role": "model", "parts": [{"text": content}]})
+                else:
+                    errors.append(f"Invalid role: {role}")
         elif isinstance(content, list):
             parts = []
             for item in content:
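Net effect of the rewritten branch: system messages at the head of the conversation are folded into system_instruction_text, joined with newlines; from the first non-system message onward, user turns and any late system turns map to Gemini role "user", assistant maps to "model", and parts entries change from bare strings to [{"text": ...}] dicts. A worked example with hypothetical input:

# Hypothetical OpenAI-style input:
messages = [
    {"role": "system", "content": "Answer briefly."},
    {"role": "system", "content": "Answer in English."},
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "system", "content": "Reminder: be brief."},  # after the first non-system turn
]

# Expected per the branch above:
# system_instruction_text == "Answer briefly.\nAnswer in English."
# gemini_history == [
#     {"role": "user",  "parts": [{"text": "Hi"}]},
#     {"role": "model", "parts": [{"text": "Hello!"}]},
#     {"role": "user",  "parts": [{"text": "Reminder: be brief."}]},  # late system turns become user turns
# ]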
@@ -103,6 +113,6 @@ def process_messages_for_gemini(messages):
     user_message = {"role": "user", "parts": [""]}
 
     if errors:
-        return gemini_history, user_message, (jsonify({'error': errors}), 400)
+        return gemini_history, user_message, {"parts": [{"text": system_instruction_text}]}, (jsonify({'error': errors}), 400)
     else:
-        return gemini_history, user_message, None
+        return gemini_history, user_message, {"parts": [{"text": system_instruction_text}]}, None
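The system instruction is wrapped in a {"parts": [{"text": ...}]} dict even when no system message was seen, so callers always receive the same shape. A sketch of the caller-side unpacking, mirroring chat_completions in app.py; the empty-text guard is an assumption about SDK tolerance, not something this diff does:

import func  # the module changed in this diff

def build_model_inputs(messages):
    # Mirrors the four-value unpacking added to app.py.
    gemini_history, user_message, system_instruction, error_response = (
        func.process_messages_for_gemini(messages)
    )
    if error_response:
        return None, error_response

    # An all-user conversation yields {"parts": [{"text": ""}]}; normalizing
    # that to None before constructing genai.GenerativeModel is a defensive
    # guard (assumption: an empty text part may be rejected by the SDK).
    if not system_instruction["parts"][0]["text"]:
        system_instruction = None
    return (gemini_history, user_message, system_instruction), None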