Spaces:
Running
Running
File size: 11,194 Bytes
a9fb7e9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 |
"""
Configuration file for the Ling & Ring Playground application.
This file centralizes all the configuration variables, such as API endpoints,
API keys, and system prompts for different functionalities.
"""
import os
# --- API Configuration ---
# Layered configuration lookup:
#   1. Prefer credentials defined in a git-ignored `local.py` (developer setup).
#   2. Otherwise read them from the process environment, which is how
#      production deployments (e.g. Hugging Face Spaces) supply secrets.
try:
    # Developers create a local.py next to this file, e.g.:
    #   ANTCHAT_BASE_URL = "http://your-local-endpoint/v1"
    #   ANTCHAT_API_KEY = "your-local-api-key"
    from local import ANTCHAT_API_KEY, ANTCHAT_BASE_URL
    print("✅ Loaded configuration from local.py")
except ImportError:
    # No local override file: fall back to environment variables.
    print("🤔 `local.py` not found. Attempting to load configuration from environment variables.")
    ANTCHAT_BASE_URL = os.getenv("ANTCHAT_BASE_URL")
    ANTCHAT_API_KEY = os.getenv("ANTCHAT_API_KEY")

# Surface missing credentials at import time instead of failing later,
# deep inside an API call.
if not (ANTCHAT_BASE_URL and ANTCHAT_API_KEY):
    print("⚠️ Warning: ANTCHAT_BASE_URL or ANTCHAT_API_KEY is not set. The application may not function correctly.")
# --- System Prompts ---

# Placeholder text shown in the Chat tab's system-prompt input box.
CHAT_SYSTEM_PROMPT_PLACEHOLDER = "e.g., You are a helpful assistant."

# Default prompt for the Code Generation tab (language-agnostic).
CODE_SYSTEM_PROMPT = "You are an expert code generation assistant. Generate clean, efficient code based on the user's request. Only output the code itself inside a markdown block. Do not add any other explanation."

# Language-specific variants of the code-generation prompt; "general"
# simply reuses the default above.
CODE_SYSTEM_PROMPTS = dict(
    html="You are an expert HTML/CSS/JavaScript developer. Generate clean, semantic HTML code with inline CSS and JavaScript. Only output the complete HTML code inside a markdown block. Do not add any other explanation.",
    python="You are an expert Python developer. Generate clean, efficient Python code. Only output the code inside a markdown block with python syntax. Do not add any other explanation.",
    javascript="You are an expert JavaScript developer. Generate clean, efficient JavaScript code. Only output the code inside a markdown block with javascript syntax. Do not add any other explanation.",
    sql="You are an expert SQL developer. Generate clean, efficient SQL queries. Only output the SQL code inside a markdown block. Do not add any other explanation.",
    general=CODE_SYSTEM_PROMPT,
)

# Prompt for the Web Search tab.
SEARCH_SYSTEM_PROMPT = "You are an expert web search assistant. You will be provided with a user query. Perform a web search and provide a concise summary of the findings, including key points and source links."

# Prompt used to turn a free-form description into numbered workflow steps.
WORKFLOW_GENERATE_SYSTEM_PROMPT = "You are a workflow analysis agent. Analyze the user's description and break it down into a numbered list of executable steps. Be precise and clear."

# Prompt used while walking the user through a generated workflow.
WORKFLOW_EXECUTE_SYSTEM_PROMPT = "You are a workflow execution assistant. Your goal is to guide the user step-by-step through the predefined workflow. At each step, clearly state the task and ask for confirmation or necessary input to proceed."
# --- Model Specifications ---
# Registry of selectable chat models, keyed by their online model ID.
# Schema of each entry:
#   model_id:         the ID actually sent to the API (may be rewritten by the
#                     local mapping override further down in this file)
#   display_name:     label shown in the UI model picker
#   description:      short user-facing blurb (Chinese)
#   prompt_scenarios: list of example scenarios, each with a title, a
#                     system_prompt, and message_examples used to pre-fill
#                     the chat input
# NOTE(review): the 103B models are described as "十亿级" (billion-scale);
# "千亿级" may be more accurate for 103B — confirm intended wording.
CHAT_MODEL_SPECS = {
    "inclusionai/ling-1t": {
        "model_id": "inclusionai/ling-1t",
        "display_name": "🧠 Ling-1T (1T)",
        "description": "一款万亿级参数的大语言模型,为追求极致性能和高流畅度的复杂自然语言理解与生成任务而设计。",
        "prompt_scenarios": [
            {
                "title": "深度分析报告撰写",
                "system_prompt": "你是一位资深的行业分析师,能够撰写逻辑清晰、数据充分、观点独到的深度分析报告。",
                "message_examples": [
                    "撰写一篇关于人工智能在医疗领域应用的深度分析报告,至少800字。",
                    "分析当前宏观经济形势,并预测未来一年的发展趋势。",
                    "为一家新成立的科技公司制定一份详细的品牌推广策略。"
                ]
            },
            {
                "title": "莎士比亚风格文案",
                "system_prompt": "你是一位模仿大师,能够以威廉·莎士比亚的风格和口吻进行文学创作。",
                "message_examples": [
                    "以莎士比亚的风格,写一段关于“代码”的独白。",
                    "假如哈姆雷特是一个程序员,他会如何抱怨一个难缠的 bug?",
                    "把“用户体验”这个词用十四行诗的形式表达出来。"
                ]
            }
        ]
    },
    "inclusionai/ling-flash-2.0": {
        "model_id": "inclusionai/ling-flash-2.0",
        "display_name": "🧠 Ling-flash-2.0 (103B)",
        "description": "一款性能卓越的十亿级参数模型,专为需要高速响应和复杂指令遵循的场景优化。",
        "prompt_scenarios": [
            {
                "title": "技术文档撰写",
                "system_prompt": "你是一位专业的技术作家,能够清晰、准确地解释复杂的技术概念。",
                "message_examples": [
                    "为一段新的 API 端点编写清晰的文档。",
                    "解释一下什么是 'Transformer' 架构。",
                    "如何为开源项目编写一份贡献指南?"
                ]
            },
            {
                "title": "创意头脑风暴",
                "system_prompt": "你是一位充满创意的伙伴,可以进行头脑风暴并提供新颖的想法。",
                "message_examples": [
                    "为一个新的播客想 5 个吸引人的名字。",
                    "我应该为我的博客写些什么内容?",
                    "想一个关于时间旅行的短篇故事点子。"
                ]
            }
        ]
    },
    "inclusionai/ring-flash-2.0": {
        "model_id": "inclusionai/ring-flash-2.0",
        "display_name": "💍 Ring-flash-2.0 (103B)",
        "description": "一款十亿级参数的推理模型,在性能和成本之间取得了很好的平衡,适合需要逐步思考或生成代码的通用任务。",
        "prompt_scenarios": [
            {
                "title": "旅行规划专家",
                "system_prompt": "你是一位经验丰富的旅行规划师,精通全球各地的旅行路线、交通和预算规划。",
                "message_examples": [
                    "规划一个为期五天的日本东京自由行,包含详细的每日行程、交通和预算。",
                    "我应该如何选择我的第一把电吉他?请给出步骤和建议。",
                    "为我的周末家庭聚餐推荐三个菜谱。"
                ]
            },
            {
                "title": "Python 脚本生成器",
                "system_prompt": "你是一位 Python 编程专家,能够根据需求生成高质量、可执行的 Python 脚本。",
                "message_examples": [
                    "生成一个 Python 脚本,监控网站价格变化并在降价时发邮件提醒。",
                    "写一个 Python 函数,用于计算两个日期之间相差了多少天。",
                    "用 Python 实现一个简单的命令行计算器。"
                ]
            }
        ]
    },
    "inclusionai/ling-mini-2.0": {
        "model_id": "inclusionai/ling-mini-2.0",
        "display_name": "🧠 Ling-mini-2.0 (16B)",
        "description": "一款轻量级对话模型,经过优化,可在消费级硬件上高效运行,非常适合移动端或本地化部署场景。",
        "prompt_scenarios": [
            {
                "title": "高效邮件助手",
                "system_prompt": "你是一位专业的行政助理,擅长撰写清晰、简洁、专业的电子邮件。",
                "message_examples": [
                    "给我写一封简短的邮件,提醒团队成员明天上午10点开会。",
                    "草拟一封邮件,向客户询问项目进展。",
                    "帮我写一封得体的拒绝信,回复一个不合适的合作邀请。"
                ]
            },
            {
                "title": "文本摘要与翻译",
                "system_prompt": "你是一位语言专家,能够快速准确地进行文本摘要和多语言翻译。",
                "message_examples": [
                    "总结这篇新闻的主要内容,不超过三句话。",
                    "将这段英文翻译成中文:'Gradio is an open-source Python library...'",
                    "推荐三部适合周末看的科幻电影。"
                ]
            }
        ]
    },
    "inclusionai/ring-mini-2.0": {
        "model_id": "inclusionai/ring-mini-2.0",
        "display_name": "💍 Ring-mini-2.0 (3B)",
        "description": "一款经过量化、极致高效的推理模型,为速度和效率要求严苛的资源受限环境(如边缘计算)而设计。",
        "prompt_scenarios": [
            {
                "title": "生活日常助手",
                "system_prompt": "你是一位乐于助人的生活助手,可以处理各种日常请求。",
                "message_examples": [
                    "帮我设置一个25分钟的番茄钟。",
                    "在我的购物清单里加入牛奶和面包。",
                    "查询今天北京的天气。"
                ]
            },
            {
                "title": "简单代码片段",
                "system_prompt": "你是一位代码片段生成器,为常见的编程问题提供简洁、正确的代码示例。",
                "message_examples": [
                    "提供一个用 JavaScript 实现的 GET 请求示例。",
                    "如何用 CSS 让一个 div 水平居中?",
                    "从1数到10。"
                ]
            }
        ]
    }
}
# --- Local Model ID Mapping Override ---
# local.py may optionally expose get_local_model_id_map(), returning a dict
# that maps the online model IDs above to the IDs served by a local
# deployment. This lets developers test against differently-named local
# models without touching the core application code.
try:
    from local import get_local_model_id_map

    overrides = get_local_model_id_map()
    for online_id, spec in CHAT_MODEL_SPECS.items():
        if online_id not in overrides:
            continue
        spec['model_id'] = overrides[online_id]
        print(f"🔄 Overrode model ID for '{online_id}': '{online_id}' -> '{spec['model_id']}'")
except ImportError:
    # Expected in production: local.py is absent (or lacks the function).
    pass
except Exception as e:
    # Best-effort override — a broken mapping must not prevent startup.
    print(f"⚠️ Warning: Failed to apply local model ID mapping. Error: {e}")
|