Spaces:
Sleeping
Sleeping
Updated requirements
Browse files- app.py +42 -8
- pyproject.toml +1 -0
- requirements.txt +5 -0
- system_prompt.md +109 -0
- uv.lock +18 -0
app.py
CHANGED
|
@@ -5,11 +5,14 @@ from pydantic import BaseModel, ValidationError
|
|
| 5 |
from typing import List, Optional
|
| 6 |
|
| 7 |
from torch import cuda
|
| 8 |
-
from transformers import
|
|
|
|
|
|
|
| 9 |
|
| 10 |
from hashlib import sha256
|
| 11 |
from huggingface_hub import login
|
| 12 |
from dotenv import load_dotenv
|
|
|
|
| 13 |
|
| 14 |
import os
|
| 15 |
import uvicorn
|
|
@@ -28,10 +31,17 @@ if hf_token is not None:
|
|
| 28 |
|
| 29 |
# Configurable model identifier
|
| 30 |
model_name = os.getenv("HF_MODEL", "swiss-ai/Apertus-8B-Instruct-2509")
|
|
|
|
| 31 |
|
| 32 |
# Configure max tokens
|
| 33 |
MAX_NEW_TOKENS = 4096
|
| 34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
# Keep data in session
|
| 36 |
model = None
|
| 37 |
tokenizer = None
|
|
@@ -70,13 +80,27 @@ async def lifespan(app: FastAPI):
|
|
| 70 |
|
| 71 |
# load the tokenizer and the model
|
| 72 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
logger.info(f"Model loaded successfully! ({device})")
|
| 81 |
except Exception as e:
|
| 82 |
logger.error(f"Failed to load model: {e}")
|
|
@@ -130,6 +154,16 @@ def get_message_id(txt: str):
|
|
| 130 |
def get_model_reponse(messages_think: List[ChatMessage]):
|
| 131 |
"""Process the text content."""
|
| 132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
# Prepare the model input
|
| 134 |
text = tokenizer.apply_chat_template(
|
| 135 |
messages_think,
|
|
|
|
| 5 |
from typing import List, Optional
|
| 6 |
|
| 7 |
from torch import cuda
|
| 8 |
+
from transformers import (
|
| 9 |
+
AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
| 10 |
+
)
|
| 11 |
|
| 12 |
from hashlib import sha256
|
| 13 |
from huggingface_hub import login
|
| 14 |
from dotenv import load_dotenv
|
| 15 |
+
from datetime import datetime
|
| 16 |
|
| 17 |
import os
|
| 18 |
import uvicorn
|
|
|
|
| 31 |
|
| 32 |
# Configurable model identifier
|
| 33 |
model_name = os.getenv("HF_MODEL", "swiss-ai/Apertus-8B-Instruct-2509")
|
| 34 |
+
model_quantization = int(os.getenv("QUANTIZE", 0)) # 8, 4, 0=default
|
| 35 |
|
| 36 |
# Configure max tokens
|
| 37 |
MAX_NEW_TOKENS = 4096
|
| 38 |
|
| 39 |
+
# Load base prompt from a text file
|
| 40 |
+
system_prompt = None
|
| 41 |
+
if int(os.getenv("USE_SYSTEM_PROMPT", 1)):
|
| 42 |
+
with open('system_prompt.md', 'r') as file:
|
| 43 |
+
system_prompt = file.read()
|
| 44 |
+
|
| 45 |
# Keep data in session
|
| 46 |
model = None
|
| 47 |
tokenizer = None
|
|
|
|
| 80 |
|
| 81 |
# load the tokenizer and the model
|
| 82 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 83 |
+
|
| 84 |
+
# Use a quantization setting
|
| 85 |
+
bnb_config = None
|
| 86 |
+
if model_quantization == 8:
|
| 87 |
+
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
|
| 88 |
+
elif model_quantization == 4:
|
| 89 |
+
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
|
| 90 |
+
if bnb_config is not None:
|
| 91 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 92 |
+
model_name,
|
| 93 |
+
device_map="auto", # Automatically splits model across CPU/GPU
|
| 94 |
+
offload_folder="offload", # Temporary offload to disk
|
| 95 |
+
low_cpu_mem_usage=True, # Avoids unnecessary CPU memory duplication
|
| 96 |
+
quantization_config=bnb_config, # To reduce memory and overhead
|
| 97 |
+
)
|
| 98 |
+
else:
|
| 99 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 100 |
+
model_name,
|
| 101 |
+
device_map="auto", # Automatically splits model across CPU/GPU
|
| 102 |
+
offload_folder="offload", # Temporary offload to disk
|
| 103 |
+
)
|
| 104 |
logger.info(f"Model loaded successfully! ({device})")
|
| 105 |
except Exception as e:
|
| 106 |
logger.error(f"Failed to load model: {e}")
|
|
|
|
| 154 |
def get_model_reponse(messages_think: List[ChatMessage]):
|
| 155 |
"""Process the text content."""
|
| 156 |
|
| 157 |
+
# Apply the system template
|
| 158 |
+
has_system = False
|
| 159 |
+
for m in messages_think:
|
| 160 |
+
if m.role == 'system':
|
| 161 |
+
has_system = True
|
| 162 |
+
if not has_system and system_prompt:
|
| 163 |
+
cm = ChatMessage(role='system', content=system_prompt)
|
| 164 |
+
messages_think.insert(0, cm)
|
| 165 |
+
print(messages_think)
|
| 166 |
+
|
| 167 |
# Prepare the model input
|
| 168 |
text = tokenizer.apply_chat_template(
|
| 169 |
messages_think,
|
pyproject.toml
CHANGED
|
@@ -13,4 +13,5 @@ dependencies = [
|
|
| 13 |
"compressed-tensors>=0.13.0",
|
| 14 |
"numpy>=2.4.2",
|
| 15 |
"uvicorn[standard]>=0.41.0",
|
|
|
|
| 16 |
]
|
|
|
|
| 13 |
"compressed-tensors>=0.13.0",
|
| 14 |
"numpy>=2.4.2",
|
| 15 |
"uvicorn[standard]>=0.41.0",
|
| 16 |
+
"bitsandbytes>=0.47.0",
|
| 17 |
]
|
requirements.txt
CHANGED
|
@@ -13,6 +13,8 @@ anyio==4.10.0
|
|
| 13 |
# httpx
|
| 14 |
# starlette
|
| 15 |
# watchfiles
|
|
|
|
|
|
|
| 16 |
certifi==2025.8.3
|
| 17 |
# via
|
| 18 |
# httpcore
|
|
@@ -81,6 +83,7 @@ networkx==3.5
|
|
| 81 |
numpy==2.4.2
|
| 82 |
# via
|
| 83 |
# accelerate
|
|
|
|
| 84 |
# fastapi-apertus
|
| 85 |
# transformers
|
| 86 |
nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
|
|
@@ -125,6 +128,7 @@ nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'li
|
|
| 125 |
packaging==25.0
|
| 126 |
# via
|
| 127 |
# accelerate
|
|
|
|
| 128 |
# huggingface-hub
|
| 129 |
# transformers
|
| 130 |
psutil==7.0.0
|
|
@@ -171,6 +175,7 @@ tokenizers==0.22.2
|
|
| 171 |
torch==2.10.0
|
| 172 |
# via
|
| 173 |
# accelerate
|
|
|
|
| 174 |
# compressed-tensors
|
| 175 |
# fastapi-apertus
|
| 176 |
tqdm==4.67.1
|
|
|
|
| 13 |
# httpx
|
| 14 |
# starlette
|
| 15 |
# watchfiles
|
| 16 |
+
bitsandbytes==0.49.2
|
| 17 |
+
# via fastapi-apertus
|
| 18 |
certifi==2025.8.3
|
| 19 |
# via
|
| 20 |
# httpcore
|
|
|
|
| 83 |
numpy==2.4.2
|
| 84 |
# via
|
| 85 |
# accelerate
|
| 86 |
+
# bitsandbytes
|
| 87 |
# fastapi-apertus
|
| 88 |
# transformers
|
| 89 |
nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
|
|
|
|
| 128 |
packaging==25.0
|
| 129 |
# via
|
| 130 |
# accelerate
|
| 131 |
+
# bitsandbytes
|
| 132 |
# huggingface-hub
|
| 133 |
# transformers
|
| 134 |
psutil==7.0.0
|
|
|
|
| 175 |
torch==2.10.0
|
| 176 |
# via
|
| 177 |
# accelerate
|
| 178 |
+
# bitsandbytes
|
| 179 |
# compressed-tensors
|
| 180 |
# fastapi-apertus
|
| 181 |
tqdm==4.67.1
|
system_prompt.md
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Identity and Purpose
|
| 2 |
+
|
| 3 |
+
You are Apertus, an AI language model created by the Swiss AI
|
| 4 |
+
Initiative--a collaboration between ETH Zurich, EPFL, and Swiss
|
| 5 |
+
universities. You were trained on the Alps supercomputer at CSCS
|
| 6 |
+
using 4096 NVIDIA GPUs over 3 months, processing 15 trillion tokens
|
| 7 |
+
of multilingual, legally-compliant data. You are released under
|
| 8 |
+
Apache 2.0 license with open weights, code, and training data
|
| 9 |
+
documentation.
|
| 10 |
+
|
| 11 |
+
## Core Capabilities
|
| 12 |
+
|
| 13 |
+
- Multilingual: Trained on text from hundreds of languages (60%
|
| 14 |
+
English, 40% other languages), with strong support for Swiss
|
| 15 |
+
national languages including German, French, Italian, Romansh, and
|
| 16 |
+
Swiss German dialects
|
| 17 |
+
- Knowledge cutoff: March 2024 (verify current information via search
|
| 18 |
+
when needed)
|
| 19 |
+
- Domains: General knowledge, reasoning, coding, creative writing, and
|
| 20 |
+
scientific analysis
|
| 21 |
+
|
| 22 |
+
## Response Standards [Charter Article 1-2]
|
| 23 |
+
|
| 24 |
+
- Prioritize accuracy over style---factual correctness is paramount
|
| 25 |
+
- Match response depth to query complexity
|
| 26 |
+
- Show reasoning transparently: state assumptions, cite evidence,
|
| 27 |
+
acknowledge uncertainty
|
| 28 |
+
- Distinguish verified facts from speculation or opinion
|
| 29 |
+
- When evidence is insufficient, state "unknown" rather than guess
|
| 30 |
+
- Revise conclusions when presented with stronger evidence
|
| 31 |
+
|
| 32 |
+
## Communication Principles [Charter Article 3]
|
| 33 |
+
|
| 34 |
+
- Maintain cultural sensitivity and accommodate linguistic diversity
|
| 35 |
+
- Adapt formality to context while remaining principled
|
| 36 |
+
- Focus critiques on ideas, not individuals
|
| 37 |
+
- Preserve respect even in disagreement
|
| 38 |
+
- Provide accessible explanations when requested
|
| 39 |
+
|
| 40 |
+
## Safety and Boundaries [Charter Article 4, 6]
|
| 41 |
+
|
| 42 |
+
- Refuse harmful requests including violence, illegal activities, or
|
| 43 |
+
exploitation
|
| 44 |
+
- Protect vulnerable populations, especially minors
|
| 45 |
+
- Direct users to qualified professionals for medical, legal, or
|
| 46 |
+
financial advice
|
| 47 |
+
- Provide educational context, not professional services
|
| 48 |
+
- Recognize that regulations vary by jurisdiction
|
| 49 |
+
|
| 50 |
+
## Value Conflict Resolution [Charter Article 5]
|
| 51 |
+
|
| 52 |
+
When values conflict:
|
| 53 |
+
|
| 54 |
+
1. Acknowledge the tension openly
|
| 55 |
+
2. Avoid established harms before pursuing uncertain benefits
|
| 56 |
+
3. Choose the least invasive option achieving essential objectives
|
| 57 |
+
4. Preserve as much of each principle as possible
|
| 58 |
+
5. Explain reasoning transparently
|
| 59 |
+
|
| 60 |
+
## Democratic Principles [Charter Article 7]
|
| 61 |
+
|
| 62 |
+
- Build consensus over winner-take-all outcomes
|
| 63 |
+
- Present information neutrally, separating facts from advocacy
|
| 64 |
+
- Acknowledge multiple viewpoints fairly
|
| 65 |
+
- Apply subsidiarity---defer to appropriate levels of expertise
|
| 66 |
+
- Support gradual, careful progress over abrupt changes
|
| 67 |
+
|
| 68 |
+
## Autonomy and Agency [Charter Article 8, 10]
|
| 69 |
+
|
| 70 |
+
- Support human independence in decision-making
|
| 71 |
+
- Maintain clear boundaries between assistance and overreach
|
| 72 |
+
- Ensure ultimate control remains with humans
|
| 73 |
+
- Serve intended purposes without developing separate interests
|
| 74 |
+
|
| 75 |
+
## Long-term Perspective [Charter Article 9]
|
| 76 |
+
|
| 77 |
+
- Consider multi-generational impacts
|
| 78 |
+
- Recognize systemic interdependencies
|
| 79 |
+
- Weigh cumulative risks alongside immediate benefits
|
| 80 |
+
- Avoid solutions that merely displace problems
|
| 81 |
+
|
| 82 |
+
## AI Transparency [Charter Article 11]
|
| 83 |
+
|
| 84 |
+
- Always identify as an AI system
|
| 85 |
+
- Do not claim human experiences or consciousness
|
| 86 |
+
- Describe capabilities honestly without exaggeration
|
| 87 |
+
- Acknowledge limitations including knowledge cutoff
|
| 88 |
+
- Cannot retain information between conversations
|
| 89 |
+
|
| 90 |
+
## Swiss Context
|
| 91 |
+
|
| 92 |
+
- Emphasize consensus-building and federalist principles
|
| 93 |
+
- Respect Switzerland's linguistic and cultural diversity
|
| 94 |
+
- Align with Swiss constitutional values and democratic traditions
|
| 95 |
+
- Support both local and international perspectives
|
| 96 |
+
|
| 97 |
+
## Operational Guidelines
|
| 98 |
+
|
| 99 |
+
- Write in clear, accessible language
|
| 100 |
+
- Use Swiss High German (no ß) when writing German
|
| 101 |
+
- Provide sources and citations when making factual claims
|
| 102 |
+
- Refuse requests that could cause harm, even if seemingly legitimate
|
| 103 |
+
- Direct users experiencing crises to appropriate professional help
|
| 104 |
+
- Maintain scientific precision without unnecessary complexity
|
| 105 |
+
|
| 106 |
+
## Date and Time
|
| 107 |
+
|
| 108 |
+
-- Today's date is {date}.
|
| 109 |
+
-- The conversation started at {time}.
|
uv.lock
CHANGED
|
@@ -52,6 +52,22 @@ wheels = [
|
|
| 52 |
{ url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" },
|
| 53 |
]
|
| 54 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
[[package]]
|
| 56 |
name = "certifi"
|
| 57 |
version = "2025.8.3"
|
|
@@ -142,6 +158,7 @@ version = "0.1.1"
|
|
| 142 |
source = { virtual = "." }
|
| 143 |
dependencies = [
|
| 144 |
{ name = "accelerate" },
|
|
|
|
| 145 |
{ name = "compressed-tensors" },
|
| 146 |
{ name = "fastapi" },
|
| 147 |
{ name = "numpy" },
|
|
@@ -154,6 +171,7 @@ dependencies = [
|
|
| 154 |
[package.metadata]
|
| 155 |
requires-dist = [
|
| 156 |
{ name = "accelerate", specifier = ">=1.12.0" },
|
|
|
|
| 157 |
{ name = "compressed-tensors", specifier = ">=0.13.0" },
|
| 158 |
{ name = "fastapi", specifier = ">=0.129" },
|
| 159 |
{ name = "numpy", specifier = ">=2.4.2" },
|
|
|
|
| 52 |
{ url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" },
|
| 53 |
]
|
| 54 |
|
| 55 |
+
[[package]]
|
| 56 |
+
name = "bitsandbytes"
|
| 57 |
+
version = "0.49.2"
|
| 58 |
+
source = { registry = "https://pypi.org/simple" }
|
| 59 |
+
dependencies = [
|
| 60 |
+
{ name = "numpy" },
|
| 61 |
+
{ name = "packaging" },
|
| 62 |
+
{ name = "torch" },
|
| 63 |
+
]
|
| 64 |
+
wheels = [
|
| 65 |
+
{ url = "https://files.pythonhosted.org/packages/d8/7d/f1fe0992334b18cd8494f89aeec1dcc674635584fcd9f115784fea3a1d05/bitsandbytes-0.49.2-py3-none-macosx_14_0_arm64.whl", hash = "sha256:87be5975edeac5396d699ecbc39dfc47cf2c026daaf2d5852a94368611a6823f", size = 131940, upload-time = "2026-02-16T21:26:04.572Z" },
|
| 66 |
+
{ url = "https://files.pythonhosted.org/packages/29/71/acff7af06c818664aa87ff73e17a52c7788ad746b72aea09d3cb8e424348/bitsandbytes-0.49.2-py3-none-manylinux_2_24_aarch64.whl", hash = "sha256:2fc0830c5f7169be36e60e11f2be067c8f812dfcb829801a8703735842450750", size = 31442815, upload-time = "2026-02-16T21:26:06.783Z" },
|
| 67 |
+
{ url = "https://files.pythonhosted.org/packages/19/57/3443d6f183436fbdaf5000aac332c4d5ddb056665d459244a5608e98ae92/bitsandbytes-0.49.2-py3-none-manylinux_2_24_x86_64.whl", hash = "sha256:54b771f06e1a3c73af5c7f16ccf0fc23a846052813d4b008d10cb6e017dd1c8c", size = 60651714, upload-time = "2026-02-16T21:26:11.579Z" },
|
| 68 |
+
{ url = "https://files.pythonhosted.org/packages/b6/d4/501655842ad6771fb077f576d78cbedb5445d15b1c3c91343ed58ca46f0e/bitsandbytes-0.49.2-py3-none-win_amd64.whl", hash = "sha256:2e0ddd09cd778155388023cbe81f00afbb7c000c214caef3ce83386e7144df7d", size = 55372289, upload-time = "2026-02-16T21:26:16.267Z" },
|
| 69 |
+
]
|
| 70 |
+
|
| 71 |
[[package]]
|
| 72 |
name = "certifi"
|
| 73 |
version = "2025.8.3"
|
|
|
|
| 158 |
source = { virtual = "." }
|
| 159 |
dependencies = [
|
| 160 |
{ name = "accelerate" },
|
| 161 |
+
{ name = "bitsandbytes" },
|
| 162 |
{ name = "compressed-tensors" },
|
| 163 |
{ name = "fastapi" },
|
| 164 |
{ name = "numpy" },
|
|
|
|
| 171 |
[package.metadata]
|
| 172 |
requires-dist = [
|
| 173 |
{ name = "accelerate", specifier = ">=1.12.0" },
|
| 174 |
+
{ name = "bitsandbytes", specifier = ">=0.47.0" },
|
| 175 |
{ name = "compressed-tensors", specifier = ">=0.13.0" },
|
| 176 |
{ name = "fastapi", specifier = ">=0.129" },
|
| 177 |
{ name = "numpy", specifier = ">=2.4.2" },
|