AION Protocol Development Claude committed
Commit 951b6b3 · 1 Parent(s): 1cfaffd

perf: Limit max_tokens to 64K for demo stability


PERFORMANCE OPTIMIZATION:

Token Limits Updated:
- Claude Sonnet 4.5: 200K → 64K (demo stability)
- GPT-4o: 16K (unchanged)
- Groq models: 32K (unchanged)
- Gemini 2.0 Flash: 65K → 64K
- System prompt: Updated to reflect 64K limit

RATIONALE:
- Prevents timeout issues in HF Spaces
- Maintains quality while ensuring responsiveness
- 64K is sufficient for most demo use cases
- Production Ectus-R still supports 200K

Changes:
- app.py line 125: max_tokens=64000
- app.py line 173: max_output_tokens: 64000
- SYSTEM_PROMPT: Context window updated
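
For reference, the per-model limits in this commit can be pictured as one lookup table. A minimal sketch, with hedged assumptions: MAX_OUTPUT_TOKENS, DEMO_MODE, and the model keys are illustrative names, not from app.py, which hardcodes each limit inline at the call sites shown in the diff below.

```python
import os

# Hypothetical consolidation of the limits in this commit; app.py actually
# hardcodes each value inline at its provider call site.
DEMO_MODE = os.getenv("DEMO_MODE", "1") == "1"  # assumed flag, not in app.py

MAX_OUTPUT_TOKENS = {
    "claude-sonnet-4.5": 64_000 if DEMO_MODE else 200_000,  # production Ectus-R keeps 200K
    "gpt-4o": 16_000,            # unchanged
    "groq": 32_000,              # unchanged
    "gemini-2.0-flash": 64_000,  # was 65,536
}

def max_output_tokens(model_key: str) -> int:
    """Return the output-token cap for a model key, defaulting conservatively."""
    return MAX_OUTPUT_TOKENS.get(model_key, 16_000)
```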

🤖 Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -94,7 +94,7 @@ OUTPUT FORMAT:
 3. Dockerfile (if deployment mentioned)
 4. Brief README with usage instructions
 
-Context window: 200,000 tokens output - you can generate comprehensive solutions.
+Context window: 64,000 tokens output (demo limit) - you can generate comprehensive solutions.
 
 Be complete and thorough. Focus on quality and production-readiness."""
 
@@ -122,7 +122,7 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
         client = anthropic.Anthropic(api_key=os.getenv(config["api_key_env"]))
         response = client.messages.create(
             model=config["model"],
-            max_tokens=200000,
+            max_tokens=64000,  # Limited for demo stability
             temperature=temperature,
             system=SYSTEM_PROMPT,
             messages=[{"role": "user", "content": prompt}]
@@ -143,7 +143,7 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
                 {"role": "user", "content": prompt}
             ],
             temperature=temperature,
-            max_tokens=16000  # GPT-4o max is 16K
+            max_tokens=16000  # GPT-4o limit
         )
         generated_code = response.choices[0].message.content
         input_tokens = response.usage.prompt_tokens
@@ -158,7 +158,7 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
                 {"role": "user", "content": prompt}
             ],
             temperature=temperature,
-            max_tokens=32000  # Groq supports up to 32K
+            max_tokens=32000  # Groq limit (kept at 32K)
         )
         generated_code = response.choices[0].message.content
         input_tokens = response.usage.prompt_tokens
@@ -170,7 +170,7 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
         model = genai.GenerativeModel(config["model"])
         response = model.generate_content(
             f"{SYSTEM_PROMPT}\n\nUser request: {prompt}",
-            generation_config={"temperature": temperature, "max_output_tokens": 65536}  # Gemini 2.0 Flash supports up to 8K (65536 is max for SDK)
+            generation_config={"temperature": temperature, "max_output_tokens": 64000}  # Limited to 64K for demo stability
         )
         generated_code = response.text
         input_tokens = response.usage_metadata.prompt_token_count
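
Since the stated rationale is avoiding HF Spaces timeouts, a wall-clock guard around the call site is one complementary way to enforce the budget. A sketch, not from app.py: generate_code_with_model is taken from the hunk headers above, while generate_with_deadline and the 120-second budget are assumptions.

```python
import concurrent.futures

from app import generate_code_with_model  # signature per the hunk headers above

def generate_with_deadline(prompt: str, model_name: str, seconds: float = 120.0):
    """Hypothetical wrapper: fail fast if a generation call outlives the budget."""
    pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    future = pool.submit(generate_code_with_model, prompt, model_name)
    try:
        # Raises concurrent.futures.TimeoutError once the budget is spent.
        return future.result(timeout=seconds)
    finally:
        # Don't block on the worker; it may still be finishing in the background.
        pool.shutdown(wait=False)
```

Paired with the 64K output cap, a guard like this keeps a single runaway request from stalling the whole Space.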