AION Protocol Development committed on
Commit
827e553
·
1 Parent(s): f429e16

feat: Add language/framework optional inputs + context window control

Browse files

CHANGES:
- Language and Framework now optional text inputs (empty = AI decides)
- Temperature default changed to 0.5 (more deterministic)
- Added Context Window slider (1000-8000 tokens, default 4000)
- Applied to both Single Model and Multi-Model Comparison tabs
- Updated examples with new parameters

UX IMPROVEMENTS:
- Users can now leave language/framework empty for AI auto-detection
- More precise control over response length via context window
- Better default temperature (0.5 vs 0.7) for production code

Files changed (1) hide show
  1. app.py +85 -31
app.py CHANGED
@@ -247,24 +247,38 @@ def generate_code_with_model(prompt: str, model_name: str, temperature: float =
247
  "tokens_per_sec": tokens_per_sec
248
  }
249
 
250
- def single_model_generation(prompt: str, model: str, temperature: float, language: str):
251
  """Generate code with selected model"""
252
 
253
  if not prompt.strip():
254
  return "Please enter a project description."
255
 
256
- enhanced_prompt = f"Generate {language} code for the following project:\n\n{prompt}"
 
 
 
 
 
 
 
 
 
257
 
258
  result = generate_code_with_model(enhanced_prompt, model, temperature)
259
 
 
 
 
 
260
  output = f"""# Generated Code: {model}
261
 
262
  **Generation Time:** {result['elapsed_time']:.2f}s
263
- **Language:** {language}
264
  **Lines of Code:** {result['loc']}
265
  **Tokens:** {result['input_tokens']} in → {result['output_tokens']} out
266
  **Speed:** {result['tokens_per_sec']:.0f} tokens/sec
267
  **Cost:** ${result['cost']:.4f}
 
268
 
269
  ---
270
 
@@ -273,18 +287,26 @@ def single_model_generation(prompt: str, model: str, temperature: float, languag
273
 
274
  return output
275
 
276
- def multi_model_comparison(prompt: str, language: str):
277
  """Compare all models on same prompt"""
278
 
279
  if not prompt.strip():
280
  return pd.DataFrame(), "Please enter a project description."
281
 
282
- enhanced_prompt = f"Generate {language} code for: {prompt}"
 
 
 
 
 
 
 
 
283
 
284
  results = []
285
 
286
  for model_name in MODEL_CONFIGS.keys():
287
- result = generate_code_with_model(enhanced_prompt, model_name, 0.7)
288
 
289
  results.append({
290
  "Model": model_name,
@@ -344,24 +366,37 @@ with gr.Blocks(
344
  value="Create a simple TODO list API with CRUD operations using REST principles."
345
  )
346
 
 
 
 
 
 
 
 
347
  with gr.Row():
348
- model_select = gr.Dropdown(
349
- choices=list(MODEL_CONFIGS.keys()),
350
- value="Claude Sonnet 4.5",
351
- label="AI Model",
352
- info="Select the model to generate code"
353
  )
354
- language_select = gr.Radio(
355
- choices=["Rust", "Python", "TypeScript", "Go", "Java"],
356
- value="Python",
357
- label="Language"
358
  )
359
 
360
- temp_slider = gr.Slider(
361
- 0.0, 1.0, 0.7,
362
- label="Temperature",
363
- info="Higher = more creative, Lower = more deterministic"
364
- )
 
 
 
 
 
 
 
365
 
366
  generate_btn = gr.Button("Generate Code", variant="primary", size="lg")
367
 
@@ -373,17 +408,17 @@ with gr.Blocks(
373
 
374
  generate_btn.click(
375
  single_model_generation,
376
- inputs=[prompt_input, model_select, temp_slider, language_select],
377
  outputs=output_single
378
  )
379
 
380
  gr.Examples(
381
  examples=[
382
- ["Create a REST API for a blog with users and posts", "Claude Sonnet 4.5", 0.7, "Rust"],
383
- ["Build a CLI tool for file encryption using AES-256", "GPT-4o", 0.5, "Python"],
384
- ["Implement a rate limiter middleware for web APIs", "Qwen2.5-72B", 0.7, "TypeScript"],
385
  ],
386
- inputs=[prompt_input, model_select, temp_slider, language_select]
387
  )
388
 
389
  with gr.Tab("⚡ Multi-Model Comparison"):
@@ -398,11 +433,30 @@ with gr.Blocks(
398
  value="Create a minimal REST API for a TODO list with create, read, update, delete operations."
399
  )
400
 
401
- language_compare = gr.Radio(
402
- choices=["Rust", "Python", "TypeScript", "Go"],
403
- value="Python",
404
- label="Language"
405
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
406
 
407
  compare_btn = gr.Button("Compare All Models", variant="primary", size="lg")
408
 
@@ -415,7 +469,7 @@ with gr.Blocks(
415
 
416
  compare_btn.click(
417
  multi_model_comparison,
418
- inputs=[prompt_compare, language_compare],
419
  outputs=[comparison_table, winner_msg]
420
  )
421
 
 
247
  "tokens_per_sec": tokens_per_sec
248
  }
249
 
250
+ def single_model_generation(prompt: str, model: str, temperature: float, language: str, framework: str, context_window: int):
251
  """Generate code with selected model"""
252
 
253
  if not prompt.strip():
254
  return "Please enter a project description."
255
 
256
+ # Build enhanced prompt with language/framework if specified
257
+ enhanced_prompt = prompt
258
+ if language.strip():
259
+ enhanced_prompt = f"Generate {language} code"
260
+ if framework.strip():
261
+ enhanced_prompt += f" using {framework}"
262
+ enhanced_prompt += f" for the following project:\n\n{prompt}"
263
+
264
+ # Add context window info to prompt
265
+ enhanced_prompt += f"\n\nNote: Keep response within {context_window} tokens."
266
 
267
  result = generate_code_with_model(enhanced_prompt, model, temperature)
268
 
269
+ lang_info = f"{language}" if language.strip() else "Auto-detected"
270
+ if framework.strip():
271
+ lang_info += f" + {framework}"
272
+
273
  output = f"""# Generated Code: {model}
274
 
275
  **Generation Time:** {result['elapsed_time']:.2f}s
276
+ **Language/Framework:** {lang_info}
277
  **Lines of Code:** {result['loc']}
278
  **Tokens:** {result['input_tokens']} in → {result['output_tokens']} out
279
  **Speed:** {result['tokens_per_sec']:.0f} tokens/sec
280
  **Cost:** ${result['cost']:.4f}
281
+ **Context Window:** {context_window} tokens
282
 
283
  ---
284
 
 
287
 
288
  return output
289
 
290
+ def multi_model_comparison(prompt: str, language: str, framework: str, temperature: float, context_window: int):
291
  """Compare all models on same prompt"""
292
 
293
  if not prompt.strip():
294
  return pd.DataFrame(), "Please enter a project description."
295
 
296
+ # Build enhanced prompt with language/framework if specified
297
+ enhanced_prompt = prompt
298
+ if language.strip():
299
+ enhanced_prompt = f"Generate {language} code"
300
+ if framework.strip():
301
+ enhanced_prompt += f" using {framework}"
302
+ enhanced_prompt += f" for: {prompt}"
303
+
304
+ enhanced_prompt += f"\n\nNote: Keep response within {context_window} tokens."
305
 
306
  results = []
307
 
308
  for model_name in MODEL_CONFIGS.keys():
309
+ result = generate_code_with_model(enhanced_prompt, model_name, temperature)
310
 
311
  results.append({
312
  "Model": model_name,
 
366
  value="Create a simple TODO list API with CRUD operations using REST principles."
367
  )
368
 
369
+ model_select = gr.Dropdown(
370
+ choices=list(MODEL_CONFIGS.keys()),
371
+ value="Claude Sonnet 4.5 💎",
372
+ label="AI Model",
373
+ info="Select the model to generate code"
374
+ )
375
+
376
  with gr.Row():
377
+ language_input = gr.Textbox(
378
+ label="Language (Optional)",
379
+ placeholder="e.g., Rust, Python, TypeScript, Go, Java - Leave empty for AI to decide",
380
+ value=""
 
381
  )
382
+ framework_input = gr.Textbox(
383
+ label="Framework (Optional)",
384
+ placeholder="e.g., Axum, FastAPI, Express, Django - Leave empty for AI to decide",
385
+ value=""
386
  )
387
 
388
+ with gr.Row():
389
+ temp_slider = gr.Slider(
390
+ 0.0, 1.0, 0.5,
391
+ label="Temperature",
392
+ info="Higher = more creative, Lower = more deterministic"
393
+ )
394
+ context_slider = gr.Slider(
395
+ 1000, 8000, 4000,
396
+ step=500,
397
+ label="Context Window (tokens)",
398
+ info="Maximum tokens in response"
399
+ )
400
 
401
  generate_btn = gr.Button("Generate Code", variant="primary", size="lg")
402
 
 
408
 
409
  generate_btn.click(
410
  single_model_generation,
411
+ inputs=[prompt_input, model_select, temp_slider, language_input, framework_input, context_slider],
412
  outputs=output_single
413
  )
414
 
415
  gr.Examples(
416
  examples=[
417
+ ["Create a REST API for a blog with users and posts", "Claude Sonnet 4.5 💎", 0.5, "Rust", "Axum", 4000],
418
+ ["Build a CLI tool for file encryption using AES-256", "GPT-4o 💎", 0.5, "Python", "Click", 3000],
419
+ ["Implement a rate limiter middleware for web APIs", "Llama 3.3 70B (Groq) 🚀", 0.5, "TypeScript", "Express", 4000],
420
  ],
421
+ inputs=[prompt_input, model_select, temp_slider, language_input, framework_input, context_slider]
422
  )
423
 
424
  with gr.Tab("⚡ Multi-Model Comparison"):
 
433
  value="Create a minimal REST API for a TODO list with create, read, update, delete operations."
434
  )
435
 
436
+ with gr.Row():
437
+ language_compare = gr.Textbox(
438
+ label="Language (Optional)",
439
+ placeholder="e.g., Python, Rust, TypeScript - Leave empty for AI to decide",
440
+ value=""
441
+ )
442
+ framework_compare = gr.Textbox(
443
+ label="Framework (Optional)",
444
+ placeholder="e.g., FastAPI, Axum, Express - Leave empty for AI to decide",
445
+ value=""
446
+ )
447
+
448
+ with gr.Row():
449
+ temp_compare = gr.Slider(
450
+ 0.0, 1.0, 0.5,
451
+ label="Temperature",
452
+ info="Higher = more creative, Lower = more deterministic"
453
+ )
454
+ context_compare = gr.Slider(
455
+ 1000, 8000, 4000,
456
+ step=500,
457
+ label="Context Window (tokens)",
458
+ info="Maximum tokens in response"
459
+ )
460
 
461
  compare_btn = gr.Button("Compare All Models", variant="primary", size="lg")
462
 
 
469
 
470
  compare_btn.click(
471
  multi_model_comparison,
472
+ inputs=[prompt_compare, language_compare, framework_compare, temp_compare, context_compare],
473
  outputs=[comparison_table, winner_msg]
474
  )
475