| import matplotlib.pyplot as plt |
| import numpy as np |
| from matplotlib.patheffects import withStroke |
|
|
| |
| |
| |
# X positions for the two interaction phases: naive baseline vs. optimized.
steps = [0, 1]

# Grader reward per task tier at each phase (all tiers start from the
# same naive-baseline score of 0.12).
score_easy = [0.12, 0.94]
score_med = [0.12, 1.00]
score_hard = [0.12, 0.90]

# Neon-on-dark theme palette shared by every element of the chart.
colors = dict(
    easy='#00F5FF',    # cyan — easy task trajectory
    medium='#FF00E5',  # magenta — medium task trajectory
    hard='#FFD700',    # gold — hard task trajectory
    bg='#0D0F14',      # near-black figure/axes background
    grid='#1A1D23',    # subtle grid lines
    text='#E0E0E0',    # light grey for legend/watermark text
)
|
|
# Create the figure on matplotlib's dark theme, then force both the figure
# patch and the axes background to the palette's near-black so the exported
# PNG matches the theme exactly (dpi=300 for a print-quality raster).
plt.style.use('dark_background')
fig, ax = plt.subplots(figsize=(10, 6), dpi=300)
fig.patch.set_facecolor(colors['bg'])
ax.set_facecolor(colors['bg'])
|
|
def plot_trajectory(x, y, label, color, marker):
    """Draw one reward trajectory as a crisp line over a soft neon glow.

    The glow is built by stacking several increasingly wide, nearly
    transparent copies of the line underneath the opaque core line.
    """
    # Glow layers: widths 1, 3, 5, 7, 9, 11 at alpha 0.03 accumulate into
    # a halo around the trajectory without obscuring it.
    for glow_width in (1, 3, 5, 7, 9, 11):
        ax.plot(x, y, color=color, linewidth=glow_width, alpha=0.03, zorder=2)

    # Core line: fully opaque, black-outlined so markers and line stay
    # legible against both the glow and the dark background.
    ax.plot(
        x, y,
        marker=marker, markersize=10, linewidth=3.5,
        label=label, color=color, zorder=5, alpha=1.0,
        path_effects=[withStroke(linewidth=3, foreground='black')],
    )
|
|
| |
# One trajectory per task tier; each spec is (scores, label, palette key, marker).
for tier_scores, tier_label, palette_key, tier_marker in (
    (score_easy, 'Task Easy (Clarification)', 'easy', 'o'),
    (score_med, 'Task Medium (New Rule)', 'medium', 's'),
    (score_hard, 'Task Hard (Evolution Trade-offs)', 'hard', 'D'),
):
    plot_trajectory(steps, tier_scores, tier_label, colors[palette_key], tier_marker)
|
|
| |
# Callouts for the two key data points; each spec is
# (text, annotated point, text position, accent color).
for note_text, note_xy, note_xytext, accent in (
    (' Naive Proposal\n (Vague / Implicit)', (0, 0.12), (-0.1, 0.3), '#FF5252'),
    (' RLVR In-Context\n Adaptation', (1, 0.94), (0.6, 0.5), '#00FF00'),
):
    ax.annotate(
        note_text,
        xy=note_xy,
        xytext=note_xytext,
        arrowprops=dict(facecolor=accent, shrink=0.05, width=1, headwidth=6),
        fontsize=11,
        fontweight='bold',
        color=accent,
        # Semi-transparent black box keeps the label readable over the lines.
        bbox=dict(facecolor='#000', alpha=0.5),
    )
|
|
# Title and axis labels (extra pad keeps them clear of the plot area).
ax.set_title('PolicyEvolverEnv: Strategic Governance Optimization', fontsize=18, fontweight='black', pad=25)
ax.set_xlabel('Environment Interaction Phase', fontsize=12, labelpad=10)
ax.set_ylabel('In-Context Grader Reward (0.0 to 1.0)', fontsize=12, labelpad=10)


# Only two x positions exist (the two phases); label them descriptively
# instead of showing the raw 0/1 values. Headroom above 1.0 leaves space
# for markers and annotations at the top score.
ax.set_xticks(steps)
ax.set_xticklabels(['Naive Baseline', 'Optimized (0.90+ Tier)'])
ax.set_ylim(0, 1.1)
ax.grid(True, linestyle='-', color=colors['grid'], alpha=0.4, zorder=1)
|
|
| |
# Hide all four axes spines for a borderless, "floating" plot area.
for spine in ax.spines.values():
    spine.set_visible(False)

# Themed legend panel; matplotlib does not recolor legend label text from
# facecolor, so repaint each label to the palette's text color explicitly.
legend = ax.legend(fontsize=11, loc='upper left', frameon=True,
                   facecolor='#15181E', edgecolor='#2A2D35')
for legend_label in legend.get_texts():
    legend_label.set_color(colors['text'])

# Provenance watermark pinned to the lower-right corner in axes coordinates.
ax.text(0.98, 0.02,
        'Llama-3.1-8b-instant | 100% Deterministic Reproducibility',
        transform=ax.transAxes, ha='right', va='bottom',
        fontsize=9, alpha=0.6, color=colors['text'])
|
|
# Finalize layout and export. facecolor must be passed to savefig as well,
# otherwise the saved PNG falls back to the default figure facecolor;
# bbox_inches='tight' trims surplus margin around the tight layout.
plt.tight_layout()
plt.savefig('reward_progression.png', dpi=300, facecolor=colors['bg'], bbox_inches='tight')
# Release the figure now that it is on disk, so repeated runs in one
# interpreter session don't accumulate open figures.
plt.close(fig)
# Fixed: the original message ended in mojibake ("ππ", a mis-encoded emoji
# sequence) that printed as junk on most terminals.
print("Updated High-Fidelity 0.9+ Chart Generated!")
|
|