Commit
·
10ece01
1
Parent(s):
7e7e3a1
Add input price and fix uncached input calculation
Browse files- Add Input price field ($/1M) loaded from litellm
- Calculate uncached_input = prompt - cache_read - cache_creation per instance
- Fix bug: sum uncached per-instance instead of from totals
- Add Uncached Input to all token/cost charts
- Show ✅/❌ indicators for all 4 price fields
app.py
CHANGED
|
@@ -250,7 +250,7 @@ def load_all_trajectories(folder: str) -> pd.DataFrame:
|
|
| 250 |
return df
|
| 251 |
|
| 252 |
|
| 253 |
-
def create_basic_histograms(df: pd.DataFrame, cache_read_price: float, cache_creation_price: float, completion_price: float):
|
| 254 |
if df.empty:
|
| 255 |
return None, None, None, None, None
|
| 256 |
|
|
@@ -294,14 +294,17 @@ def create_basic_histograms(df: pd.DataFrame, cache_read_price: float, cache_cre
|
|
| 294 |
font=dict(size=12),
|
| 295 |
)
|
| 296 |
|
| 297 |
-
total_prompt = df["prompt_tokens"].sum()
|
| 298 |
total_completion = df["completion_tokens"].sum()
|
| 299 |
total_cache_read = df["cache_read_tokens"].sum()
|
| 300 |
total_cache_creation = df["cache_creation_tokens"].sum()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 301 |
|
| 302 |
token_data = pd.DataFrame({
|
| 303 |
-
"Token Type": ["
|
| 304 |
-
"Total Tokens": [
|
| 305 |
})
|
| 306 |
|
| 307 |
fig_tokens = px.bar(
|
|
@@ -310,7 +313,7 @@ def create_basic_histograms(df: pd.DataFrame, cache_read_price: float, cache_cre
|
|
| 310 |
y="Total Tokens",
|
| 311 |
title="Total Tokens by Type",
|
| 312 |
color="Token Type",
|
| 313 |
-
color_discrete_sequence=["#EF553B", "#
|
| 314 |
)
|
| 315 |
fig_tokens.update_layout(
|
| 316 |
xaxis_title="Token Type",
|
|
@@ -327,14 +330,15 @@ def create_basic_histograms(df: pd.DataFrame, cache_read_price: float, cache_cre
|
|
| 327 |
font=dict(size=12),
|
| 328 |
)
|
| 329 |
|
| 330 |
-
# Cost by token type
|
| 331 |
-
|
| 332 |
cost_cache_read = total_cache_read * cache_read_price / 1e6
|
| 333 |
cost_cache_creation = total_cache_creation * cache_creation_price / 1e6
|
|
|
|
| 334 |
|
| 335 |
cost_data = pd.DataFrame({
|
| 336 |
-
"Token Type": ["
|
| 337 |
-
"Cost ($)": [
|
| 338 |
})
|
| 339 |
|
| 340 |
fig_tokens_cost = px.bar(
|
|
@@ -343,7 +347,7 @@ def create_basic_histograms(df: pd.DataFrame, cache_read_price: float, cache_cre
|
|
| 343 |
y="Cost ($)",
|
| 344 |
title="Total Cost by Token Type ($)",
|
| 345 |
color="Token Type",
|
| 346 |
-
color_discrete_sequence=["#
|
| 347 |
)
|
| 348 |
fig_tokens_cost.update_layout(
|
| 349 |
xaxis_title="Token Type",
|
|
@@ -352,7 +356,7 @@ def create_basic_histograms(df: pd.DataFrame, cache_read_price: float, cache_cre
|
|
| 352 |
margin=dict(l=40, r=20, t=40, b=40),
|
| 353 |
)
|
| 354 |
|
| 355 |
-
total_cost =
|
| 356 |
fig_tokens_cost.add_annotation(
|
| 357 |
text=f"Total: ${total_cost:.2f}",
|
| 358 |
xref="paper", yref="paper",
|
|
@@ -362,9 +366,19 @@ def create_basic_histograms(df: pd.DataFrame, cache_read_price: float, cache_cre
|
|
| 362 |
|
| 363 |
df_sorted = df.sort_values("cache_read_tokens", ascending=False).reset_index(drop=True)
|
| 364 |
df_sorted["instance_idx"] = range(len(df_sorted))
|
|
|
|
|
|
|
| 365 |
|
| 366 |
fig_stacked = go.Figure()
|
| 367 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 368 |
fig_stacked.add_trace(go.Bar(
|
| 369 |
name="Cache Read",
|
| 370 |
x=df_sorted["instance_idx"],
|
|
@@ -401,19 +415,31 @@ def create_basic_histograms(df: pd.DataFrame, cache_read_price: float, cache_cre
|
|
| 401 |
return fig_steps, fig_cost, fig_tokens, fig_tokens_cost, fig_stacked
|
| 402 |
|
| 403 |
|
| 404 |
-
def create_cost_breakdown(df: pd.DataFrame, cache_read_price: float, cache_creation_price: float, completion_price: float):
|
| 405 |
if df.empty:
|
| 406 |
return None
|
| 407 |
|
| 408 |
df_sorted = df.sort_values("cache_read_tokens", ascending=False).reset_index(drop=True)
|
| 409 |
df_sorted["instance_idx"] = range(len(df_sorted))
|
| 410 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 411 |
df_sorted["cost_cache_read"] = df_sorted["cache_read_tokens"] * cache_read_price / 1e6
|
| 412 |
df_sorted["cost_cache_creation"] = df_sorted["cache_creation_tokens"] * cache_creation_price / 1e6
|
| 413 |
df_sorted["cost_completion"] = df_sorted["completion_tokens"] * completion_price / 1e6
|
| 414 |
|
| 415 |
fig = go.Figure()
|
| 416 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 417 |
fig.add_trace(go.Bar(
|
| 418 |
name=f"Cache Read (${cache_read_price:.2f}/1M)",
|
| 419 |
x=df_sorted["instance_idx"],
|
|
@@ -439,6 +465,7 @@ def create_cost_breakdown(df: pd.DataFrame, cache_read_price: float, cache_creat
|
|
| 439 |
))
|
| 440 |
|
| 441 |
total_cost = (
|
|
|
|
| 442 |
df_sorted["cost_cache_read"].sum() +
|
| 443 |
df_sorted["cost_cache_creation"].sum() +
|
| 444 |
df_sorted["cost_completion"].sum()
|
|
@@ -474,20 +501,21 @@ def extract_model_from_folder(folder: str) -> str:
|
|
| 474 |
return folder
|
| 475 |
|
| 476 |
|
| 477 |
-
def get_prices_for_folder(folder: str) -> tuple[float, float, float, str]:
|
| 478 |
-
"""Get prices from litellm based on folder name. Returns (cache_read, cache_creation, completion, model_name)"""
|
| 479 |
model_hint = extract_model_from_folder(folder)
|
| 480 |
if not model_hint:
|
| 481 |
-
return 0, 0, 0, ""
|
| 482 |
|
| 483 |
prices = get_model_prices(model_hint)
|
| 484 |
if prices:
|
|
|
|
| 485 |
cache_read = prices.get("cache_read_input_token_cost", 0) * 1e6
|
| 486 |
cache_creation = prices.get("cache_creation_input_token_cost", 0) * 1e6
|
| 487 |
completion = prices.get("output_cost_per_token", 0) * 1e6
|
| 488 |
-
return cache_read, cache_creation, completion, model_hint
|
| 489 |
|
| 490 |
-
return 0, 0, 0, model_hint
|
| 491 |
|
| 492 |
|
| 493 |
def on_row_select(evt: gr.SelectData, df: pd.DataFrame):
|
|
@@ -496,6 +524,7 @@ def on_row_select(evt: gr.SelectData, df: pd.DataFrame):
|
|
| 496 |
"", "",
|
| 497 |
gr.update(interactive=False),
|
| 498 |
gr.update(visible=False),
|
|
|
|
| 499 |
gr.update(value=0, label="💲 Cache Read"),
|
| 500 |
gr.update(value=0, label="💲 Cache Creation"),
|
| 501 |
gr.update(value=0, label="💲 Completion"),
|
|
@@ -509,7 +538,7 @@ def on_row_select(evt: gr.SelectData, df: pd.DataFrame):
|
|
| 509 |
|
| 510 |
show_analyze = check_trajectories_downloaded(folder)
|
| 511 |
|
| 512 |
-
cache_read, cache_creation, completion, model_hint = get_prices_for_folder(folder)
|
| 513 |
|
| 514 |
def price_update(value, name):
|
| 515 |
if value > 0:
|
|
@@ -521,6 +550,7 @@ def on_row_select(evt: gr.SelectData, df: pd.DataFrame):
|
|
| 521 |
folder, name,
|
| 522 |
gr.update(interactive=True),
|
| 523 |
gr.update(visible=show_analyze),
|
|
|
|
| 524 |
price_update(cache_read, "Cache Read"),
|
| 525 |
price_update(cache_creation, "Cache Creation"),
|
| 526 |
price_update(completion, "Completion"),
|
|
@@ -576,6 +606,7 @@ def build_app():
|
|
| 576 |
gr.Markdown("---")
|
| 577 |
gr.Markdown("### 💰 Token Prices ($/1M) · *[litellm](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)*")
|
| 578 |
detected_model = gr.Textbox(label="Detected Model", interactive=False)
|
|
|
|
| 579 |
price_cache_read = gr.Number(label="💲 Cache Read", value=0, precision=2)
|
| 580 |
price_cache_creation = gr.Number(label="💲 Cache Creation", value=0, precision=2)
|
| 581 |
price_completion = gr.Number(label="💲 Completion", value=0, precision=2)
|
|
@@ -583,7 +614,7 @@ def build_app():
|
|
| 583 |
leaderboard_table.select(
|
| 584 |
fn=on_row_select,
|
| 585 |
inputs=[leaderboard_table],
|
| 586 |
-
outputs=[selected_folder, selected_name, download_btn, analyze_btn, price_cache_read, price_cache_creation, price_completion, detected_model],
|
| 587 |
)
|
| 588 |
|
| 589 |
download_btn.click(
|
|
@@ -592,7 +623,7 @@ def build_app():
|
|
| 592 |
outputs=[download_status, analyze_btn],
|
| 593 |
)
|
| 594 |
|
| 595 |
-
def load_and_analyze(folder, cache_read_price, cache_creation_price, completion_price):
|
| 596 |
empty_result = (
|
| 597 |
gr.update(visible=False),
|
| 598 |
None, None, None, None, None, None,
|
|
@@ -613,9 +644,9 @@ def build_app():
|
|
| 613 |
return
|
| 614 |
|
| 615 |
fig_steps, fig_cost, fig_tokens, fig_tokens_cost, fig_stacked = create_basic_histograms(
|
| 616 |
-
df, cache_read_price, cache_creation_price, completion_price
|
| 617 |
)
|
| 618 |
-
fig_cost_breakdown = create_cost_breakdown(df, cache_read_price, cache_creation_price, completion_price)
|
| 619 |
|
| 620 |
yield (
|
| 621 |
gr.update(visible=True),
|
|
@@ -624,7 +655,7 @@ def build_app():
|
|
| 624 |
|
| 625 |
analyze_btn.click(
|
| 626 |
fn=load_and_analyze,
|
| 627 |
-
inputs=[selected_folder, price_cache_read, price_cache_creation, price_completion],
|
| 628 |
outputs=[
|
| 629 |
analysis_section,
|
| 630 |
plot_steps, plot_cost, plot_tokens, plot_tokens_cost, plot_stacked, plot_cost_breakdown,
|
|
|
|
| 250 |
return df
|
| 251 |
|
| 252 |
|
| 253 |
+
def create_basic_histograms(df: pd.DataFrame, input_price: float, cache_read_price: float, cache_creation_price: float, completion_price: float):
|
| 254 |
if df.empty:
|
| 255 |
return None, None, None, None, None
|
| 256 |
|
|
|
|
| 294 |
font=dict(size=12),
|
| 295 |
)
|
| 296 |
|
|
|
|
| 297 |
total_completion = df["completion_tokens"].sum()
|
| 298 |
total_cache_read = df["cache_read_tokens"].sum()
|
| 299 |
total_cache_creation = df["cache_creation_tokens"].sum()
|
| 300 |
+
# Uncached input = prompt - cache_read - cache_creation (per instance, then sum)
|
| 301 |
+
df_temp = df.copy()
|
| 302 |
+
df_temp["uncached_input"] = (df_temp["prompt_tokens"] - df_temp["cache_read_tokens"] - df_temp["cache_creation_tokens"]).clip(lower=0)
|
| 303 |
+
total_uncached_input = df_temp["uncached_input"].sum()
|
| 304 |
|
| 305 |
token_data = pd.DataFrame({
|
| 306 |
+
"Token Type": ["Uncached Input", "Cache Read", "Cache Creation", "Completion"],
|
| 307 |
+
"Total Tokens": [total_uncached_input, total_cache_read, total_cache_creation, total_completion],
|
| 308 |
})
|
| 309 |
|
| 310 |
fig_tokens = px.bar(
|
|
|
|
| 313 |
y="Total Tokens",
|
| 314 |
title="Total Tokens by Type",
|
| 315 |
color="Token Type",
|
| 316 |
+
color_discrete_sequence=["#EF553B", "#19D3F3", "#FFA15A", "#AB63FA"],
|
| 317 |
)
|
| 318 |
fig_tokens.update_layout(
|
| 319 |
xaxis_title="Token Type",
|
|
|
|
| 330 |
font=dict(size=12),
|
| 331 |
)
|
| 332 |
|
| 333 |
+
# Cost by token type
|
| 334 |
+
cost_uncached_input = total_uncached_input * input_price / 1e6
|
| 335 |
cost_cache_read = total_cache_read * cache_read_price / 1e6
|
| 336 |
cost_cache_creation = total_cache_creation * cache_creation_price / 1e6
|
| 337 |
+
cost_completion = total_completion * completion_price / 1e6
|
| 338 |
|
| 339 |
cost_data = pd.DataFrame({
|
| 340 |
+
"Token Type": ["Uncached Input", "Cache Read", "Cache Creation", "Completion"],
|
| 341 |
+
"Cost ($)": [cost_uncached_input, cost_cache_read, cost_cache_creation, cost_completion],
|
| 342 |
})
|
| 343 |
|
| 344 |
fig_tokens_cost = px.bar(
|
|
|
|
| 347 |
y="Cost ($)",
|
| 348 |
title="Total Cost by Token Type ($)",
|
| 349 |
color="Token Type",
|
| 350 |
+
color_discrete_sequence=["#EF553B", "#19D3F3", "#FFA15A", "#AB63FA"],
|
| 351 |
)
|
| 352 |
fig_tokens_cost.update_layout(
|
| 353 |
xaxis_title="Token Type",
|
|
|
|
| 356 |
margin=dict(l=40, r=20, t=40, b=40),
|
| 357 |
)
|
| 358 |
|
| 359 |
+
total_cost = cost_uncached_input + cost_cache_read + cost_cache_creation + cost_completion
|
| 360 |
fig_tokens_cost.add_annotation(
|
| 361 |
text=f"Total: ${total_cost:.2f}",
|
| 362 |
xref="paper", yref="paper",
|
|
|
|
| 366 |
|
| 367 |
df_sorted = df.sort_values("cache_read_tokens", ascending=False).reset_index(drop=True)
|
| 368 |
df_sorted["instance_idx"] = range(len(df_sorted))
|
| 369 |
+
# Uncached input = prompt - cache_read - cache_creation
|
| 370 |
+
df_sorted["uncached_input_tokens"] = (df_sorted["prompt_tokens"] - df_sorted["cache_read_tokens"] - df_sorted["cache_creation_tokens"]).clip(lower=0)
|
| 371 |
|
| 372 |
fig_stacked = go.Figure()
|
| 373 |
|
| 374 |
+
fig_stacked.add_trace(go.Bar(
|
| 375 |
+
name="Uncached Input",
|
| 376 |
+
x=df_sorted["instance_idx"],
|
| 377 |
+
y=df_sorted["uncached_input_tokens"],
|
| 378 |
+
marker_color="#EF553B",
|
| 379 |
+
hovertemplate="Instance: %{x}<br>Uncached Input: %{y:,.0f}<extra></extra>",
|
| 380 |
+
))
|
| 381 |
+
|
| 382 |
fig_stacked.add_trace(go.Bar(
|
| 383 |
name="Cache Read",
|
| 384 |
x=df_sorted["instance_idx"],
|
|
|
|
| 415 |
return fig_steps, fig_cost, fig_tokens, fig_tokens_cost, fig_stacked
|
| 416 |
|
| 417 |
|
| 418 |
+
def create_cost_breakdown(df: pd.DataFrame, input_price: float, cache_read_price: float, cache_creation_price: float, completion_price: float):
|
| 419 |
if df.empty:
|
| 420 |
return None
|
| 421 |
|
| 422 |
df_sorted = df.sort_values("cache_read_tokens", ascending=False).reset_index(drop=True)
|
| 423 |
df_sorted["instance_idx"] = range(len(df_sorted))
|
| 424 |
|
| 425 |
+
# Uncached input = prompt - cache_read - cache_creation
|
| 426 |
+
df_sorted["uncached_input_tokens"] = (df_sorted["prompt_tokens"] - df_sorted["cache_read_tokens"] - df_sorted["cache_creation_tokens"]).clip(lower=0)
|
| 427 |
+
|
| 428 |
+
df_sorted["cost_uncached_input"] = df_sorted["uncached_input_tokens"] * input_price / 1e6
|
| 429 |
df_sorted["cost_cache_read"] = df_sorted["cache_read_tokens"] * cache_read_price / 1e6
|
| 430 |
df_sorted["cost_cache_creation"] = df_sorted["cache_creation_tokens"] * cache_creation_price / 1e6
|
| 431 |
df_sorted["cost_completion"] = df_sorted["completion_tokens"] * completion_price / 1e6
|
| 432 |
|
| 433 |
fig = go.Figure()
|
| 434 |
|
| 435 |
+
fig.add_trace(go.Bar(
|
| 436 |
+
name=f"Uncached Input (${input_price:.2f}/1M)",
|
| 437 |
+
x=df_sorted["instance_idx"],
|
| 438 |
+
y=df_sorted["cost_uncached_input"],
|
| 439 |
+
marker_color="#EF553B",
|
| 440 |
+
hovertemplate="Instance: %{x}<br>Cost: $%{y:.4f}<extra></extra>",
|
| 441 |
+
))
|
| 442 |
+
|
| 443 |
fig.add_trace(go.Bar(
|
| 444 |
name=f"Cache Read (${cache_read_price:.2f}/1M)",
|
| 445 |
x=df_sorted["instance_idx"],
|
|
|
|
| 465 |
))
|
| 466 |
|
| 467 |
total_cost = (
|
| 468 |
+
df_sorted["cost_uncached_input"].sum() +
|
| 469 |
df_sorted["cost_cache_read"].sum() +
|
| 470 |
df_sorted["cost_cache_creation"].sum() +
|
| 471 |
df_sorted["cost_completion"].sum()
|
|
|
|
| 501 |
return folder
|
| 502 |
|
| 503 |
|
| 504 |
+
def get_prices_for_folder(folder: str) -> tuple[float, float, float, float, str]:
|
| 505 |
+
"""Get prices from litellm based on folder name. Returns (input, cache_read, cache_creation, completion, model_name)"""
|
| 506 |
model_hint = extract_model_from_folder(folder)
|
| 507 |
if not model_hint:
|
| 508 |
+
return 0, 0, 0, 0, ""
|
| 509 |
|
| 510 |
prices = get_model_prices(model_hint)
|
| 511 |
if prices:
|
| 512 |
+
input_price = prices.get("input_cost_per_token", 0) * 1e6
|
| 513 |
cache_read = prices.get("cache_read_input_token_cost", 0) * 1e6
|
| 514 |
cache_creation = prices.get("cache_creation_input_token_cost", 0) * 1e6
|
| 515 |
completion = prices.get("output_cost_per_token", 0) * 1e6
|
| 516 |
+
return input_price, cache_read, cache_creation, completion, model_hint
|
| 517 |
|
| 518 |
+
return 0, 0, 0, 0, model_hint
|
| 519 |
|
| 520 |
|
| 521 |
def on_row_select(evt: gr.SelectData, df: pd.DataFrame):
|
|
|
|
| 524 |
"", "",
|
| 525 |
gr.update(interactive=False),
|
| 526 |
gr.update(visible=False),
|
| 527 |
+
gr.update(value=0, label="💲 Input"),
|
| 528 |
gr.update(value=0, label="💲 Cache Read"),
|
| 529 |
gr.update(value=0, label="💲 Cache Creation"),
|
| 530 |
gr.update(value=0, label="💲 Completion"),
|
|
|
|
| 538 |
|
| 539 |
show_analyze = check_trajectories_downloaded(folder)
|
| 540 |
|
| 541 |
+
input_price, cache_read, cache_creation, completion, model_hint = get_prices_for_folder(folder)
|
| 542 |
|
| 543 |
def price_update(value, name):
|
| 544 |
if value > 0:
|
|
|
|
| 550 |
folder, name,
|
| 551 |
gr.update(interactive=True),
|
| 552 |
gr.update(visible=show_analyze),
|
| 553 |
+
price_update(input_price, "Input"),
|
| 554 |
price_update(cache_read, "Cache Read"),
|
| 555 |
price_update(cache_creation, "Cache Creation"),
|
| 556 |
price_update(completion, "Completion"),
|
|
|
|
| 606 |
gr.Markdown("---")
|
| 607 |
gr.Markdown("### 💰 Token Prices ($/1M) · *[litellm](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json)*")
|
| 608 |
detected_model = gr.Textbox(label="Detected Model", interactive=False)
|
| 609 |
+
price_input = gr.Number(label="💲 Input", value=0, precision=2)
|
| 610 |
price_cache_read = gr.Number(label="💲 Cache Read", value=0, precision=2)
|
| 611 |
price_cache_creation = gr.Number(label="💲 Cache Creation", value=0, precision=2)
|
| 612 |
price_completion = gr.Number(label="💲 Completion", value=0, precision=2)
|
|
|
|
| 614 |
leaderboard_table.select(
|
| 615 |
fn=on_row_select,
|
| 616 |
inputs=[leaderboard_table],
|
| 617 |
+
outputs=[selected_folder, selected_name, download_btn, analyze_btn, price_input, price_cache_read, price_cache_creation, price_completion, detected_model],
|
| 618 |
)
|
| 619 |
|
| 620 |
download_btn.click(
|
|
|
|
| 623 |
outputs=[download_status, analyze_btn],
|
| 624 |
)
|
| 625 |
|
| 626 |
+
def load_and_analyze(folder, input_price, cache_read_price, cache_creation_price, completion_price):
|
| 627 |
empty_result = (
|
| 628 |
gr.update(visible=False),
|
| 629 |
None, None, None, None, None, None,
|
|
|
|
| 644 |
return
|
| 645 |
|
| 646 |
fig_steps, fig_cost, fig_tokens, fig_tokens_cost, fig_stacked = create_basic_histograms(
|
| 647 |
+
df, input_price, cache_read_price, cache_creation_price, completion_price
|
| 648 |
)
|
| 649 |
+
fig_cost_breakdown = create_cost_breakdown(df, input_price, cache_read_price, cache_creation_price, completion_price)
|
| 650 |
|
| 651 |
yield (
|
| 652 |
gr.update(visible=True),
|
|
|
|
| 655 |
|
| 656 |
analyze_btn.click(
|
| 657 |
fn=load_and_analyze,
|
| 658 |
+
inputs=[selected_folder, price_input, price_cache_read, price_cache_creation, price_completion],
|
| 659 |
outputs=[
|
| 660 |
analysis_section,
|
| 661 |
plot_steps, plot_cost, plot_tokens, plot_tokens_cost, plot_stacked, plot_cost_breakdown,
|