<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=0.67">
<title>RAGADAST // NEON RAG CONFIGURATOR v1.0</title>
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Audiowide&family=Orbitron:wght@400;700&family=Roboto+Mono:wght@400;700&display=swap" rel="stylesheet">
<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
<style>
/* Reset default margins/padding on the elements this page uses. */
body, h1, h2, p, ul, li, label, button, select {
margin: 0;
padding: 0;
box-sizing: border-box; /* Easier layout */
}
/* --- SYNTHWAVE CORE STYLES --- */
/* Theme palette and reusable glow shadows, consumed via var() below. */
:root {
--neon-pink: #ff00ff;
--neon-cyan: #00ffff;
--neon-orange: #ff8c00;
--dark-bg-start: #1a0033; /* Deep purple */
--dark-bg-end: #0d001a; /* Darker purple/black */
--light-text: #e0e0e0;
--code-text: #00ff8a; /* Neon green for code */
--glow-pink: 0 0 5px var(--neon-pink), 0 0 10px var(--neon-pink), 0 0 15px rgba(255, 0, 255, 0.5);
--glow-cyan: 0 0 5px var(--neon-cyan), 0 0 10px var(--neon-cyan), 0 0 15px rgba(0, 255, 255, 0.5);
}
/* Page background: vertical purple-to-black gradient. */
body {
font-family: 'Orbitron', sans-serif;
background: linear-gradient(to bottom, var(--dark-bg-start), var(--dark-bg-end));
color: var(--light-text);
line-height: 1.6;
padding: 20px;
min-height: 100vh;
}
/* Header styles */
.header {
display: flex;
justify-content: center;
align-items: center;
margin-bottom: 30px;
gap: 30px; /* Space between elements */
}
.header img {
max-width: 150px;
height: auto;
border: 1px solid var(--neon-cyan); /* Subtle border */
box-shadow: var(--glow-cyan);
}
h1 {
font-family: 'Audiowide', cursive;
text-align: center;
color: var(--neon-pink);
letter-spacing: 4px;
font-size: 3.5em; /* Larger title */
text-shadow: var(--glow-pink);
margin: 0 20px; /* Give title space */
}
h2 {
font-family: 'Orbitron', sans-serif;
text-align: center;
color: var(--neon-cyan);
margin-bottom: 20px;
font-size: 1.8em;
text-shadow: var(--glow-cyan);
letter-spacing: 2px;
}
/* Container styles */
/* Very large max-width: the image map below uses natural-size pixel
   coordinates (~2800px wide image), so the page is laid out to fit it. */
.container {
display: flex;
flex-direction: column;
align-items: center;
max-width: 2800px;
margin: 0 auto;
padding: 20px;
}
/* Image Map & Overlays */
/* Positioning context for the absolutely-positioned selection overlays
   created by the script. */
.image-container {
position: relative;
display: inline-block;
margin-bottom: 30px;
border: 2px solid var(--neon-pink);
box-shadow: var(--glow-pink);
}
.image-container img {
display: block; /* remove bottom space */
}
area {
outline: none;
cursor: pointer;
/* Add a subtle effect for discovery if needed */
/* transition: all 0.2s ease-in-out; */
}
/* CSS for highlighting on hover (class toggled from JS mouseover/mouseout). */
.area-highlight-hover {
/* Using filter might be better than outline for complex shapes */
/* filter: drop-shadow(0 0 5px var(--neon-cyan)); */
/* Or keep outline but make it neon */
outline: 3px dotted var(--neon-cyan);
outline-offset: -3px;
}
/* Style for the selected overlay divs appended by the click handler;
   pointer-events: none keeps them from blocking further map clicks. */
.area-selected-overlay {
position: absolute;
background-color: rgba(255, 0, 255, 0.3); /* Neon Pink transparent overlay */
border: 1px solid var(--neon-pink);
pointer-events: none;
box-shadow: inset 0 0 10px rgba(255, 0, 255, 0.5);
}
/* Config/RAG container style */
/* NOTE(review): the markup reuses id="config-container" on several sections.
   Browsers apply this id selector to every duplicate, so styling works, but
   the duplicates should become a shared class — confirm before changing. */
#config-container, #rag-container, .plot-section-container /* Added class for plot container */ {
background-color: rgba(13, 0, 26, 0.6); /* Dark transparent background */
margin-top: 20px;
padding: 20px;
border: 1px solid var(--neon-cyan);
border-radius: 8px;
box-shadow: 0 0 15px rgba(0, 255, 255, 0.3); /* Cyan glow */
width: 100%;
}
#rag-container {
display: flex;
flex-direction: column;
gap: 15px;
}
/* Response Area */
#response-container {
margin-top: 15px;
padding: 15px;
background-color: rgba(0, 0, 0, 0.5); /* Darker transparent */
border-radius: 4px;
border: 1px solid var(--neon-orange);
box-shadow: inset 0 0 8px rgba(255, 140, 0, 0.4);
}
#response-container p { /* Label for the response area */
margin-bottom: 10px;
font-weight: bold;
font-size: 1.5em; /* Adjusted size */
color: var(--neon-orange);
text-shadow: 0 0 5px var(--neon-orange);
letter-spacing: 1px;
}
/* Simulated RAG answer text (written into by the script). */
#rag-output {
font-family: 'Roboto Mono', monospace;
font-size: 1.1em; /* Adjusted size */
color: var(--code-text); /* Neon green for output */
white-space: pre-wrap; /* Wrap text but preserve line breaks */
word-break: break-word;
line-height: 1.5;
min-height: 100px; /* Ensure minimum height */
}
/* Form Elements */
label {
display: block;
margin-bottom: 8px;
color: var(--neon-cyan);
font-size: 1.3em; /* Adjusted size */
font-weight: bold;
text-shadow: var(--glow-cyan);
}
/* Prompt dropdown: native arrow removed, replaced by an inline SVG chevron. */
select {
padding: 12px 15px; /* More padding */
border: 1px solid var(--neon-pink);
border-radius: 4px;
margin-bottom: 15px;
width: 100%;
background-color: rgba(13, 0, 26, 0.8); /* Dark background */
color: var(--light-text);
font-family: 'Orbitron', sans-serif;
font-size: 1.2em; /* Adjusted size */
cursor: pointer;
appearance: none; /* Remove default arrow */
background-image: url('data:image/svg+xml;utf8,<svg fill="%23ff00ff" height="24" viewBox="0 0 24 24" width="24" xmlns="http://www.w3.org/2000/svg"><path d="M7 10l5 5 5-5z"/><path d="M0 0h24v24H0z" fill="none"/></svg>'); /* Custom arrow */
background-repeat: no-repeat;
background-position: right 10px center;
background-size: 24px;
}
select:focus {
outline: none;
box-shadow: var(--glow-pink);
}
button {
background-color: var(--neon-pink);
color: var(--dark-bg-end); /* Dark text on bright button */
border: none;
padding: 12px 25px; /* Bigger button */
border-radius: 4px;
cursor: pointer;
transition: all 0.3s ease;
align-self: flex-start;
margin-bottom: 10px;
font-family: 'Orbitron', sans-serif;
font-size: 1.4em; /* Adjusted size */
font-weight: bold;
letter-spacing: 1px;
text-transform: uppercase; /* UPPERCASE */
box-shadow: var(--glow-pink);
}
/* Inverted colors + stronger glow on hover/focus. */
button:hover, button:focus {
background-color: var(--light-text); /* Light bg on hover */
color: var(--neon-pink);
box-shadow: 0 0 10px var(--neon-pink), 0 0 20px var(--neon-pink), 0 0 30px rgba(255,0,255,0.7);
outline: none;
}
/* Config Output & General Text */
#config-container p, .plot-section-container p {
margin-bottom: 15px;
color: var(--light-text);
font-size: 1.2em; /* Adjusted size */
line-height: 1.7;
}
#config-container p b, .plot-section-container p b {
color: var(--neon-cyan);
font-weight: bold;
}
/* JSON dump of the current grid selection (written by updateConfigText). */
#config-output {
font-family: 'Roboto Mono', monospace;
font-size: 1.1em; /* Adjusted size */
color: var(--neon-cyan); /* Use cyan for config */
white-space: pre-wrap;
word-break: break-word;
background-color: rgba(0,0,0,0.3);
padding: 10px;
border-radius: 4px;
border: 1px dashed var(--neon-cyan);
min-height: 50px;
}
a {
color: var(--neon-orange);
text-decoration: none;
transition: color 0.2s ease, text-shadow 0.2s ease;
}
a:hover {
color: var(--neon-pink);
text-shadow: var(--glow-pink);
}
/* Specific Image Styling */
.explanation-image {
width: 100%;
max-width: 800px; /* Limit max width */
height: auto;
object-fit: contain;
display: block;
margin: 15px auto; /* Center images */
border: 1px solid var(--neon-orange);
box-shadow: 0 0 10px rgba(255, 140, 0, 0.4);
}
.full-width-image {
width: 100%;
height: auto;
object-fit: contain;
display: block;
margin: 20px 0; /* Add some vertical space */
border: 1px solid var(--neon-cyan);
box-shadow: 0 0 10px rgba(0, 255, 255, 0.3);
}
/* Plotly specific container adjustments */
.plotly-container { /* Renamed from .container to avoid conflict */
display: flex;
flex-wrap: wrap; /* Allow wrapping on smaller screens */
justify-content: center; /* Center plots */
gap: 20px; /* Space between plots */
width: 100%;
}
.plot-container {
flex: 1 1 45%; /* Allow plots to grow/shrink, aiming for 2 per row */
min-width: 400px; /* Minimum width before wrapping */
padding: 10px;
border: 1px solid var(--neon-pink);
background-color: rgba(255, 0, 255, 0.05); /* Faint pink background */
border-radius: 5px;
box-shadow: inset 0 0 10px rgba(255, 0, 255, 0.2);
}
/* Ensure Plotly graph divs take up space */
#embedding-plot-ann, #embedding-plot-mmr {
width: 100%;
min-height: 450px; /* Ensure plots have height */
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<img src="ragadast-icon.png" alt="RAGADAST Icon">
<h1> RAGADAST // NEON GRID </h1>
<img src="googlenext25.png" alt="Google Next Icon">
</div>
<h2>Configure RAG Parameters // Engage the Data Stream</h2>
<div> <!-- plain wrapper: the img carries the full-width-image styling itself; having the class on both drew a doubled neon border -->
<img src="click-here.png" alt="Click Component Below" class="full-width-image">
</div>
<div class="image-container">
<img src="ragwizard.png" usemap="#image-map" alt="Configuration Grid">
<map name="image-map">
<!-- Coordinates remain the same -->
<area target="" alt="small" title="small" href="#" coords="492,42,917,259" shape="rect" data-column="chunks">
<area target="" alt="medium" title="medium" href="#" coords="487,270,917,497" shape="rect" data-column="chunks">
<area target="" alt="big" title="big" href="#" coords="487,502,917,722" shape="rect" data-column="chunks">
<area target="" alt="vai gecko" title="vai gecko" href="#" coords="960,38,1372,215" shape="rect" data-column="embedding">
<area target="" alt="mpnet" title="mpnet" href="#" coords="965,225,1369,430" shape="rect" data-column="embedding">
<area target="" alt="w2vec" title="w2vec" href="#" coords="965,465,1369,697" shape="rect" data-column="embedding">
<area target="" alt="vaisearch" title="vaisearch" href="#" coords="1422,49,1842,291" shape="rect" data-column="Vector DB">
<area target="" alt="qdrant-milvius-gke" title="qdrant-milvius-gke" href="#" coords="1407,330,1842,525" shape="rect" data-column="Vector DB">
<area target="" alt="sqlite-faiss" title="sqlite-faiss" href="#" coords="1407,567,1842,692" shape="rect" data-column="Vector DB">
<area target="" alt="ann" title="ann" href="#" coords="1886,26,2299,250" shape="rect" data-column="Retreival">
<area target="" alt="mmr" title="mmr" href="#" coords="1884,260,2301,485" shape="rect" data-column="Retreival">
<area target="" alt="smalltobig" title="smalltobig" href="#" coords="1884,492,2301,694" shape="rect" data-column="Retreival">
<area target="" alt="gemini" title="gemini" href="#" coords="2334,37,2766,112" shape="rect" data-column="Model">
<area target="" alt="gemma tpu" title="gemma tpu" href="#" coords="2336,137,2768,255" shape="rect" data-column="Model">
<area target="" alt="gemma-l4" title="gemma-l4" href="#" coords="2339,277,2551,487" shape="rect" data-column="Model">
<area target="" alt="gemma-a100" title="gemma-a100" href="#" coords="2574,271,2768,483" shape="rect" data-column="Model">
<area target="" alt="llama-l4" title="llama-l4" href="#" coords="2349,527,2546,717" shape="rect" data-column="Model">
<area target="" alt="mistral-l4" title="mistral-l4" href="#" coords="2576,519,2773,723" shape="rect" data-column="Model">
</map>
</div>
<div> <!-- plain wrapper: the img carries the full-width-image styling itself; having the class on both drew a doubled neon border -->
<img src="archi.png" alt="System Architecture Diagram" class="full-width-image">
</div>
<div id="rag-container">
<div>
<label for="prompt-select">SELECT INPUT QUERY:</label>
<select id="prompt-select">
<option value="prompt1">What is the role of artificial intelligence in Google's security strategy?</option>
<option value="prompt2">I got a ticket mentioning the custom container in Vertex AI instance fails to start, how can I help the customer?</option>
</select>
</div>
<button id="ask-rag">ENGAGE THE GRID</button>
<div id="response-container">
<p>DATA STREAM:</p>
<pre id="rag-output">[ Output Will Render Here After Engaging The Grid... Standby... ]</pre>
</div>
</div>
<div id="config-container">
<h2>SYSTEM PARAMETERS:</h2>
<pre id="config-output">[ Configuration Will Update Based On Grid Selection Above ]</pre>
</div>
<!-- Explanations Sections -->
<div id="config-container">
<h2>COMPONENT ANALYSIS: DATA</h2>
<p><b>DATA SOURCE</b> // The knowledge core. For this simulation, we processed three distinct datasets: Vertex AI Q&A, HR Policies, and Datacenter Protocols. Access the raw data streams on Hugging Face (<a href="https://huggingface.co/datasets/fredmo/dc_rules_dataset" target="_blank">huggingface.co/fredmo</a>) for deep-dive analysis.</p>
<div style="text-align: center;">
<img src="dataset.png" alt="Dataset Structure" class="explanation-image">
</div>
</div>
<div id="config-container">
<h2>COMPONENT ANALYSIS: CHUNKING</h2>
<p><b>DATA SLICING // CHUNKING</b> is the process of segmenting large data streams into manageable units for the AI. This segmentation enhances precision, accelerates retrieval, and navigates input constraints. Optimal slice size and overlap are critical parameters, requiring calibration based on the specific mission, data topology, and query patterns. Balancing context window size against chunk granularity is key to peak RAG performance. Larger context allows the AI processor to synthesize insights from a wider data horizon.</p>
<div>
<img src="chunking.png" alt="Chunking Visualization" class="explanation-image">
</div>
</div>
<div id="config-container">
<h2>COMPONENT ANALYSIS: EMBEDDING</h2>
<p><b>SEMANTIC VECTORIZATION // EMBEDDING</b> translates textual data slices into high-dimensional vector coordinates. This numerical representation allows the RAG system to navigate the knowledge base using semantic proximity rather than just keyword matching. User queries are also vectorized, enabling the retrieval core to identify contextually relevant data chunks. The fidelity of the embedding model—its ability to capture subtle semantic nuances—directly dictates the RAG pipeline's accuracy and insight generation capabilities. Fine-tuning the embedding parameters is crucial for optimizing the retrieval signal.</p>
<p>Explore vector space with the <a href="https://projector.tensorflow.org/" target="_blank">TensorFlow Embedding Projector</a>.</p>
<div>
<img src="embedding.png" alt="Embedding Visualization" class="explanation-image">
</div>
</div>
<div id="config-container">
<h2>COMPONENT ANALYSIS: VECTOR DATABASE</h2>
<p><b>VECTOR DATASTORE // VECTOR DATABASE</b> is the high-speed repository for the vectorized data chunks (embeddings). Optimized for lightning-fast similarity searches in high-dimensional space, it enables the RAG pipeline to pinpoint the most semantically relevant data chunks corresponding to a user's vectorized query. This rapid, precise retrieval mechanism is fundamental to providing contextually accurate responses. The choice of datastore, its indexing algorithm (e.g., ANN), and scalability directly influence the RAG system's speed and efficiency.</p>
</div>
<!-- Plot Section -->
<div class="plot-section-container" id="config-container"> <!-- Reuse styling -->
<h2>COMPONENT ANALYSIS: RETRIEVAL</h2>
<p><b>DATA RETRIEVAL ALGORITHMS // RETRIEVAL METHODS</b> dictate how relevant data chunks are selected from the Vector Datastore.
<br><b>Approximate Nearest Neighbors (ANN):</b> Prioritizes speed and direct semantic similarity. Efficiently finds vectors closest to the query vector, focusing on relevance but potentially retrieving redundant information slices.
<br><b>Maximal Marginal Relevance (MMR):</b> Balances similarity with diversity. Aims to retrieve chunks that are relevant *and* offer distinct perspectives, often re-ranking initial ANN results to reduce overlap and broaden context.
<br><b>Retrieval Scope (Small vs. Big):</b> Determines the *quantity* of chunks retrieved. 'Small' retrievals offer high precision but risk missing context. 'Large' retrievals cast a wider net, increasing contextual richness but also the chance of noise. Tuning the retrieval method (ANN, MMR, hybrid) and scope is essential for optimizing the information quality fed to the final generation model.</p>
<div class="plotly-container">
<div class="plot-container">
<div id="embedding-plot-ann"></div>
</div>
<div class="plot-container">
<div id="embedding-plot-mmr"></div>
</div>
</div>
<p style="text-align: center; margin-top: 15px; font-style: italic;">Observe the vector space: ANN clusters tightly around the prompt vector. MMR selects nearby vectors but enforces diversity, pushing selections further apart while maintaining relevance.</p>
</div>
<!-- End Plot Section -->
<div id="config-container">
<h2>SYSTEM BLUEPRINTS & TOOLS:</h2>
<p><b>DIY CODE MODULES:</b><br>
<a href="https://huggingface.co/spaces/fredmo/ragadast/blob/main/Mistral_Mpnet_ANN_MMR.ipynb" target="_blank">> Mpnet // Mistral // FAISS // ANN // MMR Circuit</a><br>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/main/notebooks/official/vector_search" target="_blank">> Vertex AI Vector Search Protocol</a><br>
</p>
<p>
<b>MANAGED SERVICES // CLOUD PLATFORMS:</b> <br>
<a href="https://cloud.google.com/enterprise-search?e=0&hl=en" target="_blank">> Vertex AI Search</a> | <a href="https://cloud.google.com/products/agentspace?" target="_blank">Google Agentspace</a> | <a href="https://github.com/GoogleCloudPlatform/generative-ai/tree/main/gemini/rag-engine" target="_blank">Vertex AI RAG Engine</a>
</p>
</div>
<script>
// Wire up the image-map configurator: hover highlights, click-to-select
// overlays, the live config JSON, and the simulated RAG query flow.
// Deferred to DOMContentLoaded so all referenced elements exist.
document.addEventListener('DOMContentLoaded', () => {
// Cached element handles. Each grid <area> carries a data-column attribute
// naming the pipeline column it belongs to (chunks, embedding, Model, ...).
const areas = document.querySelectorAll('area[data-column]');
const configOutput = document.getElementById('config-output');
const askRagButton = document.getElementById('ask-rag');
const promptSelect = document.getElementById('prompt-select');
const ragOutput = document.getElementById('rag-output');
const imageContainer = document.querySelector('.image-container');
let selectedAreas = {}; // Store currently selected areas, per column
let overlays = {}; // Store overlay elements by column
areas.forEach(area => {
  // Neon outline while the pointer is over a map region.
  area.addEventListener('mouseover', () => {
    area.classList.add('area-highlight-hover');
  });
  area.addEventListener('mouseout', () => {
    area.classList.remove('area-highlight-hover');
  });
  // Clicking a region drops a translucent overlay on the image and records
  // the selection for that region's column (one active choice per column).
  area.addEventListener('click', (event) => {
    event.preventDefault(); // href="#" would otherwise jump to page top
    const column = area.dataset.column;
    // Replace any previous overlay for this column.
    if (overlays[column]) {
      overlays[column].remove();
      overlays[column] = null;
    }
    const overlay = document.createElement('div');
    overlay.classList.add('area-selected-overlay');
    overlays[column] = overlay;
    const shape = area.shape;
    if (shape === 'rect') {
      const img = imageContainer.querySelector('img');
      const imgRect = img.getBoundingClientRect();
      // Map coords are expressed in the image's natural pixel space.
      // BUGFIX: the scale factors were previously computed but never applied,
      // so overlays drifted whenever the image rendered at a non-natural
      // size. Scale coords to the displayed size (1:1 fallback before the
      // image metadata has loaded, when naturalWidth/Height are 0).
      const scaleX = img.naturalWidth ? imgRect.width / img.naturalWidth : 1;
      const scaleY = img.naturalHeight ? imgRect.height / img.naturalHeight : 1;
      const [x1, y1, x2, y2] = area.coords.split(',').map(Number);
      overlay.style.left = `${x1 * scaleX}px`;
      overlay.style.top = `${y1 * scaleY}px`;
      overlay.style.width = `${(x2 - x1) * scaleX}px`;
      overlay.style.height = `${(y2 - y1) * scaleY}px`;
    } else {
      // Only rect areas exist in the map today; bail out on anything else.
      console.warn(`Overlay shape "${shape}" not implemented.`);
      return;
    }
    imageContainer.appendChild(overlay);
    selectedAreas[column] = area;
    updateConfigText();
  });
});
// Render the current grid selection into #config-output as pretty-printed
// JSON ({column: area title}), or restore the placeholder when nothing is
// selected. Reads the closure variables `selectedAreas` and `configOutput`.
function updateConfigText() {
  const config = {};
  for (const column in selectedAreas) {
    if (selectedAreas[column]) {
      config[column] = selectedAreas[column].title;
    }
  }
  // BUGFIX: the old `JSON.stringify(config, null, 2) || placeholder` never
  // fell back — JSON.stringify({}) is the truthy string "{}". Check for
  // emptiness explicitly instead.
  configOutput.textContent = Object.keys(config).length > 0
    ? JSON.stringify(config, null, 2)
    : "[ Configuration Will Update Based On Grid Selection Above ]";
}
askRagButton.addEventListener('click', () => {
ragOutput.textContent = "[ Processing Query... Engaging RAG Matrix... ]"; // Placeholder
ragOutput.style.color = 'var(--neon-orange)'; // Indicate processing
// Add a slight delay to simulate processing before showing results
setTimeout(() => {
ragOutput.style.color = 'var(--code-text)'; // Reset color for output
const selectedPromptValue = promptSelect.value;
const configText = configOutput.textContent;
let config = {};
try {
// Handle case where config hasn't been selected yet
if (configText.startsWith('[')) {
config = { Model: 'default', Retreival: 'default', 'Vector DB': 'default' }; // Provide defaults or handle error
ragOutput.textContent = "[ ERROR: Please select configuration parameters from the grid above before engaging. ]";
ragOutput.style.color = 'red';
return; // Stop execution
} else {
config = JSON.parse(configText);
}
} catch (e) {
console.error("Error parsing config JSON:", e);
ragOutput.textContent = "[ ERROR: Invalid configuration state. Please re-select parameters. ]";
ragOutput.style.color = 'red';
return; // Stop execution
}
const selectedModel = config['Model'] || 'default'; // Add fallbacks
const selectedRetrieval = config['Retreival'] || 'default';
const selectedVectorDB = config['Vector DB'] || 'default';
// --- Paste the extensive 'answer' and 'retrievalText' logic here ---
// (The large block of if/else statements determining the text content)
// ... (omitted for brevity - use your original JS logic here) ...
let answer = "[ Default simulated response. Select parameters to see variations. ]";
let retrievalText = "\n\n[ Default retrieval info. Select parameters for details. ]";
// --- Start of the Answer Logic Block (Paste your original logic here) ---
if (selectedPromptValue === "prompt1") {
answer = "AI plays a significant role in Google's security strategy by automating threat detection, improving incident response, and enhancing security analysis. For example, AI can be used to identify suspicious activity, detect malware, and analyze security logs.";
if (selectedModel === "gemini" && (selectedRetrieval === "ann" || selectedRetrieval === "smalltobig")){
answer = "Based on the context, here's how we can define intelligence in Google's security: Intelligence in Google's security refers to the application of artificial intelligence (AI) to enhance various aspects of their security posture. This includes: Automating Threat Detection: AI algorithms are used to analyze vast amounts of data to identify patterns and anomalies that indicate potential security threats, often in real-time.Improving Incident Response: AI can help automate responses to security incidents, such as isolating infected systems or blocking malicious traffic, leading to faster and more efficient remediation.Enhancing Security Analysis: AI tools can analyze security logs, identify vulnerabilities, and provide insights that human analysts might miss, leading to a better understanding of security risks and how to mitigate them.Specific examples of how this intelligence is applied include:Identifying suspicious activity: AI can learn normal user behavior and flag deviations that might indicate a compromised account.Detecting malware: Machine learning models can be trained to recognize the characteristics of known and even novel malware.Analyzing security logs: AI can sift through massive amounts of log data to correlate events and identify potential attack patterns.In summary, when the context is focused on the role of AI, intelligence in Google's security refers to the use of artificial intelligence technologies to proactively and reactively enhance their security measures.It's important to note that intelligence in a broader security context can also refer to human expertise, threat intelligence gathered from external sources, and other forms of knowledge used to inform security decisions. However, based on the provided examples, the focus here is specifically on artificial intelligence.";
}
if (selectedModel === "gemini" && selectedRetrieval === "mmr"){
answer = "Let's analyze each output: AI Role: AI automates threat detection, improves incident response, and enhances security analysis. This suggests intelligence relates to the ability to learn, analyze, and act on security information. Threat Analysis Group: This group gathers and analyzes information about threats and threat actors. This suggests intelligence is also about knowledge about threats and how to defend against them. Measuring Effectiveness: Red Teaming involves simulating attacks to test defenses. This suggests intelligence isn't just about knowing the threats but also about testing and validating the effectiveness of the security measures based on that knowledge. Combining these aspects, I can define intelligence in Google's security as the ability to understand, analyze, and act on information related to security threats, vulnerabilities, and defenses to protect Google's systems and users. This involves both automated capabilities (AI) and human expertise (Threat Analysis Group), and it's a continuous process of learning and improvement (Red Teaming).Input: What is intelligence in Google's security ? Output: Intelligence in Google's security refers to the collection, analysis, and application of information to understand, anticipate, and mitigate security threats. This includes using artificial intelligence to automate threat detection and analysis, gathering and analyzing information on threat actors and their tactics (as done by the Threat Analysis Group), and using insights gained from security exercises like Red Teaming to improve defenses. Essentially, it's about having the knowledge and capabilities to proactively defend against security risks.";
}
//GEMMA TPU L4 A100 :: P1 ANN et MMR here
if ( (selectedModel === "gemma tpu" || selectedModel === "gemma-a100" || selectedModel === "gemma-l4" ) && (selectedRetrieval === "ann" || selectedRetrieval === "smalltobig")){
answer = "It seems like you're asking about how Google uses AI for security. The output you provided is accurate. Here's a breakdown of how AI contributes to Google's security strategy: * **Automating Threat Detection:** AI algorithms can analyze massive amounts of data from various sources (network traffic, user behavior, system logs) to identify patterns and anomalies that might indicate a security threat. This automation helps security teams detect threats faster and more efficiently than manual analysis. * **Improving Incident Response:** When a security incident occurs, AI can help accelerate the response process. It can analyze the nature of the attack, identify affected systems, and suggest appropriate mitigation steps. This allows security teams to contain and resolve incidents more quickly, minimizing damage. * **Enhancing Security Analysis:** AI can sift through vast amounts of security data to uncover hidden relationships and trends. This can help security analysts understand the bigger picture of threats, identify emerging attack patterns, and develop more effective security strategies. **Examples of AI in Google Security:** * **VirusTotal:** Google's VirusTotal uses AI to analyze files and URLs for malware. * **Safe Browsing:** This service uses AI to identify malicious websites and warn users. * **Google Cloud Security Command Center:** This platform leverages AI to detect threats, analyze security posture, and provide recommendations for improvement. Let me know if you have any other questions about AI and security!";
}
if ((selectedModel === "gemma tpu" || selectedModel === "gemma-a100" || selectedModel === "gemma-l4" ) && selectedRetrieval === "mmr"){
answer = "Based on the context you provided, intelligence in Google's security refers to **information gathered and analyzed to understand and mitigate threats**. Here's a breakdown: * **Threat Intelligence:** This is the core of what Google's security intelligence encompasses. It involves: * **Monitoring threat actors:** Identifying who is trying to attack Google and its users (as done by the Threat Analysis Group). * **Understanding their tactics and techniques:** Learning how these attackers operate, what tools they use, and their goals. * **Sharing this information:** Disseminating threat intelligence to the wider security community to help everyone stay safer. * **AI-powered Analysis:** Google leverages AI to process vast amounts of security data, uncovering patterns and anomalies that might indicate threats. This helps automate threat detection and improve the speed and accuracy of security analysis. * **Measuring Effectiveness:** Google uses methods like Red Team exercises to test its security mechanisms against simulated attacks. This provides valuable insights into what works and what needs improvement, allowing them to continuously refine their security posture. Essentially, intelligence in Google's security is about being proactive, informed, and adaptable in the face of ever-evolving cyber threats. ";
}
// Llama, prompt 1: ANN/small-to-big and MMR canned responses follow.
// Llama-L4 + ANN or small-to-big retrieval: hardcoded demo answer on AI/ML in Google security.
if (selectedModel === "llama-l4" && (selectedRetrieval === "ann" || selectedRetrieval === "smalltobig")){
answer = "In Google's security strategy, intelligence refers to the use of artificial intelligence (AI) and machine learning (ML) technologies to analyze and detect potential security threats in real-time. This includes automating threat detection, improving incident response, and enhancing security analysis. Google's AI-powered security intelligence is designed to identify and respond to emerging threats more quickly and effectively than traditional security methods. Some examples of how AI is used in Google's security include: 1. **Anomaly detection**: AI can identify unusual patterns of behavior that may indicate a security threat, such as a sudden spike in login attempts from a specific location. 2. **Malware detection**: AI-powered systems can analyze code and detect malware that traditional antivirus software may miss. 3. **Security log analysis**: AI can quickly sift through large volumes of security logs to identify potential threats and alert security teams. 4. **Predictive analytics**: AI can analyze historical data and current trends to predict potential security threats and alert security teams before an attack occurs. 5. **Adversarial defense**: AI can help defend against advanced persistent threats (APTs) by identifying and blocking malicious activities that traditional security measures may not detect. By leveraging AI and ML, Google's security strategy is able to stay ahead of emerging threats and keep its users and infrastructure protected. Some examples of Google's AI-powered security products and services include: 1. **Google Cloud Security Command Center (Cloud SSC)**: A threat detection and incident response platform that uses AI to identify and respond to potential security threats. 2. **Google Cloud AI Platform**: A platform for building and deploying AI and ML models, including security-focused models. 3. 
**Google Cloud Security Scanner**: A web application security scanner that uses AI to identify vulnerabilities and recommend remediation steps. Overall, Google's use of AI and ML in its security strategy has enabled the company to stay ahead of emerging threats and protect its users and infrastructure from a wide range of security risks.";
}
// Llama-L4 model with MMR retrieval: canned prompt-1 response defining
// "intelligence" in Google's security from the retrieved context.
const usesLlamaWithMmr =
  selectedRetrieval === "mmr" && selectedModel === "llama-l4";
if (usesLlamaWithMmr) {
  answer = "The role of intelligence in Google's security involves using data and information to analyze and predict potential security threats. In that context, artificial intelligence can play a crucial role in various aspects of Google's security strategy. However, the provided information doesn't directly address what intelligence in Google's security entails. However, we can mix and match concepts from the given context to define intelligence in Google's security: Based on the provided information, intelligence in Google's security can be considered as real-time actionable data and patterns extracted, analyzed, and matched against external threat feeds, anomaly detection, and known attack patterns. The collected intelligence can be focused on threat data like: - External threat intelligence: Focused on threat actors, techniques, tactics, and malware. - Internal incident intelligence: Analyzing and correlating data from the various logs, such as system logs and network logs, for anomalies and suspicious behavior. The analysis of this intelligence allows Google's robust security mechanisms to provide proactive threat analysis, and incident response, proactively strengthening detection against attacks and manipulation of their systems, Infrastructure and products. In short, intelligence in Google's security entails analyzing real-time data to assess and predict potential threats, something which greatly benefits from using advanced technologies like AI and big data.";
}
// Mistral 7B, prompt 1: ANN/small-to-big and MMR canned responses follow.
// Mistral-L4 + ANN or small-to-big retrieval: hardcoded demo answer (note the
// text visibly duplicates its own sections mid-way — preserved verbatim as demo output).
if (selectedModel === "mistral-l4" && (selectedRetrieval === "ann" || selectedRetrieval === "smalltobig")){
answer = "I. Introduction Artificial Intelligence (AI) has become an essential component of modern cybersecurity strategies. Google, as a leading technology company, has also integrated AI into its security strategy to enhance its ability to detect and respond to cyber threats. In this article, we will explore the role of AI in Google's security strategy and how it contributes to the company's overall security posture. II. Automating Threat Detection One of the primary uses of AI in Google's security strategy is to automate threat detection. AI algorithms can analyze vast amounts of data from various sources, including network traffic, security logs, and user behavior, to identify potential threats. For example, AI can be used to detect anomalous activity that may indicate a cyber attack, such as a I. Introduction Artificial Intelligence (AI) has become an essential component of modern cybersecurity strategies. Google, as a leading technology company, has also integrated AI into its security strategy to enhance its ability to detect and respond to cyber threats. In this article, we will explore the role of AI in Google's security strategy and how it contributes to the company's overall security posture. II. Automating Threat Detection One of the primary uses of AI in Google's security strategy is to automate threat detection. AI algorithms can analyze vast amounts of data from various sources, including network traffic, security logs, and user behavior, to identify potential threats. For example, AI can be used to detect anomalous activity that may indicate a cyber attack, such as a sudden increase in traffic from a particular IP address or unusual user behavior. III. Improving Incident Response Another way that AI is used in Google's security strategy is to improve incident response. Once a threat has been detected, AI can help prioritize and respond to incidents based on their severity and potential impact. 
For example, AI can be used to automatically isolate a compromised system or block malicious traffic to prevent further damage. IV. Enhancing Security Analysis AI is also used to enhance security analysis in Google's security strategy. By analyzing large amounts of data, AI can identify patterns and trends that may indicate a cyber threat. For example, AI can be used to analyze security logs to identify potential vulnerabilities or to detect and analyze malware. V. Conclusion In conclusion, AI plays a significant role in Google's security strategy by automating threat detection, improving incident response, and enhancing security analysis. By leveraging AI, Google is able to quickly and effectively respond to cyber threats, reducing the risk of damage to its systems and user data. As cyber threats continue to evolve, the use of AI in security is becoming increasingly important, and Google is at the forefront of this trend.sudden increase in traffic from a particular IP address or unusual user behavior. III. Improving Incident Response Another way that AI is used in Google's security strategy is to improve incident response. Once a threat has been detected, AI can help prioritize and respond to incidents based on their severity and potential impact. For example, AI can be used to automatically isolate a compromised system or block malicious traffic to prevent further damage. IV. Enhancing Security Analysis AI is also used to enhance security analysis in Google's security strategy. By analyzing large amounts of data, AI can identify patterns and trends that may indicate a cyber threat. For example, AI can be used to analyze security logs to identify potential vulnerabilities or to detect and analyze malware. V. Conclusion In conclusion, AI plays a significant role in Google's security strategy by automating threat detection, improving incident response, and enhancing security analysis. 
By leveraging AI, Google is able to quickly and effectively respond to cyber threats, reducing the risk of damage to its systems and user data. As cyber threats continue to evolve, the use of AI in security is becoming increasingly important, and Google is at the forefront of this trend.";
}
// Mistral-L4 model with MMR retrieval: canned prompt-1 response covering
// AI-driven threat detection, TAG, and Red Team exercises.
const usesMistralWithMmr =
  selectedRetrieval === "mmr" && selectedModel === "mistral-l4";
if (usesMistralWithMmr) {
  answer = "Artificial intelligence (AI) plays a significant role in Google's security strategy by automating threat detection, improving incident response, and enhancing security analysis. For example, AI can be used to identify suspicious activity, detect malware, and analyze security logs. The Threat Analysis Group (TAG) at Google is a team of security researchers who monitor threat actors, their tactics, and techniques, improving the safety and security of Google products and sharing intelligence with the online community. Google measures and improves the effectiveness of its security mechanisms through Red Team exercises. These simulations of attacks help evaluate the detection and response capabilities of Google's security systems.";
}
} else if (selectedPromptValue === "prompt2") {
// Default prompt-2 answer; the model/retrieval-specific branches that follow overwrite it.
answer = "To help the customer with the custom container failing to start in Vertex AI, you can investigate the container logs for error messages, check the container image configuration, ensure all necessary resources are allocated, and verify network settings.";
// Gemini + ANN or small-to-big retrieval: hardcoded prompt-2 demo answer
// (Vertex AI custom-container startup troubleshooting guide).
if (selectedModel === "gemini" && (selectedRetrieval === "ann" || selectedRetrieval === "smalltobig")){
answer = "Okay, here's how you can approach helping a customer whose custom container in a Vertex AI instance fails to start: Understanding the Problem: Custom Container: The issue lies with a user-provided Docker container that they are trying to run within a Vertex AI environment (likely a Notebook, Training Job, or Prediction endpoint). Fails to Start: This means the container isn't initializing correctly, preventing the intended workload from running. Bookmark_border: This seems to be an artifact from the conversation history, it is not relevant for helping the user. Troubleshooting Steps to Guide the Customer: Gather Information: What type of Vertex AI resource is using the container? (e.g., Notebook instance, training job, prediction endpoint). This will impact how to approach debugging. Where is the container image stored? (e.g., Artifact Registry, Container Registry) Can they share the Dockerfile or image build process? (This is crucial for identifying potential problems) What is the error message? It is crucial for any troubleshooting. Have they made any recent changes to their container? What are they trying to accomplish with the container? Understanding their goal can reveal if they are on the right track. Are other containers working in the same setup? This helps establish if the container image or the Vertex AI setup is the problem. Is it a public or private container? If the container is private, ensure that Vertex AI service account has permission to pull the container from Container Registry or Artifact Registry. Check the Basics: Container Registry/Artifact Registry Permissions: Verify that the service account used by Vertex AI has the necessary IAM permissions to pull the container image from the registry. This is a frequent cause of failures. Common Errors: 401 Unauthorized, 403 Forbidden. Container Image Existence: Double-check that the image name and tag are correct, and that the image exists in the specified registry. 
Network Connectivity: If the container needs external network access, ensure there are no firewall rules or networking issues preventing it. Disk Space: Container needs adequate disk space within the VM. Docker Container Issues (using shared image build process) Dockerfile Syntax and Instructions: Look for syntax errors, typos, or incorrect commands in the Dockerfile. Ensure there is a clear CMD or ENTRYPOINT instruction that starts the main process in the container. Verify that any required dependencies are correctly installed in the image. Entrypoint/CMD: Make sure the entrypoint/cmd are executable. Use chmod +x <your-script> Verify the entrypoint starts the correct process and the process stays in foreground. Base Image: Confirm the base image is appropriate and has no known issues that could prevent startup. Environment Variables: Ensure that the container is not missing any required environment variables. Port Exposing: If the container exposes a port, make sure it matches what Vertex AI expects. File System: Check if the container is attempting to access files not present or not correctly mounted. Resource Constraints: Check if the container requires more resources (CPU, memory) than the Vertex AI instance provides. Vertex AI Resource-Specific Troubleshooting: Notebook Instances: Examine the notebook instance logs for errors related to container startup. Try starting a simpler, known-good container to rule out environment issues. Training Jobs/Prediction Endpoints: Check Vertex AI job logs for more detailed error messages during container initialization. Verify that the container is built for the correct architecture. Debugging within the Container (if possible): If the container starts but then fails, try to get a shell inside the container using the debug features to examine the file system and running processes to diagnose the problem. Suggest Simplification: Encourage the user to simplify their Dockerfile and container content to isolate the issue. 
Suggest building a very basic container to check that the container infrastructure itself is functioning correctly. Gather Logs: If possible, ask the customer for the Vertex AI Resource logs that they are using and any logs related to the container. Container logs will be stored in the Vertex AI training/prediction logs Example Questions to Ask the Customer: Could you share the Dockerfile you used to build your container image? What is the exact error message you are seeing when the container fails to start? Can you double-check that the Vertex AI service account has the necessary permissions to pull the container image from the registry? Have you tried running a simpler container image to rule out issues with the Vertex AI setup? Important Tips: Be Patient: Container startup issues can be complex. Start Basic: Don't jump to complex solutions initially. Encourage the user to break things down: They should test their container outside of the Vertex environment first. Review the documentation: Guide the customer to official Vertex AI documentation regarding custom containers, as it often provides troubleshooting guides and best practices. By systematically addressing these points and working closely with the customer, you should be able to effectively troubleshoot why their custom container is failing to start. Remember to focus on gathering detailed information, verifying the fundamentals, and then digging deeper into the container's specifics and the Vertex AI resource it is being used in.";
}
// Gemini + MMR retrieval: hardcoded prompt-2 demo answer
// (structured customer-support troubleshooting guide with response template).
if (selectedModel === "gemini" && selectedRetrieval === "mmr"){
answer = "Okay, let's break down how to help the customer with a custom container failing to start in a Vertex AI instance. This is a common issue and requires a systematic approach to troubleshooting. Here's a guide you can follow, with explanations tailored for customer support: Understanding the Problem Custom Container: The customer is using their own Docker container with their ML code and dependencies, not a pre-built Vertex AI container. This adds complexity but offers more flexibility. Failing to Start: The container isn't launching correctly within the Vertex AI environment. This means their training job or model deployment isn't working. Troubleshooting Steps for the Customer Here's a structured approach you can guide the customer through: Gather Detailed Information: Job Type: Is this a training job, online prediction, or batch prediction? Knowing the context helps narrow down possibilities. Error Messages: The most crucial piece of information. Ask the customer for the exact error messages they are seeing in the Vertex AI logs. These are usually found in the job's details or the endpoint's logs. Container Registry: Where is the custom container image stored? (e.g., Google Container Registry - gcr.io, Docker Hub, or a private registry). Container Image Tag/Digest: Which specific version of their container image are they using? Vertex AI Configuration: How is the Vertex AI job configured? Specifically: Machine Type: Which compute instance type are they using? (e.g., n1-standard-4, a2-highgpu-1g) Region: Which Google Cloud region are they deploying to? Network: Are there any specific network configurations they're using? Service Account: What permissions does the service account have? Dockerfile: Can the customer share their Dockerfile? This can help identify issues with image construction. Test Scenario: Ask if the container runs as expected outside of Vertex AI (e.g., locally or in a different environment). 
Common Causes & Solutions (Walk Through These Based on Customer Info) Container Image Issues: Image Does Not Exist / Is Not Accessible: Check the Registry Path: Ensure the container image path is correct in the Vertex AI job configuration, including the registry and image tag. Typographical errors are common. Authentication: If using a private registry, verify that Vertex AI has the correct credentials to pull the image. This might involve service account permissions or other authentication mechanisms. Image Not Starting Correctly: Entrypoint Errors: Inspect the ENTRYPOINT or CMD command in the Dockerfile. Make sure it executes properly when the container starts. Is there a missing interpreter or a command with errors? Dependency Issues: Check if all required libraries, dependencies, and environment variables are included in the container image. Port Configuration: If deploying online prediction, ensure the container is listening on the expected port (usually 8080). Startup Time: The container might take a long time to start. Increase the timeout if necessary in the configuration. Base Image: The customer might be using a base image that is outdated or does not work well with Vertex AI. Suggest using a base image from the official Google Cloud Deep Learning containers or TensorFlow images. Vertex AI Configuration Issues: Resource Limits: The requested compute resources might not be sufficient for the container. The container might be crashing silently because of the lack of memory. Service Account Permissions: Ensure the service account used by the Vertex AI job has the necessary permissions to access Google Cloud Storage, BigQuery, or other resources needed by the container. Network Configuration: Make sure network settings are correct, especially if accessing resources in a VPC. Incorrect Region/Zone: Deploying a container that is only available in a specific region to the wrong region could cause problems. 
Vertex AI System Issues: Service Outages: Though less common, there might be temporary service issues with Vertex AI. Check the Google Cloud status dashboard. API Version: Make sure the Vertex AI SDK version is up-to-date. Debugging and Logging: Vertex AI Logs: The primary source of information. Guide the customer on how to navigate to the logs in the Vertex AI console (or using the gcloud command-line tool). Pay close attention to error codes, timestamps, and stack traces. Cloud Logging: If the container writes to logs, those logs will also be available in Cloud Logging. Container Logs (if possible): If the container starts, but then crashes later, check the logs inside the container. This might require running the container in a shell and using command line commands. Local Testing: Encourage the customer to run the container locally to reproduce the problem and find issues in a more controlled environment. Escalation (If Needed): If the problem cannot be solved with basic troubleshooting, you may need to escalate the ticket. Include all the information that was gathered to speed up resolution. Key Points to Communicate to the Customer Be Detailed: Explain that precise information (error messages, configuration details) is critical for efficient troubleshooting. Start Simple: Suggest starting with simple tests like deploying a very basic Hello World container. Then, gradually add complexity. Local Reproducibility: If the customer cannot reproduce the problem locally, the issue most likely exists in Vertex AI. Patience: Remind them that debugging container issues can be tricky and requires a systematic approach. Google Cloud Documentation: Point them to relevant Vertex AI documentation pages for custom containers, troubleshooting, and logging. Stack Overflow: Encourage the customer to search Stack Overflow for similar issues, and to post their problem there if they cannot find a solution. 
Example Response Template Hi [Customer Name], I understand you're having trouble with your custom container failing to start in Vertex AI. This is a common issue, and we can definitely help you troubleshoot it. To start, could you provide me with the following information: What type of job is this (training, online prediction, batch prediction)? Can you please copy the exact error messages you're seeing in the Vertex AI logs? Where is your container image stored (e.g., gcr.io, Docker Hub)? What is the exact image tag or digest you're using? Could you share your Dockerfile? What machine type are you using, what region are you deploying to, and what specific network configuration are you using? Once I have this information, we can start looking for the root cause. In the meantime, you can also review the Vertex AI documentation on custom containers. Thank you, [Your Name] By following this structured approach, you can effectively guide the customer towards resolving their custom container issue in Vertex AI. Remember to be patient, clear, and focused on gathering the right information.";
}
// Any Gemma variant + ANN or small-to-big retrieval: hardcoded prompt-2 demo answer
// (information gathering, common causes, and escalation guidance).
if ( (selectedModel === "gemma tpu" || selectedModel === "gemma-a100" || selectedModel === "gemma-l4" ) && (selectedRetrieval === "ann" || selectedRetrieval === "smalltobig")){
answer = "It sounds like the customer is encountering a frustrating issue! Here's a breakdown of how you can help them troubleshoot the custom container in Vertex AI instance fails to start error: **1. Gather Information:** * **Ticket Details:** Carefully review the ticket for any specific error messages, timestamps, and steps the customer took before the issue occurred. * **Custom Container Details:** * What is the Docker image used for the custom container? * What are the container's dependencies? * Has the container been tested and run successfully in other environments? * **Vertex AI Instance Configuration:** * What type of Vertex AI instance is being used (e.g., Workbench, Training)? * What machine type and region are configured? * Are there any specific settings related to the custom container in the instance configuration? **2. Common Causes and Troubleshooting Steps:** * **Image Issues:** * **Pull Errors:** Verify that the Docker image can be successfully pulled from the specified registry. * **Image Size:** Ensure the image size is within the limits of the Vertex AI instance. * **Dependencies:** Check if all necessary dependencies are included in the Docker image. * **Resource Constraints:** * **CPU/Memory:** The container might be exceeding the available resources on the instance. Consider increasing the instance's machine type. * **Disk Space:** The container might be running out of disk space. Check the instance's disk usage and potentially increase the size. * **Networking:** * **Firewall Rules:** Verify that the instance's firewall rules allow the necessary network traffic for the container. * **DNS Resolution:** Ensure that the container can resolve domain names correctly. * **Environment Variables:** * **Missing or Incorrect:** Double-check that all required environment variables are set correctly within the container. 
* **Permissions:** * **Container Runtime:** The container runtime (e.g., Docker) might not have the necessary permissions to run the container. * **Logs:** * **Container Logs:** Examine the container logs for any clues about the failure. These logs are often available in the Vertex AI console or through the command line. **3. Escalation and Support:** * **Google Cloud Support:** If the issue persists, encourage the customer to contact Google Cloud Support for further assistance. They have access to more advanced tools and expertise. * **Stack Overflow:** Suggest the customer post their question on Stack Overflow with the tag `google-cloud-vertex-ai`. This can help them connect with other developers who might have encountered similar problems. **Remember:** * Be patient and understanding. Technical issues can be frustrating. * Guide the customer through troubleshooting steps systematically. * Document all steps taken and any findings. * Escalate the issue to Google Cloud Support if necessary.";
}
// Any Gemma variant + MMR retrieval: hardcoded prompt-2 demo answer
// (log review, image/instance checks, and customer-communication tips).
if ((selectedModel === "gemma tpu" || selectedModel === "gemma-a100" || selectedModel === "gemma-l4" ) && selectedRetrieval === "mmr"){
answer = "Let's help that customer troubleshoot their Vertex AI custom container issue. Here's a breakdown of how to approach the problem and guide them: **Understanding the Problem** The error message custom container in Vertex AI instance fails to start means the containerized environment you've set up for your training job isn't launching properly within your Vertex AI instance. **Troubleshooting Steps** 1. **Review the Error Logs:** * **Vertex AI Console:** The Vertex AI console will provide detailed logs related to the failed container start. Look for specific error messages, stack traces, or warnings. * **Cloud Logging:** If you're using Cloud Logging, search for logs related to your Vertex AI instance and the specific training job. 2. **Examine the Custom Container Image:** * **Dockerfile:** Carefully review your Dockerfile to ensure it builds a valid image. Check for: * Correct base image selection (e.g., TensorFlow, PyTorch) * Proper installation of dependencies * Correct environment variables * Exposing necessary ports * **Image Size:** Large images can sometimes lead to startup issues. Try optimizing your image size by removing unnecessary files. * **Image Build Process:** Verify that your Docker build process is successful and that the resulting image is pushed to a supported container registry (e.g., Google Container Registry). 3. **Check Instance Configuration:** * **Machine Type:** Ensure the machine type you've selected for your Vertex AI instance has sufficient resources (CPU, memory, GPU) to run your training job. * **Disk Space:** Verify that your instance has enough disk space to accommodate the training data and model artifacts. * **Networking:** Confirm that your instance has proper network connectivity to access any required external resources (e.g., datasets, APIs). 4. 
**Environment Variables:** * **Verify:** Double-check that all necessary environment variables are set correctly within your container and within the Vertex AI training job configuration. * **Secrets:** If you're using sensitive information (API keys, passwords), consider using Google Cloud Secret Manager to securely store and access them within your container. 5. **Test Locally:** * **Reproduce:** Try to reproduce the issue locally using the same Docker image and environment configuration. This can help isolate the problem. **Additional Tips** * **Simplify:** If possible, start with a minimal Dockerfile and training script to rule out complex dependencies. * **Community Support:** The Google Cloud community is very active. Search Stack Overflow or the Google Cloud forum for similar issues. * **Vertex AI Documentation:** The official Vertex AI documentation is a valuable resource for troubleshooting and best practices. **Communicate with the Customer** * **Gather Information:** Ask the customer for the following: * The full error message and logs * The Dockerfile and training script * The Vertex AI instance configuration * Any recent changes made to the environment * **Provide Guidance:** Based on the information gathered, provide specific troubleshooting steps and suggestions. * **Escalate if Needed:** If the issue persists, escalate it to Google Cloud support for further assistance. Let me know if you have any more questions or need help with specific aspects of the troubleshooting process.";
}
// Llama-L4 + ANN or small-to-big retrieval: hardcoded prompt-2 demo answer
// (troubleshooting checklist plus a sample customer-facing script).
if (selectedModel === "llama-l4" && (selectedRetrieval === "ann" || selectedRetrieval === "smalltobig")){
answer = "Based on the given context, it seems like you're trying to help the customer with a Vertex AI issue. To address the custom container in Vertex AI instance fails to start issue, I will provide you with some steps and considerations. ### Troubleshooting Steps: 1. **Check the container logs**: Make sure the custom container is able to start and run by checking the Vertex AI instance logs for any errors. You can do this by going to the Vertex AI instance details page, selecting the container, and clicking on the Logs tab. 2. **Verify the container image**: Ensure that the container image is correct and can be pulled from the specified registry. You can test this by pushing the image to a different registry or testing it with a different Vertex AI instance. 3. **Check the container configuration**: Verify that the container configuration (e.g., command, resources, environment variables) is correct and matches the requirements of the container image. 4. **Check the instance type**: Ensure that the instance type selected for the Vertex AI instance has sufficient resources (e.g., CPU, memory, GPU) to run the custom container. 5. **Check the network connectivity**: Verify that the Vertex AI instance has proper network connectivity to pull the container image and communicate with other services (e.g., Cloud Storage). 6. **Check IAM permissions**: Ensure that the Vertex AI instance has the necessary IAM permissions to pull the container image and access other resources. ### Additional Considerations: 1. **Review the Vertex AI documentation**: Check the Vertex AI documentation for any specific guides or tutorials for setting up custom containers. 2. **Check for known issues**: Search for any known issues or bugs in the Vertex AI issue tracker or Google Cloud support forums. 3. **Reach out to Google Cloud support**: If none of the above steps resolve the issue, it may be necessary to reach out to Google Cloud support for further assistance. 
**Customer Support Options:** 1. **Stack Overflow**: Ask a question on Stack Overflow using the tag `google-cloud-vertex-ai`. 2. **Vertex AI Documentation**: File a bug or feature request from the Vertex AI documentation. 3. **Creating Cases**: Use the creating cases process to file bugs or request features for NAS. Here's an example of how you can explain these steps to the customer: Hello, I understand that your custom container in Vertex AI instance is failing to start. To troubleshoot this issue, I'd like to walk you through some steps to help resolve it. 1. Let's start by checking the container logs. Can you please take a look at the logs for your Vertex AI instance and provide me with any errors or warnings? 2. Next, I'd like to verify the container image. Can you confirm that the image is correct and can be pulled from the specified registry? 3. We should also check the container configuration and ensure it matches the requirements of the container image. 4. Additionally, let's check the instance type to ensure it has sufficient resources. 5. Can you please review the network connectivity to your Vertex AI instance to ensure it has proper network connectivity to pull the container image and communicate with other services? 6. Lastly, let's check IAM permissions to ensure your Vertex AI instance has the necessary permissions. If none of these steps resolve the issue, we'll need to reach out to Google Cloud support for further assistance. By following these steps and considering additional factors, you should be able to help the customer troubleshoot and resolve the issue with their custom container in Vertex AI instance.";
}
if (selectedModel === "llama-l4" && selectedRetrieval === "mmr"){
answer = "To help the customer whose Vertex AI instance fails to start due to a custom container issue, you can ask some clarifying questions to gather more information and then offer some potential solutions. Here are some questions you can ask the customer: 1. Can you please provide more details about the error message you're seeing when the custom container fails to start? 2. What type of custom container are you using (e.g. TensorFlow, PyTorch, etc.)? 3. Are you using a pre-built container from the Vertex AI container registry or a custom-built one? 4. Have you tried recreating the Vertex AI instance with the same custom container settings? 5. Have you checked the Vertex AI logs for any errors related to the custom container fail to start? 6. did you checked that your images are running recent version? With these questions, you can gather more information about the customer's issue and narrow down the potential causes. After gathering more information, here are some potential solutions you can offer: 1. **Reach out to Vertex AI support**: If the issue is related to a specific Vertex AI feature or bug, it's best to file a support request with Vertex AI directly. They have a dedicated team that can help resolve complex issues. 2. **Check the container log**: Have the customer check the container log to see if there are any error messages that can provide insight into the issue. 3. **Verify the container**: Have the customer verify that the custom container is correctly configured and that it's running the expected image. 4. **Check the Vertex AI instance**: Have the customer check that the Vertex AI instance is correctly configured and that it's running the expected machine type and environment. 5. **Provide an example**: If the customer is new to Vertex AI, provide an example of how to deploy a custom container successfully. Here is a suggested flow: You: I understand that the custom container is failing to start in Vertex AI. 
Can you please provide more details about the error message you're seeing? customer: The error message is << error message >>. You: Thank you for providing that information. I'm going to check some possible causes. Can you please provide more details about your custom container setup? customer: << provide more details about the custom container setup >>. You: Based on the information, I think the issue might be related to the configuration of the custom container. Can you please try checking the container log to see if there are any error messages? Customer: << provide container log information >>. You: Thank you for providing the container log. Based on the information, it seems like the custom container is indeed failing to start due to a specific issue. Can I reach out to Vertex AI support to get further assistance on resolving this issue? Customer: That would be great. You: I'll file a support request with Vertex AI and get back to you as soon as I have more information. In this flow, you're gathering more information from the customer, troubleshooting the issue, and escalating the issue to Vertex AI support when necessary.";
}
if (selectedModel === "mistral-l4" && (selectedRetrieval === "ann" || selectedRetrieval === "smalltobig")){
answer = "To help the customer with a custom container failing to start in a Vertex AI instance, follow these steps: 1. Check the logs in the Vertex AI UI or the Cloud Logging API to understand the error message and identify the issue. 2. Verify that the container image is publicly accessible and has the correct tag. 3. Ensure that the container image meets the requirements for Vertex AI, such as having a valid Dockerfile and a valid entrypoint. 4. Check the container image size and ensure it is within the Vertex AI instance's resource limits. 5. If the issue persists, try creating a new instance with the same container image to see if the issue is specific to the instance. 6. If the issue is still not resolved, consider filing a bug report with the Vertex AI team for further investigation.";
}
if (selectedModel === "mistral-l4" && selectedRetrieval === "mmr"){
answer = "To troubleshoot a failed Vertex AI instance, you can check the following: 1. The logs in the Vertex AI console or the Cloud Logging API for error messages. 2. The status of the instance in the Vertex AI console or the Cloud Operations API. 3. The instance configuration, such as the machine type and the number of instances. 4. The custom container image used for the instance, if applicable. 5. The network settings for the instance, such as the firewall rules and the VPC service control. 6. The availability of the required resources, such as GPUs and TPUs. 7. The quota limits for the project. 8. The permissions of the service account used to create the instance. 9. The version of the machine learning framework used in the instance. 10. The dependencies and libraries installed in the instance. 11. The size and shape of the input data, if applicable. 12. The format and encoding of the input data, if applicable. 13. The version of the model and the input and output formats, if applicable. 14. The network latency and the availability of the external data sources, if applicable. 15. The availability of the external APIs and services, if applicable. 16. The availability of the external data storage, if applicable. 17. The availability of the external compute resources, if applicable. 18. The availability of the external machine learning models, if applicable. 19. The availability of the external libraries and tools, if applicable. 20. The availability of the external dependencies and packages, if applicable. 21. The availability of the external certificates and keys, if applicable. 22. The availability of the external secrets and tokens, if applicable. 23. The availability of the external environment variables, if applicable. 24. The availability of the external configuration files, if applicable. 25. The availability of the external scripts and code, if applicable. 26. The availability of the external data pipelines and workflows, if applicable. 27. 
The availability of the external data processing";
}
}
// --- End of the Answer Logic Block ---
// --- Start of the Retrieval Text Logic Block ---
// Build the simulated "retrieval log" appended after the streamed answer.
// Reads: selectedPromptValue, selectedRetrieval, selectedVectorDB (set earlier in the handler).
// Writes: retrievalText. For unknown prompt/retrieval combinations retrievalText is left
// untouched, exactly as in the previous branch-per-combination version.
if (selectedPromptValue === "prompt1" || selectedPromptValue === "prompt2") {
const usesANN = selectedRetrieval === "ann" || selectedRetrieval === "smalltobig";
const usesMMR = selectedRetrieval === "mmr";
if (usesANN || usesMMR) {
const SEPARATOR = `-----------------------------------\n`;
// One source/latency/scale line per vector-DB backend (previously duplicated in every branch).
let sourceLine;
if (selectedVectorDB === "vaisearch") { sourceLine = `Source: Vertex AI Vector Search | Latency: ~2ms | Scale: ~1 Billion Vectors\n`; }
else if (selectedVectorDB === "qdrant-milvius-gke") { sourceLine = `Source: Qdrant/Milvus @ GKE | Latency: 3-5ms | Scale: ~1 Million Vectors\n`; }
else if (selectedVectorDB === "sqlite-faiss") { sourceLine = `Source: FAISS/SQLite-Vec (Local) | Latency: 5-20ms | Scale: ~1 Thousand Vectors\n`; }
else { sourceLine = `Source: Unknown Vector DB | Latency: N/A | Scale: N/A\n`; }
const methodLine = usesANN
? `Method: ANN | Chunks Retrieved: 3 (High Similarity Focus)\n`
: `Method: MMR | Chunks Retrieved: 3 (Balanced Similarity & Diversity from ~17 candidates)\n`;
// Demo chunk text, verbatim, keyed by prompt and retrieval strategy.
const chunkSets = {
prompt1: {
ann: [
`CHUNK 1 | Input: What is the role of artificial intelligence in Google's security strategy?\nOutput: AI plays a significant role... automating threat detection, improving incident response...\n`,
`CHUNK 2 | Input: What is the role of artificial intelligence in Google's security strategy?\nOutput: AI plays a significant role... identifying suspicious activity, detect malware...\n`,
`CHUNK 3 | Input: What is the role of artificial intelligence in Google's security strategy?\nOutput: AI plays a significant role... enhancing security analysis, analyze security logs.\n`
],
mmr: [
`CHUNK 1 | Input: What is the role of artificial intelligence in Google's security strategy?\nOutput: AI plays a significant role... automating threat detection...\n`,
`CHUNK 2 | Input: What is the Threat Analysis Group at Google?\nOutput: The Threat Analysis Group monitors threat actors, tactics...\n`,
`CHUNK 3 | Input: How does Google measure and improve the effectiveness of its security mechanisms?\nOutput: Google conducts Red Team exercises to simulate attacks...\n`
]
},
prompt2: {
ann: [
`CHUNK 1 | Input: question: How can I get support for Vertex AI?\nOutput: There are several ways to get support... Stack Overflow... documentation... creating cases...\n`,
`CHUNK 2 | Input: question: What is the first step in setting up a Google Cloud project for Vertex AI?\nOutput: Go to the project selector page and select or create a Google Cloud project.\n`,
`CHUNK 3 | Input: question: What APIs need to be enabled for Vertex AI?\nOutput: IAM, Compute Engine, Notebooks, Cloud Storage, and Vertex AI APIs\n`
],
mmr: [
`CHUNK 1 | Input: question: How can I get support for Vertex AI?\nOutput: There are several ways... Stack Overflow... documentation... creating cases...\n`,
`CHUNK 2 | Input: question: What is the difference between AI Platform Training and Vertex AI custom training?\nOutput: The main difference... specify framework version... custom container... machine configurations...\n`,
`CHUNK 3 | Input: question: What are the different ways to deploy a model in Vertex AI?\nOutput: You can deploy... prebuilt or custom containers, batch predictions, or online serving.\n`
]
}
};
const chunks = chunkSets[selectedPromptValue][usesANN ? "ann" : "mmr"];
// Assemble: header, source, method, then each chunk followed by a separator.
retrievalText = `\n\n// SYSTEM LOG: RETRIEVAL ANALYSIS //\n` + sourceLine + methodLine + SEPARATOR
+ chunks.map((chunk) => chunk + SEPARATOR).join("");
}
}
// --- End of the Retrieval Text Logic Block ---
// Streaming Effect: reveal the canned answer a few characters at a time so it
// reads like a live model response.
ragOutput.textContent = ""; // Clear placeholder
let charIndex = 0; // cursor into `answer`, advanced by chunkSize each tick
const chunkSize = 5; // Characters per step
// Adjust speed based on model (faster models = faster typing effect)
// NOTE: these values are the setTimeout delay in milliseconds between chunks.
let intervalTime = (selectedModel === "gemini" || selectedModel === "gemma tpu" || selectedModel === "gemma-a100") ? 8 : 40;
// Appends the next slice of `answer` to the output element and re-schedules
// itself until the whole answer is visible. Once streaming completes, the
// retrieval log is appended and the panel is scrolled to the bottom.
// Uses closure state: charIndex, chunkSize, intervalTime, answer, retrievalText, ragOutput.
function streamResponse() {
if (charIndex >= answer.length) {
// Done streaming the main answer: attach retrieval info and scroll down.
ragOutput.textContent += retrievalText;
ragOutput.scrollTop = ragOutput.scrollHeight;
return;
}
const nextSlice = answer.slice(charIndex, charIndex + chunkSize); // slice clamps past-the-end automatically
ragOutput.textContent += nextSlice;
charIndex += chunkSize;
setTimeout(streamResponse, intervalTime);
}
streamResponse(); // Start streaming
}, 500); // 500ms delay to simulate processing
});
// --- Plotly Charting Logic (with Theme Adjustments) ---
// Returns a copy of `point` with every coordinate perturbed by a uniform random
// offset in [-noiseFactor/2, +noiseFactor/2); the input array is not mutated.
function addNoise(point, noiseFactor) {
return point.map((coordinate) => {
const jitter = (Math.random() - 0.5) * noiseFactor;
return coordinate + jitter;
});
}
// Returns a new embeddings map (subject -> list of points) where every point has
// been jittered via addNoise; the original map and its arrays are left untouched.
function applyNoiseToEmbeddings(embeddings, noiseFactor) {
const noisyEmbeddings = {};
for (const [subject, points] of Object.entries(embeddings)) {
noisyEmbeddings[subject] = points.map((point) => addNoise(point, noiseFactor));
}
return noisyEmbeddings;
}
// Sample embedding data: subject -> list of 3D points. The `/*...*/` markers are
// placeholders where the full dataset from the original script was truncated.
const embeddingsANN = { "HR": [[0.1, 0.2, 0.3], /*...*/ ], "Datacenter": [ [0.2, 0.3, 0.1], /*...*/ ], "Vertex AI": [ [0.3, 0.1, 0.2], /*...*/ ] };
const embeddingsMMR = { "HR": [[0.2, 0.1, 0.4], /*...*/ ], "Datacenter": [ [0.3, 0.2, 0.1], /*...*/ ], "Vertex AI": [ [0.1, 0.3, 0.2], /*...*/ ] };
// (Include the full data from your original script here)
// Jitter magnitude applied to every coordinate so the clusters look organic in the 3D plot.
const noiseFactor = 0.32;
const noisyEmbeddingsANN = applyNoiseToEmbeddings(embeddingsANN, noiseFactor);
const noisyEmbeddingsMMR = applyNoiseToEmbeddings(embeddingsMMR, noiseFactor);
// Position of the query ("prompt") vector highlighted in both plots.
const promptEmbedding = [0.5, 0.5, 0.5];
// Straight-line (L2) distance between two equal-length numeric vectors.
function euclideanDistance(point1, point2) {
let sum = 0;
for (let i = 0; i < point1.length; i++) {
const delta = point1[i] - point2[i];
sum += delta * delta;
}
return Math.sqrt(sum);
}
/**
 * Returns the `n` points closest to `prompt` across all subjects, nearest first.
 *
 * @param {Object<string, number[][]>} embeddings - map of subject -> list of points.
 * @param {number[]} prompt - query vector to measure distances from.
 * @param {number} n - maximum number of points to return.
 * @param {boolean} [differentFar=false] - when true, enforce diversity (MMR-style):
 *   skip any candidate closer than `minSeparation` to an already-selected point.
 * @param {number} [minSeparation=0.2] - minimum pairwise distance used in diverse
 *   mode (previously a hard-coded constant; parameterized, same default).
 * @returns {number[][]} the selected points.
 */
function findClosestPoints(embeddings, prompt, n, differentFar = false, minSeparation = 0.2) {
// Flatten and precompute each point's distance once; the previous version
// recomputed distances inside the sort comparator (O(n log n) distance calls).
const ranked = [];
for (const subject in embeddings) {
for (const point of embeddings[subject]) {
ranked.push({ point, subject, distance: euclideanDistance(point, prompt) });
}
}
ranked.sort((a, b) => a.distance - b.distance);
if (!differentFar) {
return ranked.slice(0, n).map((item) => item.point);
}
// Greedy diverse selection: take the nearest candidates that are at least
// `minSeparation` away from everything already chosen.
const selected = [];
for (const candidate of ranked) {
if (selected.length >= n) break;
const tooClose = selected.some((chosen) => euclideanDistance(candidate.point, chosen) < minSeparation);
if (!tooClose) selected.push(candidate.point);
}
return selected;
}
// Builds the Plotly trace list and dark-theme layout for one 3D embedding plot:
// one scatter3d trace per subject cluster, optional highlighted ANN/MMR retrieval
// points, and a star marker for the prompt vector. Returns { data, layout } ready
// for Plotly.newPlot. Pure function — no DOM or global access.
function createPlotData(embeddings, plotTitle, prompt, closestPointsANN = [], closestPointsMMR = []) {
// Synthwave palette: per-subject colors plus highlight/prompt colors.
const subjectColors = {
"HR": 'var(--neon-cyan)', //'#00ffff',
"Datacenter": 'var(--neon-orange)', //'#ff8c00',
"Vertex AI": '#90ee90' // Light Green (adjust if needed)
};
const closestColorANN = '#ff1493'; // Deep Pink
const closestColorMMR = '#ff1493'; // Deep Pink (or different if preferred)
const promptColor = 'white'; //'#ffffff';
// Splits a list of [x, y, z] points into the per-axis arrays Plotly expects.
const toAxes = (points) => ({
x: points.map((p) => p[0]),
y: points.map((p) => p[1]),
z: points.map((p) => p[2])
});
const data = [];
// One trace per subject cluster.
for (const subject in embeddings) {
const points = embeddings[subject];
if (!points) continue; // safety check for missing entries
data.push({
...toAxes(points),
mode: 'markers', name: subject, type: 'scatter3d',
marker: { size: 6, color: subjectColors[subject] || 'grey', opacity: 0.7 } // slightly larger, transparent
});
}
// Highlighted ANN-retrieved points (diamond markers).
if (closestPointsANN && closestPointsANN.length > 0) {
data.push({
...toAxes(closestPointsANN),
mode: 'markers', name: "Closest ANN", type: 'scatter3d',
marker: { size: 10, color: closestColorANN, symbol: 'diamond', line: { color: 'white', width: 1 } }
});
}
// Highlighted MMR-retrieved points (cross markers).
if (closestPointsMMR && closestPointsMMR.length > 0) {
data.push({
...toAxes(closestPointsMMR),
mode: 'markers', name: "Closest MMR (Diverse)", type: 'scatter3d',
marker: { size: 10, color: closestColorMMR, symbol: 'cross', line: { color: 'white', width: 1 } }
});
}
// The query/prompt vector itself (star marker).
data.push({
...toAxes([prompt]),
mode: 'markers', name: "Prompt Vector", type: 'scatter3d',
marker: { size: 12, color: promptColor, symbol: 'star', line: { color: 'var(--neon-pink)', width: 1 } }
});
// Shared dark-theme axis styling; the factory returns a fresh object per axis.
const axisStyle = (label) => ({
title: label,
titlefont: { color: 'var(--neon-cyan)' },
gridcolor: 'rgba(0, 255, 255, 0.2)',
zerolinecolor: 'var(--neon-pink)',
linecolor: 'rgba(0, 255, 255, 0.5)',
tickfont: { color: 'var(--light-text)' }
});
// NOTE(review): the 'var(--...)' color strings rely on Plotly resolving CSS custom
// properties — confirm they render as intended in the target Plotly version.
const layout = {
title: { text: plotTitle, font: { color: 'var(--neon-pink)', size: 18, family: 'Audiowide, sans-serif' } },
paper_bgcolor: 'rgba(0,0,0,0)', // Transparent background
plot_bgcolor: 'rgba(0,0,0,0)', // Transparent plot area
font: { color: 'var(--light-text)', family: 'Orbitron, sans-serif' }, // Light text default
scene: {
xaxis: axisStyle('Dimension X'),
yaxis: axisStyle('Dimension Y'),
zaxis: axisStyle('Dimension Z'),
bgcolor: 'rgba(13, 0, 26, 0.3)' // Very subtle dark bg for scene
},
legend: { font: { color: 'var(--light-text)' }, bgcolor: 'rgba(0,0,0,0.3)', bordercolor: 'var(--neon-cyan)', borderwidth: 1 }
};
return { data, layout };
}
// Select the 3 nearest points for the ANN view, and 3 diverse nearest points
// (differentFar = true) for the MMR view.
const closestPointsANN = findClosestPoints(noisyEmbeddingsANN, promptEmbedding, 3);
const closestPointsMMR = findClosestPoints(noisyEmbeddingsMMR, promptEmbedding, 3, true);
// Create data and layout for ANN plot (only ANN highlights passed).
const {data: dataANN, layout: layoutANN} = createPlotData(noisyEmbeddingsANN, "ANN Retrieval // Vector Space", promptEmbedding, closestPointsANN);
// Create data and layout for MMR plot (no ANN highlights, only MMR ones).
const {data: dataMMR, layout: layoutMMR} = createPlotData(noisyEmbeddingsMMR, "MMR Retrieval // Vector Space", promptEmbedding, [], closestPointsMMR);
// Render both plots into their container divs; `responsive` lets Plotly resize with the page.
Plotly.newPlot('embedding-plot-ann', dataANN, layoutANN, {responsive: true});
Plotly.newPlot('embedding-plot-mmr', dataMMR, layoutMMR, {responsive: true});
// Initial config text update. NOTE(review): updateConfigText is defined earlier in
// the script — presumably refreshes the on-page configuration summary; confirm there.
updateConfigText();
});
</script>
</body>
</html> |