Santiago Casas committed · Commit 8343c13 · Parent(s): 52c5971

add project files and clapp.png

Browse files:
- .gitattributes +1 -0
- CLAPP.py +1033 -0
- README.md +1 -1
- images/CLAPP.png +3 -0
- requirements.txt +175 -0
.gitattributes CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
CLAPP.py ADDED

@@ -0,0 +1,1033 @@
# This script requires Streamlit and LangChain
# Install them with: pip install streamlit openai langchain langchain-openai langchain-community

import streamlit as st
import time
import json
import os
import base64
import getpass
from cryptography.fernet import Fernet
from langchain_openai import ChatOpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.documents import Document

from langchain.callbacks.base import BaseCallbackHandler

from pydantic import BaseModel, Field
from typing import Annotated


from autogen import ConversableAgent, LLMConfig, UpdateSystemMessage
import tempfile
from autogen.coding import LocalCommandLineCodeExecutor, CodeBlock
import matplotlib
matplotlib.use('Agg')  # Set the backend to Agg before importing pyplot
import matplotlib.pyplot as plt
import io
from PIL import Image
import re
import subprocess
import sys
from typing import Tuple
import contextlib  # for contextlib.contextmanager

# --- Helper Functions ---
def save_encrypted_key(encrypted_key, username):
    """Save encrypted key to file with username prefix"""
    try:
        filename = f"{username}_encrypted_api_key" if username else ".encrypted_api_key"
        with open(filename, "w") as f:
            f.write(encrypted_key)
        return True
    except Exception:
        return False

def load_encrypted_key(username):
    """Load encrypted key from file with username prefix"""
    try:
        filename = f"{username}_encrypted_api_key" if username else ".encrypted_api_key"
        with open(filename, "r") as f:
            return f.read()
    except FileNotFoundError:
        return None
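
# Illustrative only (not used by the app): the sidebar below derives the Fernet
# key from the user's password by padding/truncating it to 32 bytes and
# urlsafe-base64-encoding it. As a sketch, the round trip looks like:
#     key = base64.urlsafe_b64encode(password.ljust(32)[:32].encode())
#     fernet = Fernet(key)
#     fernet.decrypt(fernet.encrypt(b"sk-...")) == b"sk-..."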

def read_keys_from_file(file_path):
    with open(file_path, 'r') as file:
        return json.load(file)

def read_prompt_from_file(path):
    with open(path, 'r') as f:
        return f.read()

class Response:
    def __init__(self, content):
        self.content = content


class Feedback(BaseModel):
    grade: Annotated[int, Field(description="Score from 1 to 10")]
    improvement_instructions: Annotated[str, Field(description="Advice on how to improve the reply")]

class StreamHandler(BaseCallbackHandler):
    def __init__(self, container):
        self.container = container
        self.text = ""

    def on_llm_new_token(self, token: str, **kwargs):
        self.text += token
        self.container.markdown(self.text + "▌")
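
# StreamHandler is hooked into ChatOpenAI further down, along these lines
# (sketch, assuming a Streamlit placeholder made with st.empty()):
#     stream_box = st.empty()
#     llm = ChatOpenAI(model_name="gpt-4o-mini", streaming=True,
#                      callbacks=[StreamHandler(stream_box)])
# so each streamed token re-renders the accumulated text with a "▌" cursor.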

# --- Streamlit Page Config ---
st.set_page_config(
    page_title="CLAPP Agent",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="auto"
)

st.markdown("# CLAPP: CLASS LLM Agent for Pair Programming")
col1, col2, col3 = st.columns([1, 2, 1])
with col2:
    st.image("images/CLAPP.png", width=400)


# New prompts for the swarm
Initial_Agent_Instructions = read_prompt_from_file("prompts/class_instructions.txt")  # Reuse or adapt class_instructions
Review_Agent_Instructions = read_prompt_from_file("prompts/review_instructions.txt")  # Adapt rating_instructions
#Typo_Agent_Instructions = read_prompt_from_file("prompts/typo_instructions.txt")  # New prompt file
Formatting_Agent_Instructions = read_prompt_from_file("prompts/formatting_instructions.txt")  # New prompt file
Code_Execution_Agent_Instructions = read_prompt_from_file("prompts/codeexecutor_instructions.txt")  # New prompt file

# --- Initialize Session State ---
def init_session():
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "debug" not in st.session_state:
        st.session_state.debug = False
    if "llm" not in st.session_state:
        st.session_state.llm = None
    if "llmBG" not in st.session_state:
        st.session_state.llmBG = None
    if "memory" not in st.session_state:
        st.session_state.memory = ChatMessageHistory()
    if "vector_store" not in st.session_state:
        st.session_state.vector_store = None
    if "last_token_count" not in st.session_state:
        st.session_state.last_token_count = 0
    if "selected_model" not in st.session_state:
        st.session_state.selected_model = "gpt-4o-mini"
    if "greeted" not in st.session_state:
        st.session_state.greeted = False
    if "debug_messages" not in st.session_state:
        st.session_state.debug_messages = []


init_session()


# --- Sidebar Configuration ---
with st.sidebar:
    st.header("🔐 API & Assistants")
    api_key = st.text_input("1. OpenAI API Key", type="password")
    username = st.text_input("2. Username (for saving your API key)", placeholder="Enter your username")
    user_password = st.text_input("3. Password to encrypt/decrypt API key", type="password")

    # When both API key and password are provided
    if api_key and user_password:
        # Create encryption key from password
        key = base64.urlsafe_b64encode(user_password.ljust(32)[:32].encode())
        fernet = Fernet(key)

        # If this is a new API key, encrypt and save it
        if "saved_api_key" not in st.session_state or api_key != st.session_state.saved_api_key:
            try:
                # Encrypt the API key
                encrypted_key = fernet.encrypt(api_key.encode())

                # Save to session state and file
                st.session_state.saved_api_key = api_key
                st.session_state.encrypted_key = encrypted_key.decode()

                # Save to file
                if save_encrypted_key(encrypted_key.decode(), username):
                    st.success("API key encrypted and saved! ✅")
                else:
                    st.warning("API key encrypted but couldn't save to file! ⚠️")
            except Exception as e:
                st.error(f"Error saving API key: {str(e)}")

    # Try to load saved API key if password is provided
    elif user_password and not api_key:
        # Try to load from file first
        encrypted_key = load_encrypted_key(username)
        if encrypted_key:
            try:
                # Recreate encryption key
                key = base64.urlsafe_b64encode(user_password.ljust(32)[:32].encode())
                fernet = Fernet(key)

                # Decrypt the saved key
                decrypted_key = fernet.decrypt(encrypted_key.encode()).decode()

                # Set the API key
                api_key = decrypted_key
                st.session_state.saved_api_key = api_key
                st.success("API key loaded successfully! 🔑")
            except Exception as e:
                st.error("Failed to decrypt API key. Wrong password? 🔒")
        else:
            st.warning("No saved API key found. Please enter your API key first. 🔑")

    # Add clear saved key button
    if st.button("🗑️ Clear Saved API Key"):
        deleted_files = False
        error_message = ""

        # Try to delete username-specific file if it exists
        if username:
            filename = f"{username}_encrypted_api_key"
            if os.path.exists(filename):
                try:
                    os.remove(filename)
                    deleted_files = True
                    st.success(f"Deleted key file for user: {username}")
                except Exception as e:
                    error_message += f"Error clearing {filename}: {str(e)}\n"

        # Also try to delete the default file if it exists
        if os.path.exists(".encrypted_api_key"):
            try:
                os.remove(".encrypted_api_key")
                deleted_files = True
                st.success("Deleted default key file")
            except Exception as e:
                error_message += f"Error clearing default key file: {str(e)}\n"

        # Clean up session state
        if "saved_api_key" in st.session_state:
            del st.session_state.saved_api_key
        if "encrypted_key" in st.session_state:
            del st.session_state.encrypted_key

        # Show appropriate message
        if deleted_files:
            st.info("Session cleared. Reloading page...")
            time.sleep(1)  # Brief pause so user can see the message
            st.rerun()
        elif error_message:
            st.error(error_message)
        else:
            st.warning("No saved API keys found to delete.")

    st.session_state.selected_model = st.selectbox(
        "4. Choose LLM model 🧠",
        options=["gpt-4o-mini", "gpt-4o"],
        index=["gpt-4o-mini", "gpt-4o"].index(st.session_state.selected_model)
    )

    # Check if model has changed
    if "previous_model" not in st.session_state:
        st.session_state.previous_model = st.session_state.selected_model
    elif st.session_state.previous_model != st.session_state.selected_model:
        # Reset relevant state variables when model changes
        st.session_state.vector_store = None
        st.session_state.greeted = False
        st.session_state.messages = []
        st.session_state.memory = ChatMessageHistory()
        st.session_state.previous_model = st.session_state.selected_model
        st.info("Model changed! Please initialize again with the new model.")

    st.write("### Response Mode")
    col1, col2 = st.columns([1, 2])
    with col1:
        mode_is_fast = st.toggle("Fast Mode", value=True)
    with col2:
        if mode_is_fast:
            st.caption("✨ Quick responses with good quality (recommended for most uses)")
        else:
            st.caption("🎯 Swarm mode, more refined responses (may take longer)")

    if api_key:
        os.environ["OPENAI_API_KEY"] = api_key

        # Initialize only after model is selected
        if st.button("🚀 Initialize with Selected Model"):
            # First initialization without streaming
            st.session_state.llm = ChatOpenAI(
                model_name=st.session_state.selected_model,
                openai_api_key=api_key,
                temperature=1.0
            )

            if st.session_state.vector_store is None:
                embedding_status = st.empty()
                embedding_status.info("🔄 Processing and embedding your RAG data... This might take a moment! ⏳")
                embeddings = OpenAIEmbeddings(model="text-embedding-3-large")

                # Get all files from class-data directory
                all_docs = []
                for filename in os.listdir("./class-data"):
                    file_path = os.path.join("./class-data", filename)

                    if filename.endswith('.pdf'):
                        # Handle PDF files
                        loader = PyPDFLoader(file_path)
                        docs = loader.load()
                        all_docs.extend(docs)
                    elif filename.endswith(('.txt', '.py', '.ini')):  # Added .py extension
                        # Handle text and Python files
                        with open(file_path, 'r', encoding='utf-8') as f:
                            text = f.read()
                        # Create a document with metadata
                        all_docs.append(Document(
                            page_content=text,
                            metadata={"source": filename, "type": "code" if filename.endswith('.py') else "text"}
                        ))

                # Split and process all documents
                text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
                def sanitize(documents):
                    for doc in documents:
                        doc.page_content = doc.page_content.encode("utf-8", "ignore").decode("utf-8")
                    return documents

                splits = text_splitter.split_documents(all_docs)
                splits = sanitize(splits)

                # Create vector store from all documents
                st.session_state.vector_store = FAISS.from_documents(splits, embedding=embeddings)
                embedding_status.empty()  # Clear the loading message
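                # With chunk_size=1000 and chunk_overlap=200 above, documents
                # are split into chunks of at most 1000 characters that share
                # roughly 200 characters with their neighbors, so retrieved
                # chunks keep context across boundaries (illustrative figures;
                # the splitter also prefers paragraph/newline separators).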

            # Initialize but don't generate welcome message yet
            if not st.session_state.greeted:
                # Just set the initialized flag, we'll generate the welcome message later
                st.session_state.llm_initialized = True
                st.rerun()  # Refresh the page to show the initialized state

    st.markdown("---")  # Add a separator for better visual organization

    # Check if CLASS is already installed
    st.markdown("### 🔧 CLASS Setup")
    if st.checkbox("Check CLASS installation status"):
        try:
            # Use sys.executable to run a simple test to see if classy can be imported
            result = subprocess.run(
                [sys.executable, "-c", "from classy import Class; print('CLASS successfully imported!')"],
                capture_output=True,
                text=True
            )

            if result.returncode == 0:
                st.success("✅ CLASS is already installed and ready to use!")
            else:
                st.error("❌ The 'classy' module is not installed. Please install CLASS using the button below.")
                if result.stderr:
                    st.code(result.stderr, language="bash")
        except Exception as e:
            st.error(f"❌ Error checking CLASS installation: {str(e)}")

    # Add CLASS installation and testing buttons
    st.text("If not installed, install CLASS to enable code execution and plotting")
    if st.button("🔄 Install CLASS"):
        # Show simple initial message
        status_placeholder = st.empty()
        status_placeholder.info("Installing CLASS... This could take a few minutes.")

        try:
            # Get the path to install_classy.sh
            install_script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'install_classy.sh')

            # Make the script executable
            os.chmod(install_script_path, 0o755)

            # Run the installation script with shell=True to ensure proper execution
            process = subprocess.Popen(
                [install_script_path],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                shell=True,
                cwd=os.path.dirname(os.path.abspath(__file__))
            )

            # Create a placeholder for the current line
            current_line_placeholder = st.empty()

            # Collect output in the background while showing just the last line
            output_text = ""
            for line in iter(process.stdout.readline, ''):
                output_text += line
                # Update the placeholder with just the current line (real-time feedback)
                if line.strip():  # Only update for non-empty lines
                    current_line_placeholder.info(f"Current: {line.strip()}")

            # Get the final return code
            return_code = process.wait()

            # Clear the current line placeholder when done
            current_line_placeholder.empty()

            # Update status based on result
            if return_code == 0:
                status_placeholder.success("✅ CLASS installed successfully!")
            else:
                status_placeholder.error(f"❌ CLASS installation failed with return code: {return_code}")

            # Display the full output in an expander (not expanded by default)
            with st.expander("View Full Installation Log", expanded=False):
                st.code(output_text)

        except Exception as e:
            status_placeholder.error(f"Installation failed with exception: {str(e)}")
            st.exception(e)  # Show the full exception for debugging

    # Add test environment button
    st.text("If CLASS is installed, test the environment")
    if st.button("🧪 Test CLASS"):
        # Show simple initial message
        status_placeholder = st.empty()
        status_placeholder.info("Testing CLASS environment... This could take a moment.")

        try:
            # Get the path to test_classy.py
            test_script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_classy.py')

            # Create a temporary directory for the test
            with tempfile.TemporaryDirectory() as temp_dir:
                # Run the test script with streaming output
                process = subprocess.Popen(
                    [sys.executable, test_script_path],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    text=True,
                    bufsize=1,
                    cwd=temp_dir
                )

                # Create a placeholder for the current line
                current_line_placeholder = st.empty()

                # Collect output in the background while showing just the last line
                output_text = ""
                for line in iter(process.stdout.readline, ''):
                    output_text += line
                    # Update the placeholder with just the current line (real-time feedback)
                    if line.strip():  # Only update for non-empty lines
                        current_line_placeholder.info(f"Current: {line.strip()}")

                # Get the final return code
                return_code = process.wait()

                # Clear the current line placeholder when done
                current_line_placeholder.empty()

                # Update status based on result
                if return_code == 0:
                    status_placeholder.success("✅ CLASS test completed successfully!")
                else:
                    status_placeholder.error(f"❌ CLASS test failed with return code: {return_code}")

                # Check for common errors
                if "ModuleNotFoundError" in output_text or "ImportError" in output_text:
                    st.error("❌ Python module import error detected. Make sure CLASS is properly installed.")

                if "CosmoSevereError" in output_text or "CosmoComputationError" in output_text:
                    st.error("❌ CLASS computation error detected.")

                # Display the full output in an expander (not expanded by default)
                with st.expander("View Full Test Log", expanded=False):
                    st.code(output_text)

                # Check if the plot was generated
                plot_path = os.path.join(temp_dir, 'cmb_temperature_spectrum.png')
                if os.path.exists(plot_path):
                    # Show the plot if it was generated
                    st.subheader("Generated CMB Power Spectrum")
                    st.image(plot_path, use_container_width=True)
                else:
                    st.warning("⚠️ No plot was generated")

        except Exception as e:
            status_placeholder.error(f"Test failed with exception: {str(e)}")
            st.exception(e)  # Show the full exception for debugging

    st.markdown("---")  # Add a separator for better visual organization
    st.session_state.debug = st.checkbox("🔍 Show Debug Info")
    if st.button("🗑️ Reset Chat"):
        st.session_state.clear()
        st.rerun()

    if st.session_state.last_token_count > 0:
        st.markdown(f"🧮 **Last response token usage:** `{st.session_state.last_token_count}` tokens")

    # --- Display all saved plots in sidebar ---
    if "generated_plots" in st.session_state and st.session_state.generated_plots:
        with st.expander("📊 Plot Gallery", expanded=False):
            st.write("All plots generated during this session:")
            # Use a single column layout for the sidebar
            for i, plot_path in enumerate(st.session_state.generated_plots):
                if os.path.exists(plot_path):
                    st.image(plot_path, width=250, caption=os.path.basename(plot_path))
                    st.markdown("---")  # Add separator between plots

# --- Retrieval + Prompt Construction ---
def build_messages(context, question, system):
    system_msg = SystemMessage(content=system)
    human_msg = HumanMessage(content=f"Context:\n{context}\n\nQuestion:\n{question}")
    return [system_msg] + st.session_state.memory.messages + [human_msg]

def build_messages_rating(context, question, answer, system):
    system_msg = SystemMessage(content=system)
    human_msg = HumanMessage(content=f"Context:\n{context}\n\nQuestion:\n{question}\n\nAI Answer:\n{answer}")
    return [system_msg] + st.session_state.memory.messages + [human_msg]

def build_messages_refinement(context, question, answer, feedback, system):
    system_msg = SystemMessage(content=system)
    human_msg = HumanMessage(content=f"Context:\n{context}\n\nQuestion:\n{question}\n\nAI Answer:\n{answer}\n\nReviewer Feedback:\n{feedback}")
    return [system_msg] + st.session_state.memory.messages + [human_msg]

def format_memory_messages(memory_messages):
    formatted = ""
    for msg in memory_messages:
        role = msg.type.capitalize()  # 'human' -> 'Human'
        content = msg.content
        formatted += f"{role}: {content}\n\n"
    return formatted.strip()


def retrieve_context(question):
    docs = st.session_state.vector_store.similarity_search(question, k=4)
    return "\n\n".join([doc.page_content for doc in docs])


# Set up code execution environment
#temp_dir = tempfile.TemporaryDirectory()

class PlotAwareExecutor(LocalCommandLineCodeExecutor):
    def __init__(self, **kwargs):
        import tempfile
        # Create a persistent plots directory if it doesn't exist
        plots_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'plots')
        os.makedirs(plots_dir, exist_ok=True)

        # Still use a temp dir for code execution
        temp_dir = tempfile.TemporaryDirectory()
        kwargs['work_dir'] = temp_dir.name
        super().__init__(**kwargs)
        self._temp_dir = temp_dir
        self._plots_dir = plots_dir

    @contextlib.contextmanager
    def _capture_output(self):
        old_out, old_err = sys.stdout, sys.stderr
        buf_out, buf_err = io.StringIO(), io.StringIO()
        sys.stdout, sys.stderr = buf_out, buf_err
        try:
            yield buf_out, buf_err
        finally:
            sys.stdout, sys.stderr = old_out, old_err

    def execute_code(self, code: str):
        # 1) Extract code from markdown
        match = re.search(r"```(?:python)?\n(.*?)```", code, re.DOTALL)
        cleaned = match.group(1) if match else code
        cleaned = cleaned.replace("plt.show()", "")

        # Add timestamp for saving figures only if there's plt usage in the code
        timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
        plot_filename = f'plot_{timestamp}.png'
        plot_path = os.path.join(self._plots_dir, plot_filename)
        temp_plot_path = None

        for line in cleaned.split("\n"):
            if "plt.savefig" in line:
                temp_plot_path = os.path.join(self._temp_dir.name, f'temporary_{timestamp}.png')
                cleaned = cleaned.replace(line, f"plt.savefig('{temp_plot_path}', dpi=300)")
                break
        else:
            # If there's a plot but no save, auto-insert save
            if "plt." in cleaned:
                temp_plot_path = os.path.join(self._temp_dir.name, f'temporary_{timestamp}.png')
                cleaned += f"\nplt.savefig('{temp_plot_path}')"

        # Create a temporary Python file to execute
        temp_script_path = os.path.join(self._temp_dir.name, f'temp_script_{timestamp}.py')
        with open(temp_script_path, 'w') as f:
            f.write(cleaned)

        full_output = ""
        try:
            # 2) Capture stdout using subprocess
            process = subprocess.Popen(
                [sys.executable, temp_script_path],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                cwd=self._temp_dir.name
            )
            stdout, _ = process.communicate()

            # 3) Format the output
            with self._capture_output() as (out_buf, err_buf):
                if stdout:
                    out_buf.write(stdout)
                stdout_text = out_buf.getvalue()
                stderr_text = err_buf.getvalue()

            if stdout_text:
                full_output += f"STDOUT:\n{stdout_text}\n"
            if stderr_text:
                full_output += f"STDERR:\n{stderr_text}\n"

            # Copy plot from temp to persistent location if it exists
            if temp_plot_path and os.path.exists(temp_plot_path):
                import shutil
                shutil.copy2(temp_plot_path, plot_path)
                # Initialize the plots list if it doesn't exist
                if "generated_plots" not in st.session_state:
                    st.session_state.generated_plots = []
                # Add the persistent plot path to session state
                st.session_state.generated_plots.append(plot_path)

        except Exception:
            with self._capture_output() as (out_buf, err_buf):
                import traceback
                traceback.print_exc(file=sys.stderr)
                full_output += f"STDERR:\n{err_buf.getvalue()}\n"

        return full_output, plot_path

# Example instantiation:
executor = PlotAwareExecutor(timeout=10)
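
# Calling the executor directly looks like this (sketch): it accepts raw
# Python or a fenced markdown block, strips plt.show(), injects a savefig,
# and returns the combined STDOUT/STDERR text plus the path where a plot,
# if any, was saved:
#     output, saved_plot = executor.execute_code(
#         "```python\nimport matplotlib.pyplot as plt\n"
#         "plt.plot([0, 1], [0, 1])\nplt.show()\n```"
#     )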

# Global agent configurations
initial_config = LLMConfig(
    api_type="openai",
    model=st.session_state.selected_model,
    temperature=0.2,  # Low temperature for consistent initial responses
    api_key=api_key,
)

review_config = LLMConfig(
    api_type="openai",
    model=st.session_state.selected_model,
    temperature=0.7,  # Higher temperature for creative reviews
    api_key=api_key,
    response_format=Feedback
)
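
# response_format=Feedback constrains the review agent's replies to the
# Feedback schema defined above, e.g. (illustrative output only):
#     {"grade": 7, "improvement_instructions": "Clarify the plotting step."}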

# typo_config = LLMConfig(
#     api_type="openai",
#     model=st.session_state.selected_model,
#     temperature=0.1,  # Very low temperature for precise code corrections
#     api_key=api_key,
# )

formatting_config = LLMConfig(
    api_type="openai",
    model=st.session_state.selected_model,
    temperature=0.3,  # Moderate temperature for formatting
    api_key=api_key,
)

code_execution_config = LLMConfig(
    api_type="openai",
    model=st.session_state.selected_model,
    temperature=0.1,  # Very low temperature for code execution
    api_key=api_key,
)

# Global agent instances with updated system messages
initial_agent = ConversableAgent(
    name="initial_agent",
    system_message=f"""
{Initial_Agent_Instructions}""",
    human_input_mode="NEVER",
    llm_config=initial_config
)

review_agent = ConversableAgent(
    name="review_agent",
    system_message=f"""{Review_Agent_Instructions}""",
    human_input_mode="NEVER",
    llm_config=review_config
)

# typo_agent = ConversableAgent(
#     name="typo_agent",
#     system_message=f"""You are the typo and code correction agent. Your task is to:
#     1. Fix any typos or grammatical errors
#     2. Correct any code issues
#     3. Ensure proper formatting
#     4. Maintain the original meaning while improving clarity
#     5. Verify plots are saved to disk (not using show())
#     6. PRESERVE all code blocks exactly as they are unless there are actual errors
#     7. If no changes are needed, keep the original code blocks unchanged
#
#     {Typo_Agent_Instructions}""",
#     human_input_mode="NEVER",
#     llm_config=typo_config
# )

formatting_agent = ConversableAgent(
    name="formatting_agent",
    system_message=f"""{Formatting_Agent_Instructions}""",  # f-string so the instructions are interpolated
    human_input_mode="NEVER",
    llm_config=formatting_config
)

code_executor = ConversableAgent(
    name="code_executor",
    system_message=f"""{Code_Execution_Agent_Instructions}""",  # f-string so the instructions are interpolated
    human_input_mode="NEVER",
    llm_config=code_execution_config,
    code_execution_config={"executor": executor},
    max_consecutive_auto_reply=50
)
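
# Note the single-agent pattern used below in call_ai: each agent initiates a
# one-turn chat with itself, which amounts to a one-shot completion against
# its own system message (sketch):
#     result = initial_agent.initiate_chat(recipient=initial_agent,
#                                          message="...", max_turns=1,
#                                          summary_method="last_msg")
#     draft = result.summary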

def call_ai(context, user_input):
    if mode_is_fast:
        messages = build_messages(context, user_input, Initial_Agent_Instructions)
        response = st.session_state.llm.invoke(messages)
        return Response(content=response.content)
    else:
        # Swarm workflow for detailed mode
        st.markdown("Thinking (Swarm Mode)... ")

        # Format the conversation history for context
        conversation_history = format_memory_messages(st.session_state.memory.messages)

        # 1. Initial Agent generates the draft
        st.markdown("Generating initial draft...")
        chat_result_1 = initial_agent.initiate_chat(
            recipient=initial_agent,
            message=f"Conversation history:\n{conversation_history}\n\nContext from documents: {context}\n\nUser question: {user_input}",
            max_turns=1,
            summary_method="last_msg"
        )
        draft_answer = chat_result_1.summary
        if st.session_state.debug:
            st.session_state.debug_messages.append(("Initial Draft", draft_answer))

        # 2. Review Agent critiques the draft
        st.markdown("Reviewing draft...")
        chat_result_2 = review_agent.initiate_chat(
            recipient=review_agent,
            message=f"Conversation history:\n{conversation_history}\n\nPlease review this draft answer:\n{draft_answer}",
            max_turns=1,
            summary_method="last_msg"
        )
        review_feedback = chat_result_2.summary
        if st.session_state.debug:
            st.session_state.debug_messages.append(("Review Feedback", review_feedback))

        # # 3. Typo Agent corrects the draft
        # st.markdown("Checking for typos...")
        # chat_result_3 = typo_agent.initiate_chat(
        #     recipient=typo_agent,
        #     message=f"Original draft: {draft_answer}\n\nReview feedback: {review_feedback}",
        #     max_turns=1,
        #     summary_method="last_msg"
        # )
        # typo_corrected_answer = chat_result_3.summary
        # if st.session_state.debug: st.text(f"Typo-Corrected Answer:\n{typo_corrected_answer}")

        # 4. Formatting Agent formats the final answer
        st.markdown("Formatting final answer...")
        chat_result_4 = formatting_agent.initiate_chat(
            recipient=formatting_agent,
            message=f"""Please format this answer while preserving any code blocks:
{draft_answer}""",
            max_turns=1,
            summary_method="last_msg"
        )
        formatted_answer = chat_result_4.summary
        if st.session_state.debug:
            st.session_state.debug_messages.append(("Formatted Answer", formatted_answer))

        # If the answer contains code, add a note about code execution
        if "```python" in formatted_answer:
            formatted_answer += "\n\n> 💡 **Note**: This answer contains code. If you want to execute it, type 'execute!' in the chat."
        return Response(content=formatted_answer)


# --- Chat Input ---
user_input = st.chat_input("Type your prompt here...")

# --- Display Full Chat History ---
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        # Check if this message contains a plot path marker
        if "PLOT_PATH:" in message["content"]:
            # Split content into text and plot path
            parts = message["content"].split("PLOT_PATH:")
            # Display the text part
            st.markdown(parts[0])
            # Display each plot path
            for plot_info in parts[1:]:
                plot_path = plot_info.split('\n')[0].strip()
                if os.path.exists(plot_path):
                    st.image(plot_path, width=700)
        else:
            st.markdown(message["content"])

# --- Process New Prompt ---
if user_input:
    # Show user input immediately
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    st.session_state.memory.add_user_message(user_input)
    context = retrieve_context(user_input)

    # Count prompt tokens using tiktoken if available
    try:
        import tiktoken
        enc = tiktoken.encoding_for_model("gpt-4")
        st.session_state.last_token_count = len(enc.encode(user_input))
    except Exception:
        st.session_state.last_token_count = 0
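    # (encoding_for_model("gpt-4") selects the cl100k_base tokenizer, while the
    # gpt-4o family actually uses o200k_base, so this is an approximate count
    # of the raw user prompt only, not of the full assembled message list.)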

    # Stream assistant response
    with st.chat_message("assistant"):
        stream_box = st.empty()
        stream_handler = StreamHandler(stream_box)

        # Second initialization with streaming
        st.session_state.llm = ChatOpenAI(
            model_name=st.session_state.selected_model,
            streaming=True,
            callbacks=[stream_handler],
            openai_api_key=api_key,
            temperature=0.2
        )

        # Check if this is an execution request
        if user_input.strip().lower() == "execute!":
            # Find the last assistant message containing code
            last_assistant_message = None
            for message in reversed(st.session_state.messages):
                if message["role"] == "assistant" and "```" in message["content"]:
                    last_assistant_message = message["content"]
                    break

            if last_assistant_message:
                st.markdown("Executing code...")
                st.info("🚀 Executing cleaned code...")
                #chat_result = code_executor.initiate_chat(
                #    recipient=code_executor,
                #    message=f"Please execute this code:\n{last_assistant_message}",
                #    max_turns=1,
                #    summary_method="last_msg"
                #)
                #execution_output = chat_result.summary
                execution_output, plot_path = executor.execute_code(last_assistant_message)
                st.subheader("Execution Output")
                st.text(execution_output)  # now contains both STDOUT and STDERR

                if os.path.exists(plot_path):
                    st.success("✅ Plot generated successfully!")
                    # Display the plot
                    #st.image(plot_path, use_container_width=True)
                    st.image(plot_path, width=700)
                else:
                    st.warning("⚠️ No plot was generated")

                # Check for errors and iterate if needed
                max_iterations = 3  # Maximum number of iterations to prevent infinite loops
                current_iteration = 0
                has_errors = any(error_indicator in execution_output for error_indicator in ["Traceback", "Error:", "Exception:", "TypeError:", "ValueError:", "NameError:", "SyntaxError:", "Error in Class"])

                while has_errors and current_iteration < max_iterations:
                    current_iteration += 1
                    st.error(f"Previous error: {execution_output}")  # Show the actual error message
                    st.info(f"🔧 Fixing errors (attempt {current_iteration}/{max_iterations})...")

                    # Get new review with error information
                    review_message = f"""
Previous answer had errors during execution:
{execution_output}

Please review and suggest fixes for this answer. IMPORTANT: Preserve all code blocks exactly as they are, only fix actual errors:
{last_assistant_message}
"""
                    chat_result_2 = review_agent.initiate_chat(
                        recipient=review_agent,
                        message=review_message,
                        max_turns=1,
                        summary_method="last_msg"
                    )
                    review_feedback = chat_result_2.summary
                    if st.session_state.debug:
                        st.session_state.debug_messages.append(("Error Review Feedback", review_feedback))

                    # Get corrected version
                    chat_result_3 = initial_agent.initiate_chat(
                        recipient=initial_agent,
                        message=f"""Original answer: {last_assistant_message}
Review feedback with error fixes: {review_feedback}
IMPORTANT: Only fix actual errors in the code blocks. Preserve all working code exactly as it is.""",
                        max_turns=1,
                        summary_method="last_msg"
                    )
                    corrected_answer = chat_result_3.summary
                    if st.session_state.debug:
                        st.session_state.debug_messages.append(("Corrected Answer", corrected_answer))

                    # Format the corrected answer
                    chat_result_4 = formatting_agent.initiate_chat(
                        recipient=formatting_agent,
                        message=f"""Please format this corrected answer while preserving all code blocks:
{corrected_answer}
""",
                        max_turns=1,
                        summary_method="last_msg"
                    )
                    formatted_answer = chat_result_4.summary
                    if st.session_state.debug:
                        st.session_state.debug_messages.append(("Formatted Corrected Answer", formatted_answer))

                    # Execute the corrected code
                    st.info("🚀 Executing corrected code...")
                    #chat_result = code_executor.initiate_chat(
                    #    recipient=code_executor,
                    #    message=f"Please execute this corrected code:\n{formatted_answer}",
                    #    max_turns=1,
                    #    summary_method="last_msg"
                    #)
                    #execution_output = chat_result.summary
                    execution_output, plot_path = executor.execute_code(formatted_answer)
                    st.subheader("Execution Output")
                    st.text(execution_output)  # now contains both STDOUT and STDERR

                    if os.path.exists(plot_path):
                        st.success("✅ Plot generated successfully!")
                        # Display the plot
                        st.image(plot_path, width=700)
                    else:
                        st.warning("⚠️ No plot was generated")

                    if st.session_state.debug:
                        st.session_state.debug_messages.append(("Execution Output", execution_output))

                    # If we've reached the end of iterations and we're successful
                    if not has_errors or current_iteration == max_iterations:
                        # Add successful execution to the conversation with plot
                        final_answer = formatted_answer if formatted_answer else last_assistant_message
                        response_text = f"Execution completed successfully:\n{execution_output}\n\nThe following code was executed:\n```python\n{final_answer}\n```"

                        # Add plot path marker for rendering in the conversation
                        if os.path.exists(plot_path):
                            response_text += f"\n\nPLOT_PATH:{plot_path}\n"

                        if current_iteration > 0:
                            response_text = f"After {current_iteration} correction attempts: " + response_text

                        # Set the response variable with our constructed text that includes plot
                        response = Response(content=response_text)

                    # Update last_assistant_message with the formatted answer for next iteration
                    last_assistant_message = formatted_answer
                    has_errors = any(error_indicator in execution_output for error_indicator in ["Traceback", "Error:", "Exception:", "TypeError:", "ValueError:", "NameError:", "SyntaxError:", "Error in Class"])

                if has_errors:
                    st.markdown("> ⚠️ **Note**: Some errors could not be fixed after multiple attempts. You can request changes by describing them in the chat.")
                    st.markdown(f"> ❌ Last execution message:\n{execution_output}")
                    response = Response(content=f"Execution completed with errors:\n{execution_output}")
                else:
                    # Check for common error indicators in the output
                    if any(error_indicator in execution_output for error_indicator in ["Traceback", "Error:", "Exception:", "TypeError:", "ValueError:", "NameError:", "SyntaxError:"]):
                        st.markdown("> ⚠️ **Note**: Code execution completed but with errors. You can request changes by describing them in the chat.")
                        st.markdown(f"> ❌ Execution message:\n{execution_output}")
                        response = Response(content=f"Execution completed with errors:\n{execution_output}")
                    else:
                        st.markdown(f"> ✅ Code executed successfully. Last execution message:\n{execution_output}")

                        # Display the final code that was successfully executed
                        with st.expander("View Successfully Executed Code", expanded=False):
                            st.markdown(last_assistant_message)

                        # Create a response message that includes the plot path
                        response_text = f"Execution completed successfully:\n{execution_output}\n\nThe following code was executed:\n```python\n{last_assistant_message}\n```"

                        # Add plot path marker for rendering in the conversation
                        if os.path.exists(plot_path):
                            response_text += f"\n\nPLOT_PATH:{plot_path}\n"

                        response = Response(content=response_text)
            else:
                response = Response(content="No code found to execute in the previous messages.")
        else:
            response = call_ai(context, user_input)
            if not mode_is_fast:
                st.markdown(response.content)

    st.session_state.memory.add_ai_message(response.content)
    st.session_state.messages.append({"role": "assistant", "content": response.content})

# --- Display Welcome Message (outside of sidebar) ---
# This ensures the welcome message appears in the main content area
if "llm_initialized" in st.session_state and st.session_state.llm_initialized and not st.session_state.greeted:
    # Create a chat message container for the welcome message
    with st.chat_message("assistant"):
        # Create empty container for streaming
        welcome_container = st.empty()

        # Set up the streaming handler
        welcome_stream_handler = StreamHandler(welcome_container)

        # Initialize streaming LLM
        streaming_llm = ChatOpenAI(
            model_name=st.session_state.selected_model,
            streaming=True,
            callbacks=[welcome_stream_handler],
            openai_api_key=api_key,
            temperature=0.2
        )

        # Generate the streaming welcome message
        greeting = streaming_llm.invoke([
            SystemMessage(content=Initial_Agent_Instructions),
            HumanMessage(content="Please greet the user and briefly explain what you can do as the CLASS code assistant.")
        ])

        # Save the completed message to history
        st.session_state.messages.append({"role": "assistant", "content": greeting.content})
        st.session_state.memory.add_ai_message(greeting.content)
        st.session_state.greeted = True

# --- Debug Info ---
if st.session_state.debug:
    with st.sidebar.expander("🛠️ Debug Information", expanded=True):
        # Create a container for debug messages
        debug_container = st.container()
        with debug_container:
            st.markdown("### Debug Messages")

            # Display all debug messages in a scrollable container
            for title, message in st.session_state.debug_messages:
                st.markdown(f"### {title}")
                st.markdown(message)
                st.markdown("---")

    with st.sidebar.expander("🛠️ Context Used"):
        if "context" in locals():
            st.markdown(context)
        else:
            st.markdown("No context retrieved yet.")
README.md CHANGED

@@ -5,7 +5,7 @@ colorFrom: blue
 colorTo: gray
 sdk: streamlit
 sdk_version: 1.44.1
-app_file:
+app_file: CLAPP.py
 pinned: false
 license: mit
 short_description: 'CLAPP: Classy Llm Agent for Pair Programming'
images/CLAPP.png ADDED

Git LFS Details
requirements.txt ADDED

@@ -0,0 +1,175 @@
ag2==0.8.5
aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
altair==4.2.2
annotated-types==0.7.0
anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
asttokens==3.0.0
async-lru==2.0.5
async-timeout==4.0.3
asyncer==0.0.8
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
blinker==1.9.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
comm==0.2.2
contourpy==1.3.1
cryptography==44.0.2
cycler==0.12.1
cython==3.0.12
dataclasses-json==0.6.7
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
diskcache==5.6.3
distro==1.9.0
docker==7.1.0
entrypoints==0.4
exceptiongroup==1.2.2
executing==2.2.0
faiss-cpu==1.10.0
fastjsonschema==2.21.1
feedparser==6.0.11
fonttools==4.56.0
fqdn==1.5.1
frozenlist==1.5.0
gitdb==4.0.12
gitpython==3.1.44
greenlet==3.1.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
httpx-sse==0.4.0
idna==3.10
ipykernel==6.29.5
ipython==8.35.0
ipywidgets==8.1.5
isoduration==20.11.0
jedi==0.19.2
jinja2==3.1.6
jiter==0.9.0
json5==0.10.0
jsonpatch==1.33
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter==1.1.1
jupyter-client==8.6.3
jupyter-console==6.6.3
jupyter-core==5.7.2
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter-server==2.15.0
jupyter-server-terminals==0.5.3
jupyterlab==4.3.6
jupyterlab-pygments==0.3.0
jupyterlab-server==2.27.3
jupyterlab-widgets==3.0.13
kiwisolver==1.4.8
langchain==0.3.23
langchain-community==0.3.21
langchain-core==0.3.52
langchain-openai==0.3.13
langchain-text-splitters==0.3.8
langsmith==0.3.19
markupsafe==3.0.2
marshmallow==3.26.1
matplotlib==3.10.1
matplotlib-inline==0.1.7
mistune==3.1.3
multidict==6.2.0
mypy-extensions==1.0.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nest-asyncio==1.6.0
notebook==7.3.3
notebook-shim==0.2.4
numpy==1.26.4
openai==1.68.2
orjson==3.10.16
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
parso==0.8.4
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
prometheus-client==0.21.1
prompt-toolkit==3.0.50
propcache==0.3.1
protobuf==5.29.4
psutil==7.0.0
ptyprocess==0.7.0
pure-eval==0.2.3
pyarrow==19.0.1
pyautogen==0.8.5
pycparser==2.22
pydantic==2.10.6
pydantic-core==2.27.2
pydantic-settings==2.8.1
pydeck==0.9.1
pygments==2.19.1
pyparsing==3.2.3
pypdf==5.4.0
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
python-json-logger==3.3.0
pytz==2025.2
pyyaml==6.0.2
pyzmq==26.4.0
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.23.1
scipy==1.15.2
send2trash==1.8.3
setuptools==78.1.0
sgmllib3k==1.0.0
six==1.17.0
smmap==5.0.2
sniffio==1.3.1
soupsieve==2.6
sqlalchemy==2.0.39
stack-data==0.6.3
streamlit==1.43.2
tenacity==9.0.0
termcolor==3.0.1
terminado==0.18.1
tiktoken==0.9.0
tinycss2==1.4.0
toml==0.10.2
tomli==2.2.1
toolz==1.0.0
tornado==6.4.2
tqdm==4.67.1
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing-extensions==4.13.1
typing-inspect==0.9.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
uv==0.6.9
watchdog==6.0.0
wcwidth==0.2.13
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==4.0.13
yarl==1.18.3
zstandard==0.23.0