diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -1,1871 +1,1281 @@
import streamlit as st
import streamlit.components.v1 as components
-import os
-from PIL import Image
-# Set the page layout
-st.set_page_config(layout="wide")
-import json
-import base64
-import time
-from dotenv import load_dotenv
-import os
-import requests
-
-# Try loading environment variables locally
-try:
- from dotenv import load_dotenv
- load_dotenv()
-except:
- pass
-
-# Get the token from environment variables
-HF_TOKEN = os.environ.get("HF_TOKEN")
-
-
-if "framework" not in st.session_state:
- st.session_state.framework = "gen"
-# Initialize state
-if "menu" not in st.session_state:
- st.session_state.menu = "class"
-
-if "show_overlay" not in st.session_state:
- st.session_state.show_overlay = True
-if "models" not in st.session_state:
- st.session_state.models = []
-if "save_path" not in st.session_state:
- st.session_state.save_path = ""
-# Initialize message storage
-if "messages" not in st.session_state:
- st.session_state.messages = []
-if "input_text" not in st.session_state:
- st.session_state.input_text = ""
-if "input_task" not in st.session_state:
- st.session_state.input_task = ""
-if "generate_response" not in st.session_state:
- st.session_state.generate_response = False
-
-
-if st.session_state.show_overlay == False:
- left = -9
- top = -10
-else:
- top= -6.75
- left =-5
-# Folder to store chat histories
-CHAT_DIR = "chat_histories"
-os.makedirs(CHAT_DIR, exist_ok=True)
-# Set default chat_id if not set
-if "chat_id" not in st.session_state:
- st.session_state.chat_id = "chat_1"
-# Save messages to a file
-def save_chat_history():
- if st.session_state.messages: # Only save if there's at least one message
- with open(f"{CHAT_DIR}/{st.session_state.chat_id}.json", "w", encoding="utf-8") as f:
- json.dump(st.session_state.messages, f, ensure_ascii=False, indent=4)
-#####################################################################################################
-
-# Function to load data
-
-def query_huggingface_model(selected_model: dict, input_data, input_type="text",max_tokens=512,task="text-classification",temperature=0.7, top_p=0.9 ):
- API_URL = selected_model.get("url")
- headers = {"Authorization": f"Bearer {HF_TOKEN}"}
-
- try:
- if input_type == "text":
- if task == "text-generation":
- payload = {
- "messages": [
- {
- "role": "user",
- "content": input_data
- }
- ],
- "max_tokens": max_tokens,
- "temperature": temperature,
- "top_p": top_p,
- "model":selected_model.get("model")
- }
-
- else:
- payload = {
- "inputs": input_data ,
-
- }
- response = requests.post(API_URL, headers=headers, json=payload)
- elif input_type == "image":
- with open(input_data, "rb") as f:
- data = f.read()
- response = requests.post(API_URL, headers=headers, data=data)
-
- else:
- return {"error": f"Unsupported input_type: {input_type}"}
-
- response.raise_for_status()
- return response.json()
+st.set_page_config(layout="wide", page_title="Streamlit LLM Playground")
+st.markdown("""
+
+ """, unsafe_allow_html=True)
+html_code="""
+
+
+
+
+
+ LLM Studio Enhanced
+
+
+
+
+
+
+
+
+
+
+
+
+
auto_awesome
+
Welcome to LLM Studio
+
Select a mode from the left, or click "New Chat" to begin a new conversation. Your chat history will be saved here for this session.
+
+
+
+
+
+
+
- }}
- /* Hover effect */
- div.stButton > button:hover {{
- background: rgba(255, 255, 255, 0.2);
- box-shadow: 0px 6px 12px rgba(0, 0, 0, 0.4); /* Enhanced shadow on hover */
- transform: scale(1.05); /* Slightly enlarge button */
- transform: scale(1.1); /* Slight zoom on hover */
- box-shadow: 0px 4px 12px rgba(255, 255, 255, 0.4); /* Glow effect */
- }}
+    <!-- remaining markup and scripts elided in this diff view -->
+</body>
+</html>
+"""
+# --- Option 1: Using st.components.v1.html (Recommended for more control) ---
+# This allows more control over height and scrolling
+components.html(
+    html_code,
+    height=800,      # assumed value; the original argument was elided in this diff
+    scrolling=True,  # assumed; lets overflowing content scroll inside the frame
+)
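+# components.html renders the string inside a sandboxed iframe, isolated from
+# the surrounding app, and defaults to height=150 with scrolling=False,
+# hence the explicit arguments above.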
+
+# --- Option 2: Using st.markdown with an iframe (Simpler, but less control) ---
+# st.markdown(
+#     f"""
+#     <iframe ...>  <!-- iframe markup elided in this diff view -->
+#     """,
+#     unsafe_allow_html=True
+# )
+
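+# Note: st.markdown injects the markup into the app's own DOM, where <script>
+# tags added this way will not execute, so Option 2 suits static HTML/CSS only.
+# Option 1 runs the page in its own iframe, so the scripts and styles in
+# html_code work without touching the rest of the app.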