diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..8510497a28dca6e8ef028821959d40ae0492ef49 --- /dev/null +++ b/.gitignore @@ -0,0 +1,23 @@ +# Credentials and Secrets +.env + +# Python Compiled Files +__pycache__/ +*.py[cod] + +# Virtual Environments +venv/ +.venv/ + +# System Files +.DS_Store +Thumbs.db + +# Logs +*.log + +# Editor Directories +.vscode/ +.idea/ + +*.jpg diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..7152071c7aba5653ea231d6ef951f4ff8d5dec7c --- /dev/null +++ b/Dockerfile @@ -0,0 +1,21 @@ +# Use official Python 3.10 image (Fixes 3.9 deprecation warnings) +FROM python:3.10-slim + +WORKDIR /app + +# Install git (required for some dependencies) +RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/* + +# Copy requirements and install +COPY requirements.txt . +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt + +# Copy application files +COPY . . 
+ +# Fix permissions for Hugging Face Spaces +RUN mkdir -p /tmp/home +ENV HOME=/tmp/home + +EXPOSE 7860 +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..eb27c6f9ef5fabaf13787297a5fa337aaab234fe --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Bhishaj + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c542bb71a1967435140f5f0881fadf5256d6003b --- /dev/null +++ b/README.md @@ -0,0 +1,23 @@ +--- +title: StyleSync AI +emoji: šŸ›ļø +colorFrom: green +colorTo: indigo +sdk: docker +pinned: false +--- + +# StyleSync AI + +**StyleSync AI** is an autonomous design & merchandising agent built for the Antigravity IDE. 
+ +## ⚔ Stack +* **Core:** FastAPI & Python 3.9 +* **Vision:** Gemini 1.5 Flash +* **Copy:** Llama 3 (Groq) +* **Memory:** Pinecone + +## šŸš€ Local Run +```bash +pip install -r requirements.txt +python main.py diff --git a/add_license.py b/add_license.py new file mode 100644 index 0000000000000000000000000000000000000000..97c3441097983a99d20aa6fadc2434438ccfa68f --- /dev/null +++ b/add_license.py @@ -0,0 +1,64 @@ +import os +import subprocess + +def run_command(command): + try: + # shell=True is often required on Windows for some commands/environments + print(f"Running: {command}") + result = subprocess.run(command, check=True, shell=True, capture_output=True, text=True) + print(result.stdout) + except subprocess.CalledProcessError as e: + print(f"Error running: {command}") + print(e.stderr) + # We don't exit here to allow attempting subsequent commands or user debugging if one fails, + # though for git flow it usually makes sense to stop. + # Given the instruction is a sequence, we should probably stop if add/commit fails. + exit(1) + +def main(): + license_text = """MIT License + +Copyright (c) 2025 Bhishaj + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" + + file_path = "LICENSE" + + print(f"Creating {file_path}...") + with open(file_path, "w", encoding="utf-8") as f: + f.write(license_text) + print(f"{file_path} created successfully.") + + print("Running git commands...") + + # 1. git add LICENSE + run_command("git add LICENSE") + + # 2. git commit -m 'Add MIT License' + run_command("git commit -m \"Add MIT License\"") + + # 3. git push space clean_deploy:main + print("Pushing to Hugging Face Space (this might take a few seconds)...") + run_command("git push space clean_deploy:main") + + print("Done! License added and pushed to Hugging Face Space.") + +if __name__ == "__main__": + main() diff --git a/agents/__init__.py b/agents/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/agents/manager.py b/agents/manager.py new file mode 100644 index 0000000000000000000000000000000000000000..a246213c4fc762489896742ea0a63a09225c006c --- /dev/null +++ b/agents/manager.py @@ -0,0 +1,46 @@ +import csv +import time +import os +import datetime +from agents.trend_spotter import TrendSpotter +from agents.visionary import Visionary + +class MerchManager: + def __init__(self): + self.trend_spotter = TrendSpotter() + self.visionary = Visionary() + self.results_dir = "results" + if not os.path.exists(self.results_dir): + os.makedirs(self.results_dir) + + def generate_batch(self, niche: str) -> str: + # Step 1: Get slogans + print(f"šŸ” Analyzing trends for niche: {niche}...") + slogans = self.trend_spotter.get_trends(niche) + + results = [] + + # Step 2: Generate art prompts + print(f"šŸŽØ Generating designs for {len(slogans)} slogans...") + for i, slogan in enumerate(slogans): + 
print(f"Generating design {i+1}/{len(slogans)}...") + prompt = self.visionary.generate_art_prompt(slogan, niche) + results.append({ + "Niche": niche, + "Slogan": slogan, + "Art Prompt": prompt + }) + time.sleep(10) + + # Step 3 & 4: Save to CSV + timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + filename = f"merch_batch_{niche}_{timestamp}.csv" + filepath = os.path.join(self.results_dir, filename) + + with open(filepath, mode='w', newline='', encoding='utf-8') as file: + writer = csv.DictWriter(file, fieldnames=["Niche", "Slogan", "Art Prompt"]) + writer.writeheader() + writer.writerows(results) + + print(f"āœ… Batch complete! Saved to {filepath}") + return filename diff --git a/agents/memory_agent.py b/agents/memory_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..edf7ef8ccaf86f605209c0dc0afde256898983cb --- /dev/null +++ b/agents/memory_agent.py @@ -0,0 +1,83 @@ +import os +import time +from dotenv import load_dotenv +from pinecone import Pinecone, ServerlessSpec +import google.generativeai as genai + +load_dotenv() + +class MemoryAgent: + def __init__(self): + # 1. Configure Gemini (for Embeddings) + self.gemini_api_key = os.getenv("GEMINI_API_KEY") + if not self.gemini_api_key: + print("āš ļø GEMINI_API_KEY missing. Memory Agent will fail.") + return + genai.configure(api_key=self.gemini_api_key) + + # 2. Configure Pinecone (Vector DB) + self.pinecone_api_key = os.getenv("PINECONE_API_KEY") + if not self.pinecone_api_key: + print("āš ļø PINECONE_API_KEY missing. Memory Agent will fail.") + return + + self.pc = Pinecone(api_key=self.pinecone_api_key) + self.index_name = "stylesync-index-v2" # Rebranded Index Name + + # 3. 
Create Index if not exists + existing_indexes = [i.name for i in self.pc.list_indexes()] + if self.index_name not in existing_indexes: + print(f"🧠 Creating new memory index: {self.index_name}...") + try: + self.pc.create_index( + name=self.index_name, + dimension=3072, # Dimension for 'models/gemini-embedding-001' + metric='cosine', + spec=ServerlessSpec(cloud='aws', region='us-east-1') + ) + while not self.pc.describe_index(self.index_name).status['ready']: + time.sleep(1) + print("āœ… Index created successfully.") + except Exception as e: + print(f"āŒ Failed to create index: {e}") + + self.index = self.pc.Index(self.index_name) + + def _get_embedding(self, text): + """Generates vector embeddings using Gemini""" + try: + result = genai.embed_content( + model="models/gemini-embedding-001", + content=text, + task_type="retrieval_document" + ) + return result['embedding'] + except Exception as e: + print(f"āŒ Embedding Error: {e}") + return [0.0] * 3072 # Return empty vector on failure + + def retrieve_keywords(self, query_text: str, top_k=5): + """Searches memory for relevant keywords""" + if not hasattr(self, 'index'): return [] + + print(f"🧠 Searching memory for: '{query_text}'...") + embedding = self._get_embedding(query_text) + + try: + results = self.index.query( + vector=embedding, + top_k=top_k, + include_metadata=True + ) + + # Extract unique keywords + keywords = [] + for match in results.matches: + if match.score > 0.5: # Relevance threshold + kw_str = match.metadata.get('keywords', '') + keywords.extend([k.strip() for k in kw_str.split(',')]) + + return list(set(keywords))[:10] # Return top 10 unique + except Exception as e: + print(f"āŒ Search Error: {e}") + return [] diff --git a/agents/visual_analyst.py b/agents/visual_analyst.py new file mode 100644 index 0000000000000000000000000000000000000000..431bacbd1633879554d72560173a9c7b4d25b530 --- /dev/null +++ b/agents/visual_analyst.py @@ -0,0 +1,59 @@ +import os +import json +import google.generativeai 
as genai +from dotenv import load_dotenv + +load_dotenv() + +class VisualAnalyst: + def __init__(self): + self.api_key = os.getenv("GEMINI_API_KEY") + if not self.api_key: + raise ValueError("GEMINI_API_KEY not found") + + genai.configure(api_key=self.api_key) + self.model_name = "models/gemini-flash-latest" + self.model = genai.GenerativeModel(self.model_name) + print(f"āœ… VisualAnalyst stored Gemini model: {self.model_name}") + + async def analyze_image(self, image_path: str): + try: + # Upload the file to Gemini + # Note: For efficiency in production, files should be managed (uploads/deletes) + # but for this agentic flow, we'll upload per request or assume local path usage helper if needed. + # However, the standard `model.generate_content` can take PIL images or file objects directly for some sdk versions, + # but using the File API is cleaner for 1.5 Flash multi-modal. + # Let's use the simpler PIL integration if available, or just path if the SDK supports it. + # actually, standard genai usage for images usually involves PIL or uploading. + # Let's try the PIL approach first as it's often more direct for local scripts. + import PIL.Image + img = PIL.Image.open(image_path) + + user_prompt = ( + "Analyze this product image. " + "Return ONLY valid JSON with keys: main_color, product_type, design_style, visual_features." + ) + + # Gemini 1.5 Flash supports JSON response schema, but simple prompting often works well too. + # We'll stick to prompt engineering for now to match the "Return ONLY valid JSON" instruction. 
+ response = self.model.generate_content([user_prompt, img]) + + response_text = response.text + + # Clean up potential markdown code fences + cleaned_content = response_text + if "```json" in cleaned_content: + cleaned_content = cleaned_content.replace("```json", "").replace("```", "") + elif "```" in cleaned_content: + cleaned_content = cleaned_content.replace("```", "") + + return json.loads(cleaned_content.strip()) + + except Exception as e: + print(f"āŒ Analysis Failed: {e}") + return { + "main_color": "Unknown", + "product_type": "Unknown", + "design_style": "Unknown", + "visual_features": [f"Error: {str(e)}"] + } diff --git a/agents/writer_agent.py b/agents/writer_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..ca7aa413f9d30b9626c329180cabe05ecf888e86 --- /dev/null +++ b/agents/writer_agent.py @@ -0,0 +1,81 @@ +import os +import json +import re +from groq import Groq +from dotenv import load_dotenv + +load_dotenv() + +class WriterAgent: + def __init__(self): + self.api_key = os.getenv("GROQ_API_KEY") + if not self.api_key: + print("āš ļø GROQ_API_KEY missing.") + self.client = None + else: + self.client = Groq(api_key=self.api_key) + self.model = "llama-3.3-70b-versatile" + + def write_listing(self, visual_data: dict, seo_keywords: list) -> dict: + if not self.client: + return {"error": "No API Key"} + + # Sanitization: Ensure data is clean string + style = str(visual_data.get('design_style', 'modern')) + + system_prompt = """You are an expert e-commerce copywriter. + Your task is to write a high-converting product listing. + + CRITICAL RULES: + 1. Return ONLY valid JSON. + 2. Do not include markdown formatting (like ```json). + 3. Escape any double quotes inside the description string. + 4. Keep the 'description' to 1-2 paragraphs max. 
+ + JSON Structure: + { + "title": "SEO Optimized Title", + "description": "Engaging product description...", + "features": ["Feature 1", "Feature 2", "Feature 3"], + "price_estimate": "$XX-$XX" + } + """ + + user_content = f""" + PRODUCT DATA: + {json.dumps(visual_data, indent=2)} + + KEYWORDS: + {', '.join(seo_keywords)} + """ + + try: + completion = self.client.chat.completions.create( + model=self.model, + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_content} + ], + # LOWER TEMPERATURE TO 0.3 TO PREVENT SYNTAX ERRORS + temperature=0.3, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content + + # Extra Safety: Attempt to repair common JSON strings if needed + try: + return json.loads(content) + except json.JSONDecodeError: + # If strict parsing fails, try to strip markdown + content = content.replace("```json", "").replace("```", "").strip() + return json.loads(content) + + except Exception as e: + print(f"āŒ Writer Error: {e}") + return { + "title": "Error Generating Listing", + "description": "Please try again. 
The AI model output invalid data.", + "features": [], + "error": str(e) + } diff --git a/check_basic.py b/check_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..62ed83a48e0a428b6347cdd23e262080cb85ed5d --- /dev/null +++ b/check_basic.py @@ -0,0 +1,20 @@ +import os +from huggingface_hub import InferenceClient +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +client = InferenceClient(api_key=api_key) + +print(f"Testing token with microsoft/resnet-50") + +try: + # Pass the URL directly as the input (InferenceClient handles URLs for image tasks) + result = client.image_classification( + model="microsoft/resnet-50", + image="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true" + ) + print("Success:", result) +except Exception as e: + print("Failed:", e) diff --git a/check_gemini.py b/check_gemini.py new file mode 100644 index 0000000000000000000000000000000000000000..cfca9358a8a65cf29acb003b394c594983f845f0 --- /dev/null +++ b/check_gemini.py @@ -0,0 +1,26 @@ +import os +import google.generativeai as genai +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY") +genai.configure(api_key=api_key) + +print("Listing available Gemini models...") +try: + for m in genai.list_models(): + if 'generateContent' in m.supported_generation_methods: + print(m.name) +except Exception as e: + print(f"List models failed: {e}") + +model_name = "gemini-1.5-flash" +print(f"\nTesting model: {model_name}") + +try: + model = genai.GenerativeModel(model_name) + response = model.generate_content("Hello, can you see this?") + print("Response:", response.text) +except Exception as e: + print(f"Test failed: {e}") diff --git a/check_gemini_clean.py b/check_gemini_clean.py new file mode 100644 index 0000000000000000000000000000000000000000..751fe58712a396513b8f40bd9b9773b8407f90ad --- /dev/null +++ 
b/check_gemini_clean.py @@ -0,0 +1,24 @@ +import os +import google.generativeai as genai +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY") +genai.configure(api_key=api_key) + +candidates = [ + "gemini-2.0-flash", + "gemini-2.0-flash-exp", + "models/gemini-2.0-flash" +] + +for model_name in candidates: + print(f"\nTesting model: {model_name}") + try: + model = genai.GenerativeModel(model_name) + response = model.generate_content("Hello") + print(f"āœ… Success with {model_name}: {response.text}") + break + except Exception as e: + print(f"āŒ Failed with {model_name}: {e}") diff --git a/check_gemini_models.py b/check_gemini_models.py new file mode 100644 index 0000000000000000000000000000000000000000..b872213a0c7291fe1261bdd36c06cf6f36b2e8e9 --- /dev/null +++ b/check_gemini_models.py @@ -0,0 +1,15 @@ +import os +from dotenv import load_dotenv +import google.generativeai as genai + +load_dotenv() +api_key = os.getenv("GEMINI_API_KEY") + +if not api_key: + print("āŒ API Key not found") +else: + genai.configure(api_key=api_key) + print("Listing available models:") + for m in genai.list_models(): + if 'embedContent' in m.supported_generation_methods: + print(f"- {m.name}") diff --git a/check_groq.py b/check_groq.py new file mode 100644 index 0000000000000000000000000000000000000000..1f1cfc8daf0a05db639ec0c8a57894f8937e5d93 --- /dev/null +++ b/check_groq.py @@ -0,0 +1,11 @@ +import os +from groq import Groq +from dotenv import load_dotenv + +load_dotenv() +client = Groq(api_key=os.getenv("GROQ_API_KEY")) + +print("Listing Groq models...") +models = client.models.list() +for m in models.data: + print(m.id) diff --git a/check_groq_models.py b/check_groq_models.py new file mode 100644 index 0000000000000000000000000000000000000000..95ef4f8e708e6ef50024201903a48ad7c058663c --- /dev/null +++ b/check_groq_models.py @@ -0,0 +1,14 @@ +import os +from groq import Groq +from dotenv import load_dotenv + 
+load_dotenv() + +try: + client = Groq(api_key=os.getenv("GROQ_API_KEY")) + models = client.models.list() + print("Available Models:") + for model in models.data: + print(f"- {model.id}") +except Exception as e: + print(f"Error listing models: {e}") diff --git a/check_groq_vision.py b/check_groq_vision.py new file mode 100644 index 0000000000000000000000000000000000000000..2fef204aa21a954072c0646b9fd04ea68d80a0d2 --- /dev/null +++ b/check_groq_vision.py @@ -0,0 +1,36 @@ +import os +from groq import Groq +from dotenv import load_dotenv +import base64 + +load_dotenv() + +client = Groq(api_key=os.getenv("GROQ_API_KEY")) +model = "llama-3.2-11b-vision-preview" + +print(f"Testing Groq Vision model: {model}") + +# Test 1: Image URL +print("\n--- Test 1: Image URL ---") +try: + image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true" + completion = client.chat.completions.create( + model=model, + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + {"type": "image_url", "image_url": {"url": image_url}}, + ], + } + ], + temperature=1, + max_tokens=1024, + top_p=1, + stream=False, + stop=None, + ) + print("Response:", completion.choices[0].message.content) +except Exception as e: + print("Groq Vision failed:", e) diff --git a/check_idefics.py b/check_idefics.py new file mode 100644 index 0000000000000000000000000000000000000000..9a147a19999a3e66c6c125232153a5e26f9112f8 --- /dev/null +++ b/check_idefics.py @@ -0,0 +1,33 @@ +import os +from huggingface_hub import InferenceClient +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +client = InferenceClient(api_key=api_key) +model = "HuggingFaceM4/idefics2-8b" + +print(f"Testing model: {model}") + +# Test 1: Image URL +print("\n--- Test 1: Image URL ---") +try: + image_url = 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true" + messages = [ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "text", "text": "What is in this image?"} + ] + } + ] + completion = client.chat.completions.create( + model=model, + messages=messages, + max_tokens=100 + ) + print("Response:", completion.choices[0].message.content) +except Exception as e: + print("Image URL failed:", e) diff --git a/check_idefics_raw.py b/check_idefics_raw.py new file mode 100644 index 0000000000000000000000000000000000000000..63e5737d1812d52a2843276bbd54f98858b8b93c --- /dev/null +++ b/check_idefics_raw.py @@ -0,0 +1,29 @@ +import os +import requests +import json +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +model = "HuggingFaceM4/idefics2-8b" +url = f"https://router.huggingface.co/models/{model}" + +headers = {"Authorization": f"Bearer {api_key}"} + +print(f"Testing URL: {url}") + +# Test A: Simple text inputs +print("\n--- Test A: Simple Text ---") +response = requests.post(url, headers=headers, json={"inputs": "Hello"}) +print(f"Status: {response.status_code}") +print("Response:", response.text) + +# Test B: Formatted inputs (Standard for some VLM APIs) +# Often they accept { "inputs": "User: ...", "parameters": ... 
} +print("\n--- Test B: Formatted Prompt ---") +image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true" +prompt = f"User: ![]({image_url}) Describe this image.\nAssistant:" +response = requests.post(url, headers=headers, json={"inputs": prompt, "parameters": {"max_new_tokens": 50}}) +print(f"Status: {response.status_code}") +print("Response:", response.text) diff --git a/check_idefics_v2.py b/check_idefics_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..cff412d492377c765df36dfdf508f92fbc4211a4 --- /dev/null +++ b/check_idefics_v2.py @@ -0,0 +1,31 @@ +import os +from huggingface_hub import InferenceClient +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +client = InferenceClient(api_key=api_key) +model = "HuggingFaceM4/idefics2-8b" + +print(f"Testing model: {model}") + +image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true" + +# Format for Idefics2: +# User: ![]() \nAssistant: +prompt = f"User: ![]({image_url}) Describe this image.\nAssistant:" + +print(f"\n--- Testing with text_generation and specific prompt ---") +print(f"Prompt: {prompt}") + +try: + # Use text_generation for models that don't support chat + response = client.text_generation( + prompt=prompt, + model=model, + max_new_tokens=100 + ) + print("Response:", response) +except Exception as e: + print("Failed:", e) diff --git a/check_idefics_v3.py b/check_idefics_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..c141fb6a5ab3ec0ccdc7486cf9f14be26624dc16 --- /dev/null +++ b/check_idefics_v3.py @@ -0,0 +1,30 @@ +import os +import traceback +from huggingface_hub import InferenceClient +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +client = InferenceClient(api_key=api_key) +model = "HuggingFaceM4/idefics2-8b" + +print(f"Testing model: 
{model}") + +print("\n--- Test 1: Image to Text (Captioning) ---") +try: + # This might work if the API treats it as captioning + res = client.image_to_text( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true", + model=model + ) + print("Response:", res) +except Exception: + traceback.print_exc() + +print("\n--- Test 2: Text Generation (Simple) ---") +try: + res = client.text_generation("describe a car", model=model, max_new_tokens=50) + print("Response:", res) +except Exception: + traceback.print_exc() diff --git a/check_llama.py b/check_llama.py new file mode 100644 index 0000000000000000000000000000000000000000..ce89a93096544562a342f56a6ac8eb2f0074892a --- /dev/null +++ b/check_llama.py @@ -0,0 +1,33 @@ +import os +from huggingface_hub import InferenceClient +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +client = InferenceClient(api_key=api_key) +model = "meta-llama/Llama-3.2-11B-Vision-Instruct" + +print(f"Testing model: {model}") + +# Test 1: Image URL (Llama Vision) +print("\n--- Test 1: Image URL ---") +try: + image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true" + messages = [ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "text", "text": "What is in this image?"} + ] + } + ] + completion = client.chat.completions.create( + model=model, + messages=messages, + max_tokens=100 + ) + print("Response:", completion.choices[0].message.content) +except Exception as e: + print("Image URL failed:", e) diff --git a/check_llava.py b/check_llava.py new file mode 100644 index 0000000000000000000000000000000000000000..45f8838ebf1283406699c569e89065ea0ce4f8de --- /dev/null +++ b/check_llava.py @@ -0,0 +1,33 @@ +import os +from huggingface_hub import InferenceClient +from dotenv import load_dotenv + +load_dotenv() + +api_key = 
os.getenv("HF_TOKEN") +client = InferenceClient(api_key=api_key) +model = "llava-hf/llava-1.5-7b-hf" + +print(f"Testing model: {model}") + +# Test 1: Image URL +print("\n--- Test 1: Image URL ---") +try: + image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true" + messages = [ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "text", "text": "What is in this image?"} + ] + } + ] + completion = client.chat.completions.create( + model=model, + messages=messages, + max_tokens=100 + ) + print("Response:", completion.choices[0].message.content) +except Exception as e: + print("Image URL failed:", e) diff --git a/check_models.py b/check_models.py new file mode 100644 index 0000000000000000000000000000000000000000..959f25472fa8d8095ea27d730091b39b1be92b95 --- /dev/null +++ b/check_models.py @@ -0,0 +1,15 @@ +import google.generativeai as genai +import os +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("GEMINI_API_KEY") +if not api_key: + print("No API key found") +else: + genai.configure(api_key=api_key) + print("Listing models...") + for m in genai.list_models(): + if 'generateContent' in m.supported_generation_methods: + print(m.name) diff --git a/check_models_list.py b/check_models_list.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7dd960dc1329afe118c05f897888e57353f001 --- /dev/null +++ b/check_models_list.py @@ -0,0 +1,15 @@ +import google.generativeai as genai +import os +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("GEMINI_API_KEY") +if not api_key: + print("āŒ API Key not found") +else: + genai.configure(api_key=api_key) + print("Listing available models...") + for m in genai.list_models(): + if 'generateContent' in m.supported_generation_methods: + print(m.name) diff --git a/check_qwen.py b/check_qwen.py new file mode 100644 index 
0000000000000000000000000000000000000000..f91c5ec6dd133195b91d402ff082a927939b516f --- /dev/null +++ b/check_qwen.py @@ -0,0 +1,48 @@ +import os +from huggingface_hub import InferenceClient +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +client = InferenceClient(api_key=api_key) +model = "Qwen/Qwen2-VL-7B-Instruct" + +print(f"Testing model: {model}") + +# Test 1: Text only +print("\n--- Test 1: Text Only ---") +try: + messages = [ + {"role": "user", "content": "Hello, are you working?"} + ] + completion = client.chat.completions.create( + model=model, + messages=messages, + max_tokens=100 + ) + print("Response:", completion.choices[0].message.content) +except Exception as e: + print("Text only failed:", e) + +# Test 2: Image (using a public URL to avoid base64 issues first) +print("\n--- Test 2: Image URL ---") +try: + image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true" + messages = [ + { + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "text", "text": "What is in this image?"} + ] + } + ] + completion = client.chat.completions.create( + model=model, + messages=messages, + max_tokens=100 + ) + print("Response:", completion.choices[0].message.content) +except Exception as e: + print("Image URL failed:", e) diff --git a/check_qwen_raw.py b/check_qwen_raw.py new file mode 100644 index 0000000000000000000000000000000000000000..45c10fd2848b66ec8c269e8a3fef31daabf88dbc --- /dev/null +++ b/check_qwen_raw.py @@ -0,0 +1,52 @@ +import os +import requests +import json +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +model = "Qwen/Qwen2-VL-7B-Instruct" +# Update URL to router +url = f"https://router.huggingface.co/models/{model}" + +headers = {"Authorization": f"Bearer {api_key}"} + +print(f"Testing URL: {url}") + +# Test 1: Simple text generation payload (inputs string) 
+data_text = { + "inputs": "Hello", + "parameters": {"max_new_tokens": 50} +} +print("\n--- Test 1: Text Generation (inputs string) ---") +response = requests.post(url, headers=headers, json=data_text) +print(f"Status: {response.status_code}") +print("Response:", response.text) + +# Test 2: VQA format +data_vqa = { + "inputs": { + "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true", + "question": "What is in this image?" + } +} +print("\n--- Test 2: VQA Format ---") +response = requests.post(url, headers=headers, json=data_vqa) +print(f"Status: {response.status_code}") +print("Response:", response.text) + +# Test 3: Chat Completions API (OpenAI style) +url_chat = f"https://router.huggingface.co/models/{model}/v1/chat/completions" +print(f"\nTesting URL: {url_chat}") +data_chat = { + "model": model, # Sometimes required in body + "messages": [ + {"role": "user", "content": "Hello"} + ], + "max_tokens": 50 +} +print("\n--- Test 3: Chat Completion ---") +response = requests.post(url_chat, headers=headers, json=data_chat) +print(f"Status: {response.status_code}") +print("Response:", response.text) diff --git a/code.html b/code.html new file mode 100644 index 0000000000000000000000000000000000000000..411f9e9fdab4fe521489ecb1baf1b4418b505d32 --- /dev/null +++ b/code.html @@ -0,0 +1,203 @@ + + + + +StyleSync AI AI AI AI AI Dashboard + + + + + + + + + +
+
+
+all_inclusive +
+
+

StyleSync AI

+Enterprise Edition +
+
+ +
+
+
+
+
+

Input Data

+Step 1 of 2 +
+
+
+
+cloud_upload +
+
+

Drop Product Image Here

+

Supports JPG, PNG, WEBP

+
+ +
+
+
+
+ +
+ +
+
+
+ +
+
+
+
+
+
+
+

Generated Output

+
+ + +
+
+
+
+
+ + +
+
+Vision Agent
+Gemini 1.5 Flash
+visibility +
+
+
+ +
+
+Reasoning Agent +Llama 3 70B +
+psychology +
+
+
+ +
+
+SEO Context +Pinecone DB +
+database +
+
+
+
+output.json +
+
+
+
+
+
+
+
1  {
+2    "product_analysis": {
+3      "title": "Apex Terrain All-Weather Performance Jacket",
+4      "category": "Outerwear / Men's / Technical Shells",
+5      "features": [
+6        "Gore-Tex Pro Membrane",
+7        "Articulated Sleeves",
+8        "Helmet-Compatible Hood"
+9      ],
+10     "seo_tags": [
+11       "#hikinggear", "#waterproof", "#adventure"
+12     ],
+13     "sentiment_score": 0.98,
+14     "market_fit": "High Demand"
+15   },
+16   "deployment_status": "Ready"
+17 }
+
+
+
+
+
+ + + \ No newline at end of file diff --git a/connect_n8n.py b/connect_n8n.py new file mode 100644 index 0000000000000000000000000000000000000000..229041fe72e0225969167ddb119129031166cb5a --- /dev/null +++ b/connect_n8n.py @@ -0,0 +1,126 @@ +import os +import subprocess + +def update_requirements(): + req_file = "requirements.txt" + if not os.path.exists(req_file): + with open(req_file, "w") as f: + f.write("httpx\n") + print(f"Created {req_file} with httpx.") + return + + with open(req_file, "r") as f: + content = f.read() + + if "httpx" not in content: + with open(req_file, "a") as f: + f.write("\nhttpx\n") + print("Appended httpx to requirements.txt.") + else: + print("httpx already in requirements.txt.") + +def update_main(): + main_content = r'''import os +import httpx +import asyncio +from fastapi import FastAPI, UploadFile, File +from fastapi.responses import HTMLResponse, JSONResponse +from dotenv import load_dotenv +# Import Agents +from agents.visual_analyst import VisualAnalyst +from agents.memory_agent import MemoryAgent +from agents.writer_agent import WriterAgent +load_dotenv() +app = FastAPI() +# Initialize Agents +try: + visual_agent = VisualAnalyst() + memory_agent = MemoryAgent() + writer_agent = WriterAgent() + memory_agent.seed_database() + print("āœ… All Agents Online") +except Exception as e: + print(f"āš ļø Agent Startup Warning: {e}") +@app.get("/", response_class=HTMLResponse) +async def read_root(): + try: + with open("dashboard.html", "r") as f: + return f.read() + except FileNotFoundError: + return "

Error: dashboard.html not found

" +@app.post("/generate-catalog") +async def generate_catalog(file: UploadFile = File(...)): + try: + # 1. Save Temp File + os.makedirs("uploads", exist_ok=True) + file_path = f"uploads/{file.filename}" + with open(file_path, "wb") as f: + f.write(await file.read()) + # 2. Run AI Pipeline + visual_data = await visual_agent.analyze_image(file_path) + + query = f"{visual_data.get('main_color', '')} {visual_data.get('product_type', 'product')}" + seo_keywords = memory_agent.retrieve_keywords(query) + + listing = writer_agent.write_listing(visual_data, seo_keywords) + + # 3. Construct Final Payload + final_data = { + "visual_data": visual_data, + "seo_keywords": seo_keywords, + "listing": listing + } + # 4. ⚔ N8N AUTOMATION TRIGGER ⚔ + n8n_url = os.getenv("N8N_WEBHOOK_URL") + if n8n_url: + print(f"šŸš€ Sending data to N8N: {n8n_url}") + # Fire and forget (don't make the user wait for n8n) + asyncio.create_task(send_to_n8n(n8n_url, final_data)) + + # Cleanup + if os.path.exists(file_path): + os.remove(file_path) + + return JSONResponse(content=final_data) + except Exception as e: + return JSONResponse(content={"error": str(e)}, status_code=500) +# Async Helper to send data without blocking +async def send_to_n8n(url, data): + try: + async with httpx.AsyncClient() as client: + await client.post(url, json=data, timeout=5.0) + print("āœ… N8N Webhook Sent Successfully") + except Exception as e: + print(f"āŒ N8N Webhook Failed: {e}") +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=7860) +''' + with open("main.py", "w", encoding="utf-8") as f: + f.write(main_content) + print("Updated main.py with N8N integration logic.") + +def deploy(): + try: + subprocess.run(["git", "add", "."], check=True) + # Check if there are changes to commit + status = subprocess.run(["git", "status", "--porcelain"], capture_output=True, text=True) + if status.stdout.strip(): + subprocess.run(["git", "commit", "-m", "Add N8N Integration"], check=True) + 
print("Git commit successful.") + else: + print("No changes to commit.") + + print("Pushing to space...") + subprocess.run(["git", "push", "space", "clean_deploy:main"], check=True) + print("āœ… Successfully deployed to Hugging Face Space.") + + except subprocess.CalledProcessError as e: + print(f"āŒ Deployment failed: {e}") + +if __name__ == "__main__": + print("Starting N8N Integration Setup...") + update_requirements() + update_main() + deploy() + print("āœ… connect_n8n.py completed.") diff --git a/create_dockerfile.py b/create_dockerfile.py new file mode 100644 index 0000000000000000000000000000000000000000..03ef0f80ff9e027e30a57aef41a8c93db4f1a38d --- /dev/null +++ b/create_dockerfile.py @@ -0,0 +1,50 @@ +import subprocess +import sys + +def run_command(command): + print(f"Running: {command}") + try: + # shell=True allows us to run the command string exactly as provided + subprocess.run(command, shell=True, check=True) + except subprocess.CalledProcessError as e: + print(f"Error executing command '{command}': {e}") + sys.exit(1) + +def main(): + # 1. Create Dockerfile + dockerfile_content = """FROM python:3.9 +WORKDIR /code +COPY ./requirements.txt /code/requirements.txt +RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt +COPY . /code +# Fix permissions for libraries that write to home +RUN mkdir -p /tmp/home +ENV HOME=/tmp/home +# Start the FastAPI server on port 7860 (required by Hugging Face) +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] +""" + + print("Creating Dockerfile...") + try: + with open("Dockerfile", "w", newline='\n') as f: + f.write(dockerfile_content) + print("Dockerfile created successfully.") + except Exception as e: + print(f"Failed to create Dockerfile: {e}") + sys.exit(1) + + # 2. 
Push to Space + print("Executing Git commands...") + commands = [ + 'git add Dockerfile', + 'git commit -m "Add Dockerfile for Hugging Face deployment"', + 'git push -f space clean_deploy:main' + ] + + for cmd in commands: + run_command(cmd) + + print("\ncreate_dockerfile.py execution completed.") + +if __name__ == "__main__": + main() diff --git a/dashboard.html b/dashboard.html new file mode 100644 index 0000000000000000000000000000000000000000..15edbbdcaa46944f9dae4f994e138901b5fc1bc9 --- /dev/null +++ b/dashboard.html @@ -0,0 +1,462 @@ + + + + + + + StyleSync AI AI AI AI AI Dashboard + + + + + + + + + + +
+
+
+ all_inclusive +
+
+

StyleSync AI

+ Enterprise Edition +
+
+ +
+
+
+
+
+

Input Data

+ Step + 1 of 2 +
+
+ +
+ +
+ cloud_upload +
+
+

Drop + Product Image Here

+

Supports JPG, PNG, WEBP

+
+ +
+
+
+
+
+ +
+ +
+
+
+ +
+
+
+
+
+
+
+
+

Generated Output

+
+ Step + 2 of 2 + + +
+
+
+
+
+ + +
+
+ Vision Agent + Gemini Pro 1.5 +
+ visibility +
+
+
+ +
+
+ Reasoning Agent + Llama 3 70B +
+ psychology +
+
+
+ +
+
+ SEO Context + Pinecone DB +
+ database +
+
+
+
+ output.json +
+
+
+
+
+
+
+
1  {
+2    "product_analysis": {
+3      "title": "Apex Terrain All-Weather Performance Jacket",
+4      "category": "Outerwear / Men's / Technical Shells",
+5      "features": [
+6        "Gore-Tex Pro Membrane",
+7        "Articulated Sleeves",
+8        "Helmet-Compatible Hood"
+9      ],
+10     "seo_tags": [
+11       "#hikinggear", "#waterproof", "#adventure"
+12     ],
+13     "sentiment_score": 0.98,
+14     "market_fit": "High Demand"
+15   },
+16   "deployment_status": "Ready"
+17 }
+
+
+
+
+
+ + + + \ No newline at end of file diff --git a/final_deploy_push.py b/final_deploy_push.py new file mode 100644 index 0000000000000000000000000000000000000000..154110fe8a8510e547a4b6a72aef730ccc4c4795 --- /dev/null +++ b/final_deploy_push.py @@ -0,0 +1,20 @@ +import subprocess +import sys + +# Force UTF-8 output for Windows terminals +sys.stdout.reconfigure(encoding='utf-8') + +def deploy(): + print("āš ļø Ensure you are inside the D:\\Projects\\StyleSync AI AI AI AI AI directory before running this!") + + command = "git push --force space clean_deploy:main" + print(f"\nRunning: {command} ...") + + try: + subprocess.run(command, check=True, shell=True) + print("\nāœ… Successfully pushed to Space!") + except subprocess.CalledProcessError as e: + print(f"\nāŒ Push failed: {e}") + +if __name__ == "__main__": + deploy() diff --git a/final_upload.py b/final_upload.py new file mode 100644 index 0000000000000000000000000000000000000000..ed26601b08eacd385bae013fc81ab0446e2d229f --- /dev/null +++ b/final_upload.py @@ -0,0 +1,61 @@ +import subprocess +import sys + +def run_command(command, check=True): + try: + subprocess.run(command, check=check, shell=True, text=True) + except subprocess.CalledProcessError as e: + print(f"Error executing currently: {command}") + # We don't exit here because some commands like 'remote remove' might fail meaningfully but we want to continue, + # or we handle them specifically in the main flow. 
+ if check: + # Re-raise if we strictly wanted this to succeed + raise e + +def main(): + # Input: Ask the user for the GitHub URL + if len(sys.argv) > 1: + github_url = sys.argv[1].strip() + else: + github_url = input('Please paste your GitHub URL here: ').strip() + + if not github_url: + print("Error: No URL provided.") + return + + try: + # Git Commands sequence + print("Initializing git...") + run_command("git init") + + print("Adding files...") + run_command("git add .") + + print("Committing files...") + try: + # Use check=True so it raises exception on failure, which we catch + run_command('git commit -m "Initial commit - StyleSync AI AI AI AI AI"', check=True) + except subprocess.CalledProcessError: + print("Commit failed (likely nothing to commit). Continuing...") + + print("Renaming branch to main...") + run_command("git branch -M main") + + print("Removing existing origin (if any)...") + # Don't check=True here because it fails if origin doesn't exist + run_command("git remote remove origin", check=False) + + print(f"Adding remote origin: {github_url}") + run_command(f"git remote add origin {github_url}") + + print("Pushing to GitHub...") + run_command("git push -u origin main") + + # Success message + print('āœ… Code is live on GitHub!') + + except Exception as e: + print(f"\nāŒ An error occurred: {e}") + +if __name__ == "__main__": + main() diff --git a/finalize_ui.py b/finalize_ui.py new file mode 100644 index 0000000000000000000000000000000000000000..083dbf3796ca74c18e3023888b677a1f7d82ddae --- /dev/null +++ b/finalize_ui.py @@ -0,0 +1,34 @@ +import os + +def polish_dashboard(): + file_path = "dashboard.html" + if not os.path.exists(file_path): + print("āŒ dashboard.html not found!") + return + + with open(file_path, "r", encoding="utf-8") as f: + html = f.read() + + # Force Branding Updates + replacements = { + "MerchFlow AI": "StyleSync AI", + "MerchFlow": "StyleSync", + "bg-primary": "bg-emerald-600", + "text-primary": "text-emerald-500", + 
"border-primary": "border-emerald-500", + "from-primary": "from-emerald-500", + "to-primary": "to-emerald-500", + "hover:bg-primary": "hover:bg-emerald-500", + "hover:text-primary": "hover:text-emerald-400" + } + + for old, new in replacements.items(): + html = html.replace(old, new) + + with open(file_path, "w", encoding="utf-8") as f: + f.write(html) + + print("āœ… Dashboard UI polished: Emerald Green Theme & StyleSync Branding applied.") + +if __name__ == "__main__": + polish_dashboard() diff --git a/fix_dashboard_routing.py b/fix_dashboard_routing.py new file mode 100644 index 0000000000000000000000000000000000000000..d1451fc81ac97f14e06a5387ada14e2d88cfef77 --- /dev/null +++ b/fix_dashboard_routing.py @@ -0,0 +1,87 @@ +import os +import subprocess + +def main(): + # Define the content for main.py + main_py_content = """import os +from fastapi import FastAPI, UploadFile, File, HTTPException +from fastapi.responses import HTMLResponse, JSONResponse +from fastapi.staticfiles import StaticFiles +from agents.visual_analyst import VisualAnalyst +from dotenv import load_dotenv +# Load environment variables +load_dotenv() +app = FastAPI() +# Initialize Agent +visual_agent = VisualAnalyst() +# 1. READ THE DASHBOARD HTML FILE INTO MEMORY +try: + with open("dashboard.html", "r") as f: + dashboard_html = f.read() +except FileNotFoundError: + dashboard_html = "

Error: dashboard.html not found. Please ensure the file exists.

" +# 2. SERVE DASHBOARD AT ROOT (Home Page) +@app.get("/", response_class=HTMLResponse) +async def read_root(): + return dashboard_html +# 3. KEEP /dashboard ROUTE AS BACKUP +@app.get("/dashboard", response_class=HTMLResponse) +async def read_dashboard(): + return dashboard_html +@app.post("/analyze") +async def analyze_merch(file: UploadFile = File(...)): + try: + os.makedirs("uploads", exist_ok=True) + file_path = f"uploads/{file.filename}" + with open(file_path, "wb") as f: + f.write(await file.read()) + result = await visual_agent.analyze_image(file_path) + + if os.path.exists(file_path): + os.remove(file_path) + + return JSONResponse(content=result) + except Exception as e: + return JSONResponse(content={"error": str(e)}, status_code=500) +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=7860) +""" + + # Overwrite main.py + print("Overwriting main.py...") + try: + with open("main.py", "w", encoding="utf-8") as f: + f.write(main_py_content) + print("Successfully updated main.py") + except Exception as e: + print(f"Error writing main.py: {e}") + return + + # Define git commands + git_commands = [ + ["git", "add", "main.py"], + ["git", "commit", "-m", "Fix dashboard 404 by serving HTML at root"], + ["git", "push", "space", "clean_deploy:main"] + ] + + # Run git commands + print("\nRunning git commands...") + for cmd in git_commands: + print(f"Executing: {' '.join(cmd)}") + try: + subprocess.run(cmd, check=True) + except subprocess.CalledProcessError as e: + print(f"Command failed: {e}") + # If commit fails (e.g. nothing to commit), we might want to continue or stop. + # But push should definitely happen if commit works. + # If commit fails because "nothing to commit, working tree clean", push might still be relevant if previous commit wasn't pushed? + # But the user logic implies we just made a change to main.py, so commit should succeed unless main.py was ALREADY this content. 
+ # We will continue to try push even if commit fails, just in case. + # But wait, if commit fails, push might proceed. + pass + + print("\nfix_dashboard_routing.py completed.") + +if __name__ == "__main__": + main() diff --git a/fix_google_key.py b/fix_google_key.py new file mode 100644 index 0000000000000000000000000000000000000000..cdabb024b499d627225436f7f41e66ecc88d51e3 --- /dev/null +++ b/fix_google_key.py @@ -0,0 +1,48 @@ +import os +import sys + +# Force UTF-8 output for Windows terminals +sys.stdout.reconfigure(encoding='utf-8') + +# 1. Update .env +env_path = ".env" +key = "GOOGLE_API_KEY" +value = "AIzaSyDgIkagGBciWNZDTn07OlfY9tVPvo6KJ1on" + +print(f"Updating {key} in .env...") + +lines = [] +if os.path.exists(env_path): + with open(env_path, "r", encoding="utf-8") as f: + lines = f.readlines() + +found = False +new_lines = [] +for line in lines: + if line.startswith(f"{key}="): + new_lines.append(f"{key}={value}\n") + found = True + else: + new_lines.append(line) + +if not found: + if new_lines and not new_lines[-1].endswith('\n'): + new_lines.append('\n') + new_lines.append(f"{key}={value}\n") + +with open(env_path, "w", encoding="utf-8") as f: + f.writelines(new_lines) + +print(f"āœ… Updated {key} in .env") + +# 2. 
Upload to Cloud +print("Syncing secrets to Hugging Face Space...") +try: + # Build path to ensure we can import upload_secrets + sys.path.append(os.getcwd()) + from upload_secrets import upload_secrets + + upload_secrets() + print("āœ… Google Key saved locally and uploaded to Hugging Face!") +except Exception as e: + print(f"āŒ Failed to sync: {e}") diff --git a/fix_readme.py b/fix_readme.py new file mode 100644 index 0000000000000000000000000000000000000000..f31b53c1ca298e28e2b50abf8b61dbd105e1346a --- /dev/null +++ b/fix_readme.py @@ -0,0 +1,42 @@ +import os +import sys +import subprocess + +# Force UTF-8 output for Windows terminals +sys.stdout.reconfigure(encoding='utf-8') + +readme_content = """--- +title: StyleSync AI AI AI AI AI +emoji: šŸš€ +colorFrom: blue +colorTo: indigo +sdk: docker +pinned: false +--- +# StyleSync AI AI AI AI AI +An AI-powered merchandising agent. +""" + +def run_command(command): + print(f"Running: {command}") + try: + subprocess.run(command, check=True, shell=True) + print("āœ… Success") + except subprocess.CalledProcessError as e: + print(f"āŒ Error: {e}") + # Don't exit, try to continue or let user see error + +def fix_readme(): + print("Writing README.md...") + with open("README.md", "w", encoding="utf-8") as f: + f.write(readme_content) + print("āœ… Created README.md") + + print("Deploying changes...") + run_command("git add README.md") + run_command('git commit -m "Add Hugging Face configuration"') + run_command("git push space clean_deploy:main") + print("āœ… Configuration fixed and pushed!") + +if __name__ == "__main__": + fix_readme() diff --git a/fix_vision_core.py b/fix_vision_core.py new file mode 100644 index 0000000000000000000000000000000000000000..5ab5455ebe5f3da50e3432d33ad0ce0366af50e7 --- /dev/null +++ b/fix_vision_core.py @@ -0,0 +1,90 @@ +import os +import subprocess + +def fix_vision_core(): + # Content for agents/visual_analyst.py + content = """import os +import json +import asyncio +import 
google.generativeai as genai +from PIL import Image +from dotenv import load_dotenv + +load_dotenv() + +class VisualAnalyst: + def __init__(self): + api_key = os.getenv("GEMINI_API_KEY") + if not api_key: + print("āš ļø GEMINI_API_KEY missing") + + genai.configure(api_key=api_key) + # Use the modern, faster Flash model + self.model = genai.GenerativeModel('gemini-1.5-flash') + + async def analyze_image(self, image_path: str): + print(f"šŸ‘ļø Analyzing image: {image_path}") + + try: + # 1. Load image properly with Pillow (Fixes format issues) + img = Image.open(image_path) + + # 2. Define the prompt + prompt = \"\"\" + Analyze this product image for an e-commerce listing. + Return ONLY a raw JSON object (no markdown formatting) with this structure: + { + "main_color": "string", + "product_type": "string", + "design_style": "string (minimalist, streetwear, vintage, etc)", + "visual_features": ["list", "of", "visible", "features"], + "suggested_title": "creative product title", + "condition_guess": "new/used" + } + \"\"\" + + # 3. Run in a thread to prevent blocking (Sync to Async wrapper) + response = await asyncio.to_thread( + self.model.generate_content, + [prompt, img] + ) + + # 4. 
Clean and Parse JSON + text_response = response.text.replace('```json', '').replace('```', '').strip() + return json.loads(text_response) + except Exception as e: + print(f"āŒ Vision Error: {e}") + # Return a Safe Fallback (Simulation) + return { + "main_color": "Unknown", + "product_type": "Unidentified Item", + "design_style": "Standard", + "visual_features": ["Error analyzing image"], + "suggested_title": "Manual Review Needed", + "condition_guess": "New" + } +""" + # Write the file + os.makedirs("agents", exist_ok=True) + with open("agents/visual_analyst.py", "w", encoding="utf-8") as f: + f.write(content) + print("āœ… agents/visual_analyst.py updated.") + + # Git operations + print("šŸš€ Pushing to HuggingFace...") + commands = [ + ["git", "add", "agents/visual_analyst.py"], + ["git", "commit", "-m", "Fix vision core and error handling"], + ["git", "push", "space", "clean_deploy:main"] + ] + + for cmd in commands: + try: + print(f"Running: {' '.join(cmd)}") + subprocess.run(cmd, check=True) + except subprocess.CalledProcessError as e: + print(f"āš ļø Command failed: {e}") + # Continue even if commit fails (e.g. prompt already applied) + +if __name__ == "__main__": + fix_vision_core() diff --git a/install_gh.py b/install_gh.py new file mode 100644 index 0000000000000000000000000000000000000000..e421ce8ccbd0c7a42306acfcb2b2e142a6d9317d --- /dev/null +++ b/install_gh.py @@ -0,0 +1,33 @@ +import shutil +import subprocess +import sys + +def main(): + # Check Status + gh_path = shutil.which('gh') + + if not gh_path: + # Install + print("GitHub CLI not found. Installing via winget...") + try: + subprocess.run(['winget', 'install', '--id', 'GitHub.cli', '-e'], check=True) + except subprocess.CalledProcessError as e: + print(f"Error installing GitHub CLI: {e}") + return + except FileNotFoundError: + print("Error: 'winget' command not found. 
Please ensure App Installer is installed.") + return + + # Post-Install Instructions (Runs if installed or if installation succeeded) + print("\n" + "="*40) + try: + # Attempt to use ANSI codes for bold, may not work in all Windows terminals but works in VS Code / modern Windows Terminal + print("āœ… \033[1mGitHub CLI is ready!\033[0m") + except: + print("āœ… GitHub CLI is ready!") + print("="*40) + print("āš ļø IMPORTANT: You must now restart your terminal to reload your PATH.") + print("šŸ‘‰ After restarting, run this command to log in: gh auth login") + +if __name__ == "__main__": + main() diff --git a/launcher.py b/launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..a9338e59ad7cd3e1476345e38a7159ccebb104e4 --- /dev/null +++ b/launcher.py @@ -0,0 +1,47 @@ +import subprocess +import time +import webbrowser +import os +import urllib.request +import sys + +def is_server_ready(url): + try: + with urllib.request.urlopen(url) as response: + return response.getcode() == 200 + except Exception: + return False + +def main(): + print("šŸš€ Starting Engine...") + + # Definition of the server command + # Using sys.executable to ensure we use the same python interpreter + server_command = [sys.executable, "-m", "uvicorn", "main:app", "--reload"] + + # Start the server as a subprocess + process = subprocess.Popen(server_command, cwd=os.getcwd()) + + server_url = "http://localhost:8000" + + # Poll for server availability + try: + while not is_server_ready(server_url): + time.sleep(1) + + print("āœ… Dashboard Launched") + + # Open the dashboard in the default web browser + dashboard_path = os.path.abspath("dashboard.html") + webbrowser.open(f"file:///{dashboard_path}") + + # Keep the script running to maintain the server process + process.wait() + + except KeyboardInterrupt: + print("\nšŸ›‘ Shutting down...") + process.terminate() + process.wait() + +if __name__ == "__main__": + main() diff --git a/legacy/trend_spotter.py b/legacy/trend_spotter.py 
new file mode 100644 index 0000000000000000000000000000000000000000..c0d5fbd14e22e5bebe3bc511e61c9a75a8166c69 --- /dev/null +++ b/legacy/trend_spotter.py @@ -0,0 +1,46 @@ +import os +import json +import google.generativeai as genai +from dotenv import load_dotenv + +load_dotenv() + +class TrendSpotter: + def __init__(self): + self.api_key = os.getenv("GEMINI_API_KEY") + if self.api_key: + genai.configure(api_key=self.api_key) + self.model = genai.GenerativeModel('gemini-flash-latest') + self.has_key = True + else: + self.model = None + self.has_key = False + + def get_trends(self, niche: str): + if not self.has_key: + print("āš ļø No API Key found, using mock data") + return ['Retro Cat Mom', 'Pixel Art Kitty', 'Cattitude'] + + try: + prompt = f"Generate 5 short, witty, and viral t-shirt text concepts for the niche: {niche}. Return strictly a JSON list of strings." + response = self.model.generate_content(prompt) + + content = response.text + # Clean up markdown formatting if present + if "```json" in content: + content = content.replace("```json", "").replace("```", "") + elif "```" in content: + content = content.replace("```", "") + + try: + trends = json.loads(content) + if isinstance(trends, list): + return trends + else: + return [content] + except json.JSONDecodeError: + return [content] + + except Exception as e: + print(f"āŒ Error calling Gemini: {e}") + return ['Retro Cat Mom', 'Pixel Art Kitty', 'Cattitude'] diff --git a/legacy/visionary.py b/legacy/visionary.py new file mode 100644 index 0000000000000000000000000000000000000000..0fc57e93709ec55c24ba07cb70c28bcf7738bfaf --- /dev/null +++ b/legacy/visionary.py @@ -0,0 +1,33 @@ +import os +import google.generativeai as genai +from dotenv import load_dotenv + +load_dotenv() + +class Visionary: + def __init__(self): + self.api_key = os.getenv("GEMINI_API_KEY") + if self.api_key: + genai.configure(api_key=self.api_key) + self.model = genai.GenerativeModel('gemini-flash-latest') + self.has_key = True + else: 
+ self.model = None + self.has_key = False + + def generate_art_prompt(self, slogan: str, niche: str) -> str: + if not self.has_key: + return "Mock visualization: A cute retro cat wearing sunglasses, vector art, pastel colors" + + try: + system_prompt = ( + f'You are an expert T-shirt Designer. Create a high-quality AI art generation prompt ' + f'for the slogan: "{slogan}" in the niche: "{niche}". ' + f'Specify style (e.g., vector, retro, kawaii), colors, and composition. ' + f'Keep it under 40 words.' + ) + response = self.model.generate_content(system_prompt) + return response.text.strip() + except Exception as e: + print(f"āŒ Error calling Gemini: {e}") + return "Error generating prompt" diff --git a/main.py b/main.py new file mode 100644 index 0000000000000000000000000000000000000000..2c33de27fb6cde3c0e69bbb98965835282b2977e --- /dev/null +++ b/main.py @@ -0,0 +1,90 @@ +import os +import httpx +import asyncio +from fastapi import FastAPI, UploadFile, File +from fastapi.responses import HTMLResponse, JSONResponse +from fastapi.staticfiles import StaticFiles +from dotenv import load_dotenv + +# Import Phase 2 & 3 Agents +from agents.visual_analyst import VisualAnalyst +from agents.memory_agent import MemoryAgent +from agents.writer_agent import WriterAgent + +load_dotenv() +app = FastAPI() + +# --- Global Agent Initialization --- +print("šŸš€ StyleSync AI: Initializing Agents...") +try: + visual_agent = VisualAnalyst() + memory_agent = MemoryAgent() # Connects to 'stylesync-index-v2' + writer_agent = WriterAgent() + print("āœ… All Agents Online & Ready.") +except Exception as e: + print(f"āŒ Critical Startup Error: {e}") + +@app.get("/", response_class=HTMLResponse) +async def read_root(): + try: + with open("dashboard.html", "r", encoding="utf-8") as f: + return f.read() + except FileNotFoundError: + return "

Error: dashboard.html not found. Run setup scripts first.

" + +@app.post("/generate-catalog") +async def generate_catalog(file: UploadFile = File(...)): + file_path = f"temp_{file.filename}" + try: + # 1. Save File Temporarily + with open(file_path, "wb") as f: + f.write(await file.read()) + + # 2. Vision (The Eyes) + print(f"šŸ‘ļø Analyzing: {file.filename}") + visual_data = await visual_agent.analyze_image(file_path) + + # 3. Memory (The Context) + # Create a search query from visual tags + search_query = f"{visual_data.get('design_style', '')} {visual_data.get('product_type', '')}" + print(f"🧠 Recalling trends for: {search_query}") + seo_keywords = memory_agent.retrieve_keywords(search_query) + + # 4. Writer (The Brain) + print("āœļø Drafting copy...") + listing = writer_agent.write_listing(visual_data, seo_keywords) + + # 5. Construct Payload + response_data = { + "status": "success", + "visual_analysis": visual_data, + "market_trends": seo_keywords, + "final_listing": listing + } + + # 6. Automation Trigger (n8n) + n8n_url = os.getenv("N8N_WEBHOOK_URL") + if n8n_url: + asyncio.create_task(trigger_webhook(n8n_url, response_data)) + + return JSONResponse(content=response_data) + + except Exception as e: + print(f"āŒ Pipeline Error: {e}") + return JSONResponse(content={"error": str(e)}, status_code=500) + finally: + if os.path.exists(file_path): + os.remove(file_path) + +async def trigger_webhook(url, data): + """Fire-and-forget webhook to n8n""" + try: + async with httpx.AsyncClient() as client: + await client.post(url, json=data, timeout=5.0) + print(f"šŸš€ Webhook sent to n8n") + except Exception as e: + print(f"āš ļø Webhook failed: {e}") + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=7860) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f540a9a1e904b2b515523d81ffbedf19ece4e7a --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +fastapi==0.109.0 +uvicorn==0.27.0 +python-multipart +python-dotenv 
+google-generativeai>=0.8.3 +groq +pinecone>=3.0.0 +langchain +langchain-community +langchain-google-genai +langchain-groq +pillow +huggingface_hub[cli] +httpx diff --git a/restore_full_brain.py b/restore_full_brain.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d1b2b729d26ad5152deef536fca2cdd66c525c --- /dev/null +++ b/restore_full_brain.py @@ -0,0 +1,122 @@ +import os +import subprocess + +def restore_main(): + content = """import os +from fastapi import FastAPI, UploadFile, File +from fastapi.responses import HTMLResponse, JSONResponse +from dotenv import load_dotenv + +# Import Agents +from agents.visual_analyst import VisualAnalyst +from agents.memory_agent import MemoryAgent +from agents.writer_agent import WriterAgent + +load_dotenv() +app = FastAPI() + +# Initialize All Agents +try: + visual_agent = VisualAnalyst() + memory_agent = MemoryAgent() + writer_agent = WriterAgent() + + # Seed memory on startup + memory_agent.seed_database() + print("āœ… All Agents Online") +except Exception as e: + print(f"āš ļø Warning: Some agents failed to load: {e}") + +# 1. SERVE DASHBOARD AT ROOT +@app.get("/", response_class=HTMLResponse) +async def read_root(): + try: + with open("dashboard.html", "r") as f: + return f.read() + except FileNotFoundError: + return "Error: dashboard.html not found" + +# 2. THE MAIN ORCHESTRATOR ENDPOINT +@app.post("/generate-catalog") +async def generate_catalog(file: UploadFile = File(...)): + try: + # A. Save Temp File + os.makedirs("uploads", exist_ok=True) + file_path = f"uploads/{file.filename}" + with open(file_path, "wb") as f: + f.write(await file.read()) + + # B. Visual Analysis (The Eyes) + visual_data = await visual_agent.analyze_image(file_path) + + # C. Memory Search (The Context) + # Create a search query from visual data + query = f"{visual_data.get('main_color', '')} {visual_data.get('product_type', 'product')}" + seo_keywords = memory_agent.retrieve_keywords(query) + + # D. 
Write Copy (The Brain) + listing = writer_agent.write_listing(visual_data, seo_keywords) + + # Cleanup + if os.path.exists(file_path): + os.remove(file_path) + + # Return Full Data Structure + return JSONResponse(content={ + "visual_data": visual_data, + "seo_keywords": seo_keywords, + "listing": listing + }) + except Exception as e: + print(f"Error: {e}") + return JSONResponse(content={"error": str(e)}, status_code=500) + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=7860) +""" + with open("main.py", "w", encoding="utf-8") as f: + f.write(content) + print("āœ… main.py restored with full agent logic.") + +def update_dashboard(): + try: + with open("dashboard.html", "r", encoding="utf-8") as f: + content = f.read() + + # Replace localhost URL with relative path + new_content = content.replace("http://localhost:8000/generate-catalog", "/generate-catalog") + + with open("dashboard.html", "w", encoding="utf-8") as f: + f.write(new_content) + print("āœ… dashboard.html updated for cloud deployment.") + except Exception as e: + print(f"āŒ Error updating dashboard.html: {e}") + +def deploy(): + print("šŸš€ Starting Deployment...") + commands = [ + ["git", "add", "main.py", "dashboard.html"], + ["git", "commit", "-m", "Restore full brain logic and fix dashboard URL"], + ["git", "push", "space", "clean_deploy:main"] + ] + + for cmd in commands: + try: + print(f"Running: {' '.join(cmd)}") + result = subprocess.run(cmd, check=True, capture_output=True, text=True) + print(result.stdout) + except subprocess.CalledProcessError as e: + print(f"āŒ Error running command: {' '.join(cmd)}") + print(e.stderr) + # Don't break on commit error as it might be empty + if "nothing to commit" in e.stderr: + continue + # For other errors we might want to continue or stop, but let's try to proceed + print("āœ… Deployment script finished.") + +if __name__ == "__main__": + print("šŸ”§ Restoring Full Brain...") + restore_main() + update_dashboard() + 
deploy() diff --git a/results/merch_batch_Coffee_20251214_052033.csv b/results/merch_batch_Coffee_20251214_052033.csv new file mode 100644 index 0000000000000000000000000000000000000000..c1f0a80e0566c43a7d8cb00773880893eb903857 --- /dev/null +++ b/results/merch_batch_Coffee_20251214_052033.csv @@ -0,0 +1,6 @@ +Niche,Slogan,Art Prompt +Coffee,Espresso Patronum.,"Neo-traditional digital illustration: Coffee portafilter emitting a glowing blue steam Patronus. Rich brown, teal, and gold colors. Centered, detailed T-shirt graphic. (23 words)" +Coffee,This Is My Resting Coffee Face.,"**Retro 1970s cartoon T-shirt design, centered.** Grumpy, steaming mug face integrated with bold text. Colors: Espresso, Cream, Burnt Orange. **Vector.**" +Coffee,My Blood Type Is Dark Roast.,"Retro screen print vector. Dripping anatomical heart as a coffee bean. Deep espresso, charcoal, and cream colors. Bold, centered T-shirt design." +Coffee,Humaning Is Hard.,Error generating prompt +Coffee,Powered By Anxiety & Arabica.,Error generating prompt diff --git a/results/merch_batch_Coffee_20251214_052441.csv b/results/merch_batch_Coffee_20251214_052441.csv new file mode 100644 index 0000000000000000000000000000000000000000..7f8381c28fd2d2bda077ce79ead91b3ca8e5f631 --- /dev/null +++ b/results/merch_batch_Coffee_20251214_052441.csv @@ -0,0 +1,6 @@ +Niche,Slogan,Art Prompt +Coffee,Thou Shalt Not Decaf.,"High-contrast woodcut engraving of sacred coffee tablets. Espresso, cream, gold palette. Centered, dramatic lighting. Vector-ready." +Coffee,Pre-Coffee: Danger Zone.,"Mid-Century modern warning sign, centered. Bold text on distressed yellow. Black/crimson palette. Vector, high contrast, coffee bomb symbol." +Coffee,My Blood Type Is Coffee.,"Vintage distressed screen print. IV blood bag filled with coffee, centered. Espresso, cream, and black palette. 
Typography reads: ""My Blood Type Is Coffee.""" +Coffee,"I'm Not Addicted, I'm Committed.","Retro-vector graphic: Coffee cup with stylized steam waves. Burnt orange, espresso, and cream palette. Centered, bold T-shirt design." +Coffee,Warning: May Talk About Coffee Too Much.,Error generating prompt diff --git a/results/merch_batch_Coffee_20251214_052609.csv b/results/merch_batch_Coffee_20251214_052609.csv new file mode 100644 index 0000000000000000000000000000000000000000..8fcc4073e7a26390e633aa14dadabb47b3061f78 --- /dev/null +++ b/results/merch_batch_Coffee_20251214_052609.csv @@ -0,0 +1,6 @@ +Niche,Slogan,Art Prompt +Coffee,Must Caffeinate to Human.,"Vintage tattoo flash style. Skull morphing into steaming coffee cup, centered. Warm brown, cream, black palette. T-shirt ready graphic." +Coffee,My Blood Type Is Arabica.,Error generating prompt +Coffee,Decaf Is A Conspiracy.,Error generating prompt +Coffee,Powered by Espresso and Anxiety.,Error generating prompt +Coffee,I'll Sleep When The Coffee Runs Out.,Error generating prompt diff --git a/results/merch_batch_Coffee_20251214_052808.csv b/results/merch_batch_Coffee_20251214_052808.csv new file mode 100644 index 0000000000000000000000000000000000000000..4d2451d6353381da86a9c628f569dd0c13971e1b --- /dev/null +++ b/results/merch_batch_Coffee_20251214_052808.csv @@ -0,0 +1,4 @@ +Niche,Slogan,Art Prompt +Coffee,Retro Cat Mom,Error generating prompt +Coffee,Pixel Art Kitty,Error generating prompt +Coffee,Cattitude,Error generating prompt diff --git a/scan_vision_models.py b/scan_vision_models.py new file mode 100644 index 0000000000000000000000000000000000000000..790809b19de08f78ff5c11be4e162a36aa33c9d9 --- /dev/null +++ b/scan_vision_models.py @@ -0,0 +1,44 @@ +import os +import requests +import json +from dotenv import load_dotenv + +load_dotenv() + +api_key = os.getenv("HF_TOKEN") +headers = {"Authorization": f"Bearer {api_key}"} + +candidates = [ + "HuggingFaceM4/idefics2-8b", + "HuggingFaceM4/idefics2-8b-chatty", 
+ "llava-hf/llava-1.5-7b-hf", + "llava-hf/llava-v1.6-mistral-7b-hf", + "microsoft/Phi-3-vision-128k-instruct", + "NousResearch/Nous-Hermes-2-Vision-Alpha", + "OpenGVLab/InternVL-Chat-V1-5", + "Qwen/Qwen2.5-VL-7B-Instruct", + "google/paligemma-3b-mix-224" +] + +print("Scanning for working Serverless Vision Models...\n") + +for model in candidates: + url = f"https://router.huggingface.co/models/{model}" + print(f"Testing: {model}") + try: + # Simple probe payload + response = requests.post(url, headers=headers, json={"inputs": "Hello"}) + if response.status_code == 200: + print(f"āœ… WORKS! {model} (Status: 200)") + print(f"Response: {response.text[:100]}...") + elif response.status_code == 400: + # 400 might mean it Exists but input format is wrong (which is good!) + print(f"āš ļø EXISTS but 400 (Bad Request): {model}") + print(f"Response: {response.text[:100]}...") + elif response.status_code == 404: + print(f"āŒ 404 Not Found: {model}") + else: + print(f"āŒ Error {response.status_code}: {model}") + except Exception as e: + print(f"āŒ Exception: {e}") + print("-" * 30) diff --git a/setup_dashboard.py b/setup_dashboard.py new file mode 100644 index 0000000000000000000000000000000000000000..6a68ed5ce19312847ef5b96a581f040f14e3ea3d --- /dev/null +++ b/setup_dashboard.py @@ -0,0 +1,140 @@ +import re +import os + +def automate_dashboard_setup(): + input_filename = "code.html" + output_filename = "dashboard.html" + + # 1. Check if the source file exists + if not os.path.exists(input_filename): + print(f"āŒ Error: '{input_filename}' not found. 
Please save your Stitch UI code as '{input_filename}' first.") + return + + print(f"Reading {input_filename}...") + with open(input_filename, "r", encoding="utf-8") as f: + html = f.read() + + # --- Step 1: Inject IDs into the HTML elements --- + + print("Injecting IDs for interactivity...") + + # Inject ID for the Drop Zone & Hidden Input + # We look for the dashed border div that acts as the drop zone + if 'border-dashed' in html: + html = re.sub( + r'(]*border-dashed[^>]*>)', + r'\1\n', + html, count=1 + ) + html = html.replace('border-dashed', 'id="dropZone" border-dashed') + + # Inject ID for the "Browse Files" button + if 'Browse Files' in html: + html = re.sub(r'(]*>)(\s*Browse Files)', r'