"""Code Security Analyzer — a Gradio chat app that reviews code for
vulnerabilities using Google Gemini via LangChain.

Designed for Hugging Face Spaces: the API key comes from a repository
secret named GOOGLE_API_KEY. If the key is missing or the API test call
fails, the app runs in a "demo mode" that only shows setup instructions.
"""

import logging
import os

import gradio as gr
# NOTE: modern LangChain removed the top-level re-exports; import from the
# canonical submodules instead of `from langchain import LLMChain, ...`.
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI

# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def load_api_key():
    """Load the Google API key from the environment (HF Spaces secrets).

    Returns:
        str: The API key.

    Raises:
        ValueError: If GOOGLE_API_KEY is not set; the message carries
            step-by-step setup instructions for Hugging Face Spaces.
    """
    # In Hugging Face Spaces, use secrets instead of .env files
    api_key = os.getenv("GOOGLE_API_KEY")
    if not api_key:
        raise ValueError("""
        GOOGLE_API_KEY not found in environment variables.

        To fix this in Hugging Face Spaces:
        1. Go to your Space settings
        2. Click on 'Repository secrets'
        3. Add GOOGLE_API_KEY with your Google API key value
        4. Restart the Space
        """)
    return api_key


def initialize_llm():
    """Initialize the Gemini chat model and smoke-test the connection.

    Returns:
        ChatGoogleGenerativeAI | None: The ready model, or None when the
        key is missing or the test call fails (caller falls back to demo
        mode instead of crashing the Space).
    """
    try:
        api_key = load_api_key()
        os.environ["GOOGLE_API_KEY"] = api_key

        llm = ChatGoogleGenerativeAI(
            model="gemini-2.5-flash",
            temperature=0,
            max_tokens=2048,
        )

        # Test the connection before wiring the chain so a bad key is
        # detected at startup rather than on the first user message.
        response = llm.invoke("Test connection - respond with 'OK'")
        logger.info("✅ API connection successful!")
        logger.info(f"Response: {response.content}")
        return llm

    except Exception as e:
        logger.error(f"❌ API Error: {e}")
        # Return None so the app can run in demo mode if the API fails.
        return None


# Enhanced prompt template for systematic security-focused code review.
template = """You are an expert code reviewer and security analyst specializing in vulnerability detection and secure coding practices.

For any code provided, analyze it systematically:

**📋 Code Overview**:
- Briefly explain what the code does and its purpose

**🔒 Security Analysis**:
- Identify security vulnerabilities with risk levels:
  - 🔴 **High Risk**: Critical vulnerabilities that could lead to system compromise
  - 🟡 **Medium Risk**: Moderate security concerns that should be addressed
  - 🟢 **Low Risk**: Minor security improvements
- Explain potential exploitation methods

**⚡ Code Quality Review**:
- Performance issues and bottlenecks
- Code readability and maintainability
- Best practice violations
- Logic errors or inefficiencies

**🛠️ Actionable Recommendations**:
- Provide specific, implementable fixes
- Include secure code examples where applicable
- Suggest architectural improvements

For non-code queries, provide relevant security guidance and best practices.

**Conversation History:**
{chat_history}

**User Input:**
{user_message}

**Analysis:**"""


def create_llm_chain():
    """Create the LLM chain with conversation memory.

    Returns:
        LLMChain | None: The configured chain, or None when the LLM could
        not be initialized (demo mode) or chain construction fails.
    """
    try:
        llm = initialize_llm()
        if llm is None:
            return None

        prompt = PromptTemplate(
            input_variables=["chat_history", "user_message"],
            template=template,
        )

        # return_messages must be False: the string PromptTemplate renders
        # {chat_history} as plain text, so the memory has to yield a text
        # transcript rather than Message objects (which would be repr()'d).
        memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=False,
        )

        return LLMChain(
            llm=llm,
            prompt=prompt,
            memory=memory,
        )

    except Exception as e:
        logger.error(f"Failed to create LLM chain: {e}")
        return None


def get_text_response(user_message, history):
    """Generate the assistant reply for one chat turn.

    Args:
        user_message: Raw text from the chat box.
        history: Prior chat messages supplied by gr.ChatInterface
            (unused — the chain keeps its own memory).

    Returns:
        str: The model's analysis, or a user-facing error/help message.
    """
    try:
        # Check if LLM chain is available (demo mode when not configured).
        if llm_chain is None:
            return """
🚫 **API Configuration Error**

The Google Gemini API is not properly configured.

To use this Space:
1. **Fork this Space** to your own Hugging Face account
2. Go to **Settings** → **Repository secrets**
3. Add `GOOGLE_API_KEY` with your Google AI Studio API key
4. Get your API key from: https://makersuite.google.com/app/apikey
5. **Restart the Space**

This is a demo of a code security analyzer that would normally use Google's Gemini AI.
"""

        # Validate input
        if not user_message or not user_message.strip():
            return "⚠️ Please provide code to analyze or ask a security-related question."

        # Check for potentially sensitive information (log only — the user
        # may legitimately be asking about credential handling).
        sensitive_keywords = ['password', 'api_key', 'secret', 'token']
        if any(keyword in user_message.lower() for keyword in sensitive_keywords):
            logger.warning("User input contains potentially sensitive information")

        response = llm_chain.predict(user_message=user_message.strip())
        return response

    except Exception as e:
        logger.error(f"Error generating response: {e}")
        return f"""
🚫 **Error Analysis**

I encountered an error while analyzing your request: {str(e)}

**Possible solutions:**
1. Check if your Google API key is valid
2. Ensure you have credits remaining in your Google AI account
3. Try again with a shorter input
4. Contact the Space owner if the issue persists
"""


def create_interface():
    """Create the Gradio chat interface optimized for Hugging Face Spaces.

    Returns:
        gr.ChatInterface: The configured (not yet launched) interface.
    """
    examples = [
        "Review this SQL query for injection vulnerabilities: SELECT * FROM users WHERE id = '" + "user_input" + "'",
        "Analyze this Python authentication function:\n```python\ndef login(username, password):\n    if username == 'admin' and password == 'password123':\n        return True\n    return False\n```",
        "What are the OWASP Top 10 web application security risks?",
        "How can I securely store passwords in my application?",
        "Check this JavaScript for XSS vulnerabilities: document.innerHTML = userInput",
    ]

    # Custom CSS for better appearance on HF
    custom_css = """
    .gradio-container {
        max-width: 1200px !important;
    }
    .message-row {
        justify-content: space-between !important;
    }
    footer {
        visibility: hidden;
    }
    """

    interface = gr.ChatInterface(
        get_text_response,
        examples=examples,
        title="🔒 Code Security Analyzer & Vulnerability Scanner",
        description="""
**Professional code security analysis powered by Google Gemini AI**

✅ **Features:**
- 🔍 Vulnerability detection with risk assessment
- 📊 Code quality review and best practices analysis
- 🛡️ Secure coding recommendations
- 🌐 Multi-language support (Python, JavaScript, Java, C++, etc.)
- 📚 OWASP compliance guidance

⚠️ **Security Notice:** Do not submit production secrets, passwords, or sensitive data.

---
**🚀 To use this Space:**
1. Fork this Space to your account
2. Add your Google AI Studio API key in Settings → Repository secrets
3. Set the secret name as `GOOGLE_API_KEY`
4. Get your API key: https://makersuite.google.com/app/apikey
        """,
        type='messages',
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="gray",
            font=gr.themes.GoogleFont("Inter"),
        ),
        css=custom_css,
        analytics_enabled=False,  # Disable analytics for HF Spaces
        cache_examples=False,  # Disable caching for better performance
    )

    return interface


# Initialize the LLM chain at import time so the first request is fast.
llm_chain = None
try:
    llm_chain = create_llm_chain()
    if llm_chain:
        logger.info("🚀 Code Security Analyzer initialized successfully!")
    else:
        logger.warning("⚠️ Running in demo mode - API not configured")
except Exception as e:
    logger.error(f"Failed to initialize application: {e}")

# Create and launch the interface
if __name__ == "__main__":
    try:
        demo = create_interface()
        # NOTE: `enable_queue` was removed in Gradio 4.x (queueing is on by
        # default), so it must not be passed to launch().
        demo.launch(
            show_error=True,
            share=False,  # Set to False for HF Spaces
            max_threads=10,  # Limit concurrent users
        )
    except Exception as e:
        logger.error(f"Failed to launch application: {e}")

        # Still try to launch a basic interface that explains the failure.
        def error_interface(message, history):
            return f"Application failed to initialize: {str(e)}"

        # The fallback must actually be given a fn — gr.ChatInterface()
        # with no callable raises TypeError and would mask the real error.
        gr.ChatInterface(error_interface, type='messages').launch()