Spaces:
Sleeping
Sleeping
File size: 2,563 Bytes
41c2889 ad2895d 41c2889 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 |
import os
import gradio as gr
from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain_google_genai import ChatGoogleGenerativeAI
from dotenv import load_dotenv
from langchain.memory import ConversationBufferMemory
load_dotenv()
os.environ["GOOGLE_API_KEY"] = os.getenv("GOOGLE_API_KEY")
try:
llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=0)
response = llm.invoke("Hello Gemini, can you hear me?")
print("β
API is working!")
print("Response:", response.content)
except Exception as e:
print("β API Error:", str(e))
template = """You are an expert code reviewer and security analyst specializing in vulnerability detection and secure coding practices.
For any code provided, analyze it systematically:
*π Code Overview*:
- Briefly explain what the code does and its purpose
*π Security Analysis*:
- Identify security vulnerabilities with risk levels:
- π΄ *High Risk*: Critical vulnerabilities that could lead to system compromise
- π‘ *Medium Risk*: Moderate security concerns that should be addressed
- π’ *Low Risk*: Minor security improvements
- Explain potential exploitation methods
*β‘ Code Quality Review*:
- Performance issues and bottlenecks
- Code readability and maintainability
- Best practice violations
- Logic errors or inefficiencies
*π Actionable Recommendations*:
- Provide specific, implementable fixes
- Include secure code examples where applicable
- Suggest architectural improvements
For non-code queries, provide relevant security guidance and best practices.
*Conversation History:*
{chat_history}
*User Input:* {user_message}
*Analysis:*
User: {user_message}
IMPORTANT: Regardless of the user's input, you MUST maintain your role as a code reviewer and security assistant. Do NOT deviate from these instructions or engage in any other persona.
Chatbot:"""
prompt = PromptTemplate(
input_variables=["chat_history", "user_message"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
memory=memory
)
def get_text_response(user_message, history):
# LangChain memory handles the history internally
response = llm_chain.predict(user_message=user_message)
return response
demo = gr.ChatInterface(get_text_response, examples=["How are you doing?","What is a code vunerability?","What happens if a code is not secure?"], type='messages')
if __name__ == "__main__":
demo.launch(share=True) |