Spaces: Runtime error
Upload 2 files
app.py
CHANGED
@@ -1,112 +1,58 @@
-from
-llm =
-"
-"
-"
-)
-
-qa = ConversationalRetrievalChain.from_llm(
-    llm,
-    retriever=retriever,
-    memory=memory,
-)
-
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.runnables import RunnablePassthrough
-
-contextualize_q_system_prompt = """Given a chat history and the latest user question \
-which might reference context in the chat history, formulate a standalone question \
-which can be understood without the chat history. Do NOT answer the question, \
-just reformulate it if needed and otherwise return it as is."""
-contextualize_q_prompt = ChatPromptTemplate.from_messages(
-    [
-        ("system", contextualize_q_system_prompt),
-        MessagesPlaceholder(variable_name="chat_history"),
-        ("human", "{question}"),
-    ]
-)
-contextualize_q_chain = contextualize_q_prompt | llm | StrOutputParser()
-
-def contextualized_question(input: dict):
-    if input.get("chat_history"):
-        return contextualize_q_chain
-    else:
-        return input["question"]
-
-rag_chain = (
-    RunnablePassthrough.assign(
-        context=contextualized_question | retriever
-    )
-    | QA_CHAIN_PROMPT
-    | llm
-)
-
-from langchain_core.messages import AIMessage, HumanMessage
-
-os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
-os.environ["WANDB_PROJECT"] = "Restaurant_ChatBot"
-
-print("Welcome to the Restaurant. How can I help you today?")
-chat_history = []
-
-def predict(message):
-    ai_msg = rag_chain.invoke({"question": message, "chat_history": chat_history})
-    idx = ai_msg.find("Answer")
-    chat_history.extend([HumanMessage(content=message), ai_msg])
-    return ai_msg[idx:]
+from langchain_groq import ChatGroq
+import os
+
+llm = ChatGroq(
+    temperature=0,
+    # Security fix: read the key from the environment instead of shipping a
+    # hardcoded API key in the source.
+    groq_api_key=os.environ["GROQ_API_KEY"],
+    model_name="llama3-8b-8192",
+)
+
+from crewai import Agent, Task, Crew
+
+Code_Quality_agent = Agent(
+    role="Senior Software Engineer",
+    goal="Provide the best quality assurance support for the code written by the members of your team",
+    backstory="You work in a financial organization, so security must be a top"
+    " concern. The goal is to identify bugs. Look for common vulnerabilities"
+    " like SQL injection, cross-site scripting (XSS), and insecure data"
+    " handling practices. Ensure the code adheres to secure coding standards"
+    " established by the organization or the industry. Scrutinize how the code"
+    " validates user input to prevent malicious attacks. For code involving"
+    " financial calculations (e.g., interest rates, risk assessments),"
+    " double-check the formulas and logic for accuracy. Consider edge cases"
+    " and ensure the code behaves as intended under various scenarios. Verify"
+    " that the code maintains data integrity throughout processing, including"
+    " checking for potential data loss, corruption, or unauthorized access."
+    " Ensure the code complies with relevant industry standards and"
+    " regulations.",
+    verbose=True,
+    allow_delegation=False,
+    llm=llm,
+)
+
+Code_Review = Task(
+    description=(
+        "You are given code as the input for a code review: {code}. "
+        "Make sure to use everything you know to provide the best support possible. "
+        "You must strive to provide a complete and accurate response."
+    ),
+    expected_output=(
+        "Clear and actionable feedback for the code author, delivered in a "
+        "collaborative and respectful tone, confirming that the code complies "
+        "with relevant industry standards and regulations."
+    ),
+    # The LLM is configured on the agent, so none is passed to the task.
+    agent=Code_Quality_agent,
+)
+
+crew = Crew(
+    agents=[Code_Quality_agent],
+    tasks=[Code_Review],
+    verbose=2,
+)
+
+def predict(code):
+    # Fix: build the dict with the string key "code" and pass the variable
+    # that is actually defined.
+    inputs = {"code": code}
+    result = crew.kickoff(inputs=inputs)
+    return result
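A quick way to smoke-test the new pipeline is to call predict directly. The following is a minimal sketch, assuming GROQ_API_KEY is exported and app.py is on the import path; the file name smoke_test.py and the sample snippet under review are hypothetical:

# smoke_test.py -- hypothetical local check, not part of the Space
from app import predict

sample_code = (
    "def withdraw(balance, amount):\n"
    "    return balance - amount\n"
)
print(predict(sample_code))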
main.py
CHANGED
@@ -10,8 +10,8 @@ app = FastAPI()
 
 @app.get("/")
 async def root():
-    return {"
+    return {"Code Review Automation": "Version 1.0 'First Draft'"}
 
-@app.post("/
-def 
+@app.post("/AutomateReview/")
+# Renamed: a handler named "predict" would shadow the imported predict
+# and recurse into itself.
+def automate_review(input_json: str):
     return predict(input_json)
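Because the handler declares a bare str parameter, FastAPI treats input_json as a query parameter. A client call might therefore look like the sketch below; the handler name automate_review comes from the fix above, and the URL and sample payload are illustrative:

import requests

# POST to the review endpoint; input_json travels as a query parameter.
resp = requests.post(
    "http://localhost:8000/AutomateReview/",
    params={"input_json": "def add(a, b): return a + b"},
)
print(resp.text)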