Bhanumani12 committed on
Commit
2487f72
·
verified ·
1 Parent(s): 12c092b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -29
app.py CHANGED
@@ -1,6 +1,18 @@
 
1
  import random
 
 
 
2
 
3
- # ---------- Define Mapping for Model Labels ----------
 
 
 
 
 
 
 
 
4
  label_to_issue_type = {
5
  "LABEL_0": "Performance",
6
  "LABEL_1": "Error",
@@ -8,36 +20,33 @@ label_to_issue_type = {
8
  "LABEL_3": "Best Practice"
9
  }
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  def analyze_code(code):
12
  if not code.strip():
13
  return "No code provided.", "", ""
14
 
15
- # --- Simulate classification labels for demo/testing ---
16
- simulated_labels = ["LABEL_0", "LABEL_1", "LABEL_2", "LABEL_3"]
17
- label = random.choice(simulated_labels)
18
- issue_type = label_to_issue_type.get(label, "Error")
19
-
20
- # Simulated suggestion/severity based on issue type
21
- suggestions = {
22
- "Performance": "Consider optimizing loops and database access.",
23
- "Error": "Add proper error handling and null checks.",
24
- "Security": "Avoid dynamic SOQL. Use binding variables.",
25
- "Best Practice": "Refactor for readability and use bulk-safe patterns."
26
- }
27
- severities = {
28
- "Performance": "Medium",
29
- "Error": "High",
30
- "Security": "High",
31
- "Best Practice": "Low"
32
- }
33
-
34
- suggestion = suggestions.get(issue_type)
35
- severity = severities.get(issue_type)
36
-
37
- # Optional: Logging for debugging
38
- print(f"[DEBUG] Label: {label} → Issue: {issue_type} | Severity: {severity}")
39
-
40
- # Log to Salesforce
41
  try:
42
  sf.CodeReviewResult__c.create({
43
  "Name": f"Review_{issue_type}",
@@ -45,9 +54,80 @@ def analyze_code(code):
45
  "IssueType__c": issue_type,
46
  "Suggestion__c": suggestion,
47
  "Severity__c": severity
48
- # You can also add Developer__c here if needed
49
  })
50
  except Exception as e:
51
- suggestion += f" (⚠️ Salesforce error: {str(e)})"
52
 
53
  return issue_type, suggestion, severity
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
  import random
3
+ import gradio as gr
4
+ from transformers import pipeline
5
+ from simple_salesforce import Salesforce
6
 
7
# ---------- Salesforce Login ----------
# Credentials come from environment variables so secrets stay out of source.
# Connecting is best-effort: if credentials are missing or invalid (e.g. a
# local demo run), keep the app usable instead of crashing at import time —
# every feature below already wraps its sf.* call in try/except and surfaces
# the logging failure in its own output.
try:
    sf = Salesforce(
        username=os.getenv("SF_USERNAME"),
        password=os.getenv("SF_PASSWORD"),
        security_token=os.getenv("SF_SECURITY_TOKEN"),
        domain="login",  # production/developer login endpoint
    )
except Exception as e:
    # sf stays defined (as None); downstream sf.<Object>.create(...) raises
    # AttributeError, which the per-feature handlers turn into a warning.
    print(f"[WARN] Salesforce login failed: {e}")
    sf = None
14
+
15
# ---------- Label Mapping ----------
# Single source of truth: one row per classifier label —
# (raw model label, issue type, remediation tip, severity).
_ISSUE_ROWS = (
    ("LABEL_0", "Performance", "Consider optimizing loops and database access.", "Medium"),
    ("LABEL_1", "Error", "Add proper error handling and null checks.", "High"),
    ("LABEL_2", "Security", "Avoid dynamic SOQL. Use binding variables.", "High"),
    ("LABEL_3", "Best Practice", "Refactor for readability and use bulk-safe patterns.", "Low"),
)

# The three lookup dicts the rest of the module consumes, derived from the table.
label_to_issue_type = {raw: issue for raw, issue, _, _ in _ISSUE_ROWS}
suggestions = {issue: tip for _, issue, tip, _ in _ISSUE_ROWS}
severities = {issue: level for _, issue, _, level in _ISSUE_ROWS}
35
+
36
# ---------- Load QnA Model ----------
# flan-t5-base backs the "Ask AI" tab only; code classification below is
# still simulated — no classifier model is actually loaded here.
qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-base")
38
+
39
# ---------- Code Analyzer ----------
def analyze_code(code):
    """Review a code snippet and return (issue_type, suggestion, severity).

    Classification is currently simulated with a random label; the result is
    also logged to Salesforce on a best-effort basis.
    """
    if not code.strip():
        return "No code provided.", "", ""

    # Simulate classification: pick a random model label, then resolve the
    # human-facing fields through the module-level lookup tables.
    picked = random.choice(list(label_to_issue_type))
    issue_type = label_to_issue_type[picked]
    suggestion = suggestions[issue_type]
    severity = severities[issue_type]

    # Best-effort audit trail: a Salesforce failure is appended to the
    # suggestion text instead of breaking the review.
    record = {
        "Name": f"Review_{issue_type}",
        "CodeSnippet__c": code,  # NOTE(review): field name reconstructed from diff context — verify
        "IssueType__c": issue_type,
        "Suggestion__c": suggestion,
        "Severity__c": severity,
    }
    try:
        sf.CodeReviewResult__c.create(record)
    except Exception as e:
        suggestion += f" ⚠️ Salesforce logging failed: {str(e)}"

    return issue_type, suggestion, severity
62
+
63
# ---------- Metadata Validator ----------
def validate_metadata(metadata):
    """Audit a metadata blob and return (type, issue, recommendation).

    The findings are currently hard-coded placeholders; each call also
    attempts to write an audit record to Salesforce.
    """
    if not metadata.strip():
        return "No metadata provided.", "", ""

    # Placeholder findings until a real metadata analyzer is wired in.
    metadata_type = "Field"
    finding = "Unused field detected"
    advice = "Remove it to improve performance"

    try:
        # Best-effort Salesforce audit log; a failure is surfaced in the
        # recommendation text rather than raised to the caller.
        sf.MetadataAuditLog__c.create({
            "Name": f"MetadataLog_{metadata_type}",
            "MetadataType__c": metadata_type,
            "IssueDescription__c": finding,
            "Recommendation__c": advice,
            "Status__c": "Open"
        })
    except Exception as e:
        advice += f" ⚠️ Salesforce logging failed: {str(e)}"

    return metadata_type, finding, advice
84
+
85
# ---------- Natural Language Assistant ----------
def process_nlp_query(query):
    """Answer a Salesforce/Apex question using the flan-t5 QnA pipeline."""
    question = query.strip()
    if not question:
        return "No question provided."

    prompt = (
        "You are a Salesforce Apex expert. "
        "Answer the question clearly and include governor limits, code best practices, and real examples.\n\n"
        f"Question: {question}\n\nAnswer:"
    )

    try:
        generated = qa_pipeline(prompt, max_new_tokens=256, do_sample=False)
        text = generated[0]["generated_text"]
        # Strip the echoed prompt if the model repeated it back.
        if "Answer:" in text:
            text = text.split("Answer:")[-1]
        return text.strip()
    except Exception as e:
        return f"⚠️ AI Response Error: {str(e)}"
104
+
105
# ---------- Gradio UI ----------
# Three tabs, each wired to one handler above. `demo` is the module-level
# app object launched by the __main__ guard.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Salesforce AI Code Review & Q&A")

    # Tab 1: simulated code review backed by analyze_code().
    with gr.Tab("Code Review"):
        review_input = gr.Textbox(label="Apex / LWC Code", lines=8)
        review_issue = gr.Textbox(label="Issue Type")
        review_suggestion = gr.Textbox(label="AI Suggestion")
        review_severity = gr.Textbox(label="Severity")
        review_btn = gr.Button("Analyze Code")
        review_btn.click(
            analyze_code,
            inputs=review_input,
            outputs=[review_issue, review_suggestion, review_severity],
        )

    # Tab 2: stubbed metadata audit backed by validate_metadata().
    with gr.Tab("Metadata Validation"):
        meta_input = gr.Textbox(label="Metadata XML", lines=8)
        meta_type = gr.Textbox(label="Type")
        meta_issue = gr.Textbox(label="Issue")
        meta_advice = gr.Textbox(label="Recommendation")
        meta_btn = gr.Button("Validate Metadata")
        meta_btn.click(
            validate_metadata,
            inputs=meta_input,
            outputs=[meta_type, meta_issue, meta_advice],
        )

    # Tab 3: free-form Q&A backed by process_nlp_query().
    with gr.Tab("Ask AI (Natural Language)"):
        question_input = gr.Textbox(label="Your question", lines=2, placeholder="e.g. What is a trigger?")
        answer_output = gr.Textbox(label="AI Response", lines=8)
        ask_btn = gr.Button("Ask")
        ask_btn.click(process_nlp_query, inputs=question_input, outputs=answer_output)
130
+
131
# ---------- Start UI ----------
# Launch the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()