Bhanumani12 committed on
Commit
24214ac
·
verified ·
1 Parent(s): 94788c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -48
app.py CHANGED
@@ -11,51 +11,52 @@ sf = Salesforce(
11
  domain="login"
12
  )
13
 
14
- # ---------- Load HuggingFace Pipelines ----------
 
 
 
 
 
 
 
 
15
  code_analyzer = pipeline("text-classification", model="microsoft/codebert-base")
16
- qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-xl")
17
 
18
- # ---------- Code Review ----------
19
  def analyze_code(code):
20
  if not code.strip():
21
  return "No code provided.", "", ""
22
 
23
  result = code_analyzer(code)
24
- label = result[0]["label"].strip()
25
-
26
- label_to_issue_type = {
27
- "LABEL_0": "Performance",
28
- "LABEL_1": "Error",
29
- "LABEL_2": "Security",
30
- "LABEL_3": "Best Practice"
31
- }
32
 
33
- if label not in label_to_issue_type:
34
- raise ValueError(f"Unknown label from model: {label}")
35
 
36
- issue_type = label_to_issue_type[label]
37
  suggestion = "Consider refactoring for better performance"
38
  severity = "Medium"
39
 
40
- # ✅ Use valid developer ID (REPLACE with real user ID)
41
- developer_id = os.getenv("DEVELOPER_ID", "005NS00000Sn2q9") # ← CHANGE THIS ID
42
-
43
  try:
44
  sf.CodeReviewResult__c.create({
45
  "Name": f"Review_{issue_type}",
46
  "CodeSnippet__c": code,
47
- "IssueType__c": issue_type,
48
  "Suggestion__c": suggestion,
49
  "Severity__c": severity,
50
- "Developer__c": developer_id
51
  })
52
  except Exception as e:
53
  suggestion += f" (⚠️ Failed to log to Salesforce: {str(e)})"
54
 
55
  return issue_type, suggestion, severity
56
 
57
- # ---------- Metadata Validation ----------
58
  def validate_metadata(metadata):
 
 
 
59
  mtype = "Field"
60
  issue = "Unused field detected"
61
  recommendation = "Remove it to improve performance"
@@ -73,35 +74,26 @@ def validate_metadata(metadata):
73
 
74
  return mtype, issue, recommendation
75
 
76
- # ---------- Natural Language AI Assistant ----------
 
 
77
  def process_nlp_query(query):
78
- try:
79
- query = query.strip()
80
- if not query:
81
- return "Please enter a valid Salesforce-related question."
82
-
83
- prompt = (
84
- f"Question: {query}\n\n"
85
- "You are a certified Salesforce developer. Answer this question in a clear, detailed, and educational way. "
86
- "Include governor limits, examples, and best practices if relevant.\n\nAnswer:"
87
- )
88
-
89
- result = qa_pipeline(
90
- prompt,
91
- max_length=512,
92
- temperature=0.3,
93
- top_p=0.9,
94
- repetition_penalty=1.1,
95
- do_sample=False
96
- )
97
-
98
- output = result[0]["generated_text"]
99
- if "Answer:" in output:
100
- return output.split("Answer:")[-1].strip()
101
- return output.strip()
102
 
103
- except Exception as e:
104
- return f"⚠️ AI Error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
105
 
106
  # ---------- Gradio UI ----------
107
  with gr.Blocks() as demo:
@@ -124,7 +116,7 @@ with gr.Blocks() as demo:
124
  metadata_button.click(validate_metadata, inputs=metadata_input, outputs=[mtype, issue, recommendation])
125
 
126
  with gr.Tab("Ask AI (Natural Language)"):
127
- query_input = gr.Textbox(label="Your question", lines=2, placeholder="e.g. What is a governor limit in Apex?")
128
  response_output = gr.Textbox(label="AI Response", lines=8)
129
  nlp_button = gr.Button("Ask")
130
  nlp_button.click(process_nlp_query, inputs=query_input, outputs=response_output)
 
11
  domain="login"
12
  )
13
 
14
# ---------- Define Mapping for Model Labels ----------
# Maps the raw labels emitted by the text-classification pipeline
# (e.g. "LABEL_0") to the human-readable issue categories stored in
# Salesforce. Unknown labels are handled at the lookup site.
label_to_issue_type = {
    "LABEL_0": "Performance",
    "LABEL_1": "Error",
    "LABEL_2": "Security",
    "LABEL_3": "Best Practice"
}

# ---------- Load Model ----------
# NOTE(review): microsoft/codebert-base is a base encoder checkpoint; its
# classification head is presumably untrained, so label outputs may not be
# meaningful — confirm a fine-tuned checkpoint is intended here.
code_analyzer = pipeline("text-classification", model="microsoft/codebert-base")
 
24
 
25
# ---------- Code Analyzer Function ----------
def analyze_code(code):
    """Classify a code snippet and log the result to Salesforce.

    Args:
        code: Source-code snippet entered in the UI (may be None/empty).

    Returns:
        Tuple of (issue_type, suggestion, severity) strings. If Salesforce
        logging fails, the error text is appended to the suggestion instead
        of raising, so the UI still gets a result.
    """
    # Guard against both empty text and None (a cleared Gradio textbox
    # can deliver None, which would crash on .strip()).
    if not code or not code.strip():
        return "No code provided.", "", ""

    result = code_analyzer(code)
    label = result[0]["label"]  # e.g., "LABEL_0"
    # (the pipeline also returns a confidence under result[0]["score"],
    # currently unused)

    # Map raw label to issue type; fall back to "Error" for any label
    # outside the known set.
    issue_type = label_to_issue_type.get(label.strip(), "Error")

    # Static suggestion and severity for now
    suggestion = "Consider refactoring for better performance"
    severity = "Medium"

    try:
        sf.CodeReviewResult__c.create({
            "Name": f"Review_{issue_type}",
            "CodeSnippet__c": code,
            "IssueType__c": issue_type,  # Must match picklist
            "Suggestion__c": suggestion,
            "Severity__c": severity,
            # Optional: add Developer__c if needed
        })
    except Exception as e:
        # Best-effort logging: surface the failure in the UI output
        # rather than crashing the request.
        suggestion += f" (⚠️ Failed to log to Salesforce: {str(e)})"

    return issue_type, suggestion, severity
54
 
55
+ # ---------- Metadata Validator ----------
56
  def validate_metadata(metadata):
57
+ if not metadata.strip():
58
+ return "No metadata provided.", "", ""
59
+
60
  mtype = "Field"
61
  issue = "Unused field detected"
62
  recommendation = "Remove it to improve performance"
 
74
 
75
  return mtype, issue, recommendation
76
 
77
# ---------- NLP Assistant (Flan-T5) ----------
# Instruction-tuned text2text model backing process_nlp_query.
# NOTE(review): flan-t5-xl (~3B parameters) is heavy for a CPU-only Space —
# confirm the hosting tier can hold it, or consider flan-t5-base/-large.
qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-xl")
79
+
80
def process_nlp_query(query):
    """Answer a Salesforce question via the Flan-T5 text2text pipeline.

    Args:
        query: Free-text question from the UI (may be None/empty).

    Returns:
        The model's answer as a stripped string, or a short status/error
        message. Never raises — pipeline failures are reported as text so
        the Gradio handler always gets a displayable result.
    """
    # Guard against both empty text and None (cleared Gradio textbox).
    if not query or not query.strip():
        return "No question provided."

    prompt = (
        "You are a certified Salesforce Apex expert. Answer the question below accurately and clearly. "
        "Include governor limits, code examples, best practices, and terminology when appropriate. Do not repeat the question. "
        "Give only a factual answer.\n\n"
        f"Question: {query.strip()}\n\nAnswer:"
    )

    try:
        # do_sample=False means greedy decoding; temperature/top_p are
        # ignored in that mode (transformers warns about them), so they
        # are omitted here. Output is deterministic.
        # NOTE(review): max_length counts generated tokens for seq2seq
        # models but is deprecated in favor of max_new_tokens — confirm
        # before switching.
        result = qa_pipeline(
            prompt,
            max_length=512,
            repetition_penalty=1.1,
            do_sample=False,
        )
    except Exception as e:
        # Keep the UI responsive on model/runtime failures.
        return f"⚠️ AI Error: {str(e)}"

    output = result[0]["generated_text"]

    # Some generations echo the "Answer:" marker; return only the part
    # after the final occurrence.
    if "Answer:" in output:
        return output.split("Answer:")[-1].strip()
    return output.strip()
97
 
98
  # ---------- Gradio UI ----------
99
  with gr.Blocks() as demo:
 
116
  metadata_button.click(validate_metadata, inputs=metadata_input, outputs=[mtype, issue, recommendation])
117
 
118
  with gr.Tab("Ask AI (Natural Language)"):
119
+ query_input = gr.Textbox(label="Your question", lines=2, placeholder="e.g. What is a trigger?")
120
  response_output = gr.Textbox(label="AI Response", lines=8)
121
  nlp_button = gr.Button("Ask")
122
  nlp_button.click(process_nlp_query, inputs=query_input, outputs=response_output)