SiennaClarke committed on
Commit
3b01c61
·
verified ·
1 Parent(s): 5097bbd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -81
app.py CHANGED
@@ -1,95 +1,63 @@
1
  import streamlit as st
2
  from transformers import AutoTokenizer
3
  from optimum.intel import OVModelForCausalLM
4
- import torch
5
- import time
6
  import re
7
 
8
- # --- App Config ---
9
- st.set_page_config(
10
- page_title="Flash Jenkins Validator",
11
- page_icon="⚡",
12
- layout="wide" # Wide layout helps side-by-side comparison
13
- )
14
-
15
  MODEL_ID = "Qwen/Qwen2.5-Coder-0.5B-Instruct"
16
 
17
- @st.cache_resource(show_spinner="Optimizing Engine for CPU...")
18
- def load_model():
 
 
19
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
 
20
  model = OVModelForCausalLM.from_pretrained(
21
- MODEL_ID,
22
- export=True,
23
- compile=True,
24
- load_in_8bit=True,
25
- device="CPU"
26
  )
27
  return tokenizer, model
28
 
29
- tokenizer, model = load_model()
30
-
31
- # --- UI Layout ---
32
- st.title(" Flash Jenkins Validator & Fixer")
33
-
34
- col1, col2 = st.columns(2)
35
-
36
- with col1:
37
- st.subheader("Input")
38
- jenkinsfile = st.text_area(
39
- "Paste Jenkinsfile:",
40
- placeholder="pipeline { ... }",
41
- height=400
42
- )
43
- analyze_btn = st.button("Analyze & Suggest Fix", use_container_width=True)
44
-
45
- # --- Logic ---
46
- if analyze_btn:
47
- if not jenkinsfile.strip():
48
- st.error("Please paste a Jenkinsfile first.")
49
- else:
50
- messages = [
51
- {
52
- "role": "system",
53
- "content": "You are a DevOps expert. Review the Jenkinsfile for errors. ALWAYS provide a corrected version of the full Jenkinsfile inside a ```groovy code block at the end of your response."
54
- },
55
- {"role": "user", "content": f"Fix this Jenkinsfile:\n\n{jenkinsfile}"}
56
- ]
 
 
 
 
 
 
 
57
 
58
- input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
59
- inputs = tokenizer(input_text, return_tensors="pt")
60
-
61
- with st.status("Generating Fix...", expanded=True) as status:
62
- start_time = time.time()
63
-
64
- output_tokens = model.generate(
65
- **inputs,
66
- max_new_tokens=1024, # Increased to ensure full code block fits
67
- do_sample=False,
68
- pad_token_id=tokenizer.eos_token_id
69
- )
70
-
71
- new_tokens = output_tokens[0][len(inputs["input_ids"][0]):]
72
- response = tokenizer.decode(new_tokens, skip_special_tokens=True)
73
-
74
- # --- Feature: Extraction Logic ---
75
- # Search for the code block in the AI response
76
- code_match = re.search(r"```groovy\n([\s\S]*?)```", response)
77
- fixed_code = code_match.group(1) if code_match else None
78
- # Clean up explanation by removing the code block from the markdown view
79
- explanation = re.sub(r"```groovy[\s\S]*?```", "*(See corrected code in the Fix panel)*", response)
80
-
81
- with col2:
82
- st.subheader("Analysis & Fixed Code")
83
- st.markdown(explanation)
84
-
85
- if fixed_code:
86
- st.success("✅ One-Click Fix Ready:")
87
- st.code(fixed_code, language="groovy")
88
- st.button("Apply Fix to Input", on_click=lambda: st.session_state.update({"input_area": fixed_code}), key="apply_fix")
89
- else:
90
- st.warning("Could not extract a clean code block. Check the explanation.")
91
-
92
- status.update(label=f"Done in {time.time() - start_time:.2f}s", state="complete")
93
 
94
- st.divider()
95
- st.caption(f"Engine: {MODEL_ID} | CPU Optimized")
 
1
  import streamlit as st
2
  from transformers import AutoTokenizer
3
  from optimum.intel import OVModelForCausalLM
 
 
4
  import re
5
 
6
# --- Setup ---
# Hugging Face hub identifier of the code-review model.
MODEL_ID = "Qwen/Qwen2.5-Coder-0.5B-Instruct"

# Page config must run before any other Streamlit call.
st.set_page_config(
    page_title="Code Validator & Fixer",
    layout="wide",
)
10
+
11
@st.cache_resource
def load_engine():
    """Load the tokenizer and the OpenVINO-compiled model.

    Cached with ``st.cache_resource`` so the (expensive) export/compile
    happens at most once per server process.
    """
    tok = AutoTokenizer.from_pretrained(MODEL_ID)
    # export=True converts the checkpoint to OpenVINO IR on the first run;
    # load_in_8bit quantizes the weights for CPU inference.
    ov_model = OVModelForCausalLM.from_pretrained(
        MODEL_ID,
        export=True,
        load_in_8bit=True,
        device="CPU",
    )
    return tok, ov_model


tokenizer, model = load_engine()
24
+
25
# --- UI ---
st.title("🏗️ Smart Code Validator")
st.write("Analyzes and fixes your code using Qwen2.5-Coder (CPU-Optimized)")

# Two-pane layout: code goes in on the left, analysis comes out on the right.
col_input, col_output = st.columns(2)

with col_input:
    user_code = st.text_area(
        "Paste your code here:",
        height=400,
        placeholder="pipeline { ... }",
    )
    run_btn = st.button("Validate & Fix", use_container_width=True)
34
+
35
if run_btn:
    if not user_code.strip():
        # Clicking with an empty editor previously did nothing silently.
        st.error("Please paste some code first.")
    else:
        # 1. Prepare Request: system prompt forces a trailing fenced code block
        # so the extraction step below has something to find.
        messages = [
            {"role": "system", "content": "You are a senior developer. First, explain errors briefly. Second, provide the full corrected code inside a ```groovy block."},
            {"role": "user", "content": f"Review and fix this code:\n{user_code}"}
        ]

        prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = tokenizer(prompt, return_tensors="pt")
        # Number of prompt tokens; used to slice the generated continuation.
        # (Indexing the BatchEncoding itself, e.g. len(inputs[0]), raises
        # KeyError with slow tokenizers — read input_ids explicitly.)
        prompt_len = inputs["input_ids"].shape[-1]

        # 2. Generate (greedy decode; pad_token_id avoids the open-end
        # generation warning for models without a dedicated pad token).
        with st.spinner("Analyzing on CPU..."):
            tokens = model.generate(
                **inputs,
                max_new_tokens=1024,
                do_sample=False,
                pad_token_id=tokenizer.eos_token_id,
            )
            response = tokenizer.decode(tokens[0][prompt_len:], skip_special_tokens=True)

        # 3. Extract Fixed Code: first fenced block (any language tag) is the
        # proposed fix; the prose outside the fences is the explanation.
        code_match = re.search(r"```(?:\w+)?\n([\s\S]*?)```", response)
        fixed_code = code_match.group(1) if code_match else None
        explanation = re.sub(r"```[\s\S]*?```", "", response).strip()

        with col_output:
            st.subheader("Analysis")
            st.info(explanation if explanation else "Code looks good! Here is the optimized version:")

            if fixed_code:
                st.subheader("Corrected Code")
                st.code(fixed_code)
                st.download_button("Download Fixed File", fixed_code, file_name="fixed_code.groovy")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63