SiennaClarke committed on
Commit
82d6de9
·
verified ·
1 Parent(s): 61fe479

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -46
app.py CHANGED
@@ -1,63 +1,87 @@
1
  import streamlit as st
2
- from transformers import AutoTokenizer
3
- from optimum.intel import OVModelForCausalLM
4
- import re
5
 
6
# --- Setup ---
# Coder-tuned instruct model; converted to OpenVINO IR and 8-bit quantized
# in load_engine() below for CPU-only inference.
MODEL_ID = "Qwen/Qwen2.5-Coder-0.5B-Instruct"

st.set_page_config(page_title="Code Validator & Fixer", layout="wide")
10
 
 
11
@st.cache_resource
def load_engine():
    """Load the tokenizer and the OpenVINO-optimized LM once per session.

    Returns:
        (tokenizer, model) — the tokenizer and an OVModelForCausalLM ready
        for CPU inference.
    """
    tok = AutoTokenizer.from_pretrained(MODEL_ID)
    # export=True converts the checkpoint to OpenVINO IR on the first run;
    # load_in_8bit quantizes the weights for faster CPU execution.
    lm = OVModelForCausalLM.from_pretrained(
        MODEL_ID, export=True, device="CPU", load_in_8bit=True
    )
    return tok, lm
22
 
23
tokenizer, model = load_engine()

# --- UI ---
st.title("🏗️ Smart Code Validator")
st.write("Analyzes and fixes your code using Qwen2.5-Coder (CPU-Optimized)")

# Two-column layout: code input on the left, analysis output on the right.
col_input, col_output = st.columns(2)

with col_input:
    user_code = st.text_area("Paste your code here:", height=400, placeholder="pipeline { ... }")
    run_btn = st.button("Validate & Fix", use_container_width=True)
 
 
 
 
 
34
 
35
if run_btn and user_code:
    # 1. Build the chat request for the instruct model.
    messages = [
        {"role": "system", "content": "You are a senior developer. First, explain errors briefly. Second, provide the full corrected code inside a ```groovy block."},
        {"role": "user", "content": f"Review and fix this code:\n{user_code}"}
    ]

    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt")
    # Prompt length in tokens, used to strip the echoed prompt from the output.
    # inputs["input_ids"] works for both fast and slow tokenizers, whereas
    # integer-indexing the BatchEncoding (inputs[0]) only works with fast ones.
    prompt_len = inputs["input_ids"].shape[1]

    # 2. Generate deterministically (greedy decoding).
    with st.spinner("Analyzing on CPU..."):
        tokens = model.generate(**inputs, max_new_tokens=1024, do_sample=False)
        response = tokenizer.decode(tokens[0][prompt_len:], skip_special_tokens=True)

    # 3. Extract the first fenced code block (any language tag); the remaining
    # text outside fences is treated as the explanation.
    code_match = re.search(r"```(?:\w+)?\n([\s\S]*?)```", response)
    fixed_code = code_match.group(1) if code_match else None
    explanation = re.sub(r"```[\s\S]*?```", "", response).strip()

    with col_output:
        st.subheader("Analysis")
        st.info(explanation if explanation else "Code looks good! Here is the optimized version:")

        if fixed_code:
            st.subheader("Corrected Code")
            st.code(fixed_code)
            st.download_button("Download Fixed File", fixed_code, file_name="fixed_code.groovy")
63
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
 
4
 
5
# --- CONFIGURATION ---
# Very small instruct model so generation stays feasible on CPU-only hosts.
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
DEVICE = "cpu"

st.set_page_config(page_title="Jenkins Pipeline Architect", layout="wide")
10
 
11
# --- MODEL LOADING (Cached for Speed) ---
@st.cache_resource
def load_model():
    """Fetch the tokenizer and model once; Streamlit caches the pair per session.

    Returns:
        (tokenizer, model) — the tokenizer and the causal LM moved to DEVICE.
    """
    tok = AutoTokenizer.from_pretrained(MODEL_ID)
    lm = AutoModelForCausalLM.from_pretrained(MODEL_ID)
    return tok, lm.to(DEVICE)
17
 
18
tokenizer, model = load_model()

# --- UI LOGIC ---
st.title("🚀 Jenkins Pipeline Architect")
st.caption("Powered by SmolLM2-135M (CPU Optimized)")

# Sidebar collects the pipeline requirements that drive the prompt below.
with st.sidebar:
    st.header("Pipeline Configuration")
    project_type = st.selectbox("Project Type", ["Node.js", "Python", "Java/Maven", "Docker"])
    agent_type = st.text_input("Agent Label", value="any")

    st.subheader("Stages")
    do_test = st.checkbox("Include Unit Tests", value=True)
    do_lint = st.checkbox("Include Linting", value=False)
    do_deploy = st.checkbox("Include Deployment", value=True)

    # Free-form KEY=VALUE lines; passed verbatim into the prompt as "Options".
    env_vars = st.text_area("Env Vars (Key=Value)", placeholder="DB_HOST=localhost")
35
 
36
# --- GENERATION LOGIC ---
def generate_pipeline(config):
    """Generate a declarative Jenkinsfile from the sidebar configuration.

    Args:
        config: dict with string values under keys 'type', 'agent',
                'options', and 'stages' (built by the button handler).

    Returns:
        The model's generated text with the prompt stripped.
    """
    # Precise, constraint-heavy prompt — a 135M model needs explicit instructions.
    prompt = f"""Generate a Jenkinsfile (Declarative Pipeline) for a {config['type']} project.
- Agent: {config['agent']}
- Options: {config['options']}
- Stages: {config['stages']}
Return ONLY the Groovy code. No explanations."""

    messages = [{"role": "user", "content": prompt}]
    # add_generation_prompt=True appends the assistant-turn header so the model
    # continues as the assistant instead of predicting another user turn.
    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(DEVICE)

    # Greedy decoding keeps the code output deterministic. temperature is
    # ignored (and warned about) when do_sample=False, so it is omitted.
    outputs = model.generate(
        inputs,
        max_new_tokens=500,
        do_sample=False,
    )
    # Decode only the newly generated tokens: slicing by prompt token count is
    # robust, unlike splitting the full text on the literal word "assistant",
    # which breaks if the generated pipeline happens to contain that word.
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True).strip()
56
+
57
# --- MAIN VIEW ---
col1, col2 = st.columns([1, 1])

with col1:
    st.markdown("### Preview Requirements")
    st.info(f"Building a pipeline for **{project_type}** running on **{agent_type}**.")

    if st.button("Generate Jenkinsfile", type="primary"):
        # Build the stage list from the sidebar toggles; "Build" is always first.
        toggles = ((do_test, "Test"), (do_lint, "Lint"), (do_deploy, "Deploy"))
        stages = ["Build"] + [name for enabled, name in toggles if enabled]

        config = {
            "type": project_type,
            "agent": agent_type,
            "stages": ", ".join(stages),
            "options": env_vars,
        }

        with st.spinner("Writing Groovy..."):
            # Persist in session state so the result survives Streamlit reruns.
            st.session_state.pipeline = generate_pipeline(config)

with col2:
    st.markdown("### Generated Jenkinsfile")
    if "pipeline" in st.session_state:
        st.code(st.session_state.pipeline, language="groovy")
        st.download_button("Download Jenkinsfile", st.session_state.pipeline, file_name="Jenkinsfile")
    else:
        st.warning("Configure settings and click generate.")