Eric2mangel committed on
Commit
013f0f3
·
verified ·
1 Parent(s): 060c73c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +238 -217
app.py CHANGED
@@ -1,218 +1,239 @@
1
- import streamlit as st
2
- import os
3
- import sys
4
- import subprocess
5
- import json
6
- import pandas as pd
7
- import plotly.express as px
8
- import shutil
9
- import tempfile
10
- from pathlib import Path
11
-
12
# --- CONFIGURATION ---
st.set_page_config(page_title="Coach Code Python", layout="wide")

# Pedagogical dictionary mapping Ruff rule-code prefixes (letters) to the
# human-readable (French) category labels shown in the charts.
# NOTE(review): the chart code categorises by the FIRST letter of each rule
# code only, so the multi-letter keys ("UP", "ANN") are never matched there
# — confirm this is intended.
RUFF_CAT_MAP = {
    "F": "Erreurs Logiques (F)",
    "E": "Style PEP8 (E)",
    "W": "Avertissements (W)",
    "I": "Tri des Imports (I)",
    "B": "Bugs Potentiels (B)",
    "UP": "Modernisation (UP)",
    "N": "Nommage (N)",
    "D": "Documentation (D)",
    "ANN": "Annotations de type (ANN)",
    "T": "Tests & Debug (T)",
    "A": "Built-ins (A)"
}
29
-
30
def remove_excessive_blank_lines(code):
    """Collapse runs of consecutive blank lines down to a single blank line.

    Lines containing only whitespace count as blank; the first blank line of
    each run is kept (with its original whitespace) and the rest are dropped.
    Non-blank lines are passed through untouched.
    """
    kept = []
    consecutive_blanks = 0
    for raw_line in code.split('\n'):
        if raw_line.strip():
            # Content line: reset the run counter and keep it.
            consecutive_blanks = 0
            kept.append(raw_line)
        else:
            consecutive_blanks += 1
            # Keep only the first blank line of each run.
            if consecutive_blanks == 1:
                kept.append(raw_line)
    return '\n'.join(kept)
46
-
47
-
48
def run_ruff(paths, fix=False, disable_docs=False, compact=False, unsafe=False):
    """Run the Ruff linter (and optionally its fixer/formatter) on *paths*.

    Args:
        paths: iterable of file/directory paths to analyse.
        fix: when True, apply Ruff's auto-fixes and then run ``ruff format``.
        disable_docs: ignore the missing-docstring rules (D1xx).
        compact: ignore the blank-line/import-spacing style rules so that
            compact code is not reflowed.
        unsafe: also apply Ruff's "unsafe" fixes (only meaningful with fix).

    Returns:
        The list of diagnostics parsed from Ruff's JSON output, or [] when
        nothing was reported or the output was not valid JSON.
    """
    ignore_list = []

    if compact:
        # Blank-line and import-layout rules that would "decompress" the code.
        ignore_list.extend(["E302", "E303", "E305", "E301", "E401", "W391"])

    if disable_docs:
        # Missing-docstring rules.
        ignore_list.extend(["D100", "D101", "D102", "D103", "D104", "D107"])

    cmd_check = [
        sys.executable, "-m", "ruff", "check", *paths,
        "--output-format", "json",
        "--select", "ALL",
        "--isolated", "--no-cache",
    ]

    if ignore_list:
        cmd_check.extend(["--ignore", ",".join(ignore_list)])

    if fix:
        cmd_check.append("--fix")
        if unsafe:
            cmd_check.append("--unsafe-fixes")

    # BUG FIX: the command is an argv list, so no shell is needed. The
    # original passed shell=True on Windows, which mis-quotes list commands
    # (e.g. a Python path containing spaces) and is an injection hazard.
    result = subprocess.run(cmd_check, capture_output=True, text=True, encoding="utf-8")

    if fix:
        cmd_format = [sys.executable, "-m", "ruff", "format", *paths, "--isolated"]
        subprocess.run(cmd_format, capture_output=True)

    # BUG FIX: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit; only malformed JSON should fall back to an empty report.
    try:
        return json.loads(result.stdout) if result.stdout.strip() else []
    except json.JSONDecodeError:
        return []
84
-
85
def get_stats(paths):
    """Compute aggregate size and line-count statistics for Python sources.

    Args:
        paths: iterable of file or directory paths; a directory is scanned
            recursively for ``*.py`` files, a file is measured directly.

    Returns:
        Tuple ``(total_size, total_lines)``: cumulative byte size and line
        count of every readable file found.
    """
    total_size, total_lines = 0, 0
    for p in paths:
        path_obj = Path(p)
        files = [path_obj] if path_obj.is_file() else path_obj.rglob("*.py")
        for f in files:
            try:
                content = f.read_text(errors='ignore')
                total_size += f.stat().st_size
                total_lines += len(content.splitlines())
            # BUG FIX: was a bare `except:`; only I/O failures (unreadable or
            # vanished files) should be skipped, not KeyboardInterrupt etc.
            except OSError:
                continue
    return total_size, total_lines
97
-
98
# --- SIDEBAR ---
with st.sidebar:
    st.title("🛡️ Configuration")
    # Upload mode: a single file or a multi-file batch.
    mode = st.radio("Source :", ["Fichier unique", "Plusieurs fichiers"], index=0)

    uploaded_files = []
    if mode == "Fichier unique":
        f = st.file_uploader("Fichier .py", type="py")
        if f: uploaded_files = [f]
    else:
        f_list = st.file_uploader("Sélectionner fichiers", type="py", accept_multiple_files=True)
        if f_list: uploaded_files = f_list

    st.subheader("🛠️ Options de correction")
    # These checkboxes feed run_ruff(disable_docs=..., compact=..., unsafe=...).
    opt_docs = st.checkbox("Désactiver l'obligation des commentaires", value=True)
    opt_compact = st.checkbox("Garder le code compact (sans espaces excessifs)", value=True)
    opt_unsafe = st.checkbox("Activer les corrections forcées (modernisation)", value=True)

    #st.divider()
    btn_analyze = st.button("🚀 Analyser & Corriger", use_container_width=True)

# --- CENTRAL AREA ---
st.title("🐍 Coach Code Python")

if btn_analyze and uploaded_files:
    # All work happens in a throwaway directory so nothing persists on disk.
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_workspace = Path(temp_dir)
        work_paths, code_before, code_after = [], "", ""

        try:
            # Materialise every upload inside the temp workspace so Ruff can
            # operate on real files.
            for uploaded_file in uploaded_files:
                file_path = temp_workspace / uploaded_file.name
                file_data = uploaded_file.getvalue().decode("utf-8", errors="ignore")
                if mode == "Fichier unique": code_before = file_data
                file_path.write_text(file_data, encoding="utf-8")
                work_paths.append(str(file_path))

            s_init, l_init = get_stats(work_paths)

            # Initial read-only analysis pass.
            errors = run_ruff(work_paths, fix=False, disable_docs=opt_docs, compact=opt_compact, unsafe=opt_unsafe)

            # Second pass: apply the fixes in place.
            run_ruff(work_paths, fix=True, disable_docs=opt_docs, compact=opt_compact, unsafe=opt_unsafe)

            if opt_compact:
                # Ruff format may reintroduce blank lines; squeeze them again.
                for path in work_paths:
                    file_path = Path(path)
                    content = file_path.read_text(encoding="utf-8")
                    cleaned = remove_excessive_blank_lines(content)
                    file_path.write_text(cleaned, encoding="utf-8")

            s_after, l_after = get_stats(work_paths)

            if mode == "Fichier unique":
                code_after = Path(work_paths[0]).read_text(encoding="utf-8")

            nb_err = len(errors)
            # Crude quality score: 100 minus errors-per-line as a percentage,
            # clamped to [0, 100]; guards against division by zero.
            score = max(0, min(100, 100 - (nb_err / (l_init if l_init > 0 else 1)) * 100))

            m1, m2, m3, m4 = st.columns(4)
            m1.metric("Qualité du Code", f"{score:.1f}/100")
            m2.metric("Points corrigés", nb_err)
            m3.metric("Lignes modifiées", l_init - l_after)
            m4.metric("Gain de poids", f"{s_init - s_after} octets")

            # The side-by-side "Comparatif" tab only makes sense for one file.
            tabs = st.tabs(["📊 Statistiques", "📜 Rapport", "🔍 Comparatif"] if mode == "Fichier unique" else ["📊 Statistiques", "📜 Rapport"])

            with tabs[0]:
                if nb_err > 0:
                    df = pd.DataFrame(errors)
                    # NOTE(review): only the FIRST letter of the rule code is
                    # used, so multi-letter families ("UP", "ANN") fall into
                    # "Autre (U)"/"Autre (A)..." — confirm intended.
                    df['Cat_Code'] = df['code'].str[0]
                    df['Catégorie'] = df['Cat_Code'].map(lambda x: RUFF_CAT_MAP.get(x, f"Autre ({x})"))

                    c1, c2 = st.columns(2)
                    with c1:
                        counts = df['code'].value_counts().reset_index().sort_values('count', ascending=True)
                        fig = px.bar(counts, x='count', y='code', orientation='h',
                                     title="Fréquence par code d'erreur",
                                     color='count', color_continuous_scale='Blues')
                        st.plotly_chart(fig, use_container_width=True)
                    with c2:
                        cat_counts = df['Catégorie'].value_counts().reset_index().sort_values('count', ascending=False)
                        fig2 = px.bar(cat_counts, x='count', y='Catégorie', orientation='h',
                                      title="Problèmes par famille",
                                      color='Catégorie', color_discrete_sequence=px.colors.qualitative.G10)
                        fig2.update_layout(showlegend=False, yaxis={'categoryorder':'total ascending'})
                        st.plotly_chart(fig2, use_container_width=True)
                else:
                    st.success("✨ Félicitations ! Ruff n'a trouvé aucune erreur.")

            with tabs[1]:
                if nb_err > 0:
                    # --- Shape the raw Ruff diagnostics into table rows ---
                    report_data = []
                    for err in errors:
                        filename = Path(err['filename']).name
                        line = err['location']['row']
                        col = err['location']['column']

                        # NOTE(review): `filename` is computed but never used;
                        # the literal "(unknown)" below looks like it was meant
                        # to be f"{filename} (L:..., C:...)" — confirm.
                        report_data.append({
                            "Code": err['code'],
                            "Message": err['message'],
                            "Localisation": f"(unknown) (L:{line}, C:{col})"
                        })

                    st.dataframe(pd.DataFrame(report_data), use_container_width=True)

            if mode == "Fichier unique":
                with tabs[2]:
                    col1, col2 = st.columns(2)
                    col1.subheader("Version Originale")
                    col1.code(code_before, language="python")
                    col2.subheader("Version Corrigée")
                    col2.code(code_after, language="python")

        except Exception as e:
            # Top-level guard: surface any unexpected failure in the UI.
            st.error(f"Erreur : {e}")

    # Defensive cleanup in case Ruff left a cache despite --no-cache.
    if Path(".ruff_cache").exists():
        shutil.rmtree(".ruff_cache")
 
1
+ import streamlit as st
2
+ import os
3
+ import sys
4
+ import subprocess
5
+ import json
6
+ import pandas as pd
7
+ import plotly.express as px
8
+ import shutil
9
+ import tempfile
10
+ from pathlib import Path
11
+
12
# --- CONFIGURATION ---
st.set_page_config(page_title="Coach Code Python", layout="wide")

# Pedagogical dictionary mapping Ruff rule-code prefixes (letters) to the
# human-readable (French) category labels shown in the charts.
# NOTE(review): the chart code categorises by the FIRST letter of each rule
# code only, so the multi-letter keys ("UP", "ANN") are never matched there
# — confirm this is intended.
RUFF_CAT_MAP = {
    "F": "Erreurs Logiques (F)",
    "E": "Style PEP8 (E)",
    "W": "Avertissements (W)",
    "I": "Tri des Imports (I)",
    "B": "Bugs Potentiels (B)",
    "UP": "Modernisation (UP)",
    "N": "Nommage (N)",
    "D": "Documentation (D)",
    "ANN": "Annotations de type (ANN)",
    "T": "Tests & Debug (T)",
    "A": "Built-ins (A)"
}
29
+
30
def remove_excessive_blank_lines(code):
    """Collapse runs of consecutive blank lines down to a single blank line.

    Lines containing only whitespace count as blank; the first blank line of
    each run is kept (with its original whitespace) and the rest are dropped.
    Non-blank lines are passed through untouched.
    """
    kept = []
    consecutive_blanks = 0
    for raw_line in code.split('\n'):
        if raw_line.strip():
            # Content line: reset the run counter and keep it.
            consecutive_blanks = 0
            kept.append(raw_line)
        else:
            consecutive_blanks += 1
            # Keep only the first blank line of each run.
            if consecutive_blanks == 1:
                kept.append(raw_line)
    return '\n'.join(kept)
46
+
47
+
48
def run_ruff(paths, fix=False, disable_docs=False, compact=False, unsafe=False):
    """Run the Ruff linter (and optionally its fixer/formatter) on *paths*.

    Args:
        paths: iterable of file/directory paths to analyse.
        fix: when True, apply Ruff's auto-fixes and then run ``ruff format``.
        disable_docs: ignore the missing-docstring rules (D1xx).
        compact: ignore the blank-line/import-spacing style rules so that
            compact code is not reflowed.
        unsafe: also apply Ruff's "unsafe" fixes (only meaningful with fix).

    Returns:
        The list of diagnostics parsed from Ruff's JSON output, or [] when
        nothing was reported or the output was not valid JSON.
    """
    ignore_list = []

    if compact:
        # Blank-line and import-layout rules that would "decompress" the code.
        ignore_list.extend(["E302", "E303", "E305", "E301", "E401", "W391"])

    if disable_docs:
        # Missing-docstring rules.
        ignore_list.extend(["D100", "D101", "D102", "D103", "D104", "D107"])

    cmd_check = [
        sys.executable, "-m", "ruff", "check", *paths,
        "--output-format", "json",
        "--select", "ALL",
        "--isolated", "--no-cache",
    ]

    if ignore_list:
        cmd_check.extend(["--ignore", ",".join(ignore_list)])

    if fix:
        cmd_check.append("--fix")
        if unsafe:
            cmd_check.append("--unsafe-fixes")

    # BUG FIX: the command is an argv list, so no shell is needed. The
    # original passed shell=True on Windows, which mis-quotes list commands
    # (e.g. a Python path containing spaces) and is an injection hazard.
    result = subprocess.run(cmd_check, capture_output=True, text=True, encoding="utf-8")

    # Debug: surface anything Ruff wrote to stderr.
    if result.stderr:
        st.warning(f"Ruff stderr: {result.stderr}")

    # Ruff exits 0 on success and 1 when violations were found; anything
    # else means the tool itself failed.
    if result.returncode not in [0, 1]:
        st.error(f"Ruff return code: {result.returncode}")

    if fix:
        cmd_format = [sys.executable, "-m", "ruff", "format", *paths, "--isolated"]
        subprocess.run(cmd_format, capture_output=True)

    # BUG FIX: narrowed from `except Exception` — only malformed JSON should
    # fall back to an empty report; anything else is a real bug to surface.
    try:
        return json.loads(result.stdout) if result.stdout.strip() else []
    except json.JSONDecodeError as e:
        st.error(f"Erreur parsing JSON: {e}, stdout: {result.stdout[:200]}")
        return []
93
+
94
def get_stats(paths):
    """Compute aggregate size and line-count statistics for Python sources.

    Args:
        paths: iterable of file or directory paths; a directory is scanned
            recursively for ``*.py`` files, a file is measured directly.

    Returns:
        Tuple ``(total_size, total_lines)``: cumulative byte size and line
        count of every readable file found.
    """
    total_size, total_lines = 0, 0
    for p in paths:
        path_obj = Path(p)
        files = [path_obj] if path_obj.is_file() else path_obj.rglob("*.py")
        for f in files:
            try:
                content = f.read_text(errors='ignore')
                total_size += f.stat().st_size
                total_lines += len(content.splitlines())
            # BUG FIX: was a bare `except:`; only I/O failures (unreadable or
            # vanished files) should be skipped, not KeyboardInterrupt etc.
            except OSError:
                continue
    return total_size, total_lines
106
+
107
# --- SIDEBAR ---
with st.sidebar:
    st.title("🛡️ Configuration")
    # Upload mode: a single file or a multi-file batch.
    mode = st.radio("Source :", ["Fichier unique", "Plusieurs fichiers"], index=0)

    uploaded_files = []
    if mode == "Fichier unique":
        f = st.file_uploader("Fichier .py", type="py")
        if f: uploaded_files = [f]
    else:
        f_list = st.file_uploader("Sélectionner fichiers", type="py", accept_multiple_files=True)
        if f_list: uploaded_files = f_list

    st.subheader("🛠️ Options de correction")
    # These checkboxes feed run_ruff(disable_docs=..., compact=..., unsafe=...).
    opt_docs = st.checkbox("Désactiver l'obligation des commentaires", value=True)
    opt_compact = st.checkbox("Garder le code compact (sans espaces excessifs)", value=True)
    opt_unsafe = st.checkbox("Activer les corrections forcées (modernisation)", value=True)

    #st.divider()
    btn_analyze = st.button("🚀 Analyser & Corriger", use_container_width=True)

# --- CENTRAL AREA ---
st.title("🐍 Coach Code Python")
130
+
131
if btn_analyze and uploaded_files:
    # All work happens in a throwaway directory so nothing persists on disk.
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_workspace = Path(temp_dir)
        work_paths, code_before, code_after = [], "", ""

        try:
            # Materialise every upload inside the temp workspace so Ruff can
            # operate on real files.
            for uploaded_file in uploaded_files:
                file_path = temp_workspace / uploaded_file.name
                file_data = uploaded_file.getvalue().decode("utf-8", errors="ignore")
                if mode == "Fichier unique": code_before = file_data
                file_path.write_text(file_data, encoding="utf-8")
                work_paths.append(str(file_path))

            s_init, l_init = get_stats(work_paths)

            # Debug: confirm the files were written and are non-empty.
            st.info(f"Fichiers créés: {[str(p) for p in work_paths]}")
            for wp in work_paths:
                if Path(wp).exists():
                    content = Path(wp).read_text(encoding="utf-8")
                    st.info(f"Taille fichier {Path(wp).name}: {len(content)} caractères, {len(content.splitlines())} lignes")

            # Initial read-only analysis pass.
            errors = run_ruff(work_paths, fix=False, disable_docs=opt_docs, compact=opt_compact, unsafe=opt_unsafe)

            # Debug: how many diagnostics were found.
            st.info(f"Nombre d'erreurs détectées: {len(errors)}")
            if errors:
                st.info(f"Première erreur: {errors[0]}")

            # Second pass: apply the fixes in place.
            run_ruff(work_paths, fix=True, disable_docs=opt_docs, compact=opt_compact, unsafe=opt_unsafe)

            if opt_compact:
                # Ruff format may reintroduce blank lines; squeeze them again.
                for path in work_paths:
                    file_path = Path(path)
                    content = file_path.read_text(encoding="utf-8")
                    cleaned = remove_excessive_blank_lines(content)
                    file_path.write_text(cleaned, encoding="utf-8")

            s_after, l_after = get_stats(work_paths)

            if mode == "Fichier unique":
                code_after = Path(work_paths[0]).read_text(encoding="utf-8")

            nb_err = len(errors)
            # Crude quality score: 100 minus errors-per-line as a percentage,
            # clamped to [0, 100]; guards against division by zero.
            score = max(0, min(100, 100 - (nb_err / (l_init if l_init > 0 else 1)) * 100))

            m1, m2, m3, m4 = st.columns(4)
            m1.metric("Qualité du Code", f"{score:.1f}/100")
            m2.metric("Points corrigés", nb_err)
            m3.metric("Lignes modifiées", l_init - l_after)
            m4.metric("Gain de poids", f"{s_init - s_after} octets")

            # The side-by-side "Comparatif" tab only makes sense for one file.
            tabs = st.tabs(["📊 Statistiques", "📜 Rapport", "🔍 Comparatif"] if mode == "Fichier unique" else ["📊 Statistiques", "📜 Rapport"])

            with tabs[0]:
                if nb_err > 0:
                    df = pd.DataFrame(errors)
                    # NOTE(review): only the FIRST letter of the rule code is
                    # used, so multi-letter families ("UP", "ANN") fall into
                    # "Autre (U)"/"Autre (A)..." and the corresponding
                    # RUFF_CAT_MAP entries are unreachable — confirm intended.
                    df['Cat_Code'] = df['code'].str[0]
                    df['Catégorie'] = df['Cat_Code'].map(lambda x: RUFF_CAT_MAP.get(x, f"Autre ({x})"))

                    c1, c2 = st.columns(2)
                    with c1:
                        counts = df['code'].value_counts().reset_index().sort_values('count', ascending=True)
                        fig = px.bar(counts, x='count', y='code', orientation='h',
                                     title="Fréquence par code d'erreur",
                                     color='count', color_continuous_scale='Blues')
                        st.plotly_chart(fig, use_container_width=True)
                    with c2:
                        cat_counts = df['Catégorie'].value_counts().reset_index().sort_values('count', ascending=False)
                        fig2 = px.bar(cat_counts, x='count', y='Catégorie', orientation='h',
                                      title="Problèmes par famille",
                                      color='Catégorie', color_discrete_sequence=px.colors.qualitative.G10)
                        fig2.update_layout(showlegend=False, yaxis={'categoryorder':'total ascending'})
                        st.plotly_chart(fig2, use_container_width=True)
                else:
                    st.success("✨ Félicitations ! Ruff n'a trouvé aucune erreur.")

            with tabs[1]:
                if nb_err > 0:
                    # --- Shape the raw Ruff diagnostics into table rows ---
                    report_data = []
                    for err in errors:
                        filename = Path(err['filename']).name
                        line = err['location']['row']
                        col = err['location']['column']

                        # NOTE(review): `filename` is computed but never used;
                        # the literal "(unknown)" below looks like it was meant
                        # to be f"{filename} (L:..., C:...)" — confirm.
                        report_data.append({
                            "Code": err['code'],
                            "Message": err['message'],
                            "Localisation": f"(unknown) (L:{line}, C:{col})"
                        })

                    st.dataframe(pd.DataFrame(report_data), use_container_width=True)

            if mode == "Fichier unique":
                with tabs[2]:
                    col1, col2 = st.columns(2)
                    col1.subheader("Version Originale")
                    col1.code(code_before, language="python")
                    col2.subheader("Version Corrigée")
                    col2.code(code_after, language="python")

        except Exception as e:
            # Top-level guard: surface any unexpected failure in the UI.
            st.error(f"Erreur : {e}")

    # Defensive cleanup in case Ruff left a cache despite --no-cache.
    if Path(".ruff_cache").exists():
        shutil.rmtree(".ruff_cache")