Silly98 committed on
Commit
e84e471
·
verified ·
1 Parent(s): 7b05dcb

Update text

Browse files
Files changed (1) hide show
  1. text +44 -242
text CHANGED
@@ -1,248 +1,50 @@
1
- import json
2
- from typing import Dict, List, Optional
3
  import sys
4
- from sqlalchemy import text
5
- from sqlalchemy.engine import Result
6
- from app.db import engine
7
- from app.config import settings
8
- from app.cleaning.prompts import CLEAN_PROMPT
9
- from app.cleaning.llm import LLM
10
-
11
-
12
- """
13
- IN-PLACE CLEANING PIPELINE (Original Logic, No Mirroring)
14
-
15
- - Reads from SAME TABLE
16
- - Writes back into SAME TABLE
17
- - Adds <col>_clean columns if missing
18
- - Cleans only up to clean_cap rows (default 20)
19
- - Preserves original data completely
20
- - No target schema, no cloned tables, no mirroring
21
- """
22
-
23
- MAX_ROWS_PER_TABLE = 20
24
-
25
-
26
- # ----------------------------------------------------
27
- # ADD <col>_clean columns into SAME TABLE
28
- # ----------------------------------------------------
29
def ensure_clean_columns(schema: str, table: str, culprit_columns: List[str]):
    """Add a TEXT "<col>_clean" companion column to the table for every
    culprit column, skipping columns that already exist (IF NOT EXISTS)."""
    if not culprit_columns:
        return

    with engine.begin() as cx:
        for culprit in culprit_columns:
            # Identifiers are interpolated (double-quoted) because DDL cannot
            # take bind parameters; schema/table come from the trusted YAML
            # config, not from end users.
            ddl = (
                f'ALTER TABLE "{schema}"."{table}" '
                f'ADD COLUMN IF NOT EXISTS "{culprit}_clean" TEXT'
            )
            cx.execute(text(ddl))
41
-
42
-
43
- # ----------------------------------------------------
44
- # Count rows
45
- # ----------------------------------------------------
46
def _count_source_rows(schema: str, table: str) -> int:
    """Return the row count of schema.table, or -1 when the query fails.

    Callers treat -1 as "unknown"; the broad except is a deliberate
    best-effort so a missing/unreadable table does not abort the run.
    """
    count_sql = f'SELECT COUNT(*) FROM "{schema}"."{table}"'
    try:
        with engine.begin() as cx:
            first = cx.execute(text(count_sql)).first()
            return int(first[0])
    except Exception:
        return -1
55
-
56
-
57
- # ----------------------------------------------------
58
- # Stream rows from SAME TABLE
59
- # ----------------------------------------------------
60
def stream_rows(schema: str, table: str, batch_size: int = 1000):
    """Yield the rows of schema.table as lists of dicts, batch_size at a time.

    Uses a server-side streaming cursor (``stream_results=True``) so large
    tables are never fully materialized in memory. The final batch may be
    shorter than ``batch_size``.

    Args:
        schema: schema of the source table.
        table: table to read (every column, every row).
        batch_size: maximum rows per yielded batch.

    Yields:
        list[dict]: mutable row dicts keyed by column name.
    """
    # The old dead `limit = ""` placeholder is gone: this always reads
    # the whole table; row capping is handled by the caller.
    sql = f'SELECT * FROM "{schema}"."{table}"'

    with engine.begin() as cx:
        result: Result = (
            cx.execution_options(stream_results=True)
            .execute(text(sql))
        )

        batch: List[Dict] = []
        for row in result.mappings():
            batch.append(dict(row))
            if len(batch) >= batch_size:
                yield batch
                batch = []

        if batch:
            yield batch
83
-
84
-
85
- # ----------------------------------------------------
86
- # Write back INTO SAME TABLE
87
- # ----------------------------------------------------
88
def write_batch(schema: str, table: str, rows: List[Dict], pk_col: str):
    """UPDATE each row dict back into schema.table, matched on pk_col.

    dict/list values are JSON-encoded first so the driver can bind them as
    text parameters. No-op when ``rows`` is empty.

    Args:
        schema: schema of the target table.
        table: table to update in place.
        rows: row dicts; every key except ``pk_col`` becomes a SET clause.
        pk_col: primary-key column used in the WHERE clause.
    """
    if not rows:
        return

    # Removed the debug `print(rows)` — it dumped entire row payloads
    # (potentially large/sensitive) to stdout on every batch.
    for r in rows:
        # JSON-encode nested structures so SQL parameters accept them.
        for k, v in list(r.items()):
            if isinstance(v, (dict, list)):
                r[k] = json.dumps(v)

    with engine.begin() as cx:
        for r in rows:
            set_list = ", ".join(
                f'"{c}" = :{c}' for c in r.keys() if c != pk_col
            )

            sql = (
                f'UPDATE "{schema}"."{table}" '
                f'SET {set_list} '
                f'WHERE "{pk_col}" = :{pk_col}'
            )

            cx.execute(text(sql), r)
113
-
114
-
115
- # ----------------------------------------------------
116
- # LLM clean
117
- # ----------------------------------------------------
118
def clean_value(llm: LLM, value: str) -> str:
    """Run *value* through the LLM cleaner.

    Returns the input unchanged when the LLM is disabled, or when the
    model produces only whitespace (never lose the original text).
    """
    if not llm.enabled():
        return value

    cleaned = llm.clean_text(
        value,
        system=CLEAN_PROMPT,
        instruction="Clean the following product text.",
    )
    return cleaned.strip() or value
128
-
129
-
130
- # ----------------------------------------------------
131
- # MAIN FUNCTION — identical logic to old pipeline
132
- # ----------------------------------------------------
133
def run_clean_table(
    schema: str,
    table: str,
    culprit_columns: List[str],
    batch_size: int = 1000,
    clean_cap: Optional[int] = None,
    primary_key: Optional[str] = None,
    clean_all: bool = False,
):
    """Clean the culprit columns of schema.table IN PLACE.

    For each row (up to the cap), every culprit column's value is passed
    through the LLM and the result stored in the matching "<col>_clean"
    column of the SAME table — original data is never overwritten. Rows
    whose clean columns are all already populated are skipped.

    Args:
        schema: schema of the table to clean.
        table: table cleaned in place (no mirror/clone).
        culprit_columns: columns to clean; each gets a "<col>_clean" twin.
        batch_size: rows per streamed read / write batch.
        clean_cap: max rows to clean (defaults to MAX_ROWS_PER_TABLE).
        primary_key: required; used to UPDATE rows back in place.
        clean_all: when True, ignore the cap and clean every row.

    Raises:
        ValueError: if primary_key is not given.
    """
    if not primary_key:
        raise ValueError("primary_key required")

    llm = LLM()

    # Ensure <col>_clean columns exist, then size the job.
    ensure_clean_columns(schema, table, culprit_columns)
    total_rows = _count_source_rows(schema, table)  # -1 when COUNT failed

    cap = None if clean_all else (clean_cap or MAX_ROWS_PER_TABLE)

    print(f"\n→ In-place cleaning {schema}.{table} (rows={total_rows}, cap={cap})")
    sys.stdout.flush()

    # Collect PKs of rows whose clean columns are ALL already populated.
    skip_pks = set()
    if culprit_columns:
        cond = " AND ".join(f'"{c}_clean" IS NOT NULL' for c in culprit_columns)
        sql = (
            f'SELECT "{primary_key}" FROM "{schema}"."{table}" '
            f'WHERE {cond}'
        )
        try:
            with engine.begin() as cx:
                rows = cx.execute(text(sql)).fetchall()
            skip_pks = {r[0] for r in rows}
        except Exception:
            # Was a bare `except:` — narrowed so Ctrl-C / SystemExit still
            # propagate. Probe failure just means nothing is skipped.
            skip_pks = set()

    rows_cleaned = 0
    rows_processed = 0
    skipped_existing = 0

    # STREAM + CLEAN + UPDATE the same table, batch by batch.
    for batch in stream_rows(schema, table, batch_size=batch_size):
        out_rows = []

        for r in batch:
            pk = r.get(primary_key)

            if pk in skip_pks:
                skipped_existing += 1
                continue

            will_clean = (cap is None) or (rows_cleaned < cap)

            for col in culprit_columns:
                original = r.get(col)
                original_s = None if original is None else str(original)

                if will_clean:
                    # Empty/NULL originals get NULL cleans (nothing to clean).
                    r[f"{col}_clean"] = (
                        clean_value(llm, original_s) if original_s else None
                    )
                else:
                    # Past the cap: leave the clean column empty.
                    r[f"{col}_clean"] = None

            if will_clean:
                rows_cleaned += 1

            out_rows.append(r)

        # Write the batch back into the same table.
        write_batch(schema, table, out_rows, pk_col=primary_key)
        rows_processed += len(out_rows)

        # Progress line. Guard target <= 0: an empty cap with a failed
        # COUNT (-1) previously produced a bogus/negative percentage.
        target = cap or total_rows
        pct = int(min(rows_cleaned, target) * 100 / target) if target > 0 else 0

        print(
            f" {table}: cleaned {rows_cleaned}/{target} ({pct}%) "
            f"| updated rows: {rows_processed} | skipped: {skipped_existing}"
        )
        sys.stdout.flush()

    print(
        f"✓ DONE: {schema}.{table} in-place cleaned "
        f"(cleaned={rows_cleaned}, skipped={skipped_existing})\n"
    )
222
 
223
-
224
- # ----------------------------------------------------
225
- # YAML Loader
226
- # ----------------------------------------------------
227
def run_cleaning_from_yaml(
    yaml_path: str,
    batch_size: int = 1000,
    clean_cap: Optional[int] = None,
    clean_all: bool = False,
):
    """Load a YAML config and run the in-place cleaner on each listed table.

    Expected YAML shape::

        tables:
          - schema: public
            name: test_products
            primary_key: id
            culprit_columns: [title, description]
    """
    import yaml  # local import keeps pyyaml optional for non-YAML callers

    with open(yaml_path, "r") as fh:
        cfg = yaml.safe_load(fh)

    for entry in cfg.get("tables", []):
        run_clean_table(
            schema=entry["schema"],
            table=entry["name"],
            culprit_columns=entry["culprit_columns"],
            batch_size=batch_size,
            primary_key=entry["primary_key"],
            clean_cap=clean_cap,
            clean_all=clean_all,
        )
248
-
 
1
+ import argparse
 
2
  import sys
3
+ sys.path.append(r"C:\Users\FCI\Desktop\engineero_ai\pg-clean-search")
4
+ from app.cleaning.pipeline import run_cleaning_from_yaml
5
+
6
def main():
    """Define and run the command-line interface for the cleaning pipeline."""
    p = argparse.ArgumentParser(
        description="Run an in-place data cleaning pipeline on specified PostgreSQL tables.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    p.add_argument(
        "--cfg",
        # Raw string: "scripts\clean_test.yaml" contains the invalid escape
        # sequence \c, which is a SyntaxWarning on Python 3.12+. The literal
        # bytes are unchanged.
        default=r"scripts\clean_test.yaml",
        help="YAML file listing source schema, table names, primary keys, and culprit columns."
    )
    p.add_argument(
        "--batch",
        type=int,
        default=1000,
        help=(
            "If --clean-all=False, this is the maximum number of rows read (LIMIT) and processed (1 batch).\n"
            "If --clean-all=True, this is the chunk size for reading/writing multiple batches."
        )
    )
    p.add_argument(
        "--clean-all",
        action="store_true",
        help="Process and clean every row in the table (overrides the LIMIT set by --batch)."
    )
    p.add_argument(
        "--clean_cap",
        type=int,
        # No default here: when omitted, the pipeline applies its own
        # per-table cap (MAX_ROWS_PER_TABLE).
        help="Maximum number of rows to clean per table."
    )

    args = p.parse_args()

    run_cleaning_from_yaml(
        args.cfg,
        batch_size=args.batch,
        clean_all=args.clean_all,
        clean_cap=args.clean_cap,  # must be forwarded or the CLI cap is ignored
    )

if __name__ == "__main__":
    main()
50
+