testtest123 committed
Commit e217ee7 · 1 Parent(s): d0d9deb

Fix: Add real-time log updates with yield statements
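Why this works: Gradio treats an event handler that yields instead of returning as a generator and pushes each yielded value to the bound output components as it arrives, so the log textbox refreshes while the job is still running; a return delivers only one final value after the whole function completes. A minimal sketch of the pattern, with illustrative names rather than the ones in this Space's app.py:

    import time
    import gradio as gr

    def long_job():
        """Generator handler: every yield re-renders the bound outputs."""
        lines = []
        for step in range(1, 4):
            time.sleep(1)  # stand-in for real work (auth, load, process, upload)
            lines.append(f"[OK] Finished step {step}")
            yield "\n".join(lines)  # the Textbox updates here, mid-run
        yield "\n".join(lines + ["Done!"])

    with gr.Blocks() as demo:
        log_box = gr.Textbox(label="Logs", lines=10)
        gr.Button("Run").click(fn=long_job, outputs=log_box)

    demo.launch()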

Files changed (1): app.py (+42 -22)
app.py CHANGED
@@ -23,21 +23,35 @@ def build_reagent_index(progress=gr.Progress()):
     """Main function to build the reagent index."""

     if not HF_TOKEN:
-        return "❌ Error: HF_TOKEN not found! Please add it to Space secrets.", "", 0
+        yield "❌ Error: HF_TOKEN not found! Please add it to Space secrets.", [], 0

     try:
+        log_messages = []
+
         progress(0, desc="Authenticating...")
+        log_messages.append("[*] Authenticating with Hugging Face...")
+        yield "\n".join(log_messages), [], 0.0

         # 1. Authentication
         login(token=HF_TOKEN)
         progress(0.05, desc="Authenticated successfully")
+        log_messages.append("[OK] Authenticated successfully\n")
+        yield "\n".join(log_messages), [], 0.05

         # 2. Load dataset
         progress(0.1, desc="Loading dataset...")
+        log_messages.append("[*] Loading dataset in streaming mode...")
+        yield "\n".join(log_messages), [], 0.1
+
         ds = load_dataset(ORIGINAL_DATASET, split='train', streaming=True)
+        log_messages.append("[OK] Dataset loaded\n")
+        yield "\n".join(log_messages), [], 0.1

         # 3. Process reactions
         progress(0.15, desc="Processing reactions...")
+        log_messages.append("[*] Processing 2.7M reactions...")
+        log_messages.append("This will take 10-20 minutes, please be patient...\n")
+        yield "\n".join(log_messages), [], 0.15

         smiles_to_reactions = defaultdict(list)
         name_to_reactions = defaultdict(list)
@@ -46,11 +60,15 @@ def build_reagent_index(progress=gr.Progress()):
         try:
             import pubchempy as pcp
             PUBCHEM_AVAILABLE = True
+            log_messages.append("[OK] PubChem available for chemical name lookup\n")
         except ImportError:
             PUBCHEM_AVAILABLE = False
+            log_messages.append("[⚠] PubChem not available - using SMILES only\n")
+
+        yield "\n".join(log_messages), [], 0.15

         processed = 0
-        logs = ["[*] Starting reagent index creation...\n"]
+        last_logged = 0

         for reaction in ds:
             processed += 1
@@ -58,11 +76,13 @@ def build_reagent_index(progress=gr.Progress()):
             if SAMPLE_SIZE and processed > SAMPLE_SIZE:
                 break

-            # Update progress every 500 reactions
-            if processed % 500 == 0:
+            # Update progress every 10,000 reactions (less frequent for better performance)
+            if processed - last_logged >= 10000:
                 pct = min(0.6, (processed / 2700000) * 0.5 + 0.15)
                 progress(pct, desc=f"Processing: {processed:,} reactions...")
-                logs.append(f"[{processed:,}] Processed {processed:,} reactions...")
+                log_messages.append(f"[{processed:,}] Processed {processed:,} reactions...")
+                yield "\n".join(log_messages), [], pct
+                last_logged = processed

             reaction_id = reaction.get('reaction_id', 'unknown')

@@ -104,8 +124,10 @@ def build_reagent_index(progress=gr.Progress()):
             except:
                 pass

-        logs.append(f"\n[OK] Processed {processed:,} reactions\n")
+        log_messages.append(f"\n[OK] Processed {processed:,} reactions\n")
         progress(0.65, desc="Building index...")
+        log_messages.append("[*] Building index entries...")
+        yield "\n".join(log_messages), [], 0.65

         # 4. Build index
         index_entries = []
@@ -132,29 +154,27 @@ def build_reagent_index(progress=gr.Progress()):
                     'common_name': name
                 })

-        logs.append(f"[OK] Created {len(index_entries):,} index entries")
-        logs.append(f" - SMILES: {len(smiles_to_reactions):,}")
-        logs.append(f" - Names: {len(name_to_reactions):,}\n")
-
+        log_messages.append(f"[OK] Created {len(index_entries):,} index entries")
+        log_messages.append(f" - SMILES: {len(smiles_to_reactions):,}")
+        log_messages.append(f" - Names: {len(name_to_reactions):,}\n")
         progress(0.8, desc="Uploading to Hugging Face...")
+        log_messages.append("[*] Uploading to Hugging Face...")
+        yield "\n".join(log_messages), [], 0.8

         # 5. Upload to HF
         index_dataset = Dataset.from_list(index_entries)
         index_dataset.push_to_hub(HF_DATASET_NAME, private=False, token=HF_TOKEN)

-        logs.append("[OK] Upload complete!\n")
-        logs.append("="*70)
-        logs.append("SUCCESS! Reagent index created and uploaded!")
-        logs.append("="*70)
-        logs.append(f"Dataset URL: https://huggingface.co/datasets/{HF_DATASET_NAME}")
-        logs.append(f"Total entries: {len(index_entries):,}")
-        logs.append(f"Total reactions: {processed:,}")
+        log_messages.append("[OK] Upload complete!\n")
+        log_messages.append("="*70)
+        log_messages.append("SUCCESS! Reagent index created and uploaded!")
+        log_messages.append("="*70)
+        log_messages.append(f"Dataset URL: https://huggingface.co/datasets/{HF_DATASET_NAME}")
+        log_messages.append(f"Total entries: {len(index_entries):,}")
+        log_messages.append(f"Total reactions: {processed:,}")

         progress(1.0, desc="Complete!")

-        # Format output
-        log_text = "\n".join(logs)
-
         # Create sample table
         sample_data = []
         for i, entry in enumerate(index_entries[:10]):
@@ -164,13 +184,13 @@ def build_reagent_index(progress=gr.Progress()):
                 entry['count']
             ])

-        return log_text, sample_data, 1.0
+        yield "\n".join(log_messages), sample_data, 1.0

     except Exception as e:
         error_msg = f"❌ Error: {str(e)}\n\n{type(e).__name__}"
         import traceback
         error_msg += f"\n\n{traceback.format_exc()}"
-        return error_msg, [], 0.0
+        yield error_msg, [], 0.0


 # Create Gradio interface
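Two notes on the generator version. First, the interface wiring further down in app.py (outside these hunks) only needs to register the generator as the click handler with three outputs matching each yielded (logs, table, progress) tuple; a hedged sketch with hypothetical component names:

    # Hypothetical wiring; the Space's real component names are not shown in this diff.
    build_btn.click(
        fn=build_reagent_index,
        outputs=[log_output, sample_table, progress_number],  # one slot per yielded value
    )

Second, the HF_TOKEN guard now yields its error message but does not return, so execution falls through into the try block; a bare return right after that yield would stop the generator cleanly on the error path.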