Rakshitjan committed on
Commit
9c26c21
·
verified ·
1 Parent(s): 9486ac6

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +13 -13
main.py CHANGED
@@ -304,30 +304,30 @@ async def get_sorted_potential_scores(coaching_code: str = Query(..., descriptio
304
  file_paths = get_file_paths(coaching_code)
305
  if not file_paths:
306
  raise HTTPException(status_code=400, detail="Invalid coaching code")
307
-
308
  # Open Google Sheets using the URLs
309
  journal_file = client.open_by_url(file_paths['journal']).worksheet('Sheet1')
310
  panic_button_file = client.open_by_url(file_paths['panic_button']).worksheet('Sheet1')
311
  test_file = client.open_by_url(file_paths['test']).worksheet('Sheet1')
312
-
313
  # Convert the sheets into Pandas DataFrames
314
  journal_df = pd.DataFrame(journal_file.get_all_values())
315
  panic_button_df = pd.DataFrame(panic_button_file.get_all_values())
316
  test_df = pd.DataFrame(test_file.get_all_values())
317
-
318
  # Label the columns manually since there are no headers
319
  journal_df.columns = ['user_id', 'productivity_yes_no', 'productivity_rate']
320
  panic_button_df.columns = ['user_id', 'panic_button']
321
-
322
  # Initialize a list for the merged data
323
  merged_data = []
324
 
325
  # Group panic buttons by user_id and combine into a single comma-separated string
326
  panic_button_grouped = panic_button_df.groupby('user_id')['panic_button'].apply(lambda x: ','.join(x)).reset_index()
327
-
328
  # Merge journal and panic button data
329
  merged_journal_panic = pd.merge(journal_df, panic_button_grouped, on='user_id', how='outer')
330
-
331
  # Process the test data
332
  test_data = []
333
  for index, row in test_df.iterrows():
@@ -339,19 +339,19 @@ async def get_sorted_potential_scores(coaching_code: str = Query(..., descriptio
339
  if pd.notna(score):
340
  test_data.append({'user_id': user_id, 'test_chapter': chapter, 'test_score': score})
341
  i += 2
342
-
343
  # Convert the processed test data into a DataFrame
344
  test_df_processed = pd.DataFrame(test_data)
345
-
346
  # Merge the journal+panic button data with the test data
347
  merged_data = pd.merge(merged_journal_panic, test_df_processed, on='user_id', how='outer')
348
-
349
  # Drop rows where all data (except user_id and test_chapter) is missing
350
  merged_data_cleaned = merged_data.dropna(subset=['productivity_yes_no', 'productivity_rate', 'panic_button', 'test_chapter'], how='all')
351
-
352
  # Group the merged DataFrame by user_id
353
  df = pd.DataFrame(merged_data_cleaned)
354
-
355
  # Function to process panic button counts and test scores
356
  def process_group(group):
357
  # Panic button counts
@@ -374,12 +374,12 @@ async def get_sorted_potential_scores(coaching_code: str = Query(..., descriptio
374
 
375
  # Apply the group processing function
376
  merged_df = df.groupby('user_id').apply(process_group).reset_index()
377
-
378
  # Calculate potential scores and sort
379
  merged_df['potential_score'] = merged_df.apply(calculate_potential_score, axis=1)
380
  merged_df['potential_score'] = merged_df['potential_score'].round(2)
381
  sorted_df = merged_df[['user_id', 'potential_score']].sort_values(by='potential_score', ascending=False)
382
-
383
  result = sorted_df.to_dict(orient="records")
384
  return {"sorted_scores": result}
385
  except Exception as e:
 
304
  file_paths = get_file_paths(coaching_code)
305
  if not file_paths:
306
  raise HTTPException(status_code=400, detail="Invalid coaching code")
307
+ print("A");
308
  # Open Google Sheets using the URLs
309
  journal_file = client.open_by_url(file_paths['journal']).worksheet('Sheet1')
310
  panic_button_file = client.open_by_url(file_paths['panic_button']).worksheet('Sheet1')
311
  test_file = client.open_by_url(file_paths['test']).worksheet('Sheet1')
312
+ print("B");
313
  # Convert the sheets into Pandas DataFrames
314
  journal_df = pd.DataFrame(journal_file.get_all_values())
315
  panic_button_df = pd.DataFrame(panic_button_file.get_all_values())
316
  test_df = pd.DataFrame(test_file.get_all_values())
317
+ print("C");
318
  # Label the columns manually since there are no headers
319
  journal_df.columns = ['user_id', 'productivity_yes_no', 'productivity_rate']
320
  panic_button_df.columns = ['user_id', 'panic_button']
321
+ print("D")
322
  # Initialize a list for the merged data
323
  merged_data = []
324
 
325
  # Group panic buttons by user_id and combine into a single comma-separated string
326
  panic_button_grouped = panic_button_df.groupby('user_id')['panic_button'].apply(lambda x: ','.join(x)).reset_index()
327
+ print("E")
328
  # Merge journal and panic button data
329
  merged_journal_panic = pd.merge(journal_df, panic_button_grouped, on='user_id', how='outer')
330
+ print("F")
331
  # Process the test data
332
  test_data = []
333
  for index, row in test_df.iterrows():
 
339
  if pd.notna(score):
340
  test_data.append({'user_id': user_id, 'test_chapter': chapter, 'test_score': score})
341
  i += 2
342
+ print("G")
343
  # Convert the processed test data into a DataFrame
344
  test_df_processed = pd.DataFrame(test_data)
345
+ print("H")
346
  # Merge the journal+panic button data with the test data
347
  merged_data = pd.merge(merged_journal_panic, test_df_processed, on='user_id', how='outer')
348
+ print("I")
349
  # Drop rows where all data (except user_id and test_chapter) is missing
350
  merged_data_cleaned = merged_data.dropna(subset=['productivity_yes_no', 'productivity_rate', 'panic_button', 'test_chapter'], how='all')
351
+ print("J")
352
  # Group the merged DataFrame by user_id
353
  df = pd.DataFrame(merged_data_cleaned)
354
+ print("K")
355
  # Function to process panic button counts and test scores
356
  def process_group(group):
357
  # Panic button counts
 
374
 
375
  # Apply the group processing function
376
  merged_df = df.groupby('user_id').apply(process_group).reset_index()
377
+ print("L")
378
  # Calculate potential scores and sort
379
  merged_df['potential_score'] = merged_df.apply(calculate_potential_score, axis=1)
380
  merged_df['potential_score'] = merged_df['potential_score'].round(2)
381
  sorted_df = merged_df[['user_id', 'potential_score']].sort_values(by='potential_score', ascending=False)
382
+ print("M")
383
  result = sorted_df.to_dict(orient="records")
384
  return {"sorted_scores": result}
385
  except Exception as e: