# NOTE(review): this entire file is commented-out dead code — a one-off local
# harness that exercised reddit.scraping.getPostComments. At some point the
# file was flattened onto a single physical line (newlines lost); it is
# restored below to one comment per line, with indentation reconstructed from
# the Python syntax, purely for readability. Nothing here executes.
# Recommend deleting the file outright (version control keeps the history) or,
# if the harness is still useful, reviving it behind an
# `if __name__ == "__main__":` guard so importing it has no side effects.

# import asyncio
# import traceback
# from reddit.scraping import getPostComments
# from reddit.load_env import reddit_clients

# async def test(fileNames):
#     try:
#         # Only the first file is fetched; the batching logic below was
#         # disabled (doubly commented) before the file itself was retired.
#         await getPostComments(file_name=fileNames[0])

#         # Semaphore to limit concurrent tasks
#         # semaphore = asyncio.Semaphore(1)

#         # # Async function to call getPostComments with semaphore
#         # async def fetch_comments_with_limit(file_name, index):
#         #     async with semaphore:
#         #         await getPostComments(file_name=file_name, is_for_competitor_analysis=True, index=index%len(reddit_clients))

#         # # Chunk the fileNames list into batches of 3
#         # batches = [fileNames[i:i + 3] for i in range(0, len(fileNames), 3)]

#         # # Process each batch with a 5-second wait after completion
#         # for batch_index, batch in enumerate(batches):
#         #     print(f"Processing batch {batch_index + 1} with files: {batch}")
#         #     await asyncio.gather(
#         #         *[fetch_comments_with_limit(file_name=batch[i], index=i + 1) for i in range(len(batch))]
#         #     )

#         return {'details': 'Success'}
#     except Exception as e:
#         traceback.print_exc()
#         return {'details': str(e)}

# file_names = [
#     "posts_data_1735840629809373.csv",
#     "posts_data_1735840629809374.csv",
#     "posts_data_1735840629809375.csv",
#     "posts_data_1735842064975643.csv",
# ]

# async def main():
#     result = await test(file_names)
#     print(result)

# asyncio.run(main())