# File size: 1,562 Bytes
# commit b70f413
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
# NOTE(review): Everything below is a disabled ad-hoc test harness for the reddit
# comment scraper (getPostComments). It is kept commented out for reference;
# if it is confirmed unused, delete it or move it into a proper test module.

# import asyncio
# import traceback
# from reddit.scraping import getPostComments
# from reddit.load_env import reddit_clients

# async def test(fileNames):
#     try:
#         await getPostComments(file_name=fileNames[0])
#         # Semaphore to limit concurrent tasks
#         # semaphore = asyncio.Semaphore(1)

#         # # Async function to call getPostComments with semaphore
#         # async def fetch_comments_with_limit(file_name, index):
#         #     async with semaphore:
#         #         await getPostComments(file_name=file_name, is_for_competitor_analysis=True, index=index%len(reddit_clients))

#         # # Chunk the fileNames list into batches of 3
#         # batches = [fileNames[i:i + 3] for i in range(0, len(fileNames), 3)]

#         # # Process each batch with a 5-second wait after completion
#         # for batch_index, batch in enumerate(batches):
#         #     print(f"Processing batch {batch_index + 1} with files: {batch}")
#         #     await asyncio.gather(
#         #         *[fetch_comments_with_limit(file_name=batch[i], index=i + 1) for i in range(len(batch))]
#         #     )

#         return {'details': 'Success'}
#     except Exception as e:
#         traceback.print_exc()
#         return {'details': str(e)}


# file_names = [
#     "posts_data_1735840629809373.csv",
#     "posts_data_1735840629809374.csv",
#     "posts_data_1735840629809375.csv",
#     "posts_data_1735842064975643.csv",
# ]

# async def main():
#     result = await test(file_names)
#     print(result)

# asyncio.run(main())