Hydra-Bolt committed on
Commit
b3b712b
·
1 Parent(s): 88259ad
Files changed (3) hide show
  1. requirements.txt +19 -34
  2. services.py +65 -130
  3. tools/fetch.py +2 -19
requirements.txt CHANGED
@@ -1,81 +1,66 @@
 
 
 
1
  annotated-types==0.7.0
2
  anyio==4.10.0
3
- asttokens==3.0.0
4
  beautifulsoup4==4.13.4
5
  cachetools==5.5.2
6
  certifi==2025.8.3
7
  charset-normalizer==3.4.3
8
  click==8.2.1
9
- comm==0.2.3
10
- debugpy==1.8.16
11
- decorator==5.2.1
12
- executing==2.2.0
13
  fastapi==0.116.1
14
  filetype==1.2.0
 
15
  google-ai-generativelanguage==0.6.18
16
  google-api-core==2.25.1
 
17
  google-auth==2.40.3
 
18
  googleapis-common-protos==1.70.0
19
  googlesearch-python==1.3.0
20
- greenlet==3.1.1
21
  grpcio==1.74.0
22
  grpcio-status==1.74.0
23
  h11==0.16.0
24
  httpcore==1.0.9
 
25
  httpx==0.28.1
26
  idna==3.10
27
- ipykernel==6.30.1
28
- ipython==9.4.0
29
- ipython_pygments_lexers==1.1.1
30
- jedi==0.19.2
31
  jsonpatch==1.33
32
  jsonpointer==3.0.0
33
- jupyter_client==8.6.3
34
- jupyter_core==5.8.1
35
  langchain==0.3.27
36
  langchain-core==0.3.74
37
  langchain-google-genai==2.1.9
38
  langchain-text-splitters==0.3.9
39
- langsmith==0.4.13
40
- matplotlib-inline==0.1.7
41
- nest-asyncio==1.6.0
42
- orjson==3.11.1
43
  packaging==25.0
44
- parso==0.8.4
45
- pexpect==4.9.0
46
- platformdirs==4.3.8
47
- playwright==1.49.1
48
- prompt_toolkit==3.0.51
49
  proto-plus==1.26.1
50
  protobuf==6.31.1
51
- psutil==7.0.0
52
- ptyprocess==0.7.0
53
- pure_eval==0.2.3
54
  pyasn1==0.6.1
55
  pyasn1_modules==0.4.2
56
  pydantic==2.11.7
57
  pydantic_core==2.33.2
58
- pyee==12.0.0
59
- Pygments==2.19.2
60
- python-dateutil==2.9.0.post0
61
  python-dotenv==1.1.1
62
  PyYAML==6.0.2
63
- pyzmq==27.0.1
64
  requests==2.32.4
65
  requests-toolbelt==1.0.0
66
  rsa==4.9.1
67
- six==1.17.0
68
  sniffio==1.3.1
69
  soupsieve==2.7
70
- SQLAlchemy==2.0.42
71
- stack-data==0.6.3
72
  starlette==0.47.2
73
  tenacity==9.1.2
74
- tornado==6.5.2
75
- traitlets==5.14.3
76
  typing-inspection==0.4.1
77
  typing_extensions==4.14.1
 
78
  urllib3==2.5.0
79
  uvicorn==0.35.0
80
- wcwidth==0.2.13
81
  zstandard==0.23.0
 
1
+ aiohappyeyeballs==2.6.1
2
+ aiohttp==3.12.15
3
+ aiosignal==1.4.0
4
  annotated-types==0.7.0
5
  anyio==4.10.0
6
+ attrs==25.3.0
7
  beautifulsoup4==4.13.4
8
  cachetools==5.5.2
9
  certifi==2025.8.3
10
  charset-normalizer==3.4.3
11
  click==8.2.1
 
 
 
 
12
  fastapi==0.116.1
13
  filetype==1.2.0
14
+ frozenlist==1.7.0
15
  google-ai-generativelanguage==0.6.18
16
  google-api-core==2.25.1
17
+ google-api-python-client==2.179.0
18
  google-auth==2.40.3
19
+ google-auth-httplib2==0.2.0
20
  googleapis-common-protos==1.70.0
21
  googlesearch-python==1.3.0
22
+ greenlet==3.2.4
23
  grpcio==1.74.0
24
  grpcio-status==1.74.0
25
  h11==0.16.0
26
  httpcore==1.0.9
27
+ httplib2==0.22.0
28
  httpx==0.28.1
29
  idna==3.10
 
 
 
 
30
  jsonpatch==1.33
31
  jsonpointer==3.0.0
 
 
32
  langchain==0.3.27
33
  langchain-core==0.3.74
34
  langchain-google-genai==2.1.9
35
  langchain-text-splitters==0.3.9
36
+ langsmith==0.4.14
37
+ multidict==6.6.4
38
+ orjson==3.11.2
 
39
  packaging==25.0
40
+ playwright==1.54.0
41
+ propcache==0.3.2
 
 
 
42
  proto-plus==1.26.1
43
  protobuf==6.31.1
 
 
 
44
  pyasn1==0.6.1
45
  pyasn1_modules==0.4.2
46
  pydantic==2.11.7
47
  pydantic_core==2.33.2
48
+ pyee==13.0.0
49
+ pyparsing==3.2.3
 
50
  python-dotenv==1.1.1
51
  PyYAML==6.0.2
 
52
  requests==2.32.4
53
  requests-toolbelt==1.0.0
54
  rsa==4.9.1
 
55
  sniffio==1.3.1
56
  soupsieve==2.7
57
+ SQLAlchemy==2.0.43
 
58
  starlette==0.47.2
59
  tenacity==9.1.2
 
 
60
  typing-inspection==0.4.1
61
  typing_extensions==4.14.1
62
+ uritemplate==4.2.0
63
  urllib3==2.5.0
64
  uvicorn==0.35.0
65
+ yarl==1.20.1
66
  zstandard==0.23.0
services.py CHANGED
@@ -11,7 +11,69 @@ from models import NarratorExtractionResponse, NarratorAnalysisResponse
11
  from tools.scrape_shamela import ShamelaNarratorExtractor
12
 
13
  load_dotenv()
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  class LLMService:
17
  """Service class for LLM operations."""
@@ -39,22 +101,7 @@ class LLMService:
39
 
40
  # Create prompt template
41
  prompt_template = PromptTemplate(
42
- template="""
43
- You are an expert in Islamic hadith sciences and Arabic language. Your task is to analyze the given hadith text and extract the chain of narrators (sanad).
44
-
45
- Instructions:
46
- 1. Identify the complete chain of narration (sanad) in the hadith text
47
- 2. Extract individual narrator names in Arabic
48
- 3. Preserve the original Arabic names exactly as they appear
49
- 4. Focus on the chain that connects back to the Prophet Muhammad (ﷺ) or the source
50
-
51
- {format_instructions}
52
-
53
- Hadith Text:
54
- {hadith_text}
55
-
56
- Please provide a structured analysis of the narrators.
57
- """,
58
  input_variables=["hadith_text"],
59
  partial_variables={"format_instructions": parser.get_format_instructions()},
60
  )
@@ -74,137 +121,42 @@ Please provide a structured analysis of the narrators.
74
  success=False,
75
  message=f"Error extracting narrators: {str(e)}"
76
  )
77
-
78
  async def analyze_narrator(self, narrator_name: str) -> NarratorAnalysisResponse:
79
  """Enhanced narrator analyzer agent that uses Shamela scraper and LLM reasoning."""
80
  try:
81
- print(f"🔍 Starting analysis for narrator: '{narrator_name}'")
82
- print(f"📝 Step 1: Initiating Shamela data extraction...")
83
-
84
  # Step 1: Scrape data from Shamela
85
  try:
86
  shamela_data = await ShamelaNarratorExtractor.extract_narrator_by_name(narrator_name)
87
- print(f"✅ Step 1 completed: Shamela data extraction successful")
88
- print(f"📊 Shamela data keys: {list(shamela_data.keys()) if shamela_data else 'None'}")
89
-
90
- if shamela_data and not shamela_data.get("error"):
91
- metadata = shamela_data.get("extraction_metadata", {})
92
- print(f"📈 Shamela extraction stats:")
93
- print(f" • Total scholars: {metadata.get('total_scholars', 0)}")
94
- print(f" • Total comments: {metadata.get('total_comments', 0)}")
95
- print(f" • Biographical fields: {metadata.get('biographical_fields', 0)}")
96
- print(f" • Has critique section: {metadata.get('has_critique_section', False)}")
97
- else:
98
- print(f"⚠️ Shamela data extraction returned error or empty data")
99
- if shamela_data and shamela_data.get("error"):
100
- print(f" Error details: {shamela_data['error']}")
101
-
102
  except Exception as shamela_error:
103
- print(f"❌ Step 1 failed: Shamela extraction error: {str(shamela_error)}")
104
  shamela_data = {"error": f"Extraction failed: {str(shamela_error)}"}
105
 
106
- print(f"📝 Step 2: Formatting Shamela data for LLM...")
107
  # Step 2: Prepare context for LLM analysis
108
  try:
109
  shamela_context = self._format_shamela_data(shamela_data)
110
- print(f"✅ Step 2 completed: Shamela context formatted")
111
- print(f"📏 Context length: {len(shamela_context)} characters")
112
- print(f"📄 Context preview (first 200 chars): {shamela_context[:200]}...")
113
  except Exception as format_error:
114
- print(f"❌ Step 2 failed: Context formatting error: {str(format_error)}")
115
  shamela_context = f"❌ Failed to format Shamela data: {str(format_error)}"
116
 
117
- print(f"📝 Step 3: Creating LLM parser and prompt template...")
118
  # Step 3: Create enhanced prompt with Shamela data
119
  try:
120
  parser = PydanticOutputParser(pydantic_object=NarratorAnalysisResponse)
121
- print(f"✅ Step 3a completed: Pydantic parser created successfully")
122
- print(f"📋 Parser format instructions length: {len(parser.get_format_instructions())} characters")
123
-
124
  prompt_template = PromptTemplate(
125
- template="""
126
- You are an expert Islamic scholar specializing in hadith sciences and narrator criticism (Ilm al-Rijal).
127
- You have been provided with data from Shamela.ws about this narrator, along with your own knowledge.
128
-
129
- Narrator: {narrator_name}
130
-
131
- === SHAMELA DATA ===
132
- {shamela_context}
133
-
134
- === YOUR TASK ===
135
- Analyze this narrator comprehensively by:
136
-
137
- 1. **Evaluating the Shamela data**: Assess the quality and reliability of scholarly opinions found
138
- 2. **Cross-referencing with your knowledge**: Compare with your internal knowledge of hadith literature
139
- 3. **Synthesizing scholarly consensus**: Analyze what different scholars have said
140
- 4. **Identifying patterns**: Look for consistent praise or criticism across sources
141
- 5. **Assigning reliability grade**: Based on the weight of evidence from both sources
142
-
143
- **Analysis Framework:**
144
- - Prioritize classical hadith scholars (Ibn Hajar, Dhahabi, Ibn Hibban, etc.)
145
- - Consider the consensus (إجماع) among scholars
146
- - Weigh criticism vs praise appropriately
147
- - Account for historical context and scholarly methodology
148
-
149
- **Reliability Grades:**
150
- - Thiqah (ثقة): Trustworthy - strong consensus of reliability
151
- - Saduq (صدوق): Truthful - generally reliable with minor reservations
152
- - Da'if (ضعيف): Weak - significant concerns about reliability
153
- - Matruk (متروك): Abandoned - severe weakness, narrations rejected
154
- - Majhul (مجهول): Unknown - insufficient reliable information
155
-
156
- **Instructions:**
157
- 1. If Shamela data contains rich scholarly opinions, prioritize that analysis
158
- 2. If Shamela data is limited, rely more on your knowledge but state the limitations
159
- 3. Always explain your reasoning process clearly
160
- 4. Be honest about confidence levels and data limitations
161
- 5. Provide practical recommendations for hadith scholars
162
-
163
- {format_instructions}
164
-
165
- Provide a comprehensive analysis combining both Shamela data and your scholarly knowledge.
166
- """,
167
  input_variables=["narrator_name", "shamela_context"],
168
  partial_variables={"format_instructions": parser.get_format_instructions()},
169
  )
170
- print(f"✅ Step 3b completed: Prompt template created successfully")
171
- print(f"📏 Prompt template length: {len(prompt_template.template)} characters")
172
-
173
  except Exception as prompt_error:
174
- print(f"❌ Step 3 failed: Prompt creation error: {str(prompt_error)}")
175
  raise prompt_error
176
 
177
- print(f"📝 Step 4: Creating LLM chain and preparing for invocation...")
178
  # Step 4: Invoke the enhanced analysis
179
  try:
180
- print(f"🤖 Creating chain: prompt_template | llm | parser")
181
  chain = prompt_template | self.llm | parser
182
- print(f"✅ Step 4a completed: Chain created successfully")
183
-
184
- print(f"📤 Preparing chain invocation with parameters:")
185
- print(f" • narrator_name: '{narrator_name}'")
186
- print(f" • shamela_context length: {len(shamela_context)} chars")
187
-
188
- print(f"🚀 Invoking LLM chain...")
189
  result = await chain.ainvoke({
190
  "narrator_name": narrator_name,
191
  "shamela_context": shamela_context
192
  })
193
- print(f"✅ Step 4b completed: LLM analysis successful")
194
- print(f"📊 LLM Result type: {type(result)}")
195
- print(f"📋 Result attributes: {[attr for attr in dir(result) if not attr.startswith('_')]}")
196
-
197
- if hasattr(result, 'reliability_grade'):
198
- print(f"🎯 Reliability grade assigned: {result.reliability_grade}")
199
- if hasattr(result, 'confidence_level'):
200
- print(f"📈 Confidence level: {result.confidence_level}")
201
-
202
  except Exception as chain_error:
203
- print(f"❌ Step 4 failed: LLM chain invocation error: {str(chain_error)}")
204
- print(f"🔍 Error type: {type(chain_error)}")
205
  raise chain_error
206
 
207
- print(f"📝 Step 5: Enhancing response with metadata...")
208
  # Step 5: Enhance the response with metadata
209
  try:
210
  total_scholars = 0
@@ -214,28 +166,11 @@ Provide a comprehensive analysis combining both Shamela data and your scholarly
214
  total_scholars = metadata.get('total_scholars', 0)
215
  result.message = f"Analysis completed using Shamela data ({total_scholars} scholars) + LLM knowledge"
216
  result.success = True
217
- print(f"✅ Step 5 completed: Response enhanced with metadata")
218
- print(f"📝 Final message: {result.message}")
219
- print(f"🎉 Analysis completed successfully for narrator: '{narrator_name}'")
220
  return result
221
-
222
  except Exception as metadata_error:
223
- print(f"❌ Step 5 failed: Metadata enhancement error: {str(metadata_error)}")
224
- print(f"⚠️ Returning result without metadata enhancement")
225
  return result
226
 
227
  except Exception as e:
228
- print(f"💥 CRITICAL ERROR in analyze_narrator for '{narrator_name}':")
229
- print(f" Error type: {type(e).__name__}")
230
- print(f" Error message: {str(e)}")
231
- print(f" Error args: {e.args}")
232
-
233
- # Try to get more detailed traceback info
234
- import traceback
235
- print(f"📍 Full traceback:")
236
- traceback.print_exc()
237
-
238
- print(f"🔄 Returning error response...")
239
  return NarratorAnalysisResponse(
240
  narrator_name=narrator_name,
241
  reliability_grade="Majhul",
@@ -248,7 +183,7 @@ Provide a comprehensive analysis combining both Shamela data and your scholarly
248
  success=False,
249
  message=f"Error analyzing narrator: {str(e)}"
250
  )
251
-
252
  async def analyze_narrator_chain(self, narrator_names: list[str]) -> Dict[str, NarratorAnalysisResponse]:
253
  """Analyze a complete chain of narrators using the enhanced agent approach."""
254
  results = {}
 
11
  from tools.scrape_shamela import ShamelaNarratorExtractor
12
 
13
  load_dotenv()
14
+ EXTRACT_PROMPT = """
15
+ You are an expert in Islamic hadith sciences and Arabic language. Your task is to analyze the given hadith text and extract the chain of narrators (sanad).
16
+
17
+ Instructions:
18
+ 1. Identify the complete chain of narration (sanad) in the hadith text
19
+ 2. Extract individual narrator names in Arabic
20
+ 3. Preserve the original Arabic names exactly as they appear
21
+ 4. Focus on the chain that connects back to the Prophet Muhammad (ﷺ) or the source
22
+
23
+ {format_instructions}
24
+
25
+ Hadith Text:
26
+ {hadith_text}
27
 
28
+ Please provide a structured analysis of the narrators.
29
+ """
30
+
31
+ ANALYZE_PROMPT = """
32
+ You are an expert Islamic scholar specializing in hadith sciences and narrator criticism (Ilm al-Rijal).
33
+ You have been provided with data from Shamela.ws about this narrator, along with your own knowledge.
34
+
35
+ Original Narrator: {narrator_name}
36
+
37
+ === SHAMELA DATA / CONTEXT ===
38
+ {shamela_context}
39
+
40
+ === PURPOSE ===
41
+ Produce a careful, transparent, and conservative scholarly analysis of the narrator. Never invent facts. When in doubt, be explicit about uncertainty and base conclusions only on clear evidence (Shamela data or explicit mentions in the provided context).
42
+
43
+ === CRITICAL INITIAL CHECK (MANDATORY) ===
44
+ 1. First verify whether the Shamela entry actually corresponds to the Original Narrator provided.
45
+ - If the Shamela entry does NOT match the Original Narrator (e.g., different name, different lineage, or clearly different identity), DO NOT PRODUCE A FULL ANALYSIS. Instead, return only a concise structured statement that the narrator is "Majhul (مجهول)" due to mismatch.
46
+ - However, before concluding "Majhul", scan the provided shamela_context for any explicit textual mentions about the narrator (for example: biographical notes, short mentions inside a hadith text, or direct phrases referencing the same person). If there IS explicit mention or content about the narrator in the provided context, you may infer limited conclusions strictly from that text — clearly label such conclusions as "inferred from provided context" and keep confidence low.
47
+ - Under no circumstances fabricate additional biographical details or scholarly opinions beyond what is present in Shamela or what is well-established classical knowledge. When you use your internal knowledge, cite the general source class (e.g., "classical critics such as Ibn Hajar or Dhahabi") and indicate the level of confidence.
48
+
49
+ === ANALYSIS TASKS (if match is confirmed OR if limited inference is possible from provided context) ===
50
+ 1. Ensure the Shamela narrator matches the Original Narrator; if matched, proceed.
51
+ 2. Evaluate the quality and reliability of scholarly opinions found in Shamela.
52
+ 3. Cross-reference with your internal knowledge of hadith literature; state when you are relying on internal knowledge versus Shamela.
53
+ 4. Synthesize the scholarly consensus and identify consistent praise or criticism.
54
+ 5. Assign a reliability grade and a confidence level, with justification.
55
+ 6. Provide practical recommendations for hadith scholars (use, use with caution, reject), and explain reasoning.
56
+
57
+ === RELIABILITY GRADES (use one) ===
58
+ - Thiqah (ثقة) — Trustworthy
59
+ - Saduq (صدوق) — Generally reliable with reservations
60
+ - Da'if (ضعيف) — Weak
61
+ - Matruk (متروك) — Abandoned / rejected
62
+ - Majhul (مجهول) — Unknown / insufficient reliable information
63
+
64
+ === SPECIAL OUTPUT RULE (important) ===
65
+ - If you determined a mismatch and there is NO explicit information in the provided context to infer from, do NOT output a full profile. Only return the minimal structured result indicating:
66
+ - reliability_grade: "Majhul"
67
+ - confidence_level: "Low"
68
+ - reasoning: one short sentence explaining mismatch (e.g., "Shamela entry does not match the provided narrator; insufficient evidence to analyze.")
69
+ - success: false
70
+ - message: brief note
71
+ - If you infer anything from explicit mentions in the provided context, label those items as "inferred from provided context" and keep confidence level Low or Medium depending on clarity.
72
+
73
+ {format_instructions}
74
+
75
+ Provide a clear, humble, and well-justified analysis combining Shamela data and your scholarly knowledge, and always avoid hallucination.
76
+ """
77
 
78
  class LLMService:
79
  """Service class for LLM operations."""
 
101
 
102
  # Create prompt template
103
  prompt_template = PromptTemplate(
104
+ template=EXTRACT_PROMPT,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  input_variables=["hadith_text"],
106
  partial_variables={"format_instructions": parser.get_format_instructions()},
107
  )
 
121
  success=False,
122
  message=f"Error extracting narrators: {str(e)}"
123
  )
 
124
  async def analyze_narrator(self, narrator_name: str) -> NarratorAnalysisResponse:
125
  """Enhanced narrator analyzer agent that uses Shamela scraper and LLM reasoning."""
126
  try:
 
 
 
127
  # Step 1: Scrape data from Shamela
128
  try:
129
  shamela_data = await ShamelaNarratorExtractor.extract_narrator_by_name(narrator_name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  except Exception as shamela_error:
 
131
  shamela_data = {"error": f"Extraction failed: {str(shamela_error)}"}
132
 
 
133
  # Step 2: Prepare context for LLM analysis
134
  try:
135
  shamela_context = self._format_shamela_data(shamela_data)
 
 
 
136
  except Exception as format_error:
 
137
  shamela_context = f"❌ Failed to format Shamela data: {str(format_error)}"
138
 
 
139
  # Step 3: Create enhanced prompt with Shamela data
140
  try:
141
  parser = PydanticOutputParser(pydantic_object=NarratorAnalysisResponse)
 
 
 
142
  prompt_template = PromptTemplate(
143
+ template=ANALYZE_PROMPT,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  input_variables=["narrator_name", "shamela_context"],
145
  partial_variables={"format_instructions": parser.get_format_instructions()},
146
  )
 
 
 
147
  except Exception as prompt_error:
 
148
  raise prompt_error
149
 
 
150
  # Step 4: Invoke the enhanced analysis
151
  try:
 
152
  chain = prompt_template | self.llm | parser
 
 
 
 
 
 
 
153
  result = await chain.ainvoke({
154
  "narrator_name": narrator_name,
155
  "shamela_context": shamela_context
156
  })
 
 
 
 
 
 
 
 
 
157
  except Exception as chain_error:
 
 
158
  raise chain_error
159
 
 
160
  # Step 5: Enhance the response with metadata
161
  try:
162
  total_scholars = 0
 
166
  total_scholars = metadata.get('total_scholars', 0)
167
  result.message = f"Analysis completed using Shamela data ({total_scholars} scholars) + LLM knowledge"
168
  result.success = True
 
 
 
169
  return result
 
170
  except Exception as metadata_error:
 
 
171
  return result
172
 
173
  except Exception as e:
 
 
 
 
 
 
 
 
 
 
 
174
  return NarratorAnalysisResponse(
175
  narrator_name=narrator_name,
176
  reliability_grade="Majhul",
 
183
  success=False,
184
  message=f"Error analyzing narrator: {str(e)}"
185
  )
186
+
187
  async def analyze_narrator_chain(self, narrator_names: list[str]) -> Dict[str, NarratorAnalysisResponse]:
188
  """Analyze a complete chain of narrators using the enhanced agent approach."""
189
  results = {}
tools/fetch.py CHANGED
@@ -1,25 +1,9 @@
1
- """
2
- Advanced web scraping tool using Playwright for robust HTML fetching.
3
 
4
- This module provides multiple functions for fetching HTML content with different
5
- capabilities and anti-bot measures:
6
-
7
- 1. fetch_html() - Basic HTML fetching with stealth measures
8
- 2. fetch_html_with_js_execution() - Enhanced fetching with JavaScript execution
9
- 3. fetch_html_with_browser() - Fetching with different browser types
10
-
11
- Features:
12
- - Anti-bot detection measures
13
- - Realistic browser simulation
14
- - Multiple retry attempts
15
- - Configurable delays and timeouts
16
- - Support for different browsers (Chromium, Firefox, WebKit)
17
- - Arabic locale support for shamela.ws
18
- """
19
 
20
  import random
21
  import asyncio
22
  from playwright.async_api import async_playwright
 
23
 
24
  async def fetch_html(url, max_retries=5, min_delay=1, max_delay=3, headless=True, timeout=30000):
25
  """
@@ -346,5 +330,4 @@ async def main():
346
 
347
 
348
  if __name__ == "__main__":
349
- asyncio.run(main())
350
-
 
 
 
1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  import random
4
  import asyncio
5
  from playwright.async_api import async_playwright
6
+ import random
7
 
8
  async def fetch_html(url, max_retries=5, min_delay=1, max_delay=3, headless=True, timeout=30000):
9
  """
 
330
 
331
 
332
  if __name__ == "__main__":
333
+ asyncio.run(main())