import requests
import json
import ast
from datetime import datetime
import time
import os
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Configuration Parameters
GITHUB_TOKEN = 'YOUR-GITHUB-TOKEN'

HEADERS = {
    "Authorization": f"token {GITHUB_TOKEN}",
    "Accept": "application/vnd.github.v3+json"
}
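
# Note: authenticating raises GitHub's core API rate limit from 60 to
# 5,000 requests/hour; read-only access to public repositories needs
# no special token scopes.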

# Search for Python repositories created on or after 2024-01-01
SEARCH_QUERY = "language:python created:>=2024-01-01"
OUTPUT_FILE = "negative_raw.jsonl"
STATE_FILE = "crawler_state.json"  # File to save crawling state
FUNCTIONS_PER_REPO = 10  # Maximum number of functions to collect per repository
MAX_RETRIES = 10  # Maximum number of retries for requests
RETRY_BACKOFF_FACTOR = 2  # Multiplier for retry waiting time
NETWORK_ERROR_SLEEP = 30  # Waiting time (seconds) after network errors
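
# Note: with backoff_factor=2, urllib3's Retry sleeps roughly
# backoff_factor * 2**(n - 1) seconds before the n-th automatic retry
# (2s, 4s, 8s, ...), capped at urllib3's default maximum of 120 seconds.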

def create_session():
    """Create a requests session with retry mechanism"""
    session = requests.Session()
    
    # Set up retry strategy
    retry_strategy = Retry(
        total=MAX_RETRIES,
        backoff_factor=RETRY_BACKOFF_FACTOR,
        status_forcelist=[429, 500, 502, 503, 504, 408],
        allowed_methods=["HEAD", "GET", "OPTIONS"]
    )
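    # Note: "allowed_methods" requires urllib3 >= 1.26; earlier releases
    # used the since-removed "method_whitelist" argument instead.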
    
    # Apply retry strategy to HTTP and HTTPS connections
    adapter = HTTPAdapter(max_retries=retry_strategy)
    session.mount("https://", adapter)
    session.mount("http://", adapter)
    
    return session

def save_state(state):
    """Save current crawling state to file"""
    with open(STATE_FILE, 'w') as f:
        json.dump(state, f)

def load_state():
    """Load crawling state from file"""
    if os.path.exists(STATE_FILE):
        with open(STATE_FILE, 'r') as f:
            return json.load(f)
    return {
        'current_page': 1,
        'processed_repos': [],
        'processed_files': [],  # kept as a list so the state is JSON-serializable
        'repo_function_counts': {}
    }
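
# For reference, the saved state is plain JSON; with illustrative values:
#   {"current_page": 3,
#    "processed_repos": ["owner/repo"],
#    "processed_files": ["owner/repo/src/module.py"],
#    "repo_function_counts": {"owner/repo": 10}}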

def parse_functions(source_code):
    """Parse Python code and return list of functions"""
    try:
        tree = ast.parse(source_code)
    except Exception as e:
        print(f"Failed to parse code: {str(e)}")
        return []
    
    functions = []
    source_lines = source_code.split('\n')
    
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            if not hasattr(node, 'end_lineno'):
                continue  # Skip functions where end line number cannot be obtained
            
            start_line = node.lineno - 1
            end_line = node.end_lineno
            function_code = '\n'.join(source_lines[start_line:end_line])
            
            functions.append({
                'name': node.name,
                'code': function_code
            })
    
    return functions
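
# For illustration, a hypothetical input and the shape of the result:
#   parse_functions("def f():\n    return 1")
#   -> [{'name': 'f', 'code': 'def f():\n    return 1'}]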

def fetch_github_api(url, params=None, session=None):
    """Send GitHub API request and handle rate limits and network exceptions"""
    if not session:
        session = create_session()
        
    retries = 0
    
    while retries < MAX_RETRIES:
        try:
            response = session.get(url, headers=HEADERS, params=params, timeout=60, verify=True)
            
            if response.status_code == 403 and 'rate limit' in response.text.lower():
                reset_time = int(response.headers.get('X-RateLimit-Reset', 0))
                sleep_time = max(reset_time - time.time(), 0) + 5
                print(f"Rate limited, waiting {sleep_time:.1f} seconds")
                time.sleep(sleep_time)
                continue
                
            return response
            
        except requests.exceptions.SSLError as e:
            print(f"SSL Error: {str(e)}, attempting retry {retries+1}/{MAX_RETRIES}")
            retries += 1
            time.sleep(NETWORK_ERROR_SLEEP)  # Wait longer after network errors
            
        except requests.exceptions.RequestException as e:
            print(f"Request Exception: {str(e)}, attempting retry {retries+1}/{MAX_RETRIES}")
            retries += 1
            time.sleep(NETWORK_ERROR_SLEEP)  # Wait longer after network errors
    
    print(f"Reached maximum retry attempts, skipping URL: {url}")
    return None
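
# A minimal usage sketch against a real endpoint (GitHub's rate-limit probe):
#   resp = fetch_github_api("https://api.github.com/rate_limit")
#   if resp is not None and resp.status_code == 200:
#       print(resp.json()["resources"]["core"])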

def get_file_creation_date(repo_full_name, file_path, commit_sha, session=None):
    """Get file creation date (first commit date); commit_sha is accepted but currently unused"""
    # Get file commit history
    commits_url = f"https://api.github.com/repos/{repo_full_name}/commits?path={file_path}&per_page=100"
    commits_response = fetch_github_api(commits_url, session=session)
    
    if not commits_response or commits_response.status_code != 200:
        return None
    
    commits = commits_response.json()
    if not commits:
        return None
    
    # With per_page=100, the last commit on the first page is the file's
    # first commit only when the file has at most 100 commits; older files
    # may need pagination for an exact creation date
    first_commit = commits[-1]
    return first_commit['commit']['committer']['date']

def main():
    # Load saved state
    state = load_state()
    page = state['current_page']
    processed_repos = state['processed_repos']
    processed_files = set(state.get('processed_files', []))
    repo_function_counts = state.get('repo_function_counts', {})
    
    session = create_session()
    
    with open(OUTPUT_FILE, 'a') as out_file:  # Open in append mode
        while True:
            # Search for eligible repositories
            search_url = f"https://api.github.com/search/repositories?q={SEARCH_QUERY}&per_page=100&page={page}"
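            # Note: the Search API returns at most 1,000 results (10 pages at
            # per_page=100); broader crawls may need a narrower SEARCH_QUERY,
            # e.g. splitting the created: range into smaller windows.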
            response = fetch_github_api(search_url, session=session)
            
            if not response or response.status_code != 200:
                print(f"Search failed, waiting {NETWORK_ERROR_SLEEP} seconds before retrying")
                time.sleep(NETWORK_ERROR_SLEEP)
                continue
                
            data = response.json()
            if not data.get('items'):
                print(f"No search results on page {page}, exiting")
                break
                
            print(f"Processing page {page}, total {len(data['items'])} repositories")
            
            for repo_item in data['items']:
                repo_full_name = repo_item['full_name']
                
                # Skip already processed repositories
                if repo_full_name in processed_repos:
                    print(f"Skipping processed repository: {repo_full_name}")
                    continue
                
                stars = repo_item['stargazers_count']
                print(f"\nProcessing repository: {repo_full_name} (Stars: {stars})")
                
                # Get or initialize repository function counter
                repo_function_count = repo_function_counts.get(repo_full_name, 0)
                
                # Get repository's default branch
                repo_url = f"https://api.github.com/repos/{repo_full_name}"
                repo_response = fetch_github_api(repo_url, session=session)
                if not repo_response or repo_response.status_code != 200:
                    print(f"Failed to get repository information, skipping {repo_full_name}")
                    continue
                default_branch = repo_response.json().get('default_branch', 'main')
                
                # Get list of Python files in the repository (recursive retrieval)
                contents_url = f"https://api.github.com/repos/{repo_full_name}/contents?ref={default_branch}"
                stack = [contents_url]
                
                try:
                    while stack:
                        # Check if function count limit is reached
                        if repo_function_count >= FUNCTIONS_PER_REPO:
                            print(f"Collected {repo_function_count} functions from {repo_full_name}, limit reached, moving to next repository")
                            break
                        
                        current_url = stack.pop()
                        contents_response = fetch_github_api(current_url, session=session)
                        
                        if not contents_response or contents_response.status_code != 200:
                            print(f"Failed to get directory contents, skipping {current_url}")
                            continue
                        
                        items = contents_response.json()
                        
                        for item in items:
                            # Check if function count limit is reached
                            if repo_function_count >= FUNCTIONS_PER_REPO:
                                print(f"Collected {repo_function_count} functions from {repo_full_name}, limit reached, moving to next repository")
                                break
                                
                            if item['type'] == 'dir':
                                # Recursively process subdirectories
                                stack.append(item['url'])
                            elif item['name'].endswith('.py'):
                                file_path = item['path']
                                file_id = f"{repo_full_name}/{file_path}"
                                
                                if file_id in processed_files:
                                    continue
                                
                                # Get file content
                                download_url = item['download_url']
                                file_response = fetch_github_api(download_url, session=session)
                                
                                if not file_response or file_response.status_code != 200:
                                    print(f"Failed to get file content, skipping {file_id}")
                                    continue
                                
                                # The raw download URL returns the file body as plain
                                # text, so take the blob SHA from the directory listing
                                sha = item.get('sha', '')
                                
                                # Get file creation date
                                creation_date_str = get_file_creation_date(repo_full_name, file_path, sha, session=session)
                                if not creation_date_str:
                                    print(f"Failed to get file creation date, skipping {file_id}")
                                    continue
                                
                                creation_date = datetime.strptime(creation_date_str, "%Y-%m-%dT%H:%M:%SZ")
                                
                                # Strictly check file creation date
                                if creation_date < datetime(2024, 1, 1):
                                    print(f"File {file_id} was created on {creation_date_str}, which is before 2024-01-01, skipping")
                                    continue
                                
                                # Parse functions in the file
                                source_code = file_response.text
                                functions = parse_functions(source_code)
                                
                                if not functions:
                                    print(f"No functions found in file {file_id}")
                                    continue
                                
                                print(f"Parsed {len(functions)} functions from file {file_id}")
                                
                                # Write function records and update counter
                                for func in functions:
                                    if repo_function_count >= FUNCTIONS_PER_REPO:
                                        break
                                        
                                    record = {
                                        'function': func['code'],
                                        'creation_date': creation_date_str,
                                        'repo': repo_full_name,
                                        'file_path': file_path,
                                        'stars': stars,
                                        'label': 0
                                    }
                                    out_file.write(json.dumps(record) + '\n')
                                    repo_function_count += 1
                                    
                                    # Save state after every 10 functions processed
                                    if repo_function_count % 10 == 0:
                                        state = {
                                            'current_page': page,
                                            'processed_repos': processed_repos,
                                            'processed_files': list(processed_files),
                                            'repo_function_counts': repo_function_counts
                                        }
                                        save_state(state)
                                        print(f"State saved: Collected {repo_function_count} functions from repository {repo_full_name}")
                                
                                # Mark file as processed
                                processed_files.add(file_id)
                                
                        # Save state after processing each directory
                        state = {
                            'current_page': page,
                            'processed_repos': processed_repos,
                            'processed_files': list(processed_files),
                            'repo_function_counts': repo_function_counts
                        }
                        save_state(state)
                    
                except KeyboardInterrupt:
                    print("User interruption detected, saving current state...")
                    state = {
                        'current_page': page,
                        'processed_repos': processed_repos,
                        'processed_files': list(processed_files),
                        'repo_function_counts': repo_function_counts
                    }
                    save_state(state)
                    print("State saved, program exiting. Next run will resume from the last interruption point.")
                    return
                    
                except Exception as e:
                    print(f"Unexpected error occurred while processing repository {repo_full_name}: {str(e)}")
                    print("Saving current state and skipping this repository...")
                    state = {
                        'current_page': page,
                        'processed_repos': processed_repos,
                        'processed_files': list(processed_files),
                        'repo_function_counts': repo_function_counts
                    }
                    save_state(state)
                    continue
                
                # Update repository function count
                repo_function_counts[repo_full_name] = repo_function_count
                
                # Mark repository as processed
                processed_repos.append(repo_full_name)
                
                # Save state
                state = {
                    'current_page': page,
                    'processed_repos': processed_repos,
                    'processed_files': list(processed_files),
                    'repo_function_counts': repo_function_counts
                }
                save_state(state)
                
                print(f"Completed processing repository {repo_full_name}, collected {repo_function_count} functions in total")
            
            # Check if there is a next page
            if 'next' in response.links:
                page += 1
                # Save page number state
                state = {
                    'current_page': page,
                    'processed_repos': processed_repos,
                    'processed_files': list(processed_files),
                    'repo_function_counts': repo_function_counts
                }
                save_state(state)
                print(f"State saved: About to process page {page}")
            else:
                print(f"No next page, processing completed")
                break

if __name__ == "__main__":
    main()

'''
Usage Instructions

1. Run the script normally:
    ```bash
    python collect_script.py
    ```

2. To start over from scratch, delete the state file:
    ```bash
    rm crawler_state.json
    ```

3. After network recovery, simply re-run the script to resume from the interruption point.

4. Adjust parameters:
    ```python
    # Increase retry count or extend waiting time
    MAX_RETRIES = 15
    NETWORK_ERROR_SLEEP = 60
    ```
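
5. Inspect the collected records (a minimal sketch, assuming the default
   OUTPUT_FILE):
    ```python
    import json

    with open("negative_raw.jsonl") as f:
        records = [json.loads(line) for line in f]
    print(f"{len(records)} functions from "
          f"{len({r['repo'] for r in records})} repositories")
    ```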
'''